summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorEldar Nugaev <enugaev@griddynamics.com>2011-01-07 06:18:01 +0300
committerEldar Nugaev <enugaev@griddynamics.com>2011-01-07 06:18:01 +0300
commit1a6fba0ada49a464b372e681b83bac59d3a3a79a (patch)
tree5246b72f9bf695fee0f2cf0d5511a5c5e71b0f98
parent579d0e1437efb32ef1a1c50ddbfca9093cfa3d18 (diff)
parent3478e90442ad7a22497b53153ae893df96e55b4e (diff)
downloadnova-1a6fba0ada49a464b372e681b83bac59d3a3a79a.tar.gz
nova-1a6fba0ada49a464b372e681b83bac59d3a3a79a.tar.xz
nova-1a6fba0ada49a464b372e681b83bac59d3a3a79a.zip
merge
-rw-r--r--.bzrignore1
-rw-r--r--.mailmap2
-rw-r--r--Authors2
-rwxr-xr-xbin/nova-api-paste109
-rwxr-xr-xbin/nova-manage50
-rw-r--r--doc/.autogenerated97
-rw-r--r--doc/source/adminguide/distros/ubuntu.10.04.rst13
-rw-r--r--doc/source/adminguide/getting.started.rst6
-rw-r--r--doc/source/adminguide/multi.node.install.rst27
-rw-r--r--doc/source/api/autoindex.rst99
-rw-r--r--doc/source/api/nova..adminclient.rst6
-rw-r--r--doc/source/api/nova..api.cloud.rst6
-rw-r--r--doc/source/api/nova..api.ec2.admin.rst6
-rw-r--r--doc/source/api/nova..api.ec2.apirequest.rst6
-rw-r--r--doc/source/api/nova..api.ec2.cloud.rst6
-rw-r--r--doc/source/api/nova..api.ec2.images.rst6
-rw-r--r--doc/source/api/nova..api.ec2.metadatarequesthandler.rst6
-rw-r--r--doc/source/api/nova..api.openstack.auth.rst6
-rw-r--r--doc/source/api/nova..api.openstack.backup_schedules.rst6
-rw-r--r--doc/source/api/nova..api.openstack.faults.rst6
-rw-r--r--doc/source/api/nova..api.openstack.flavors.rst6
-rw-r--r--doc/source/api/nova..api.openstack.images.rst6
-rw-r--r--doc/source/api/nova..api.openstack.servers.rst6
-rw-r--r--doc/source/api/nova..api.openstack.sharedipgroups.rst6
-rw-r--r--doc/source/api/nova..auth.dbdriver.rst6
-rw-r--r--doc/source/api/nova..auth.fakeldap.rst6
-rw-r--r--doc/source/api/nova..auth.ldapdriver.rst6
-rw-r--r--doc/source/api/nova..auth.manager.rst6
-rw-r--r--doc/source/api/nova..auth.signer.rst6
-rw-r--r--doc/source/api/nova..cloudpipe.pipelib.rst6
-rw-r--r--doc/source/api/nova..compute.disk.rst6
-rw-r--r--doc/source/api/nova..compute.instance_types.rst6
-rw-r--r--doc/source/api/nova..compute.manager.rst6
-rw-r--r--doc/source/api/nova..compute.monitor.rst6
-rw-r--r--doc/source/api/nova..compute.power_state.rst6
-rw-r--r--doc/source/api/nova..context.rst6
-rw-r--r--doc/source/api/nova..crypto.rst6
-rw-r--r--doc/source/api/nova..db.api.rst6
-rw-r--r--doc/source/api/nova..db.sqlalchemy.api.rst6
-rw-r--r--doc/source/api/nova..db.sqlalchemy.models.rst6
-rw-r--r--doc/source/api/nova..db.sqlalchemy.session.rst6
-rw-r--r--doc/source/api/nova..exception.rst6
-rw-r--r--doc/source/api/nova..fakerabbit.rst6
-rw-r--r--doc/source/api/nova..flags.rst6
-rw-r--r--doc/source/api/nova..image.service.rst6
-rw-r--r--doc/source/api/nova..manager.rst6
-rw-r--r--doc/source/api/nova..network.linux_net.rst6
-rw-r--r--doc/source/api/nova..network.manager.rst6
-rw-r--r--doc/source/api/nova..objectstore.bucket.rst6
-rw-r--r--doc/source/api/nova..objectstore.handler.rst6
-rw-r--r--doc/source/api/nova..objectstore.image.rst6
-rw-r--r--doc/source/api/nova..objectstore.stored.rst6
-rw-r--r--doc/source/api/nova..process.rst6
-rw-r--r--doc/source/api/nova..quota.rst6
-rw-r--r--doc/source/api/nova..rpc.rst6
-rw-r--r--doc/source/api/nova..scheduler.chance.rst6
-rw-r--r--doc/source/api/nova..scheduler.driver.rst6
-rw-r--r--doc/source/api/nova..scheduler.manager.rst6
-rw-r--r--doc/source/api/nova..scheduler.simple.rst6
-rw-r--r--doc/source/api/nova..server.rst6
-rw-r--r--doc/source/api/nova..service.rst6
-rw-r--r--doc/source/api/nova..test.rst6
-rw-r--r--doc/source/api/nova..tests.access_unittest.rst6
-rw-r--r--doc/source/api/nova..tests.api.fakes.rst6
-rw-r--r--doc/source/api/nova..tests.api.openstack.fakes.rst6
-rw-r--r--doc/source/api/nova..tests.api.openstack.test_api.rst6
-rw-r--r--doc/source/api/nova..tests.api.openstack.test_auth.rst6
-rw-r--r--doc/source/api/nova..tests.api.openstack.test_faults.rst6
-rw-r--r--doc/source/api/nova..tests.api.openstack.test_flavors.rst6
-rw-r--r--doc/source/api/nova..tests.api.openstack.test_images.rst6
-rw-r--r--doc/source/api/nova..tests.api.openstack.test_ratelimiting.rst6
-rw-r--r--doc/source/api/nova..tests.api.openstack.test_servers.rst6
-rw-r--r--doc/source/api/nova..tests.api.openstack.test_sharedipgroups.rst6
-rw-r--r--doc/source/api/nova..tests.api.test_wsgi.rst6
-rw-r--r--doc/source/api/nova..tests.api_integration.rst6
-rw-r--r--doc/source/api/nova..tests.api_unittest.rst6
-rw-r--r--doc/source/api/nova..tests.auth_unittest.rst6
-rw-r--r--doc/source/api/nova..tests.cloud_unittest.rst6
-rw-r--r--doc/source/api/nova..tests.compute_unittest.rst6
-rw-r--r--doc/source/api/nova..tests.declare_flags.rst6
-rw-r--r--doc/source/api/nova..tests.fake_flags.rst6
-rw-r--r--doc/source/api/nova..tests.flags_unittest.rst6
-rw-r--r--doc/source/api/nova..tests.network_unittest.rst6
-rw-r--r--doc/source/api/nova..tests.objectstore_unittest.rst6
-rw-r--r--doc/source/api/nova..tests.process_unittest.rst6
-rw-r--r--doc/source/api/nova..tests.quota_unittest.rst6
-rw-r--r--doc/source/api/nova..tests.real_flags.rst6
-rw-r--r--doc/source/api/nova..tests.rpc_unittest.rst6
-rw-r--r--doc/source/api/nova..tests.runtime_flags.rst6
-rw-r--r--doc/source/api/nova..tests.scheduler_unittest.rst6
-rw-r--r--doc/source/api/nova..tests.service_unittest.rst6
-rw-r--r--doc/source/api/nova..tests.twistd_unittest.rst6
-rw-r--r--doc/source/api/nova..tests.validator_unittest.rst6
-rw-r--r--doc/source/api/nova..tests.virt_unittest.rst6
-rw-r--r--doc/source/api/nova..tests.volume_unittest.rst6
-rw-r--r--doc/source/api/nova..twistd.rst6
-rw-r--r--doc/source/api/nova..utils.rst6
-rw-r--r--doc/source/api/nova..validate.rst6
-rw-r--r--doc/source/api/nova..virt.connection.rst6
-rw-r--r--doc/source/api/nova..virt.fake.rst6
-rw-r--r--doc/source/api/nova..virt.images.rst6
-rw-r--r--doc/source/api/nova..virt.libvirt_conn.rst6
-rw-r--r--doc/source/api/nova..virt.xenapi.rst6
-rw-r--r--doc/source/api/nova..volume.driver.rst6
-rw-r--r--doc/source/api/nova..volume.manager.rst6
-rw-r--r--doc/source/api/nova..wsgi.rst6
-rw-r--r--doc/source/community.rst3
-rw-r--r--doc/source/devref/addmethod.openstackapi.rst8
-rw-r--r--doc/source/devref/development.environment.rst7
-rw-r--r--doc/source/devref/rabbit.rst4
-rw-r--r--doc/source/nova.concepts.rst14
-rw-r--r--doc/source/quickstart.rst47
-rw-r--r--etc/nova-api.conf63
-rw-r--r--nova/adminclient.py7
-rw-r--r--nova/api/ec2/__init__.py55
-rw-r--r--nova/api/ec2/cloud.py352
-rw-r--r--nova/api/ec2/metadatarequesthandler.py4
-rw-r--r--nova/api/openstack/__init__.py25
-rw-r--r--nova/api/openstack/auth.py9
-rw-r--r--nova/api/openstack/images.py13
-rw-r--r--nova/api/openstack/ratelimiting/__init__.py6
-rw-r--r--nova/api/openstack/servers.py37
-rw-r--r--nova/compute/__init__.py15
-rw-r--r--nova/compute/api.py195
-rw-r--r--nova/compute/manager.py50
-rw-r--r--nova/db/api.py38
-rw-r--r--nova/db/sqlalchemy/__init__.py21
-rw-r--r--nova/db/sqlalchemy/api.py112
-rw-r--r--nova/db/sqlalchemy/models.py50
-rw-r--r--nova/db/sqlalchemy/session.py4
-rw-r--r--nova/flags.py8
-rw-r--r--nova/network/__init__.py15
-rw-r--r--nova/network/api.py87
-rw-r--r--nova/quota.py6
-rw-r--r--nova/scheduler/driver.py5
-rw-r--r--nova/scheduler/simple.py26
-rw-r--r--nova/service.py12
-rw-r--r--nova/tests/api/openstack/fakes.py9
-rw-r--r--nova/tests/api/openstack/test_images.py11
-rw-r--r--nova/tests/api/openstack/test_servers.py37
-rw-r--r--nova/tests/hyperv_unittest.py71
-rw-r--r--nova/tests/test_cloud.py31
-rw-r--r--nova/tests/test_compute.py18
-rw-r--r--nova/tests/test_quota.py10
-rw-r--r--nova/tests/test_scheduler.py131
-rw-r--r--nova/tests/test_service.py41
-rw-r--r--nova/tests/test_xenapi.py122
-rw-r--r--nova/tests/xenapi/stubs.py66
-rw-r--r--nova/virt/connection.py3
-rw-r--r--nova/virt/fake.py17
-rw-r--r--nova/virt/hyperv.py459
-rw-r--r--nova/virt/images.py45
-rw-r--r--nova/virt/libvirt_conn.py13
-rw-r--r--nova/virt/xenapi/fake.py64
-rw-r--r--nova/virt/xenapi/vm_utils.py144
-rw-r--r--nova/virt/xenapi/vmops.py288
-rw-r--r--nova/virt/xenapi/volume_utils.py12
-rw-r--r--nova/virt/xenapi_conn.py44
-rw-r--r--nova/volume/__init__.py14
-rw-r--r--nova/volume/api.py101
-rw-r--r--plugins/xenserver/xenapi/etc/xapi.d/plugins/glance132
-rwxr-xr-xplugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py28
-rwxr-xr-xplugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py180
-rw-r--r--run_tests.py68
-rwxr-xr-xrun_tests.sh11
-rw-r--r--tools/pip-requires4
166 files changed, 2949 insertions, 1505 deletions
diff --git a/.bzrignore b/.bzrignore
index 82db46fa2..d81a7d829 100644
--- a/.bzrignore
+++ b/.bzrignore
@@ -6,6 +6,7 @@ keys
networks
nova.sqlite
CA/cacert.pem
+CA/crl.pem
CA/index.txt*
CA/openssl.cnf
CA/serial*
diff --git a/.mailmap b/.mailmap
index 9e7fb1ec0..2af2d7cd9 100644
--- a/.mailmap
+++ b/.mailmap
@@ -27,5 +27,7 @@
<vishvananda@gmail.com> <root@ubuntu>
<sleepsonthefloor@gmail.com> <root@tonbuntu>
<rlane@wikimedia.org> <laner@controller>
+<rconradharris@gmail.com> <rick.harris@rackspace.com>
<corywright@gmail.com> <cory.wright@rackspace.com>
<ant@openstack.org> <amesserl@rackspace.com>
+<chiradeep@cloud.com> <chiradeep@chiradeep-lt2>
diff --git a/Authors b/Authors
index 0fbdfde01..e67363308 100644
--- a/Authors
+++ b/Authors
@@ -3,6 +3,7 @@ Anne Gentle <anne@openstack.org>
Anthony Young <sleepsonthefloor@gmail.com>
Antony Messerli <ant@openstack.org>
Armando Migliaccio <Armando.Migliaccio@eu.citrix.com>
+Chiradeep Vittal <chiradeep@cloud.com>
Chris Behrens <cbehrens@codestud.com>
Chmouel Boudjnah <chmouel@chmouel.com>
Cory Wright <corywright@gmail.com>
@@ -28,6 +29,7 @@ Michael Gundlach <michael.gundlach@rackspace.com>
Monty Taylor <mordred@inaugust.com>
Paul Voccio <paul@openstack.org>
Rick Clark <rick@openstack.org>
+Rick Harris <rconradharris@gmail.com>
Ryan Lane <rlane@wikimedia.org>
Ryan Lucio <rlucio@internap.com>
Salvatore Orlando <salvatore.orlando@eu.citrix.com>
diff --git a/bin/nova-api-paste b/bin/nova-api-paste
new file mode 100755
index 000000000..6ee833a18
--- /dev/null
+++ b/bin/nova-api-paste
@@ -0,0 +1,109 @@
+#!/usr/bin/env python
+# pylint: disable-msg=C0103
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Starter script for Nova API."""
+
+import gettext
+import logging
+import os
+import sys
+
+from paste import deploy
+
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+ os.pardir,
+ os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+ sys.path.insert(0, possible_topdir)
+
+gettext.install('nova', unicode=1)
+
+from nova import flags
+from nova import wsgi
+
+LOG = logging.getLogger('nova.api')
+LOG.setLevel(logging.DEBUG)
+LOG.addHandler(logging.StreamHandler())
+
+FLAGS = flags.FLAGS
+
+API_ENDPOINTS = ['ec2', 'openstack']
+
+
+def load_configuration(paste_config):
+ """Load the paste configuration from the config file and return it."""
+ config = None
+ # Try each known name to get the global DEFAULTS, which will give ports
+ for name in API_ENDPOINTS:
+ try:
+ config = deploy.appconfig("config:%s" % paste_config, name=name)
+ except LookupError:
+ pass
+ if config:
+ verbose = config.get('verbose', None)
+ if verbose:
+ FLAGS.verbose = int(verbose) == 1
+ if FLAGS.verbose:
+ logging.getLogger().setLevel(logging.DEBUG)
+ return config
+    LOG.debug(_("Paste config at %s has no section for known apis"),
+              paste_config)
+    print _("Paste config at %s has no section for any known apis") % \
+        paste_config
+ os.exit(1)
+
+
+def launch_api(paste_config_file, section, server, port, host):
+ """Launch an api server from the specified port and IP."""
+ LOG.debug(_("Launching %s api on %s:%s"), section, host, port)
+ app = deploy.loadapp('config:%s' % paste_config_file, name=section)
+ server.start(app, int(port), host)
+
+
+def run_app(paste_config_file):
+ LOG.debug(_("Using paste.deploy config at: %s"), configfile)
+ config = load_configuration(paste_config_file)
+ LOG.debug(_("Configuration: %r"), config)
+ server = wsgi.Server()
+ ip = config.get('host', '0.0.0.0')
+ for api in API_ENDPOINTS:
+ port = config.get("%s_port" % api, None)
+ if not port:
+ continue
+ host = config.get("%s_host" % api, ip)
+ launch_api(configfile, api, server, port, host)
+ LOG.debug(_("All api servers launched, now waiting"))
+ server.wait()
+
+
+if __name__ == '__main__':
+ FLAGS(sys.argv)
+ configfiles = ['/etc/nova/nova-api.conf']
+ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+ configfiles.insert(0,
+ os.path.join(possible_topdir, 'etc', 'nova-api.conf'))
+ for configfile in configfiles:
+ if os.path.exists(configfile):
+ run_app(configfile)
+ break
+ else:
+ LOG.debug(_("Skipping missing configuration: %s"), configfile)
diff --git a/bin/nova-manage b/bin/nova-manage
index 599e02a7e..3416c1a52 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -53,6 +53,7 @@
CLI interface for nova management.
"""
+import datetime
import gettext
import logging
import os
@@ -452,6 +453,52 @@ class NetworkCommands(object):
int(network_size), int(vlan_start),
int(vpn_start))
+
+class ServiceCommands(object):
+ """Enable and disable running services"""
+
+ def list(self, host=None, service=None):
+ """Show a list of all running services. Filter by host & service name.
+ args: [host] [service]"""
+ ctxt = context.get_admin_context()
+ now = datetime.datetime.utcnow()
+ services = db.service_get_all(ctxt)
+ if host:
+ services = [s for s in services if s['host'] == host]
+ if service:
+ services = [s for s in services if s['binary'] == service]
+ for svc in services:
+ delta = now - (svc['updated_at'] or svc['created_at'])
+ alive = (delta.seconds <= 15)
+ art = (alive and ":-)") or "XXX"
+ active = 'enabled'
+ if svc['disabled']:
+ active = 'disabled'
+ print "%-10s %-10s %-8s %s %s" % (svc['host'], svc['binary'],
+ active, art,
+ svc['updated_at'])
+
+ def enable(self, host, service):
+ """Enable scheduling for a service
+ args: host service"""
+ ctxt = context.get_admin_context()
+ svc = db.service_get_by_args(ctxt, host, service)
+ if not svc:
+ print "Unable to find service"
+ return
+ db.service_update(ctxt, svc['id'], {'disabled': False})
+
+ def disable(self, host, service):
+ """Disable scheduling for a service
+ args: host service"""
+ ctxt = context.get_admin_context()
+ svc = db.service_get_by_args(ctxt, host, service)
+ if not svc:
+ print "Unable to find service"
+ return
+ db.service_update(ctxt, svc['id'], {'disabled': True})
+
+
CATEGORIES = [
('user', UserCommands),
('project', ProjectCommands),
@@ -459,7 +506,8 @@ CATEGORIES = [
('shell', ShellCommands),
('vpn', VpnCommands),
('floating', FloatingIpCommands),
- ('network', NetworkCommands)]
+ ('network', NetworkCommands),
+ ('service', ServiceCommands)]
def lazy_match(name, key_value_tuples):
diff --git a/doc/.autogenerated b/doc/.autogenerated
deleted file mode 100644
index 3a70f8780..000000000
--- a/doc/.autogenerated
+++ /dev/null
@@ -1,97 +0,0 @@
-source/api/nova..adminclient.rst
-source/api/nova..api.cloud.rst
-source/api/nova..api.ec2.admin.rst
-source/api/nova..api.ec2.apirequest.rst
-source/api/nova..api.ec2.cloud.rst
-source/api/nova..api.ec2.images.rst
-source/api/nova..api.ec2.metadatarequesthandler.rst
-source/api/nova..api.openstack.auth.rst
-source/api/nova..api.openstack.backup_schedules.rst
-source/api/nova..api.openstack.faults.rst
-source/api/nova..api.openstack.flavors.rst
-source/api/nova..api.openstack.images.rst
-source/api/nova..api.openstack.servers.rst
-source/api/nova..api.openstack.sharedipgroups.rst
-source/api/nova..auth.dbdriver.rst
-source/api/nova..auth.fakeldap.rst
-source/api/nova..auth.ldapdriver.rst
-source/api/nova..auth.manager.rst
-source/api/nova..auth.signer.rst
-source/api/nova..cloudpipe.pipelib.rst
-source/api/nova..compute.disk.rst
-source/api/nova..compute.instance_types.rst
-source/api/nova..compute.manager.rst
-source/api/nova..compute.monitor.rst
-source/api/nova..compute.power_state.rst
-source/api/nova..context.rst
-source/api/nova..crypto.rst
-source/api/nova..db.api.rst
-source/api/nova..db.sqlalchemy.api.rst
-source/api/nova..db.sqlalchemy.models.rst
-source/api/nova..db.sqlalchemy.session.rst
-source/api/nova..exception.rst
-source/api/nova..fakerabbit.rst
-source/api/nova..flags.rst
-source/api/nova..image.service.rst
-source/api/nova..manager.rst
-source/api/nova..network.linux_net.rst
-source/api/nova..network.manager.rst
-source/api/nova..objectstore.bucket.rst
-source/api/nova..objectstore.handler.rst
-source/api/nova..objectstore.image.rst
-source/api/nova..objectstore.stored.rst
-source/api/nova..process.rst
-source/api/nova..quota.rst
-source/api/nova..rpc.rst
-source/api/nova..scheduler.chance.rst
-source/api/nova..scheduler.driver.rst
-source/api/nova..scheduler.manager.rst
-source/api/nova..scheduler.simple.rst
-source/api/nova..server.rst
-source/api/nova..service.rst
-source/api/nova..test.rst
-source/api/nova..tests.access_unittest.rst
-source/api/nova..tests.api.fakes.rst
-source/api/nova..tests.api.openstack.fakes.rst
-source/api/nova..tests.api.openstack.test_api.rst
-source/api/nova..tests.api.openstack.test_auth.rst
-source/api/nova..tests.api.openstack.test_faults.rst
-source/api/nova..tests.api.openstack.test_flavors.rst
-source/api/nova..tests.api.openstack.test_images.rst
-source/api/nova..tests.api.openstack.test_ratelimiting.rst
-source/api/nova..tests.api.openstack.test_servers.rst
-source/api/nova..tests.api.openstack.test_sharedipgroups.rst
-source/api/nova..tests.api.test_wsgi.rst
-source/api/nova..tests.api_integration.rst
-source/api/nova..tests.api_unittest.rst
-source/api/nova..tests.auth_unittest.rst
-source/api/nova..tests.cloud_unittest.rst
-source/api/nova..tests.compute_unittest.rst
-source/api/nova..tests.declare_flags.rst
-source/api/nova..tests.fake_flags.rst
-source/api/nova..tests.flags_unittest.rst
-source/api/nova..tests.network_unittest.rst
-source/api/nova..tests.objectstore_unittest.rst
-source/api/nova..tests.process_unittest.rst
-source/api/nova..tests.quota_unittest.rst
-source/api/nova..tests.real_flags.rst
-source/api/nova..tests.rpc_unittest.rst
-source/api/nova..tests.runtime_flags.rst
-source/api/nova..tests.scheduler_unittest.rst
-source/api/nova..tests.service_unittest.rst
-source/api/nova..tests.twistd_unittest.rst
-source/api/nova..tests.validator_unittest.rst
-source/api/nova..tests.virt_unittest.rst
-source/api/nova..tests.volume_unittest.rst
-source/api/nova..twistd.rst
-source/api/nova..utils.rst
-source/api/nova..validate.rst
-source/api/nova..virt.connection.rst
-source/api/nova..virt.fake.rst
-source/api/nova..virt.images.rst
-source/api/nova..virt.libvirt_conn.rst
-source/api/nova..virt.xenapi.rst
-source/api/nova..volume.driver.rst
-source/api/nova..volume.manager.rst
-source/api/nova..wsgi.rst
-source/api/autoindex.rst
diff --git a/doc/source/adminguide/distros/ubuntu.10.04.rst b/doc/source/adminguide/distros/ubuntu.10.04.rst
index ce368fab8..9d856458a 100644
--- a/doc/source/adminguide/distros/ubuntu.10.04.rst
+++ b/doc/source/adminguide/distros/ubuntu.10.04.rst
@@ -16,13 +16,13 @@ Here's a script you can use to install (and then run) Nova on Ubuntu or Debian (
Step 2: Install dependencies
----------------------------
-Nova requires rabbitmq for messaging and optionally you can use redis for storing state, so install these first.
+Nova requires rabbitmq for messaging, so install that first.
*Note:* You must have sudo installed to run these commands as shown here.
::
- sudo apt-get install rabbitmq-server redis-server
+ sudo apt-get install rabbitmq-server
You'll see messages starting with "Reading package lists... Done" and you must confirm by typing Y that you want to continue.
@@ -31,11 +31,10 @@ If you're running on Ubuntu 10.04, you'll need to install Twisted and python-gfl
::
- sudo apt-get install python-twisted
-
- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 95C71FE2
- sudo sh -c 'echo "deb http://ppa.launchpad.net/openstack/openstack-ppa/ubuntu lucid main" > /etc/apt/sources.list.d/openstackppa.list'
- sudo apt-get update && sudo apt-get install python-gflags
+  sudo apt-get install python-software-properties
+ sudo add-apt-repository ppa:nova-core/trunk
+ sudo apt-get update
+ sudo apt-get install python-twisted python-gflags
Once you've done this, continue at Step 3 here: :doc:`../single.node.install`
diff --git a/doc/source/adminguide/getting.started.rst b/doc/source/adminguide/getting.started.rst
index 3e8073606..0cadeb45e 100644
--- a/doc/source/adminguide/getting.started.rst
+++ b/doc/source/adminguide/getting.started.rst
@@ -76,11 +76,11 @@ External unix tools that are required:
* aoetools and vblade-persist (if you use aoe-volumes)
Nova uses cutting-edge versions of many packages. There are ubuntu packages in
-the nova-core ppa. You can use add this ppa to your sources list on an ubuntu
-machine with the following commands::
+the nova-core trunk ppa. You can add this ppa to your sources list on an
+ubuntu machine with the following commands::
sudo apt-get install -y python-software-properties
- sudo add-apt-repository ppa:nova-core/ppa
+ sudo add-apt-repository ppa:nova-core/trunk
Recommended
-----------
diff --git a/doc/source/adminguide/multi.node.install.rst b/doc/source/adminguide/multi.node.install.rst
index fcb76c5e5..a652e44b7 100644
--- a/doc/source/adminguide/multi.node.install.rst
+++ b/doc/source/adminguide/multi.node.install.rst
@@ -46,12 +46,12 @@ Assumptions
Step 1 Use apt-get to get the latest code
-----------------------------------------
-1. Setup Nova PPA with https://launchpad.net/~nova-core/+archive/ppa.
+1. Setup Nova PPA with https://launchpad.net/~nova-core/+archive/trunk.
::
sudo apt-get install python-software-properties
- sudo add-apt-repository ppa:nova-core/ppa
+ sudo add-apt-repository ppa:nova-core/trunk
2. Run update.
@@ -77,21 +77,20 @@ Nova development has consolidated all .conf files to nova.conf as of November 20
#. These need to be defined in the nova.conf configuration file::
- --sql_connection=mysql://root:nova@$CC_ADDR/nova # location of nova sql db
- --s3_host=$CC_ADDR # This is where nova is hosting the objectstore service, which
- # will contain the VM images and buckets
- --rabbit_host=$CC_ADDR # This is where the rabbit AMQP messaging service is hosted
- --cc_host=$CC_ADDR # This is where the the nova-api service lives
- --verbose # Optional but very helpful during initial setup
- --ec2_url=http://$CC_ADDR:8773/services/Cloud
- --network_manager=nova.network.manager.FlatManager # simple, no-vlan networking type
-
- --fixed_range=<network/prefix> # ip network to use for VM guests, ex 192.168.2.64/26
- --network_size=<# of addrs> # number of ip addrs to use for VM guests, ex 64
+ --sql_connection=mysql://root:nova@$CC_ADDR/nova # location of nova sql db
+ --s3_host=$CC_ADDR # This is where Nova is hosting the objectstore service, which
+ # will contain the VM images and buckets
+ --rabbit_host=$CC_ADDR # This is where the rabbit AMQP messaging service is hosted
+      --cc_host=$CC_ADDR                                # This is where the nova-api service lives
+ --verbose # Optional but very helpful during initial setup
+ --ec2_url=http://$CC_ADDR:8773/services/Cloud
+ --network_manager=nova.network.manager.FlatManager # simple, no-vlan networking type
+ --fixed_range=<network/prefix> # ip network to use for VM guests, ex 192.168.2.64/26
+ --network_size=<# of addrs> # number of ip addrs to use for VM guests, ex 64
#. Create a nova group::
- sudo addgroup nova
+ sudo addgroup nova
The Nova config file should have its owner set to root:nova, and mode set to 0640, since they contain your MySQL server's root password.
diff --git a/doc/source/api/autoindex.rst b/doc/source/api/autoindex.rst
deleted file mode 100644
index 6265b082b..000000000
--- a/doc/source/api/autoindex.rst
+++ /dev/null
@@ -1,99 +0,0 @@
-.. toctree::
- :maxdepth: 1
-
- nova..adminclient.rst
- nova..api.cloud.rst
- nova..api.ec2.admin.rst
- nova..api.ec2.apirequest.rst
- nova..api.ec2.cloud.rst
- nova..api.ec2.images.rst
- nova..api.ec2.metadatarequesthandler.rst
- nova..api.openstack.auth.rst
- nova..api.openstack.backup_schedules.rst
- nova..api.openstack.faults.rst
- nova..api.openstack.flavors.rst
- nova..api.openstack.images.rst
- nova..api.openstack.servers.rst
- nova..api.openstack.sharedipgroups.rst
- nova..auth.dbdriver.rst
- nova..auth.fakeldap.rst
- nova..auth.ldapdriver.rst
- nova..auth.manager.rst
- nova..auth.signer.rst
- nova..cloudpipe.pipelib.rst
- nova..compute.disk.rst
- nova..compute.instance_types.rst
- nova..compute.manager.rst
- nova..compute.monitor.rst
- nova..compute.power_state.rst
- nova..context.rst
- nova..crypto.rst
- nova..db.api.rst
- nova..db.sqlalchemy.api.rst
- nova..db.sqlalchemy.models.rst
- nova..db.sqlalchemy.session.rst
- nova..exception.rst
- nova..fakerabbit.rst
- nova..flags.rst
- nova..image.service.rst
- nova..manager.rst
- nova..network.linux_net.rst
- nova..network.manager.rst
- nova..objectstore.bucket.rst
- nova..objectstore.handler.rst
- nova..objectstore.image.rst
- nova..objectstore.stored.rst
- nova..process.rst
- nova..quota.rst
- nova..rpc.rst
- nova..scheduler.chance.rst
- nova..scheduler.driver.rst
- nova..scheduler.manager.rst
- nova..scheduler.simple.rst
- nova..server.rst
- nova..service.rst
- nova..test.rst
- nova..tests.access_unittest.rst
- nova..tests.api.fakes.rst
- nova..tests.api.openstack.fakes.rst
- nova..tests.api.openstack.test_api.rst
- nova..tests.api.openstack.test_auth.rst
- nova..tests.api.openstack.test_faults.rst
- nova..tests.api.openstack.test_flavors.rst
- nova..tests.api.openstack.test_images.rst
- nova..tests.api.openstack.test_ratelimiting.rst
- nova..tests.api.openstack.test_servers.rst
- nova..tests.api.openstack.test_sharedipgroups.rst
- nova..tests.api.test_wsgi.rst
- nova..tests.api_integration.rst
- nova..tests.api_unittest.rst
- nova..tests.auth_unittest.rst
- nova..tests.cloud_unittest.rst
- nova..tests.compute_unittest.rst
- nova..tests.declare_flags.rst
- nova..tests.fake_flags.rst
- nova..tests.flags_unittest.rst
- nova..tests.network_unittest.rst
- nova..tests.objectstore_unittest.rst
- nova..tests.process_unittest.rst
- nova..tests.quota_unittest.rst
- nova..tests.real_flags.rst
- nova..tests.rpc_unittest.rst
- nova..tests.runtime_flags.rst
- nova..tests.scheduler_unittest.rst
- nova..tests.service_unittest.rst
- nova..tests.twistd_unittest.rst
- nova..tests.validator_unittest.rst
- nova..tests.virt_unittest.rst
- nova..tests.volume_unittest.rst
- nova..twistd.rst
- nova..utils.rst
- nova..validate.rst
- nova..virt.connection.rst
- nova..virt.fake.rst
- nova..virt.images.rst
- nova..virt.libvirt_conn.rst
- nova..virt.xenapi.rst
- nova..volume.driver.rst
- nova..volume.manager.rst
- nova..wsgi.rst
diff --git a/doc/source/api/nova..adminclient.rst b/doc/source/api/nova..adminclient.rst
deleted file mode 100644
index 35fa839e1..000000000
--- a/doc/source/api/nova..adminclient.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..adminclient` Module
-==============================================================================
-.. automodule:: nova..adminclient
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.cloud.rst b/doc/source/api/nova..api.cloud.rst
deleted file mode 100644
index 413840185..000000000
--- a/doc/source/api/nova..api.cloud.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.cloud` Module
-==============================================================================
-.. automodule:: nova..api.cloud
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.ec2.admin.rst b/doc/source/api/nova..api.ec2.admin.rst
deleted file mode 100644
index 4e9ab308b..000000000
--- a/doc/source/api/nova..api.ec2.admin.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.ec2.admin` Module
-==============================================================================
-.. automodule:: nova..api.ec2.admin
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.ec2.apirequest.rst b/doc/source/api/nova..api.ec2.apirequest.rst
deleted file mode 100644
index c17a2ff3a..000000000
--- a/doc/source/api/nova..api.ec2.apirequest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.ec2.apirequest` Module
-==============================================================================
-.. automodule:: nova..api.ec2.apirequest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.ec2.cloud.rst b/doc/source/api/nova..api.ec2.cloud.rst
deleted file mode 100644
index f6145c217..000000000
--- a/doc/source/api/nova..api.ec2.cloud.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.ec2.cloud` Module
-==============================================================================
-.. automodule:: nova..api.ec2.cloud
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.ec2.images.rst b/doc/source/api/nova..api.ec2.images.rst
deleted file mode 100644
index 012d800e4..000000000
--- a/doc/source/api/nova..api.ec2.images.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.ec2.images` Module
-==============================================================================
-.. automodule:: nova..api.ec2.images
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.ec2.metadatarequesthandler.rst b/doc/source/api/nova..api.ec2.metadatarequesthandler.rst
deleted file mode 100644
index 75f5169e5..000000000
--- a/doc/source/api/nova..api.ec2.metadatarequesthandler.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.ec2.metadatarequesthandler` Module
-==============================================================================
-.. automodule:: nova..api.ec2.metadatarequesthandler
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.auth.rst b/doc/source/api/nova..api.openstack.auth.rst
deleted file mode 100644
index 8c3f8f2da..000000000
--- a/doc/source/api/nova..api.openstack.auth.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.openstack.auth` Module
-==============================================================================
-.. automodule:: nova..api.openstack.auth
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.backup_schedules.rst b/doc/source/api/nova..api.openstack.backup_schedules.rst
deleted file mode 100644
index 6b406f12d..000000000
--- a/doc/source/api/nova..api.openstack.backup_schedules.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.openstack.backup_schedules` Module
-==============================================================================
-.. automodule:: nova..api.openstack.backup_schedules
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.faults.rst b/doc/source/api/nova..api.openstack.faults.rst
deleted file mode 100644
index 7b25561f7..000000000
--- a/doc/source/api/nova..api.openstack.faults.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.openstack.faults` Module
-==============================================================================
-.. automodule:: nova..api.openstack.faults
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.flavors.rst b/doc/source/api/nova..api.openstack.flavors.rst
deleted file mode 100644
index 0deb724de..000000000
--- a/doc/source/api/nova..api.openstack.flavors.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.openstack.flavors` Module
-==============================================================================
-.. automodule:: nova..api.openstack.flavors
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.images.rst b/doc/source/api/nova..api.openstack.images.rst
deleted file mode 100644
index 82bd5f1e8..000000000
--- a/doc/source/api/nova..api.openstack.images.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.openstack.images` Module
-==============================================================================
-.. automodule:: nova..api.openstack.images
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.servers.rst b/doc/source/api/nova..api.openstack.servers.rst
deleted file mode 100644
index c36856ea2..000000000
--- a/doc/source/api/nova..api.openstack.servers.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.openstack.servers` Module
-==============================================================================
-.. automodule:: nova..api.openstack.servers
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.sharedipgroups.rst b/doc/source/api/nova..api.openstack.sharedipgroups.rst
deleted file mode 100644
index 07632acc8..000000000
--- a/doc/source/api/nova..api.openstack.sharedipgroups.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.openstack.sharedipgroups` Module
-==============================================================================
-.. automodule:: nova..api.openstack.sharedipgroups
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..auth.dbdriver.rst b/doc/source/api/nova..auth.dbdriver.rst
deleted file mode 100644
index 7de68b6e0..000000000
--- a/doc/source/api/nova..auth.dbdriver.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..auth.dbdriver` Module
-==============================================================================
-.. automodule:: nova..auth.dbdriver
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..auth.fakeldap.rst b/doc/source/api/nova..auth.fakeldap.rst
deleted file mode 100644
index ca8a3ad4d..000000000
--- a/doc/source/api/nova..auth.fakeldap.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..auth.fakeldap` Module
-==============================================================================
-.. automodule:: nova..auth.fakeldap
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..auth.ldapdriver.rst b/doc/source/api/nova..auth.ldapdriver.rst
deleted file mode 100644
index c44463522..000000000
--- a/doc/source/api/nova..auth.ldapdriver.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..auth.ldapdriver` Module
-==============================================================================
-.. automodule:: nova..auth.ldapdriver
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..auth.manager.rst b/doc/source/api/nova..auth.manager.rst
deleted file mode 100644
index bc5ce2ec3..000000000
--- a/doc/source/api/nova..auth.manager.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..auth.manager` Module
-==============================================================================
-.. automodule:: nova..auth.manager
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..auth.signer.rst b/doc/source/api/nova..auth.signer.rst
deleted file mode 100644
index aad824ead..000000000
--- a/doc/source/api/nova..auth.signer.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..auth.signer` Module
-==============================================================================
-.. automodule:: nova..auth.signer
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..cloudpipe.pipelib.rst b/doc/source/api/nova..cloudpipe.pipelib.rst
deleted file mode 100644
index 054aaf484..000000000
--- a/doc/source/api/nova..cloudpipe.pipelib.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..cloudpipe.pipelib` Module
-==============================================================================
-.. automodule:: nova..cloudpipe.pipelib
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..compute.disk.rst b/doc/source/api/nova..compute.disk.rst
deleted file mode 100644
index 6410af6f3..000000000
--- a/doc/source/api/nova..compute.disk.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..compute.disk` Module
-==============================================================================
-.. automodule:: nova..compute.disk
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..compute.instance_types.rst b/doc/source/api/nova..compute.instance_types.rst
deleted file mode 100644
index d206ff3a4..000000000
--- a/doc/source/api/nova..compute.instance_types.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..compute.instance_types` Module
-==============================================================================
-.. automodule:: nova..compute.instance_types
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..compute.manager.rst b/doc/source/api/nova..compute.manager.rst
deleted file mode 100644
index 33a337c39..000000000
--- a/doc/source/api/nova..compute.manager.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..compute.manager` Module
-==============================================================================
-.. automodule:: nova..compute.manager
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..compute.monitor.rst b/doc/source/api/nova..compute.monitor.rst
deleted file mode 100644
index a91169ecd..000000000
--- a/doc/source/api/nova..compute.monitor.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..compute.monitor` Module
-==============================================================================
-.. automodule:: nova..compute.monitor
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..compute.power_state.rst b/doc/source/api/nova..compute.power_state.rst
deleted file mode 100644
index 41b1080e5..000000000
--- a/doc/source/api/nova..compute.power_state.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..compute.power_state` Module
-==============================================================================
-.. automodule:: nova..compute.power_state
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..context.rst b/doc/source/api/nova..context.rst
deleted file mode 100644
index 9de1adb24..000000000
--- a/doc/source/api/nova..context.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..context` Module
-==============================================================================
-.. automodule:: nova..context
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..crypto.rst b/doc/source/api/nova..crypto.rst
deleted file mode 100644
index af9f63634..000000000
--- a/doc/source/api/nova..crypto.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..crypto` Module
-==============================================================================
-.. automodule:: nova..crypto
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..db.api.rst b/doc/source/api/nova..db.api.rst
deleted file mode 100644
index 6d998fbb2..000000000
--- a/doc/source/api/nova..db.api.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..db.api` Module
-==============================================================================
-.. automodule:: nova..db.api
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.api.rst b/doc/source/api/nova..db.sqlalchemy.api.rst
deleted file mode 100644
index 76d0c1bd3..000000000
--- a/doc/source/api/nova..db.sqlalchemy.api.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..db.sqlalchemy.api` Module
-==============================================================================
-.. automodule:: nova..db.sqlalchemy.api
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.models.rst b/doc/source/api/nova..db.sqlalchemy.models.rst
deleted file mode 100644
index 9c795d7f5..000000000
--- a/doc/source/api/nova..db.sqlalchemy.models.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..db.sqlalchemy.models` Module
-==============================================================================
-.. automodule:: nova..db.sqlalchemy.models
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.session.rst b/doc/source/api/nova..db.sqlalchemy.session.rst
deleted file mode 100644
index cbfd6416a..000000000
--- a/doc/source/api/nova..db.sqlalchemy.session.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..db.sqlalchemy.session` Module
-==============================================================================
-.. automodule:: nova..db.sqlalchemy.session
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..exception.rst b/doc/source/api/nova..exception.rst
deleted file mode 100644
index 97ac6b752..000000000
--- a/doc/source/api/nova..exception.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..exception` Module
-==============================================================================
-.. automodule:: nova..exception
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..fakerabbit.rst b/doc/source/api/nova..fakerabbit.rst
deleted file mode 100644
index f1e27c266..000000000
--- a/doc/source/api/nova..fakerabbit.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..fakerabbit` Module
-==============================================================================
-.. automodule:: nova..fakerabbit
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..flags.rst b/doc/source/api/nova..flags.rst
deleted file mode 100644
index 08165be44..000000000
--- a/doc/source/api/nova..flags.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..flags` Module
-==============================================================================
-.. automodule:: nova..flags
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..image.service.rst b/doc/source/api/nova..image.service.rst
deleted file mode 100644
index 78ef1ecca..000000000
--- a/doc/source/api/nova..image.service.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..image.service` Module
-==============================================================================
-.. automodule:: nova..image.service
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..manager.rst b/doc/source/api/nova..manager.rst
deleted file mode 100644
index 576902491..000000000
--- a/doc/source/api/nova..manager.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..manager` Module
-==============================================================================
-.. automodule:: nova..manager
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..network.linux_net.rst b/doc/source/api/nova..network.linux_net.rst
deleted file mode 100644
index 7af78d5ad..000000000
--- a/doc/source/api/nova..network.linux_net.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..network.linux_net` Module
-==============================================================================
-.. automodule:: nova..network.linux_net
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..network.manager.rst b/doc/source/api/nova..network.manager.rst
deleted file mode 100644
index 0ea705533..000000000
--- a/doc/source/api/nova..network.manager.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..network.manager` Module
-==============================================================================
-.. automodule:: nova..network.manager
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..objectstore.bucket.rst b/doc/source/api/nova..objectstore.bucket.rst
deleted file mode 100644
index 3bfdf639c..000000000
--- a/doc/source/api/nova..objectstore.bucket.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..objectstore.bucket` Module
-==============================================================================
-.. automodule:: nova..objectstore.bucket
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..objectstore.handler.rst b/doc/source/api/nova..objectstore.handler.rst
deleted file mode 100644
index 0eb8c4efb..000000000
--- a/doc/source/api/nova..objectstore.handler.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..objectstore.handler` Module
-==============================================================================
-.. automodule:: nova..objectstore.handler
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..objectstore.image.rst b/doc/source/api/nova..objectstore.image.rst
deleted file mode 100644
index fa4c971f1..000000000
--- a/doc/source/api/nova..objectstore.image.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..objectstore.image` Module
-==============================================================================
-.. automodule:: nova..objectstore.image
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..objectstore.stored.rst b/doc/source/api/nova..objectstore.stored.rst
deleted file mode 100644
index 2b1d997a3..000000000
--- a/doc/source/api/nova..objectstore.stored.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..objectstore.stored` Module
-==============================================================================
-.. automodule:: nova..objectstore.stored
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..process.rst b/doc/source/api/nova..process.rst
deleted file mode 100644
index 91eff8379..000000000
--- a/doc/source/api/nova..process.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..process` Module
-==============================================================================
-.. automodule:: nova..process
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..quota.rst b/doc/source/api/nova..quota.rst
deleted file mode 100644
index 4140d95d6..000000000
--- a/doc/source/api/nova..quota.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..quota` Module
-==============================================================================
-.. automodule:: nova..quota
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..rpc.rst b/doc/source/api/nova..rpc.rst
deleted file mode 100644
index 5b2a9b8e2..000000000
--- a/doc/source/api/nova..rpc.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..rpc` Module
-==============================================================================
-.. automodule:: nova..rpc
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..scheduler.chance.rst b/doc/source/api/nova..scheduler.chance.rst
deleted file mode 100644
index 89c074c8f..000000000
--- a/doc/source/api/nova..scheduler.chance.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..scheduler.chance` Module
-==============================================================================
-.. automodule:: nova..scheduler.chance
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..scheduler.driver.rst b/doc/source/api/nova..scheduler.driver.rst
deleted file mode 100644
index 793ed9c7b..000000000
--- a/doc/source/api/nova..scheduler.driver.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..scheduler.driver` Module
-==============================================================================
-.. automodule:: nova..scheduler.driver
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..scheduler.manager.rst b/doc/source/api/nova..scheduler.manager.rst
deleted file mode 100644
index d0fc7c423..000000000
--- a/doc/source/api/nova..scheduler.manager.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..scheduler.manager` Module
-==============================================================================
-.. automodule:: nova..scheduler.manager
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..scheduler.simple.rst b/doc/source/api/nova..scheduler.simple.rst
deleted file mode 100644
index dacc2cf30..000000000
--- a/doc/source/api/nova..scheduler.simple.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..scheduler.simple` Module
-==============================================================================
-.. automodule:: nova..scheduler.simple
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..server.rst b/doc/source/api/nova..server.rst
deleted file mode 100644
index 7cb2cfa54..000000000
--- a/doc/source/api/nova..server.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..server` Module
-==============================================================================
-.. automodule:: nova..server
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..service.rst b/doc/source/api/nova..service.rst
deleted file mode 100644
index 2d2dfcf2e..000000000
--- a/doc/source/api/nova..service.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..service` Module
-==============================================================================
-.. automodule:: nova..service
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..test.rst b/doc/source/api/nova..test.rst
deleted file mode 100644
index a6bdb6f1f..000000000
--- a/doc/source/api/nova..test.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..test` Module
-==============================================================================
-.. automodule:: nova..test
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.access_unittest.rst b/doc/source/api/nova..tests.access_unittest.rst
deleted file mode 100644
index 89554e430..000000000
--- a/doc/source/api/nova..tests.access_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.access_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.access_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.fakes.rst b/doc/source/api/nova..tests.api.fakes.rst
deleted file mode 100644
index 5728b18f3..000000000
--- a/doc/source/api/nova..tests.api.fakes.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api.fakes` Module
-==============================================================================
-.. automodule:: nova..tests.api.fakes
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.fakes.rst b/doc/source/api/nova..tests.api.openstack.fakes.rst
deleted file mode 100644
index 4a9ff5938..000000000
--- a/doc/source/api/nova..tests.api.openstack.fakes.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api.openstack.fakes` Module
-==============================================================================
-.. automodule:: nova..tests.api.openstack.fakes
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_api.rst b/doc/source/api/nova..tests.api.openstack.test_api.rst
deleted file mode 100644
index 68106d221..000000000
--- a/doc/source/api/nova..tests.api.openstack.test_api.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api.openstack.test_api` Module
-==============================================================================
-.. automodule:: nova..tests.api.openstack.test_api
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_auth.rst b/doc/source/api/nova..tests.api.openstack.test_auth.rst
deleted file mode 100644
index 9f0011669..000000000
--- a/doc/source/api/nova..tests.api.openstack.test_auth.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api.openstack.test_auth` Module
-==============================================================================
-.. automodule:: nova..tests.api.openstack.test_auth
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_faults.rst b/doc/source/api/nova..tests.api.openstack.test_faults.rst
deleted file mode 100644
index b839ae8a3..000000000
--- a/doc/source/api/nova..tests.api.openstack.test_faults.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api.openstack.test_faults` Module
-==============================================================================
-.. automodule:: nova..tests.api.openstack.test_faults
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_flavors.rst b/doc/source/api/nova..tests.api.openstack.test_flavors.rst
deleted file mode 100644
index 471fac56e..000000000
--- a/doc/source/api/nova..tests.api.openstack.test_flavors.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api.openstack.test_flavors` Module
-==============================================================================
-.. automodule:: nova..tests.api.openstack.test_flavors
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_images.rst b/doc/source/api/nova..tests.api.openstack.test_images.rst
deleted file mode 100644
index 57ae93c8c..000000000
--- a/doc/source/api/nova..tests.api.openstack.test_images.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api.openstack.test_images` Module
-==============================================================================
-.. automodule:: nova..tests.api.openstack.test_images
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_ratelimiting.rst b/doc/source/api/nova..tests.api.openstack.test_ratelimiting.rst
deleted file mode 100644
index 9a857f795..000000000
--- a/doc/source/api/nova..tests.api.openstack.test_ratelimiting.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api.openstack.test_ratelimiting` Module
-==============================================================================
-.. automodule:: nova..tests.api.openstack.test_ratelimiting
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_servers.rst b/doc/source/api/nova..tests.api.openstack.test_servers.rst
deleted file mode 100644
index ea602e6ab..000000000
--- a/doc/source/api/nova..tests.api.openstack.test_servers.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api.openstack.test_servers` Module
-==============================================================================
-.. automodule:: nova..tests.api.openstack.test_servers
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_sharedipgroups.rst b/doc/source/api/nova..tests.api.openstack.test_sharedipgroups.rst
deleted file mode 100644
index 1fad49147..000000000
--- a/doc/source/api/nova..tests.api.openstack.test_sharedipgroups.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api.openstack.test_sharedipgroups` Module
-==============================================================================
-.. automodule:: nova..tests.api.openstack.test_sharedipgroups
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.test_wsgi.rst b/doc/source/api/nova..tests.api.test_wsgi.rst
deleted file mode 100644
index 8e79caa4d..000000000
--- a/doc/source/api/nova..tests.api.test_wsgi.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api.test_wsgi` Module
-==============================================================================
-.. automodule:: nova..tests.api.test_wsgi
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api_integration.rst b/doc/source/api/nova..tests.api_integration.rst
deleted file mode 100644
index fd217acf7..000000000
--- a/doc/source/api/nova..tests.api_integration.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api_integration` Module
-==============================================================================
-.. automodule:: nova..tests.api_integration
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api_unittest.rst b/doc/source/api/nova..tests.api_unittest.rst
deleted file mode 100644
index 44a65d48c..000000000
--- a/doc/source/api/nova..tests.api_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.api_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.auth_unittest.rst b/doc/source/api/nova..tests.auth_unittest.rst
deleted file mode 100644
index 5805dcf38..000000000
--- a/doc/source/api/nova..tests.auth_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.auth_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.auth_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.cloud_unittest.rst b/doc/source/api/nova..tests.cloud_unittest.rst
deleted file mode 100644
index d2ca3b013..000000000
--- a/doc/source/api/nova..tests.cloud_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.cloud_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.cloud_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.compute_unittest.rst b/doc/source/api/nova..tests.compute_unittest.rst
deleted file mode 100644
index 6a30bf744..000000000
--- a/doc/source/api/nova..tests.compute_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.compute_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.compute_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.declare_flags.rst b/doc/source/api/nova..tests.declare_flags.rst
deleted file mode 100644
index 524e72e91..000000000
--- a/doc/source/api/nova..tests.declare_flags.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.declare_flags` Module
-==============================================================================
-.. automodule:: nova..tests.declare_flags
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.fake_flags.rst b/doc/source/api/nova..tests.fake_flags.rst
deleted file mode 100644
index a8dc3df36..000000000
--- a/doc/source/api/nova..tests.fake_flags.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.fake_flags` Module
-==============================================================================
-.. automodule:: nova..tests.fake_flags
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.flags_unittest.rst b/doc/source/api/nova..tests.flags_unittest.rst
deleted file mode 100644
index 61087e683..000000000
--- a/doc/source/api/nova..tests.flags_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.flags_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.flags_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.network_unittest.rst b/doc/source/api/nova..tests.network_unittest.rst
deleted file mode 100644
index df057d813..000000000
--- a/doc/source/api/nova..tests.network_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.network_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.network_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.objectstore_unittest.rst b/doc/source/api/nova..tests.objectstore_unittest.rst
deleted file mode 100644
index 0ae252f04..000000000
--- a/doc/source/api/nova..tests.objectstore_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.objectstore_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.objectstore_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.process_unittest.rst b/doc/source/api/nova..tests.process_unittest.rst
deleted file mode 100644
index 30d1e129c..000000000
--- a/doc/source/api/nova..tests.process_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.process_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.process_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.quota_unittest.rst b/doc/source/api/nova..tests.quota_unittest.rst
deleted file mode 100644
index 6ab813104..000000000
--- a/doc/source/api/nova..tests.quota_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.quota_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.quota_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.real_flags.rst b/doc/source/api/nova..tests.real_flags.rst
deleted file mode 100644
index e9c0d1abd..000000000
--- a/doc/source/api/nova..tests.real_flags.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.real_flags` Module
-==============================================================================
-.. automodule:: nova..tests.real_flags
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.rpc_unittest.rst b/doc/source/api/nova..tests.rpc_unittest.rst
deleted file mode 100644
index e6c7ceb2e..000000000
--- a/doc/source/api/nova..tests.rpc_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.rpc_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.rpc_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.runtime_flags.rst b/doc/source/api/nova..tests.runtime_flags.rst
deleted file mode 100644
index 984e21199..000000000
--- a/doc/source/api/nova..tests.runtime_flags.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.runtime_flags` Module
-==============================================================================
-.. automodule:: nova..tests.runtime_flags
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.scheduler_unittest.rst b/doc/source/api/nova..tests.scheduler_unittest.rst
deleted file mode 100644
index ae3a06616..000000000
--- a/doc/source/api/nova..tests.scheduler_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.scheduler_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.scheduler_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.service_unittest.rst b/doc/source/api/nova..tests.service_unittest.rst
deleted file mode 100644
index c7c746d17..000000000
--- a/doc/source/api/nova..tests.service_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.service_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.service_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.twistd_unittest.rst b/doc/source/api/nova..tests.twistd_unittest.rst
deleted file mode 100644
index ce88202e1..000000000
--- a/doc/source/api/nova..tests.twistd_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.twistd_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.twistd_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.validator_unittest.rst b/doc/source/api/nova..tests.validator_unittest.rst
deleted file mode 100644
index 980284327..000000000
--- a/doc/source/api/nova..tests.validator_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.validator_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.validator_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.virt_unittest.rst b/doc/source/api/nova..tests.virt_unittest.rst
deleted file mode 100644
index 2189be41e..000000000
--- a/doc/source/api/nova..tests.virt_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.virt_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.virt_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.volume_unittest.rst b/doc/source/api/nova..tests.volume_unittest.rst
deleted file mode 100644
index 791e192f5..000000000
--- a/doc/source/api/nova..tests.volume_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.volume_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.volume_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..twistd.rst b/doc/source/api/nova..twistd.rst
deleted file mode 100644
index d4145396d..000000000
--- a/doc/source/api/nova..twistd.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..twistd` Module
-==============================================================================
-.. automodule:: nova..twistd
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..utils.rst b/doc/source/api/nova..utils.rst
deleted file mode 100644
index 1131d1080..000000000
--- a/doc/source/api/nova..utils.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..utils` Module
-==============================================================================
-.. automodule:: nova..utils
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..validate.rst b/doc/source/api/nova..validate.rst
deleted file mode 100644
index 1d142f103..000000000
--- a/doc/source/api/nova..validate.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..validate` Module
-==============================================================================
-.. automodule:: nova..validate
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..virt.connection.rst b/doc/source/api/nova..virt.connection.rst
deleted file mode 100644
index caf766765..000000000
--- a/doc/source/api/nova..virt.connection.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..virt.connection` Module
-==============================================================================
-.. automodule:: nova..virt.connection
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..virt.fake.rst b/doc/source/api/nova..virt.fake.rst
deleted file mode 100644
index 06ecdbf7d..000000000
--- a/doc/source/api/nova..virt.fake.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..virt.fake` Module
-==============================================================================
-.. automodule:: nova..virt.fake
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..virt.images.rst b/doc/source/api/nova..virt.images.rst
deleted file mode 100644
index 4fdeb7af8..000000000
--- a/doc/source/api/nova..virt.images.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..virt.images` Module
-==============================================================================
-.. automodule:: nova..virt.images
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..virt.libvirt_conn.rst b/doc/source/api/nova..virt.libvirt_conn.rst
deleted file mode 100644
index 7fb8aed5f..000000000
--- a/doc/source/api/nova..virt.libvirt_conn.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..virt.libvirt_conn` Module
-==============================================================================
-.. automodule:: nova..virt.libvirt_conn
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..virt.xenapi.rst b/doc/source/api/nova..virt.xenapi.rst
deleted file mode 100644
index 2e396bf06..000000000
--- a/doc/source/api/nova..virt.xenapi.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..virt.xenapi` Module
-==============================================================================
-.. automodule:: nova..virt.xenapi
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..volume.driver.rst b/doc/source/api/nova..volume.driver.rst
deleted file mode 100644
index 51f5c0729..000000000
--- a/doc/source/api/nova..volume.driver.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..volume.driver` Module
-==============================================================================
-.. automodule:: nova..volume.driver
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..volume.manager.rst b/doc/source/api/nova..volume.manager.rst
deleted file mode 100644
index 91a192a8f..000000000
--- a/doc/source/api/nova..volume.manager.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..volume.manager` Module
-==============================================================================
-.. automodule:: nova..volume.manager
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..wsgi.rst b/doc/source/api/nova..wsgi.rst
deleted file mode 100644
index 0bff1c332..000000000
--- a/doc/source/api/nova..wsgi.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..wsgi` Module
-==============================================================================
-.. automodule:: nova..wsgi
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/community.rst b/doc/source/community.rst
index bfb93414c..01ff5f055 100644
--- a/doc/source/community.rst
+++ b/doc/source/community.rst
@@ -35,7 +35,8 @@ Contributing Code
To contribute code, sign up for a Launchpad account and sign a contributor license agreement,
available on the `OpenStack Wiki <http://wiki.openstack.org/CLA>`_. Once the CLA is signed you
-can contribute code through the Bazaar version control system which is related to your Launchpad account.
+can contribute code through the Bazaar version control system which is related to your Launchpad
+account. See the :doc:`devref/development.environment` page to get started.
#openstack on Freenode IRC Network
----------------------------------
diff --git a/doc/source/devref/addmethod.openstackapi.rst b/doc/source/devref/addmethod.openstackapi.rst
index 6484613df..4baa46e20 100644
--- a/doc/source/devref/addmethod.openstackapi.rst
+++ b/doc/source/devref/addmethod.openstackapi.rst
@@ -24,7 +24,7 @@ Routing
To map URLs to controllers+actions, OpenStack uses the Routes package, a clone of Rails routes for Python implementations. See http://routes.groovie.org/ fore more information.
-URLs are mapped to "action" methods on "controller" classes in nova/api/openstack/__init__/ApiRouter.__init__ .
+URLs are mapped to "action" methods on "controller" classes in `nova/api/openstack/__init__/ApiRouter.__init__` .
See http://routes.groovie.org/manual.html for all syntax, but you'll probably just need these two:
- mapper.connect() lets you map a single URL to a single action on a controller.
@@ -33,9 +33,9 @@ See http://routes.groovie.org/manual.html for all syntax, but you'll probably ju
Controllers and actions
-----------------------
-Controllers live in nova/api/openstack, and inherit from nova.wsgi.Controller.
+Controllers live in `nova/api/openstack`, and inherit from nova.wsgi.Controller.
-See nova/api/openstack/servers.py for an example.
+See `nova/api/openstack/servers.py` for an example.
Action methods take parameters that are sucked out of the URL by mapper.connect() or .resource(). The first two parameters are self and the WebOb request, from which you can get the req.environ, req.body, req.headers, etc.
@@ -46,7 +46,7 @@ Actions return a dictionary, and wsgi.Controller serializes that to JSON or XML
If you define a new controller, you'll need to define a _serialization_metadata attribute on the class, to tell wsgi.Controller how to convert your dictionary to XML. It needs to know the singular form of any list tag (e.g. <servers> list contains <server> tags) and which dictionary keys are to be XML attributes as opposed to subtags (e.g. <server id="4"/> instead of <server><id>4</id></server>).
-See nova/api/openstack/servers.py for an example.
+See `nova/api/openstack/servers.py` for an example.
Faults
------
diff --git a/doc/source/devref/development.environment.rst b/doc/source/devref/development.environment.rst
index 6344c5382..3de2e2287 100644
--- a/doc/source/devref/development.environment.rst
+++ b/doc/source/devref/development.environment.rst
@@ -88,7 +88,12 @@ Here's how to get the latest code::
source .nova_venv/bin/activate
./run_tests.sh
-And then you can do cleaning work or hack hack hack with a branched named cleaning::
+Then you can do cleaning work or hack hack hack with a branched named cleaning.
+
+Contributing Your Work
+----------------------
+
+Once your work is complete you may wish to contribute it to the project. Add your name and email address to the `Authors` file, and also to the `.mailmap` file if you use multiple email addresses. Your contributions can not be merged into trunk unless you are listed in the Authors file. Now, push the branch to Launchpad::
bzr push lp:~launchpaduserid/nova/cleaning
diff --git a/doc/source/devref/rabbit.rst b/doc/source/devref/rabbit.rst
index 423284a55..ae0bac49d 100644
--- a/doc/source/devref/rabbit.rst
+++ b/doc/source/devref/rabbit.rst
@@ -71,8 +71,8 @@ RPC Casts
The diagram below the message flow during an rp.cast operation:
- 1. a Topic Publisher is instantiated to send the message request to the queuing system.
- 2. once the message is dispatched by the exchange, it is fetched by the Topic Consumer dictated by the routing key (such as 'topic') and passed to the Worker in charge of the task.
+ 1. A Topic Publisher is instantiated to send the message request to the queuing system.
+ 2. Once the message is dispatched by the exchange, it is fetched by the Topic Consumer dictated by the routing key (such as 'topic') and passed to the Worker in charge of the task.
.. image:: /images/rabbit/flow2.png
:width: 60%
diff --git a/doc/source/nova.concepts.rst b/doc/source/nova.concepts.rst
index 18368546b..fb3969a43 100644
--- a/doc/source/nova.concepts.rst
+++ b/doc/source/nova.concepts.rst
@@ -75,7 +75,7 @@ Nova is built on a shared-nothing, messaging-based architecture. All of the majo
To achieve the shared-nothing property with multiple copies of the same component, Nova keeps all the cloud system state in a distributed data store. Updates to system state are written into this store, using atomic transactions when required. Requests for system state are read out of this store. In limited cases, the read results are cached within controllers for short periods of time (for example, the current list of system users.)
- .. note:: The database schema is available on the `OpenStack Wiki <http://wiki.openstack.org/NovaDatabaseSchema>_`.
+ .. note:: The database schema is available on the `OpenStack Wiki <http://wiki.openstack.org/NovaDatabaseSchema>`_.
Concept: Storage
----------------
@@ -129,12 +129,12 @@ The simplest networking mode. Each instance receives a fixed ip from the pool.
Flat DHCP Mode
~~~~~~~~~~~~~~
-This is similar to the flat mode, in that all instances are attached to the same bridge. In this mode nova does a bit more configuration, it will attempt to bridge into an ethernet device (eth0 by default). It will also run dnsmasq as a dhcpserver listening on this bridge. Instances receive their fixed IPs by doing a dhcpdiscover.
+This is similar to the flat mode, in that all instances are attached to the same bridge. In this mode Nova does a bit more configuration, it will attempt to bridge into an ethernet device (eth0 by default). It will also run dnsmasq as a dhcpserver listening on this bridge. Instances receive their fixed IPs by doing a dhcpdiscover.
VLAN DHCP Mode
~~~~~~~~~~~~~~
-This is the default networking mode and supports the most features. For multiple machine installation, it requires a switch that supports host-managed vlan tagging. In this mode, nova will create a vlan and bridge for each project. The project gets a range of private ips that are only accessible from inside the vlan. In order for a user to access the instances in their project, a special vpn instance (code named :ref:`cloudpipe <cloudpipe>`) needs to be created. Nova generates a certificate and key for the user to access the vpn and starts the vpn automatically. More information on cloudpipe can be found :ref:`here <cloudpipe>`.
+This is the default networking mode and supports the most features. For multiple machine installation, it requires a switch that supports host-managed vlan tagging. In this mode, Nova will create a vlan and bridge for each project. The project gets a range of private ips that are only accessible from inside the vlan. In order for a user to access the instances in their project, a special vpn instance (code named :ref:`cloudpipe <cloudpipe>`) needs to be created. Nova generates a certificate and key for the user to access the vpn and starts the vpn automatically. More information on cloudpipe can be found :ref:`here <cloudpipe>`.
The following diagram illustrates how the communication that occurs between the vlan (the dashed box) and the public internet (represented by the two clouds)
@@ -154,16 +154,16 @@ Concept: nova-manage
--------------------
The nova-manage command is used to perform many essential functions for
-administration and ongoing maintenance of nova, such as user creation,
+administration and ongoing maintenance of Nova, such as user creation,
vpn management, and much more.
-See doc:`nova.manage` in the Administration Guide for more details.
+See :doc:`nova.manage` in the Administration Guide for more details.
Concept: Flags
--------------
-Nova uses python-gflags for a distributed command line system, and the flags can either be set when running a command at the command line or within flag files. When you install Nova packages, each nova service gets its own flag file. For example, nova-network.conf is used for configuring the nova-network service, and so forth.
+Nova uses python-gflags for a distributed command line system, and the flags can either be set when running a command at the command line or within flag files. When you install Nova packages, each Nova service gets its own flag file. For example, nova-network.conf is used for configuring the nova-network service, and so forth.
Concept: Plugins
@@ -181,7 +181,7 @@ Concept: Plugins
Concept: IPC/RPC
----------------
-Nova utilizes the RabbitMQ implementation of the AMQP messaging standard for performing communication between the various nova services. This message queuing service is used for both local and remote communication because Nova is designed so that there is no requirement that any of the services exist on the same physical machine. RabbitMQ in particular is very robust and provides the efficiency and reliability that Nova needs. More information about RabbitMQ can be found at http://www.rabbitmq.com/.
+Nova utilizes the RabbitMQ implementation of the AMQP messaging standard for performing communication between the various Nova services. This message queuing service is used for both local and remote communication because Nova is designed so that there is no requirement that any of the services exist on the same physical machine. RabbitMQ in particular is very robust and provides the efficiency and reliability that Nova needs. More information about RabbitMQ can be found at http://www.rabbitmq.com/.
Concept: Fakes
--------------
diff --git a/doc/source/quickstart.rst b/doc/source/quickstart.rst
index ae2b64d8a..fa5d96738 100644
--- a/doc/source/quickstart.rst
+++ b/doc/source/quickstart.rst
@@ -59,38 +59,21 @@ different configurations (though for more complex setups you should see
* HOST_IP
* Default: address of first interface from the ifconfig command
* Values: 127.0.0.1, or any other valid address
-
-TEST
-~~~~
-
-**Default**: 0
-**Values**: 1, run tests after checkout and initial setup
-
-USE_MYSQL
-~~~~~~~~~
-
-**Default**: 0, use sqlite3
-**Values**: 1, use mysql instead of sqlite3
-
-MYSQL_PASS
-~~~~~~~~~~
-
-Only useful if $USE_MYSQL=1.
-
-**Default**: nova
-**Values**: value of root password for mysql
-
-USE_LDAP
-~~~~~~~~
-
-**Default**: 0, use :mod:`nova.auth.dbdriver`
-**Values**: 1, use :mod:`nova.auth.ldapdriver`
-
-LIBVIRT_TYPE
-~~~~~~~~~~~~
-
-**Default**: qemu
-**Values**: uml, kvm
+* TEST
+ * Default: 0
+ * Values: 1, run tests after checkout and initial setup
+* USE_MYSQL
+ * Default: 0, use sqlite3
+ * Values: 1, use mysql instead of sqlite3
+* MYSQL_PASS (Only useful if $USE_MYSQL=1)
+ * Default: nova
+ * Values: value of root password for mysql
+* USE_LDAP
+ * Default: 0, use :mod:`nova.auth.dbdriver`
+ * Values: 1, use :mod:`nova.auth.ldapdriver`
+* LIBVIRT_TYPE
+ * Default: qemu
+ * Values: uml, kvm
Usage
-----
diff --git a/etc/nova-api.conf b/etc/nova-api.conf
new file mode 100644
index 000000000..c5dd0aaec
--- /dev/null
+++ b/etc/nova-api.conf
@@ -0,0 +1,63 @@
+[DEFAULT]
+verbose = 1
+ec2_port = 8773
+ec2_address = 0.0.0.0
+openstack_port = 8774
+openstack_address = 0.0.0.0
+
+#######
+# EC2 #
+#######
+
+[composite:ec2]
+use = egg:Paste#urlmap
+/: ec2versions
+/services: ec2api
+/latest: ec2metadata
+/200: ec2metadata
+/1.0: ec2metadata
+
+[pipeline:ec2api]
+pipeline = authenticate router authorizer ec2executor
+
+[filter:authenticate]
+paste.filter_factory = nova.api.ec2:authenticate_factory
+
+[filter:router]
+paste.filter_factory = nova.api.ec2:router_factory
+
+[filter:authorizer]
+paste.filter_factory = nova.api.ec2:authorizer_factory
+
+[app:ec2executor]
+paste.app_factory = nova.api.ec2:executor_factory
+
+[app:ec2versions]
+paste.app_factory = nova.api.ec2:versions_factory
+
+[app:ec2metadata]
+paste.app_factory = nova.api.ec2.metadatarequesthandler:metadata_factory
+
+#############
+# Openstack #
+#############
+
+[composite:openstack]
+use = egg:Paste#urlmap
+/: osversions
+/v1.0: openstackapi
+
+[pipeline:openstackapi]
+pipeline = auth ratelimit osapi
+
+[filter:auth]
+paste.filter_factory = nova.api.openstack.auth:auth_factory
+
+[filter:ratelimit]
+paste.filter_factory = nova.api.openstack.ratelimiting:ratelimit_factory
+
+[app:osapi]
+paste.app_factory = nova.api.openstack:router_factory
+
+[app:osversions]
+paste.app_factory = nova.api.openstack:versions_factory
diff --git a/nova/adminclient.py b/nova/adminclient.py
index 6ae9f0c0f..b2609c8c4 100644
--- a/nova/adminclient.py
+++ b/nova/adminclient.py
@@ -23,12 +23,9 @@ import base64
import boto
import httplib
-from nova import flags
from boto.ec2.regioninfo import RegionInfo
-FLAGS = flags.FLAGS
-
DEFAULT_CLC_URL = 'http://127.0.0.1:8773'
DEFAULT_REGION = 'nova'
@@ -199,8 +196,8 @@ class NovaAdminClient(object):
self,
clc_url=DEFAULT_CLC_URL,
region=DEFAULT_REGION,
- access_key=FLAGS.aws_access_key_id,
- secret_key=FLAGS.aws_secret_access_key,
+ access_key=None,
+ secret_key=None,
**kwargs):
parts = self.split_clc_url(clc_url)
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index 51d33bcc6..aa3bfaeb4 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -294,10 +294,9 @@ class Executor(wsgi.Application):
args = req.environ['ec2.action_args']
api_request = apirequest.APIRequest(controller, action)
+ result = None
try:
result = api_request.send(context, **args)
- req.headers['Content-Type'] = 'text/xml'
- return result
except exception.ApiError as ex:
if ex.code:
@@ -307,6 +306,12 @@ class Executor(wsgi.Application):
# TODO(vish): do something more useful with unknown exceptions
except Exception as ex:
return self._error(req, type(ex).__name__, str(ex))
+ else:
+ resp = webob.Response()
+ resp.status = 200
+ resp.headers['Content-Type'] = 'text/xml'
+ resp.body = str(result)
+ return resp
def _error(self, req, code, message):
logging.error("%s: %s", code, message)
@@ -318,3 +323,49 @@ class Executor(wsgi.Application):
'<Message>%s</Message></Error></Errors>'
'<RequestID>?</RequestID></Response>' % (code, message))
return resp
+
+
+class Versions(wsgi.Application):
+
+ @webob.dec.wsgify
+ def __call__(self, req):
+ """Respond to a request for all EC2 versions."""
+ # available api versions
+ versions = [
+ '1.0',
+ '2007-01-19',
+ '2007-03-01',
+ '2007-08-29',
+ '2007-10-10',
+ '2007-12-15',
+ '2008-02-01',
+ '2008-09-01',
+ '2009-04-04',
+ ]
+ return ''.join('%s\n' % v for v in versions)
+
+
+def authenticate_factory(global_args, **local_args):
+ def authenticator(app):
+ return Authenticate(app)
+ return authenticator
+
+
+def router_factory(global_args, **local_args):
+ def router(app):
+ return Router(app)
+ return router
+
+
+def authorizer_factory(global_args, **local_args):
+ def authorizer(app):
+ return Authorizer(app)
+ return authorizer
+
+
+def executor_factory(global_args, **local_args):
+ return Executor()
+
+
+def versions_factory(global_args, **local_args):
+ return Versions()
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 9fa422301..b6966e605 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -31,19 +31,19 @@ import os
from nova import context
import IPy
+from nova import compute
from nova import crypto
from nova import db
from nova import exception
from nova import flags
-from nova import quota
+from nova import network
from nova import rpc
from nova import utils
-from nova.compute import api as compute_api
+from nova import volume
from nova.compute import instance_types
FLAGS = flags.FLAGS
-flags.DECLARE('storage_availability_zone', 'nova.volume.manager')
InvalidInputException = exception.InvalidInputException
@@ -71,16 +71,16 @@ def _gen_key(context, user_id, key_name):
return {'private_key': private_key, 'fingerprint': fingerprint}
-def ec2_id_to_internal_id(ec2_id):
- """Convert an ec2 ID (i-[base 36 number]) to an internal id (int)"""
+def ec2_id_to_id(ec2_id):
+ """Convert an ec2 ID (i-[base 36 number]) to an instance id (int)"""
return int(ec2_id[2:], 36)
-def internal_id_to_ec2_id(internal_id):
- """Convert an internal ID (int) to an ec2 ID (i-[base 36 number])"""
+def id_to_ec2_id(instance_id):
+ """Convert an instance ID (int) to an ec2 ID (i-[base 36 number])"""
digits = []
- while internal_id != 0:
- internal_id, remainder = divmod(internal_id, 36)
+ while instance_id != 0:
+ instance_id, remainder = divmod(instance_id, 36)
digits.append('0123456789abcdefghijklmnopqrstuvwxyz'[remainder])
return "i-%s" % ''.join(reversed(digits))
@@ -91,10 +91,11 @@ class CloudController(object):
sent to the other nodes.
"""
def __init__(self):
- self.network_manager = utils.import_object(FLAGS.network_manager)
self.image_service = utils.import_object(FLAGS.image_service)
- self.compute_api = compute_api.ComputeAPI(self.network_manager,
- self.image_service)
+ self.network_api = network.API()
+ self.volume_api = volume.API()
+ self.compute_api = compute.API(self.image_service, self.network_api,
+ self.volume_api)
self.setup()
def __str__(self):
@@ -118,7 +119,8 @@ class CloudController(object):
def _get_mpi_data(self, context, project_id):
result = {}
- for instance in self.compute_api.get_instances(context, project_id):
+ for instance in self.compute_api.get_all(context,
+ project_id=project_id):
if instance['fixed_ip']:
line = '%s slots=%d' % (instance['fixed_ip']['address'],
instance['vcpus'])
@@ -138,15 +140,15 @@ class CloudController(object):
{"method": "refresh_security_group",
"args": {"security_group_id": security_group.id}})
- def _get_availability_zone_by_host(self, context, hostname):
- services = db.service_get_all_compute_by_host(context, hostname)
+ def _get_availability_zone_by_host(self, context, host):
+ services = db.service_get_all_by_host(context, host)
if len(services) > 0:
return services[0]['availability_zone']
- raise Exception(_('No service with hostname: %s' % hostname))
+ return 'unknown zone'
def get_metadata(self, address):
ctxt = context.get_admin_context()
- instance_ref = db.fixed_ip_get_instance(ctxt, address)
+ instance_ref = self.compute_api.get_all(ctxt, fixed_ip=address)
if instance_ref is None:
return None
mpi = self._get_mpi_data(ctxt, instance_ref['project_id'])
@@ -156,10 +158,11 @@ class CloudController(object):
else:
keys = ''
hostname = instance_ref['hostname']
- availability_zone = self._get_availability_zone_by_host(ctxt, hostname)
+ availability_zone = self._get_availability_zone_by_host(ctxt,
+ instance_ref['host'])
floating_ip = db.instance_get_floating_address(ctxt,
instance_ref['id'])
- ec2_id = internal_id_to_ec2_id(instance_ref['internal_id'])
+ ec2_id = id_to_ec2_id(instance_ref['id'])
data = {
'user-data': base64.b64decode(instance_ref['user_data']),
'meta-data': {
@@ -194,12 +197,26 @@ class CloudController(object):
return data
def describe_availability_zones(self, context, **kwargs):
- enabled_services = db.service_get_all_by_topic(context, 'compute')
- disabled_services = db.service_get_all_by_topic(context, 'compute', True)
- available_zones = [service.availability_zone for service in enabled_services]
- not_available_zones = [service.availability_zone for service in disabled_services
- and not service['availability_zone'] in available_zones]
-
+ if ('zone_name' in kwargs and
+ 'verbose' in kwargs['zone_name'] and
+ context.is_admin):
+ return self._describe_availability_zones_verbose(context,
+ **kwargs)
+ else:
+ return self._describe_availability_zones(context, **kwargs)
+
+ def _describe_availability_zones(self, context, **kwargs):
+ enabled_services = db.service_get_all(context)
+ disabled_services = db.service_get_all(context, True)
+ available_zones = []
+ for zone in [service.availability_zone for service in enabled_services]:
+ if not zone in available_zones:
+ available_zones.append(zone)
+ not_available_zones = []
+ for zone in [service.availability_zone for service in disabled_services
+ and not service['availability_zone'] in available_zones]:
+ if not zone in not_available_zones:
+ not_available_zones.append(zone)
result = []
for zone in available_zones:
result.append({'zoneName': zone,
@@ -209,6 +226,34 @@ class CloudController(object):
'zoneState': "not available"})
return {'availabilityZoneInfo': result}
+ def _describe_availability_zones_verbose(self, context, **kwargs):
+ rv = {'availabilityZoneInfo': [{'zoneName': 'nova',
+ 'zoneState': 'available'}]}
+
+ services = db.service_get_all(context)
+ now = db.get_time()
+ hosts = []
+ for host in [service['host'] for service in services]:
+ if not host in hosts:
+ hosts.append(host)
+ for host in hosts:
+ rv['availabilityZoneInfo'].append({'zoneName': '|- %s' % host,
+ 'zoneState': ''})
+ hsvcs = [service for service in services \
+ if service['host'] == host]
+ for svc in hsvcs:
+ delta = now - (svc['updated_at'] or svc['created_at'])
+ alive = (delta.seconds <= FLAGS.service_down_time)
+ art = (alive and ":-)") or "XXX"
+ active = 'enabled'
+ if svc['disabled']:
+ active = 'disabled'
+ rv['availabilityZoneInfo'].append({
+ 'zoneName': '| |- %s' % svc['binary'],
+ 'zoneState': '%s %s %s' % (active, art,
+ svc['updated_at'])})
+ return rv
+
def describe_regions(self, context, region_name=None, **kwargs):
if FLAGS.region_list:
regions = []
@@ -459,8 +504,8 @@ class CloudController(object):
def get_console_output(self, context, instance_id, **kwargs):
# instance_id is passed in as a list of instances
ec2_id = instance_id[0]
- internal_id = ec2_id_to_internal_id(ec2_id)
- instance_ref = self.compute_api.get_instance(context, internal_id)
+ instance_id = ec2_id_to_id(ec2_id)
+ instance_ref = self.compute_api.get(context, instance_id)
output = rpc.call(context,
'%s.%s' % (FLAGS.compute_topic,
instance_ref['host']),
@@ -473,27 +518,22 @@ class CloudController(object):
"output": base64.b64encode(output)}
def describe_volumes(self, context, volume_id=None, **kwargs):
- if context.user.is_admin():
- volumes = db.volume_get_all(context)
- else:
- volumes = db.volume_get_all_by_project(context, context.project_id)
-
+ volumes = self.volume_api.get_all(context)
# NOTE(vish): volume_id is an optional list of volume ids to filter by.
volumes = [self._format_volume(context, v) for v in volumes
- if volume_id is None or v['ec2_id'] in volume_id]
-
+ if volume_id is None or v['id'] in volume_id]
return {'volumeSet': volumes}
def _format_volume(self, context, volume):
instance_ec2_id = None
instance_data = None
if volume.get('instance', None):
- internal_id = volume['instance']['internal_id']
- instance_ec2_id = internal_id_to_ec2_id(internal_id)
+ instance_id = volume['instance']['id']
+ instance_ec2_id = id_to_ec2_id(instance_id)
instance_data = '%s[%s]' % (instance_ec2_id,
volume['instance']['host'])
v = {}
- v['volumeId'] = volume['ec2_id']
+ v['volumeId'] = volume['id']
v['status'] = volume['status']
v['size'] = volume['size']
v['availabilityZone'] = volume['availability_zone']
@@ -520,88 +560,47 @@ class CloudController(object):
return v
def create_volume(self, context, size, **kwargs):
- # check quota
- if quota.allowed_volumes(context, 1, size) < 1:
- logging.warn("Quota exceeeded for %s, tried to create %sG volume",
- context.project_id, size)
- raise quota.QuotaError("Volume quota exceeded. You cannot "
- "create a volume of size %s" % size)
- vol = {}
- vol['size'] = size
- vol['user_id'] = context.user.id
- vol['project_id'] = context.project_id
- vol['availability_zone'] = FLAGS.storage_availability_zone
- vol['status'] = "creating"
- vol['attach_status'] = "detached"
- vol['display_name'] = kwargs.get('display_name')
- vol['display_description'] = kwargs.get('display_description')
- volume_ref = db.volume_create(context, vol)
-
- rpc.cast(context,
- FLAGS.scheduler_topic,
- {"method": "create_volume",
- "args": {"topic": FLAGS.volume_topic,
- "volume_id": volume_ref['id']}})
-
+ volume = self.volume_api.create(context, size,
+ kwargs.get('display_name'),
+ kwargs.get('display_description'))
# TODO(vish): Instance should be None at db layer instead of
# trying to lazy load, but for now we turn it into
# a dict to avoid an error.
return {'volumeSet': [self._format_volume(context, dict(volume_ref))]}
+ def delete_volume(self, context, volume_id, **kwargs):
+ self.volume_api.delete(context, volume_id)
+ return True
+
+ def update_volume(self, context, volume_id, **kwargs):
+ updatable_fields = ['display_name', 'display_description']
+ changes = {}
+ for field in updatable_fields:
+ if field in kwargs:
+ changes[field] = kwargs[field]
+ if changes:
+ self.volume_api.update(context, volume_id, kwargs)
+ return True
+
def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
- volume_ref = db.volume_get_by_ec2_id(context, volume_id)
- if not re.match("^/dev/[a-z]d[a-z]+$", device):
- raise exception.ApiError(_("Invalid device specified: %s. "
- "Example device: /dev/vdb") % device)
- # TODO(vish): abstract status checking?
- if volume_ref['status'] != "available":
- raise exception.ApiError(_("Volume status must be available"))
- if volume_ref['attach_status'] == "attached":
- raise exception.ApiError(_("Volume is already attached"))
- internal_id = ec2_id_to_internal_id(instance_id)
- instance_ref = self.compute_api.get_instance(context, internal_id)
- host = instance_ref['host']
- rpc.cast(context,
- db.queue_get_for(context, FLAGS.compute_topic, host),
- {"method": "attach_volume",
- "args": {"volume_id": volume_ref['id'],
- "instance_id": instance_ref['id'],
- "mountpoint": device}})
- return {'attachTime': volume_ref['attach_time'],
- 'device': volume_ref['mountpoint'],
- 'instanceId': instance_ref['id'],
+ self.compute_api.attach_volume(context, instance_id, volume_id, device)
+ volume = self.volume_api.get(context, volume_id)
+ return {'attachTime': volume['attach_time'],
+ 'device': volume['mountpoint'],
+ 'instanceId': instance_id,
'requestId': context.request_id,
- 'status': volume_ref['attach_status'],
- 'volumeId': volume_ref['id']}
+ 'status': volume['attach_status'],
+ 'volumeId': volume_id}
def detach_volume(self, context, volume_id, **kwargs):
- volume_ref = db.volume_get_by_ec2_id(context, volume_id)
- instance_ref = db.volume_get_instance(context.elevated(),
- volume_ref['id'])
- if not instance_ref:
- raise exception.ApiError(_("Volume isn't attached to anything!"))
- # TODO(vish): abstract status checking?
- if volume_ref['status'] == "available":
- raise exception.ApiError(_("Volume is already detached"))
- try:
- host = instance_ref['host']
- rpc.cast(context,
- db.queue_get_for(context, FLAGS.compute_topic, host),
- {"method": "detach_volume",
- "args": {"instance_id": instance_ref['id'],
- "volume_id": volume_ref['id']}})
- except exception.NotFound:
- # If the instance doesn't exist anymore,
- # then we need to call detach blind
- db.volume_detached(context)
- internal_id = instance_ref['internal_id']
- ec2_id = internal_id_to_ec2_id(internal_id)
- return {'attachTime': volume_ref['attach_time'],
- 'device': volume_ref['mountpoint'],
- 'instanceId': internal_id,
+ volume = self.volume_api.get(context, volume_id)
+ instance = self.compute_api.detach_volume(context, volume_id)
+ return {'attachTime': volume['attach_time'],
+ 'device': volume['mountpoint'],
+ 'instanceId': id_to_ec2_id(instance['id']),
'requestId': context.request_id,
- 'status': volume_ref['attach_status'],
- 'volumeId': volume_ref['id']}
+ 'status': volume['attach_status'],
+ 'volumeId': volume_id}
def _convert_to_set(self, lst, label):
if lst == None or lst == []:
@@ -610,16 +609,6 @@ class CloudController(object):
lst = [lst]
return [{label: x} for x in lst]
- def update_volume(self, context, volume_id, **kwargs):
- updatable_fields = ['display_name', 'display_description']
- changes = {}
- for field in updatable_fields:
- if field in kwargs:
- changes[field] = kwargs[field]
- if changes:
- db.volume_update(context, volume_id, kwargs)
- return True
-
def describe_instances(self, context, **kwargs):
return self._format_describe_instances(context)
@@ -627,24 +616,20 @@ class CloudController(object):
return {'reservationSet': self._format_instances(context)}
def _format_run_instances(self, context, reservation_id):
- i = self._format_instances(context, reservation_id)
+ i = self._format_instances(context, reservation_id=reservation_id)
assert len(i) == 1
return i[0]
- def _format_instances(self, context, reservation_id=None):
+ def _format_instances(self, context, **kwargs):
reservations = {}
- if reservation_id:
- instances = db.instance_get_all_by_reservation(context,
- reservation_id)
- else:
- instances = self.compute_api.get_instances(context)
+ instances = self.compute_api.get_all(context, **kwargs)
for instance in instances:
if not context.user.is_admin():
if instance['image_id'] == FLAGS.vpn_image_id:
continue
i = {}
- internal_id = instance['internal_id']
- ec2_id = internal_id_to_ec2_id(internal_id)
+ instance_id = instance['id']
+ ec2_id = id_to_ec2_id(instance_id)
i['instanceId'] = ec2_id
i['imageId'] = instance['image_id']
i['instanceState'] = {
@@ -671,6 +656,8 @@ class CloudController(object):
i['amiLaunchIndex'] = instance['launch_index']
i['displayName'] = instance['display_name']
i['displayDescription'] = instance['display_description']
+ availability_zone = self._get_availability_zone_by_host(context, instance['host'])
+ i['placement'] = {'availabilityZone': availability_zone}
if instance['reservation_id'] not in reservations:
r = {}
r['reservationId'] = instance['reservation_id']
@@ -678,8 +665,6 @@ class CloudController(object):
r['groupSet'] = self._convert_to_set([], 'groups')
r['instancesSet'] = []
reservations[instance['reservation_id']] = r
- availability_zone = self._get_availability_zone_by_host(context, instance['hostname'])
- i['placement'] = {'availabilityZone': availability_zone}
reservations[instance['reservation_id']]['instancesSet'].append(i)
return list(reservations.values())
@@ -699,8 +684,8 @@ class CloudController(object):
ec2_id = None
if (floating_ip_ref['fixed_ip']
and floating_ip_ref['fixed_ip']['instance']):
- internal_id = floating_ip_ref['fixed_ip']['instance']['ec2_id']
- ec2_id = internal_id_to_ec2_id(internal_id)
+            instance_id = floating_ip_ref['fixed_ip']['instance']['id']
+ ec2_id = id_to_ec2_id(instance_id)
address_rv = {'public_ip': address,
'instance_id': ec2_id}
if context.user.is_admin():
@@ -711,73 +696,25 @@ class CloudController(object):
return {'addressesSet': addresses}
def allocate_address(self, context, **kwargs):
- # check quota
- if quota.allowed_floating_ips(context, 1) < 1:
- logging.warn(_("Quota exceeeded for %s, tried to allocate "
- "address"),
- context.project_id)
- raise quota.QuotaError(_("Address quota exceeded. You cannot "
- "allocate any more addresses"))
- # NOTE(vish): We don't know which network host should get the ip
- # when we allocate, so just send it to any one. This
- # will probably need to move into a network supervisor
- # at some point.
- public_ip = rpc.call(context,
- FLAGS.network_topic,
- {"method": "allocate_floating_ip",
- "args": {"project_id": context.project_id}})
+ public_ip = self.network_api.allocate_floating_ip(context)
return {'addressSet': [{'publicIp': public_ip}]}
def release_address(self, context, public_ip, **kwargs):
- floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
- # NOTE(vish): We don't know which network host should get the ip
- # when we deallocate, so just send it to any one. This
- # will probably need to move into a network supervisor
- # at some point.
- rpc.cast(context,
- FLAGS.network_topic,
- {"method": "deallocate_floating_ip",
- "args": {"floating_address": floating_ip_ref['address']}})
+ self.network_api.release_floating_ip(context, public_ip)
return {'releaseResponse': ["Address released."]}
def associate_address(self, context, instance_id, public_ip, **kwargs):
- internal_id = ec2_id_to_internal_id(instance_id)
- instance_ref = self.compute_api.get_instance(context, internal_id)
- fixed_address = db.instance_get_fixed_address(context,
- instance_ref['id'])
- floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
- # NOTE(vish): Perhaps we should just pass this on to compute and
- # let compute communicate with network.
- network_topic = self.compute_api.get_network_topic(context,
- internal_id)
- rpc.cast(context,
- network_topic,
- {"method": "associate_floating_ip",
- "args": {"floating_address": floating_ip_ref['address'],
- "fixed_address": fixed_address}})
+ instance_id = ec2_id_to_id(instance_id)
+ self.compute_api.associate_floating_ip(context, instance_id, public_ip)
return {'associateResponse': ["Address associated."]}
def disassociate_address(self, context, public_ip, **kwargs):
- floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
- # NOTE(vish): Get the topic from the host name of the network of
- # the associated fixed ip.
- if not floating_ip_ref.get('fixed_ip'):
- raise exception.ApiError('Address is not associated.')
- host = floating_ip_ref['fixed_ip']['network']['host']
- topic = db.queue_get_for(context, FLAGS.network_topic, host)
- rpc.cast(context,
- topic,
- {"method": "disassociate_floating_ip",
- "args": {"floating_address": floating_ip_ref['address']}})
+ self.network_api.disassociate_floating_ip(context, public_ip)
return {'disassociateResponse': ["Address disassociated."]}
def run_instances(self, context, **kwargs):
max_count = int(kwargs.get('max_count', 1))
- placement = kwargs.get('placement')
- avzone = None
- if placement is not None:
- avzone = placement['availability_zone']
- instances = self.compute_api.create_instances(context,
+ instances = self.compute_api.create(context,
instance_types.get_by_type(kwargs.get('instance_type', None)),
kwargs['image_id'],
min_count=int(kwargs.get('min_count', max_count)),
@@ -785,12 +722,13 @@ class CloudController(object):
kernel_id=kwargs.get('kernel_id', None),
ramdisk_id=kwargs.get('ramdisk_id'),
display_name=kwargs.get('display_name'),
- description=kwargs.get('display_description'),
+ display_description=kwargs.get('display_description'),
key_name=kwargs.get('key_name'),
user_data=kwargs.get('user_data'),
security_group=kwargs.get('security_group'),
- generate_hostname=internal_id_to_ec2_id,
- availability_zone=avzone)
+ availability_zone=kwargs.get('placement', {}).get(
+ 'AvailabilityZone'),
+ generate_hostname=id_to_ec2_id)
return self._format_run_instances(context,
instances[0]['reservation_id'])
@@ -799,27 +737,27 @@ class CloudController(object):
instance_id is a kwarg so its name cannot be modified."""
logging.debug("Going to start terminating instances")
for ec2_id in instance_id:
- internal_id = ec2_id_to_internal_id(ec2_id)
- self.compute_api.delete_instance(context, internal_id)
+ instance_id = ec2_id_to_id(ec2_id)
+ self.compute_api.delete(context, instance_id)
return True
def reboot_instances(self, context, instance_id, **kwargs):
"""instance_id is a list of instance ids"""
for ec2_id in instance_id:
- internal_id = ec2_id_to_internal_id(ec2_id)
- self.compute_api.reboot(context, internal_id)
+ instance_id = ec2_id_to_id(ec2_id)
+ self.compute_api.reboot(context, instance_id)
return True
def rescue_instance(self, context, instance_id, **kwargs):
"""This is an extension to the normal ec2_api"""
- internal_id = ec2_id_to_internal_id(instance_id)
- self.compute_api.rescue(context, internal_id)
+ instance_id = ec2_id_to_id(instance_id)
+ self.compute_api.rescue(context, instance_id)
return True
def unrescue_instance(self, context, instance_id, **kwargs):
"""This is an extension to the normal ec2_api"""
- internal_id = ec2_id_to_internal_id(instance_id)
- self.compute_api.unrescue(context, internal_id)
+ instance_id = ec2_id_to_id(instance_id)
+ self.compute_api.unrescue(context, instance_id)
return True
def update_instance(self, context, ec2_id, **kwargs):
@@ -829,24 +767,8 @@ class CloudController(object):
if field in kwargs:
changes[field] = kwargs[field]
if changes:
- internal_id = ec2_id_to_internal_id(ec2_id)
- inst = self.compute_api.get_instance(context, internal_id)
- db.instance_update(context, inst['id'], kwargs)
- return True
-
- def delete_volume(self, context, volume_id, **kwargs):
- # TODO: return error if not authorized
- volume_ref = db.volume_get_by_ec2_id(context, volume_id)
- if volume_ref['status'] != "available":
- raise exception.ApiError(_("Volume status must be available"))
- now = datetime.datetime.utcnow()
- db.volume_update(context, volume_ref['id'], {'status': 'deleting',
- 'terminated_at': now})
- host = volume_ref['host']
- rpc.cast(context,
- db.queue_get_for(context, FLAGS.volume_topic, host),
- {"method": "delete_volume",
- "args": {"volume_id": volume_ref['id']}})
+ instance_id = ec2_id_to_id(ec2_id)
+ self.compute_api.update(context, instance_id, **kwargs)
return True
def describe_images(self, context, image_id=None, **kwargs):
diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py
index f832863a9..a57a6698a 100644
--- a/nova/api/ec2/metadatarequesthandler.py
+++ b/nova/api/ec2/metadatarequesthandler.py
@@ -79,3 +79,7 @@ class MetadataRequestHandler(object):
if data is None:
raise webob.exc.HTTPNotFound()
return self.print_data(data)
+
+
+def metadata_factory(global_args, **local_args):
+ return MetadataRequestHandler()
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index 66aceee2d..a1430caed 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -20,7 +20,6 @@
WSGI middleware for OpenStack API controllers.
"""
-import json
import time
import logging
@@ -41,7 +40,6 @@ from nova.api.openstack import images
from nova.api.openstack import ratelimiting
from nova.api.openstack import servers
from nova.api.openstack import sharedipgroups
-from nova.auth import manager
FLAGS = flags.FLAGS
@@ -93,6 +91,8 @@ class APIRouter(wsgi.Router):
logging.debug("Including admin operations in API.")
server_members['pause'] = 'POST'
server_members['unpause'] = 'POST'
+ server_members["diagnostics"] = "GET"
+ server_members["actions"] = "GET"
server_members['suspend'] = 'POST'
server_members['resume'] = 'POST'
@@ -113,3 +113,24 @@ class APIRouter(wsgi.Router):
controller=sharedipgroups.Controller())
super(APIRouter, self).__init__(mapper)
+
+
+class Versions(wsgi.Application):
+ @webob.dec.wsgify
+ def __call__(self, req):
+ """Respond to a request for all OpenStack API versions."""
+ response = {
+ "versions": [
+ dict(status="CURRENT", id="v1.0")]}
+ metadata = {
+ "application/xml": {
+ "attributes": dict(version=["status", "id"])}}
+ return wsgi.Serializer(req.environ, metadata).to_content_type(response)
+
+
+def router_factory(global_conf, **local_conf):
+ return APIRouter()
+
+
+def versions_factory(global_conf, **local_conf):
+ return Versions()
diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py
index e24e58fd3..00e817c8d 100644
--- a/nova/api/openstack/auth.py
+++ b/nova/api/openstack/auth.py
@@ -55,7 +55,8 @@ class AuthMiddleware(wsgi.Middleware):
if not user:
return faults.Fault(webob.exc.HTTPUnauthorized())
- req.environ['nova.context'] = context.RequestContext(user, user)
+ project = self.auth.get_project(FLAGS.default_project)
+ req.environ['nova.context'] = context.RequestContext(user, project)
return self.application
def has_authentication(self, req):
@@ -133,3 +134,9 @@ class AuthMiddleware(wsgi.Middleware):
token = self.db.auth_create_token(ctxt, token_dict)
return token, user
return None, None
+
+
+def auth_factory(global_conf, **local_conf):
+ def auth(app):
+ return AuthMiddleware(app)
+ return auth
diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py
index ba35fbc78..0b239aab8 100644
--- a/nova/api/openstack/images.py
+++ b/nova/api/openstack/images.py
@@ -17,15 +17,14 @@
from webob import exc
+from nova import compute
from nova import flags
from nova import utils
from nova import wsgi
import nova.api.openstack
-import nova.image.service
-
from nova.api.openstack import common
from nova.api.openstack import faults
-
+import nova.image.service
FLAGS = flags.FLAGS
@@ -127,9 +126,11 @@ class Controller(wsgi.Controller):
raise faults.Fault(exc.HTTPNotFound())
def create(self, req):
- # Only public images are supported for now, so a request to
- # make a backup of a server cannot be supproted.
- raise faults.Fault(exc.HTTPNotFound())
+ context = req.environ['nova.context']
+ env = self._deserialize(req.body, req)
+ instance_id = env["image"]["serverId"]
+ name = env["image"]["name"]
+ return compute.API().snapshot(context, instance_id, name)
def update(self, req, id):
# Users may not modify public images, and that's all that
diff --git a/nova/api/openstack/ratelimiting/__init__.py b/nova/api/openstack/ratelimiting/__init__.py
index cbb4b897e..81b83142f 100644
--- a/nova/api/openstack/ratelimiting/__init__.py
+++ b/nova/api/openstack/ratelimiting/__init__.py
@@ -219,3 +219,9 @@ class WSGIAppProxy(object):
# No delay
return None
return float(resp.getheader('X-Wait-Seconds'))
+
+
+def ratelimit_factory(global_conf, **local_conf):
+ def rl(app):
+ return RateLimitingMiddleware(app)
+ return rl
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 845183258..ce64ac7ad 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -20,12 +20,12 @@ import traceback
from webob import exc
+from nova import compute
from nova import exception
from nova import wsgi
from nova.api.openstack import common
from nova.api.openstack import faults
from nova.auth import manager as auth_manager
-from nova.compute import api as compute_api
from nova.compute import instance_types
from nova.compute import power_state
import nova.api.openstack
@@ -51,7 +51,7 @@ def _translate_detail_keys(inst):
inst_dict = {}
mapped_keys = dict(status='state', imageId='image_id',
- flavorId='instance_type', name='display_name', id='internal_id')
+ flavorId='instance_type', name='display_name', id='id')
for k, v in mapped_keys.iteritems():
inst_dict[k] = inst[v]
@@ -67,7 +67,7 @@ def _translate_detail_keys(inst):
def _translate_keys(inst):
""" Coerces into dictionary format, excluding all model attributes
save for id and name """
- return dict(server=dict(id=inst['internal_id'], name=inst['display_name']))
+ return dict(server=dict(id=inst['id'], name=inst['display_name']))
class Controller(wsgi.Controller):
@@ -80,7 +80,7 @@ class Controller(wsgi.Controller):
"status", "progress"]}}}
def __init__(self):
- self.compute_api = compute_api.ComputeAPI()
+ self.compute_api = compute.API()
super(Controller, self).__init__()
def index(self, req):
@@ -96,8 +96,7 @@ class Controller(wsgi.Controller):
entity_maker - either _translate_detail_keys or _translate_keys
"""
- instance_list = self.compute_api.get_instances(
- req.environ['nova.context'])
+ instance_list = self.compute_api.get_all(req.environ['nova.context'])
limited_list = common.limited(instance_list, req)
res = [entity_maker(inst)['server'] for inst in limited_list]
return dict(servers=res)
@@ -105,8 +104,7 @@ class Controller(wsgi.Controller):
def show(self, req, id):
""" Returns server details by server id """
try:
- instance = self.compute_api.get_instance(
- req.environ['nova.context'], int(id))
+ instance = self.compute_api.get(req.environ['nova.context'], id)
return _translate_detail_keys(instance)
except exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
@@ -114,8 +112,7 @@ class Controller(wsgi.Controller):
def delete(self, req, id):
""" Destroys a server """
try:
- self.compute_api.delete_instance(req.environ['nova.context'],
- int(id))
+ self.compute_api.delete(req.environ['nova.context'], id)
except exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
return exc.HTTPAccepted()
@@ -128,12 +125,12 @@ class Controller(wsgi.Controller):
key_pair = auth_manager.AuthManager.get_key_pairs(
req.environ['nova.context'])[0]
- instances = self.compute_api.create_instances(
+ instances = self.compute_api.create(
req.environ['nova.context'],
instance_types.get_by_flavor_id(env['server']['flavorId']),
env['server']['imageId'],
display_name=env['server']['name'],
- description=env['server']['name'],
+ display_description=env['server']['name'],
key_name=key_pair['name'],
key_data=key_pair['public_key'])
return _translate_keys(instances[0])
@@ -151,10 +148,8 @@ class Controller(wsgi.Controller):
update_dict['display_name'] = inst_dict['server']['name']
try:
- ctxt = req.environ['nova.context']
- self.compute_api.update_instance(ctxt,
- id,
- **update_dict)
+ self.compute_api.update(req.environ['nova.context'], id,
+ **update_dict)
except exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
return exc.HTTPNoContent()
@@ -218,3 +213,13 @@ class Controller(wsgi.Controller):
logging.error(_("compute.api::resume %s"), readable)
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
+
+ def diagnostics(self, req, id):
+ """Permit Admins to retrieve server diagnostics."""
+ ctxt = req.environ["nova.context"]
+ return self.compute_api.get_diagnostics(ctxt, id)
+
+ def actions(self, req, id):
+ """Permit Admins to retrieve server actions."""
+ ctxt = req.environ["nova.context"]
+ return self.compute_api.get_actions(ctxt, id)
diff --git a/nova/compute/__init__.py b/nova/compute/__init__.py
index a5df2ec1a..b94f971d1 100644
--- a/nova/compute/__init__.py
+++ b/nova/compute/__init__.py
@@ -16,17 +16,4 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""
-:mod:`nova.compute` -- Compute Nodes using LibVirt
-=====================================================
-
-.. automodule:: nova.compute
- :platform: Unix
- :synopsis: Thin wrapper around libvirt for VM mgmt.
-.. moduleauthor:: Jesse Andrews <jesse@ansolabs.com>
-.. moduleauthor:: Devin Carlen <devin.carlen@gmail.com>
-.. moduleauthor:: Vishvananda Ishaya <vishvananda@yahoo.com>
-.. moduleauthor:: Joshua McKenty <joshua@cognition.ca>
-.. moduleauthor:: Manish Singh <yosh@gimp.org>
-.. moduleauthor:: Andy Smith <andy@anarkystic.com>
-"""
+from nova.compute.api import API
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 789514967..3ba91fe05 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -17,7 +17,7 @@
# under the License.
"""
-Handles all API requests relating to instances (guest vms).
+Handles all requests relating to instances (guest vms).
"""
import datetime
@@ -27,36 +27,41 @@ import time
from nova import db
from nova import exception
from nova import flags
+from nova import network
from nova import quota
from nova import rpc
from nova import utils
+from nova import volume
from nova.compute import instance_types
from nova.db import base
FLAGS = flags.FLAGS
-def generate_default_hostname(internal_id):
+def generate_default_hostname(instance_id):
"""Default function to generate a hostname given an instance reference."""
- return str(internal_id)
+ return str(instance_id)
-class ComputeAPI(base.Base):
+class API(base.Base):
"""API for interacting with the compute manager."""
- def __init__(self, network_manager=None, image_service=None, **kwargs):
- if not network_manager:
- network_manager = utils.import_object(FLAGS.network_manager)
- self.network_manager = network_manager
+ def __init__(self, image_service=None, network_api=None, volume_api=None,
+ **kwargs):
if not image_service:
image_service = utils.import_object(FLAGS.image_service)
self.image_service = image_service
- super(ComputeAPI, self).__init__(**kwargs)
+ if not network_api:
+ network_api = network.API()
+ self.network_api = network_api
+ if not volume_api:
+ volume_api = volume.API()
+ self.volume_api = volume_api
+ super(API, self).__init__(**kwargs)
def get_network_topic(self, context, instance_id):
try:
- instance = self.db.instance_get_by_internal_id(context,
- instance_id)
+ instance = self.get(context, instance_id)
except exception.NotFound as e:
logging.warning("Instance %d was not found in get_network_topic",
instance_id)
@@ -70,18 +75,18 @@ class ComputeAPI(base.Base):
topic,
{"method": "get_network_topic", "args": {'fake': 1}})
- def create_instances(self, context, instance_type, image_id, min_count=1,
- max_count=1, kernel_id=None, ramdisk_id=None,
- display_name='', description='', key_name=None,
- key_data=None, security_group='default',
- user_data=None,
- generate_hostname=generate_default_hostname,
- availability_zone=None):
- """Create the number of instances requested if quote and
+ def create(self, context, instance_type,
+ image_id, kernel_id=None, ramdisk_id=None,
+ min_count=1, max_count=1,
+ display_name='', display_description='',
+ key_name=None, key_data=None, security_group='default',
+ availability_zone=None, user_data=None,
+ generate_hostname=generate_default_hostname):
+ """Create the number of instances requested if quota and
other arguments check out ok."""
- num_instances = quota.allowed_instances(context, max_count,
- instance_type)
+ type_data = instance_types.INSTANCE_TYPES[instance_type]
+ num_instances = quota.allowed_instances(context, max_count, type_data)
if num_instances < min_count:
logging.warn("Quota exceeeded for %s, tried to run %s instances",
context.project_id, min_count)
@@ -96,7 +101,7 @@ class ComputeAPI(base.Base):
kernel_id = image.get('kernelId', None)
if ramdisk_id is None:
ramdisk_id = image.get('ramdiskId', None)
- #No kernel and ramdisk for raw images
+ # No kernel and ramdisk for raw images
if kernel_id == str(FLAGS.null_kernel):
kernel_id = None
ramdisk_id = None
@@ -124,7 +129,6 @@ class ComputeAPI(base.Base):
key_pair = db.key_pair_get(context, context.user_id, key_name)
key_data = key_pair['public_key']
- type_data = instance_types.INSTANCE_TYPES[instance_type]
base_options = {
'reservation_id': utils.generate_uid('r'),
'image_id': image_id,
@@ -139,10 +143,11 @@ class ComputeAPI(base.Base):
'vcpus': type_data['vcpus'],
'local_gb': type_data['local_gb'],
'display_name': display_name,
- 'display_description': description,
+ 'display_description': display_description,
'user_data': user_data or '',
'key_name': key_name,
- 'key_data': key_data}
+ 'key_data': key_data,
+ 'availability_zone': availability_zone}
elevated = context.elevated()
instances = []
@@ -153,7 +158,6 @@ class ComputeAPI(base.Base):
**base_options)
instance = self.db.instance_create(context, instance)
instance_id = instance['id']
- internal_id = instance['internal_id']
elevated = context.elevated()
if not security_groups:
@@ -164,11 +168,11 @@ class ComputeAPI(base.Base):
security_group_id)
# Set sane defaults if not specified
- updates = dict(hostname=generate_hostname(internal_id))
+ updates = dict(hostname=generate_hostname(instance_id))
if 'display_name' not in instance:
- updates['display_name'] = "Server %s" % internal_id
+ updates['display_name'] = "Server %s" % instance_id
- instance = self.update_instance(context, instance_id, **updates)
+ instance = self.update(context, instance_id, **updates)
instances.append(instance)
logging.debug(_("Casting to scheduler for %s/%s's instance %s"),
@@ -199,7 +203,7 @@ class ComputeAPI(base.Base):
'project_id': context.project_id}
db.security_group_create(context, values)
- def update_instance(self, context, instance_id, **kwargs):
+ def update(self, context, instance_id, **kwargs):
"""Updates the instance in the datastore.
:param context: The security context
@@ -213,111 +217,172 @@ class ComputeAPI(base.Base):
"""
return self.db.instance_update(context, instance_id, kwargs)
- def delete_instance(self, context, instance_id):
- logging.debug("Going to try and terminate %d" % instance_id)
+ def delete(self, context, instance_id):
+ logging.debug("Going to try and terminate %s" % instance_id)
try:
- instance = self.db.instance_get_by_internal_id(context,
- instance_id)
+ instance = self.get(context, instance_id)
except exception.NotFound as e:
- logging.warning(_("Instance %d was not found during terminate"),
+ logging.warning(_("Instance %s was not found during terminate"),
instance_id)
raise e
if (instance['state_description'] == 'terminating'):
- logging.warning(_("Instance %d is already being terminated"),
+ logging.warning(_("Instance %s is already being terminated"),
instance_id)
return
- self.update_instance(context,
- instance['id'],
- state_description='terminating',
- state=0,
- terminated_at=datetime.datetime.utcnow())
+ self.update(context,
+ instance['id'],
+ state_description='terminating',
+ state=0,
+ terminated_at=datetime.datetime.utcnow())
host = instance['host']
if host:
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "terminate_instance",
- "args": {"instance_id": instance['id']}})
+ "args": {"instance_id": instance_id}})
else:
- self.db.instance_destroy(context, instance['id'])
-
- def get_instances(self, context, project_id=None):
- """Get all instances, possibly filtered by project ID or
- user ID. If there is no filter and the context is an admin,
- it will retreive all instances in the system."""
+ self.db.instance_destroy(context, instance_id)
+
+ def get(self, context, instance_id):
+ """Get a single instance with the given ID."""
+ return self.db.instance_get_by_id(context, instance_id)
+
+ def get_all(self, context, project_id=None, reservation_id=None,
+ fixed_ip=None):
+ """Get all instances, possibly filtered by one of the
+ given parameters. If there is no filter and the context is
+ an admin, it will retreive all instances in the system."""
+ if reservation_id is not None:
+ return self.db.instance_get_all_by_reservation(context,
+ reservation_id)
+ if fixed_ip is not None:
+ return self.db.fixed_ip_get_instance(context, fixed_ip)
if project_id or not context.is_admin:
if not context.project:
return self.db.instance_get_all_by_user(context,
context.user_id)
if project_id is None:
project_id = context.project_id
- return self.db.instance_get_all_by_project(context, project_id)
+ return self.db.instance_get_all_by_project(context,
+ project_id)
return self.db.instance_get_all(context)
- def get_instance(self, context, instance_id):
- return self.db.instance_get_by_internal_id(context, instance_id)
+ def snapshot(self, context, instance_id, name):
+ """Snapshot the given instance."""
+ instance = self.get(context, instance_id)
+ host = instance['host']
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "snapshot_instance",
+ "args": {"instance_id": instance_id, "name": name}})
def reboot(self, context, instance_id):
"""Reboot the given instance."""
- instance = self.db.instance_get_by_internal_id(context, instance_id)
+ instance = self.get(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "reboot_instance",
- "args": {"instance_id": instance['id']}})
+ "args": {"instance_id": instance_id}})
def pause(self, context, instance_id):
"""Pause the given instance."""
- instance = self.db.instance_get_by_internal_id(context, instance_id)
+ instance = self.get(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "pause_instance",
- "args": {"instance_id": instance['id']}})
+ "args": {"instance_id": instance_id}})
def unpause(self, context, instance_id):
"""Unpause the given instance."""
- instance = self.db.instance_get_by_internal_id(context, instance_id)
+ instance = self.get(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "unpause_instance",
- "args": {"instance_id": instance['id']}})
+ "args": {"instance_id": instance_id}})
+
+ def get_diagnostics(self, context, instance_id):
+ """Retrieve diagnostics for the given instance."""
+ instance = self.get(context, instance_id)
+ host = instance["host"]
+ return rpc.call(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "get_diagnostics",
+ "args": {"instance_id": instance_id}})
+
+ def get_actions(self, context, instance_id):
+ """Retrieve actions for the given instance."""
+ return self.db.instance_get_actions(context, instance_id)
def suspend(self, context, instance_id):
"""suspend the instance with instance_id"""
- instance = self.db.instance_get_by_internal_id(context, instance_id)
+ instance = self.get(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "suspend_instance",
- "args": {"instance_id": instance['id']}})
+ "args": {"instance_id": instance_id}})
def resume(self, context, instance_id):
"""resume the instance with instance_id"""
- instance = self.db.instance_get_by_internal_id(context, instance_id)
+ instance = self.get(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "resume_instance",
- "args": {"instance_id": instance['id']}})
+ "args": {"instance_id": instance_id}})
def rescue(self, context, instance_id):
"""Rescue the given instance."""
- instance = self.db.instance_get_by_internal_id(context, instance_id)
+ instance = self.get(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "rescue_instance",
- "args": {"instance_id": instance['id']}})
+ "args": {"instance_id": instance_id}})
def unrescue(self, context, instance_id):
"""Unrescue the given instance."""
- instance = self.db.instance_get_by_internal_id(context, instance_id)
+ instance = self.get(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "unrescue_instance",
- "args": {"instance_id": instance['id']}})
+ "args": {"instance_id": instance_id}})
+
+ def attach_volume(self, context, instance_id, volume_id, device):
+ if not re.match("^/dev/[a-z]d[a-z]+$", device):
+ raise exception.ApiError(_("Invalid device specified: %s. "
+ "Example device: /dev/vdb") % device)
+ self.volume_api.check_attach(context, volume_id)
+ instance = self.get(context, instance_id)
+ host = instance['host']
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "attach_volume",
+ "args": {"volume_id": volume_id,
+ "instance_id": instance_id,
+ "mountpoint": device}})
+
+ def detach_volume(self, context, volume_id):
+ instance = self.db.volume_get_instance(context.elevated(), volume_id)
+ if not instance:
+ raise exception.ApiError(_("Volume isn't attached to anything!"))
+ self.volume_api.check_detach(context, volume_id)
+ host = instance['host']
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "detach_volume",
+ "args": {"instance_id": instance['id'],
+ "volume_id": volume_id}})
+ return instance
+
+ def associate_floating_ip(self, context, instance_id, address):
+ instance = self.get(context, instance_id)
+ self.network_api.associate_floating_ip(context, address,
+ instance['fixed_ip'])
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 70b175e7c..ca6065890 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -211,7 +211,7 @@ class ComputeManager(manager.Manager):
if instance_ref['state'] != power_state.RUNNING:
logging.warn(_('trying to reboot a non-running '
'instance: %s (state: %s excepted: %s)'),
- instance_ref['internal_id'],
+ instance_id,
instance_ref['state'],
power_state.RUNNING)
@@ -225,13 +225,33 @@ class ComputeManager(manager.Manager):
self._update_state(context, instance_id)
@exception.wrap_exception
+ def snapshot_instance(self, context, instance_id, name):
+ """Snapshot an instance on this server."""
+ context = context.elevated()
+ instance_ref = self.db.instance_get(context, instance_id)
+
+ #NOTE(sirp): update_state currently only refreshes the state field
+ # if we add is_snapshotting, we will need this refreshed too,
+ # potentially?
+ self._update_state(context, instance_id)
+
+ logging.debug(_('instance %s: snapshotting'), instance_ref['name'])
+ if instance_ref['state'] != power_state.RUNNING:
+ logging.warn(_('trying to snapshot a non-running '
+ 'instance: %s (state: %s expected: %s)'),
+ instance_id,
+ instance_ref['state'],
+ power_state.RUNNING)
+
+ self.driver.snapshot(instance_ref, name)
+
+ @exception.wrap_exception
def rescue_instance(self, context, instance_id):
"""Rescue an instance on this server."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
- logging.debug(_('instance %s: rescuing'),
- instance_ref['internal_id'])
+ logging.debug(_('instance %s: rescuing'), instance_id)
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
@@ -246,8 +266,7 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
- logging.debug(_('instance %s: unrescuing'),
- instance_ref['internal_id'])
+ logging.debug(_('instance %s: unrescuing'), instance_id)
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
@@ -266,8 +285,7 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
- logging.debug('instance %s: pausing',
- instance_ref['internal_id'])
+ logging.debug('instance %s: pausing', instance_id)
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
@@ -284,8 +302,7 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
- logging.debug('instance %s: unpausing',
- instance_ref['internal_id'])
+ logging.debug('instance %s: unpausing', instance_id)
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
@@ -297,13 +314,22 @@ class ComputeManager(manager.Manager):
result))
@exception.wrap_exception
+ def get_diagnostics(self, context, instance_id):
+ """Retrieve diagnostics for an instance on this server."""
+ instance_ref = self.db.instance_get(context, instance_id)
+
+ if instance_ref["state"] == power_state.RUNNING:
+ logging.debug(_("instance %s: retrieving diagnostics"),
+ instance_id)
+ return self.driver.get_diagnostics(instance_ref)
+
+ @exception.wrap_exception
def suspend_instance(self, context, instance_id):
"""suspend the instance with instance_id"""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
- logging.debug(_('instance %s: suspending'),
- instance_ref['internal_id'])
+ logging.debug(_('instance %s: suspending'), instance_id)
self.db.instance_set_state(context, instance_id,
power_state.NOSTATE,
'suspending')
@@ -319,7 +345,7 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
- logging.debug(_('instance %s: resuming'), instance_ref['internal_id'])
+ logging.debug(_('instance %s: resuming'), instance_id)
self.db.instance_set_state(context, instance_id,
power_state.NOSTATE,
'resuming')
diff --git a/nova/db/api.py b/nova/db/api.py
index c0cab1068..ee4c521a0 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -27,6 +27,9 @@ The underlying driver is loaded as a :class:`LazyPluggable`.
:sql_connection: string specifying the sqlalchemy connection to use, like:
`sqlite:///var/lib/nova/nova.sqlite`.
+
+:enable_new_services: when adding a new service to the database, is it in the
+ pool of available hardware (Default: True)
"""
from nova import exception
@@ -37,6 +40,8 @@ from nova import utils
FLAGS = flags.FLAGS
flags.DEFINE_string('db_backend', 'sqlalchemy',
'The backend to use for db')
+flags.DEFINE_boolean('enable_new_services', True,
+ 'Services to be added to the available pool on create')
IMPL = utils.LazyPluggable(FLAGS['db_backend'],
@@ -76,13 +81,21 @@ def service_get(context, service_id):
return IMPL.service_get(context, service_id)
-def service_get_all_by_topic(context, topic, disabled=False):
- """Get all compute services for a given topic."""
- return IMPL.service_get_all_by_topic(context, topic, disabled)
+def service_get_all(context, disabled=False):
+ """Get all services."""
+ return IMPL.service_get_all(context, disabled)
+
+
+def service_get_all_by_topic(context, topic):
+ """Get all services for a given topic."""
+ return IMPL.service_get_all_by_topic(context, topic)
+
+
+def service_get_all_by_host(context, host):
+ """Get all services for a given host."""
+ return IMPL.service_get_all_by_host(context, host)
+
-def service_get_all_compute_by_host(context, host):
- """Get all compute service for a given host"""
- return IMPL.service_get_all_compute_by_host(context, host)
def service_get_all_compute_sorted(context):
"""Get all compute services sorted by instance count.
@@ -351,9 +364,9 @@ def instance_get_project_vpn(context, project_id):
return IMPL.instance_get_project_vpn(context, project_id)
-def instance_get_by_internal_id(context, internal_id):
- """Get an instance by internal id."""
- return IMPL.instance_get_by_internal_id(context, internal_id)
+def instance_get_by_id(context, instance_id):
+ """Get an instance by id."""
+ return IMPL.instance_get_by_id(context, instance_id)
def instance_is_vpn(context, instance_id):
@@ -386,6 +399,11 @@ def instance_action_create(context, values):
return IMPL.instance_action_create(context, values)
+def instance_get_actions(context, instance_id):
+ """Get instance actions by instance id."""
+ return IMPL.instance_get_actions(context, instance_id)
+
+
###################
@@ -712,7 +730,7 @@ def security_group_get_all(context):
def security_group_get(context, security_group_id):
- """Get security group by its internal id."""
+ """Get security group by its id."""
return IMPL.security_group_get(context, security_group_id)
diff --git a/nova/db/sqlalchemy/__init__.py b/nova/db/sqlalchemy/__init__.py
index 3288ebd20..22aa1cfe6 100644
--- a/nova/db/sqlalchemy/__init__.py
+++ b/nova/db/sqlalchemy/__init__.py
@@ -19,6 +19,25 @@
"""
SQLAlchemy database backend
"""
+import logging
+import time
+
+from sqlalchemy.exc import OperationalError
+
+from nova import flags
from nova.db.sqlalchemy import models
-models.register_models()
+
+FLAGS = flags.FLAGS
+
+
+for i in xrange(FLAGS.sql_max_retries):
+ if i > 0:
+ time.sleep(FLAGS.sql_retry_interval)
+
+ try:
+ models.register_models()
+ break
+ except OperationalError:
+ logging.exception(_("Data store is unreachable."
+ " Trying again in %d seconds.") % FLAGS.sql_retry_interval)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 55c3c5594..aa0306eb4 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -19,7 +19,6 @@
Implementation of SQLAlchemy backend.
"""
-import random
import warnings
from nova import db
@@ -136,16 +135,33 @@ def service_get(context, service_id, session=None):
@require_admin_context
-def service_get_all_by_topic(context, topic, disabled=False):
+def service_get_all(context, disabled=False):
session = get_session()
return session.query(models.Service).\
filter_by(deleted=False).\
filter_by(disabled=disabled).\
+ all()
+
+@require_admin_context
+def service_get_all_by_topic(context, topic):
+ session = get_session()
+ return session.query(models.Service).\
+ filter_by(deleted=False).\
+ filter_by(disabled=False).\
filter_by(topic=topic).\
all()
@require_admin_context
+def service_get_all_by_host(context, host):
+ session = get_session()
+ return session.query(models.Service).\
+ filter_by(deleted=False).\
+ filter_by(host=host).\
+ all()
+
+
+@require_admin_context
def _service_get_all_topic_subquery(context, session, topic, subq, label):
sort_value = getattr(subq.c, label)
return session.query(models.Service, func.coalesce(sort_value, 0)).\
@@ -156,16 +172,6 @@ def _service_get_all_topic_subquery(context, session, topic, subq, label):
order_by(sort_value).\
all()
-@require_admin_context
-def service_get_all_compute_by_host(context, host):
- session = get_session()
- topic = 'compute'
- return session.query(models.Service).\
- filter_by(host=host).\
- filter_by(deleted=False).\
- filter_by(topic=topic).\
- all()
-
@require_admin_context
def service_get_all_compute_sorted(context):
@@ -246,6 +252,8 @@ def service_get_by_args(context, host, binary):
def service_create(context, values):
service_ref = models.Service()
service_ref.update(values)
+ if not FLAGS.enable_new_services:
+ service_ref.disabled = True
service_ref.save()
return service_ref
@@ -614,30 +622,18 @@ def fixed_ip_update(context, address, values):
###################
-#TODO(gundlach): instance_create and volume_create are nearly identical
-#and should be refactored. I expect there are other copy-and-paste
-#functions between the two of them as well.
-
-
@require_context
def instance_create(context, values):
"""Create a new Instance record in the database.
context - request context object
values - dict containing column values.
- 'internal_id' is auto-generated and should not be specified.
"""
instance_ref = models.Instance()
instance_ref.update(values)
session = get_session()
with session.begin():
- while instance_ref.internal_id == None:
- # Instances have integer internal ids.
- internal_id = random.randint(0, 2 ** 31 - 1)
- if not instance_internal_id_exists(context, internal_id,
- session=session):
- instance_ref.internal_id = internal_id
instance_ref.save(session=session)
return instance_ref
@@ -759,38 +755,29 @@ def instance_get_project_vpn(context, project_id):
@require_context
-def instance_get_by_internal_id(context, internal_id):
+def instance_get_by_id(context, instance_id):
session = get_session()
if is_admin_context(context):
result = session.query(models.Instance).\
options(joinedload('security_groups')).\
- filter_by(internal_id=internal_id).\
+ filter_by(id=instance_id).\
filter_by(deleted=can_read_deleted(context)).\
first()
elif is_user_context(context):
result = session.query(models.Instance).\
options(joinedload('security_groups')).\
filter_by(project_id=context.project_id).\
- filter_by(internal_id=internal_id).\
+ filter_by(id=instance_id).\
filter_by(deleted=False).\
first()
if not result:
- raise exception.NotFound(_('Instance %s not found') % (internal_id))
+ raise exception.NotFound(_('Instance %s not found') % (instance_id))
return result
@require_context
-def instance_internal_id_exists(context, internal_id, session=None):
- if not session:
- session = get_session()
- return session.query(exists().\
- where(models.Instance.internal_id == internal_id)).\
- one()[0]
-
-
-@require_context
def instance_get_fixed_address(context, instance_id):
session = get_session()
with session.begin():
@@ -866,6 +853,18 @@ def instance_action_create(context, values):
return action_ref
+@require_admin_context
+def instance_get_actions(context, instance_id):
+ """Return the actions associated to the given instance id"""
+ session = get_session()
+ actions = {}
+ for action in session.query(models.InstanceActions).\
+ filter_by(instance_id=instance_id).\
+ all():
+ actions[action.action] = action.error
+ return actions
+
+
###################
@@ -1313,10 +1312,6 @@ def volume_create(context, values):
session = get_session()
with session.begin():
- while volume_ref.ec2_id == None:
- ec2_id = utils.generate_uid('vol')
- if not volume_ec2_id_exists(context, ec2_id, session=session):
- volume_ref.ec2_id = ec2_id
volume_ref.save(session=session)
return volume_ref
@@ -1414,41 +1409,6 @@ def volume_get_all_by_project(context, project_id):
all()
-@require_context
-def volume_get_by_ec2_id(context, ec2_id):
- session = get_session()
- result = None
-
- if is_admin_context(context):
- result = session.query(models.Volume).\
- filter_by(ec2_id=ec2_id).\
- filter_by(deleted=can_read_deleted(context)).\
- first()
- elif is_user_context(context):
- result = session.query(models.Volume).\
- filter_by(project_id=context.project_id).\
- filter_by(ec2_id=ec2_id).\
- filter_by(deleted=False).\
- first()
- else:
- raise exception.NotAuthorized()
-
- if not result:
- raise exception.NotFound(_('Volume %s not found') % ec2_id)
-
- return result
-
-
-@require_context
-def volume_ec2_id_exists(context, ec2_id, session=None):
- if not session:
- session = get_session()
-
- return session.query(exists().\
- where(models.Volume.id == ec2_id)).\
- one()[0]
-
-
@require_admin_context
def volume_get_instance(context, volume_id):
session = get_session()
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 5a020a469..1ffb9298f 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -22,7 +22,7 @@ SQLAlchemy models for nova data.
import datetime
from sqlalchemy.orm import relationship, backref, object_mapper
-from sqlalchemy import Column, Integer, Float, String, schema
+from sqlalchemy import Column, Integer, String, schema
from sqlalchemy import ForeignKey, DateTime, Boolean, Text
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.declarative import declarative_base
@@ -151,7 +151,6 @@ class Service(BASE, NovaBase):
disabled = Column(Boolean, default=False)
availability_zone = Column(String(255), default='nova')
-
class Certificate(BASE, NovaBase):
"""Represents a an x509 certificate"""
__tablename__ = 'certificates'
@@ -165,11 +164,13 @@ class Certificate(BASE, NovaBase):
class Instance(BASE, NovaBase):
"""Represents a guest vm."""
__tablename__ = 'instances'
- id = Column(Integer, primary_key=True)
- internal_id = Column(Integer, unique=True)
+ id = Column(Integer, primary_key=True, autoincrement=True)
- admin_pass = Column(String(255))
+ @property
+ def name(self):
+ return "instance-%08x" % self.id
+ admin_pass = Column(String(255))
user_id = Column(String(255))
project_id = Column(String(255))
@@ -181,10 +182,6 @@ class Instance(BASE, NovaBase):
def project(self):
return auth.manager.AuthManager().get_project(self.project_id)
- @property
- def name(self):
- return "instance-%d" % self.internal_id
-
image_id = Column(String(255))
kernel_id = Column(String(255))
ramdisk_id = Column(String(255))
@@ -221,6 +218,8 @@ class Instance(BASE, NovaBase):
launched_at = Column(DateTime)
terminated_at = Column(DateTime)
+ availability_zone = Column(String(255))
+
# User editable field for display in user-facing UIs
display_name = Column(String(255))
display_description = Column(String(255))
@@ -237,21 +236,6 @@ class Instance(BASE, NovaBase):
# 'shutdown', 'shutoff', 'crashed'])
-class InstanceDiagnostics(BASE, NovaBase):
- """Represents a guest VM's diagnostics"""
- __tablename__ = "instance_diagnostics"
- id = Column(Integer, primary_key=True)
- instance_id = Column(Integer, ForeignKey('instances.id'))
-
- memory_available = Column(Float)
- memory_free = Column(Float)
- cpu_load = Column(Float)
- disk_read = Column(Float)
- disk_write = Column(Float)
- net_tx = Column(Float)
- net_rx = Column(Float)
-
-
class InstanceActions(BASE, NovaBase):
"""Represents a guest VM's actions and results"""
__tablename__ = "instance_actions"
@@ -265,8 +249,11 @@ class InstanceActions(BASE, NovaBase):
class Volume(BASE, NovaBase):
"""Represents a block storage device that can be attached to a vm."""
__tablename__ = 'volumes'
- id = Column(Integer, primary_key=True)
- ec2_id = Column(String(12), unique=True)
+ id = Column(Integer, primary_key=True, autoincrement=True)
+
+ @property
+ def name(self):
+ return "volume-%08x" % self.id
user_id = Column(String(255))
project_id = Column(String(255))
@@ -292,10 +279,6 @@ class Volume(BASE, NovaBase):
display_name = Column(String(255))
display_description = Column(String(255))
- @property
- def name(self):
- return self.ec2_id
-
class Quota(BASE, NovaBase):
"""Represents quota overrides for a project."""
@@ -453,7 +436,7 @@ class AuthToken(BASE, NovaBase):
"""
__tablename__ = 'auth_tokens'
token_hash = Column(String(255), primary_key=True)
- user_id = Column(Integer)
+ user_id = Column(String(255))
server_manageent_url = Column(String(255))
storage_url = Column(String(255))
cdn_management_url = Column(String(255))
@@ -559,10 +542,11 @@ def register_models():
"""Register Models and create metadata.
Called from nova.db.sqlalchemy.__init__ as part of loading the driver,
- it will never need to be called explicitly elsewhere.
+ it will never need to be called explicitly elsewhere unless the
+ connection is lost and needs to be reestablished.
"""
from sqlalchemy import create_engine
- models = (Service, Instance, InstanceDiagnostics, InstanceActions,
+ models = (Service, Instance, InstanceActions,
Volume, ExportDevice, IscsiTarget, FixedIp, FloatingIp,
Network, SecurityGroup, SecurityGroupIngressRule,
SecurityGroupInstanceAssociation, AuthToken, User,
diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py
index e0d84c107..c3876c02a 100644
--- a/nova/db/sqlalchemy/session.py
+++ b/nova/db/sqlalchemy/session.py
@@ -36,7 +36,9 @@ def get_session(autocommit=True, expire_on_commit=False):
global _MAKER
if not _MAKER:
if not _ENGINE:
- _ENGINE = create_engine(FLAGS.sql_connection, echo=False)
+ _ENGINE = create_engine(FLAGS.sql_connection,
+ pool_recycle=FLAGS.sql_idle_timeout,
+ echo=False)
_MAKER = (sessionmaker(bind=_ENGINE,
autocommit=autocommit,
expire_on_commit=expire_on_commit))
diff --git a/nova/flags.py b/nova/flags.py
index b157c9e5d..4e71d2152 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -212,6 +212,8 @@ DEFINE_list('region_list',
DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake')
DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID')
DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key')
+DEFINE_integer('glance_port', 9292, 'glance port')
+DEFINE_string('glance_host', utils.get_my_ip(), 'glance host')
DEFINE_integer('s3_port', 3333, 's3 port')
DEFINE_string('s3_host', utils.get_my_ip(), 's3 host (for infrastructure)')
DEFINE_string('s3_dmz', utils.get_my_ip(), 's3 dmz ip (for instances)')
@@ -239,6 +241,7 @@ DEFINE_string('cc_dmz', utils.get_my_ip(), 'internal ip of api server')
DEFINE_integer('cc_port', 8773, 'cloud controller port')
DEFINE_string('ec2_suffix', '/services/Cloud', 'suffix for ec2')
+DEFINE_string('default_project', 'openstack', 'default project for openstack')
DEFINE_string('default_image', 'ami-11111',
'default image to use, testing only')
DEFINE_string('default_instance_type', 'm1.small',
@@ -260,6 +263,11 @@ DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'),
DEFINE_string('sql_connection',
'sqlite:///$state_path/nova.sqlite',
'connection string for sql database')
+DEFINE_integer('sql_idle_timeout',
+ 3600,
+ 'timeout for idle sql database connections')
+DEFINE_integer('sql_max_retries', 12, 'sql connection attempts')
+DEFINE_integer('sql_retry_interval', 10, 'sql connection retry interval')
DEFINE_string('compute_manager', 'nova.compute.manager.ComputeManager',
'Manager for compute')
diff --git a/nova/network/__init__.py b/nova/network/__init__.py
index dcc54db09..6eb3e3ef6 100644
--- a/nova/network/__init__.py
+++ b/nova/network/__init__.py
@@ -16,17 +16,4 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""
-:mod:`nova.network` -- Network Nodes
-=====================================================
-
-.. automodule:: nova.network
- :platform: Unix
- :synopsis: Network is responsible for managing networking
-.. moduleauthor:: Jesse Andrews <jesse@ansolabs.com>
-.. moduleauthor:: Devin Carlen <devin.carlen@gmail.com>
-.. moduleauthor:: Vishvananda Ishaya <vishvananda@yahoo.com>
-.. moduleauthor:: Joshua McKenty <joshua@cognition.ca>
-.. moduleauthor:: Manish Singh <yosh@gimp.org>
-.. moduleauthor:: Andy Smith <andy@anarkystic.com>
-"""
+from nova.network.api import API
diff --git a/nova/network/api.py b/nova/network/api.py
new file mode 100644
index 000000000..cbd912047
--- /dev/null
+++ b/nova/network/api.py
@@ -0,0 +1,87 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Handles all requests relating to network resources (e.g. floating IPs).
+"""
+
+import logging
+
+from nova import db
+from nova import exception
+from nova import flags
+from nova import quota
+from nova import rpc
+from nova.db import base
+
+FLAGS = flags.FLAGS
+
+
+class API(base.Base):
+ """API for interacting with the network manager."""
+
+ def allocate_floating_ip(self, context):
+ if quota.allowed_floating_ips(context, 1) < 1:
+ logging.warn(_("Quota exceeded for %s, tried to allocate "
+ "address"),
+ context.project_id)
+ raise quota.QuotaError(_("Address quota exceeded. You cannot "
+ "allocate any more addresses"))
+ # NOTE(vish): We don't know which network host should get the ip
+ # when we allocate, so just send it to any one. This
+ # will probably need to move into a network supervisor
+ # at some point.
+ return rpc.call(context,
+ FLAGS.network_topic,
+ {"method": "allocate_floating_ip",
+ "args": {"project_id": context.project_id}})
+
+ def release_floating_ip(self, context, address):
+ floating_ip = self.db.floating_ip_get_by_address(context, address)
+ # NOTE(vish): We don't know which network host should get the ip
+ # when we deallocate, so just send it to any one. This
+ # will probably need to move into a network supervisor
+ # at some point.
+ rpc.cast(context,
+ FLAGS.network_topic,
+ {"method": "deallocate_floating_ip",
+ "args": {"floating_address": floating_ip['address']}})
+
+ def associate_floating_ip(self, context, floating_ip, fixed_ip):
+ if isinstance(fixed_ip, str) or isinstance(fixed_ip, unicode):
+ fixed_ip = self.db.fixed_ip_get_by_address(context, fixed_ip)
+ floating_ip = self.db.floating_ip_get_by_address(context, floating_ip)
+ # NOTE(vish): Perhaps we should just pass this on to compute and
+ # let compute communicate with network.
+ host = fixed_ip['network']['host']
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.network_topic, host),
+ {"method": "associate_floating_ip",
+ "args": {"floating_address": floating_ip['address'],
+ "fixed_address": fixed_ip['address']}})
+
+ def disassociate_floating_ip(self, context, address):
+ floating_ip = self.db.floating_ip_get_by_address(context, address)
+ if not floating_ip.get('fixed_ip'):
+ raise exception.ApiError('Address is not associated.')
+ # NOTE(vish): Get the topic from the host name of the network of
+ # the associated fixed ip.
+ host = floating_ip['fixed_ip']['network']['host']
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.network_topic, host),
+ {"method": "disassociate_floating_ip",
+ "args": {"floating_address": floating_ip['address']}})
diff --git a/nova/quota.py b/nova/quota.py
index f6ca9f77c..3884eb308 100644
--- a/nova/quota.py
+++ b/nova/quota.py
@@ -22,7 +22,6 @@ Quotas for instances, volumes, and floating ips
from nova import db
from nova import exception
from nova import flags
-from nova.compute import instance_types
FLAGS = flags.FLAGS
@@ -63,10 +62,9 @@ def allowed_instances(context, num_instances, instance_type):
quota = get_quota(context, project_id)
allowed_instances = quota['instances'] - used_instances
allowed_cores = quota['cores'] - used_cores
- type_cores = instance_types.INSTANCE_TYPES[instance_type]['vcpus']
- num_cores = num_instances * type_cores
+ num_cores = num_instances * instance_type['vcpus']
allowed_instances = min(allowed_instances,
- int(allowed_cores // type_cores))
+ int(allowed_cores // instance_type['vcpus']))
return min(num_instances, allowed_instances)
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index 08d7033f5..66e46c1b9 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -37,6 +37,11 @@ class NoValidHost(exception.Error):
pass
+class WillNotSchedule(exception.Error):
+ """The specified host is not up or doesn't exist."""
+ pass
+
+
class Scheduler(object):
"""The base class that all Scheduler clases should inherit from."""
diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py
index f9171ab35..47baf0d73 100644
--- a/nova/scheduler/simple.py
+++ b/nova/scheduler/simple.py
@@ -43,6 +43,19 @@ class SimpleScheduler(chance.ChanceScheduler):
def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
"""Picks a host that is up and has the fewest running instances."""
instance_ref = db.instance_get(context, instance_id)
+ if instance_ref['availability_zone'] and context.is_admin:
+ zone, _x, host = instance_ref['availability_zone'].partition(':')
+ service = db.service_get_by_args(context.elevated(), host,
+ 'nova-compute')
+ if not self.service_is_up(service):
+ raise driver.WillNotSchedule("Host %s is not alive" % host)
+
+ # TODO(vish): this probably belongs in the manager, if we
+ # can generalize this somehow
+ now = datetime.datetime.utcnow()
+ db.instance_update(context, instance_id, {'host': host,
+ 'scheduled_at': now})
+ return host
results = db.service_get_all_compute_sorted(context)
for result in results:
(service, instance_cores) = result
@@ -62,6 +75,19 @@ class SimpleScheduler(chance.ChanceScheduler):
def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
"""Picks a host that is up and has the fewest volumes."""
volume_ref = db.volume_get(context, volume_id)
+ if (':' in volume_ref['availability_zone']) and context.is_admin:
+ zone, _x, host = volume_ref['availability_zone'].partition(':')
+ service = db.service_get_by_args(context.elevated(), host,
+ 'nova-volume')
+ if not self.service_is_up(service):
+ raise driver.WillNotSchedule("Host %s not available" % host)
+
+ # TODO(vish): this probably belongs in the manager, if we
+ # can generalize this somehow
+ now = datetime.datetime.utcnow()
+ db.volume_update(context, volume_id, {'host': host,
+ 'scheduled_at': now})
+ return host
results = db.service_get_all_volume_sorted(context)
for result in results:
(service, volume_gigabytes) = result
diff --git a/nova/service.py b/nova/service.py
index a612ac592..d4a6f3839 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -24,17 +24,21 @@ import inspect
import logging
import os
import sys
+import time
from eventlet import event
from eventlet import greenthread
from eventlet import greenpool
+from sqlalchemy.exc import OperationalError
+
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import rpc
from nova import utils
+from nova.db.sqlalchemy import models
FLAGS = flags.FLAGS
@@ -205,6 +209,14 @@ class Service(object):
self.model_disconnected = True
logging.exception(_("model server went away"))
+ try:
+ models.register_models()
+ except OperationalError:
+ logging.exception(_("Data store is unreachable."
+ " Trying again in %d seconds.") %
+ FLAGS.sql_retry_interval)
+ time.sleep(FLAGS.sql_retry_interval)
+
def serve(*services):
argv = FLAGS(sys.argv)
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index 79663e43a..291a0e468 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -110,6 +110,12 @@ def stub_out_networking(stubs):
stubs.Set(nova.utils, 'get_my_ip', get_my_ip)
+def stub_out_compute_api_snapshot(stubs):
+ def snapshot(self, context, instance_id, name):
+ return 123
+ stubs.Set(nova.compute.API, 'snapshot', snapshot)
+
+
def stub_out_glance(stubs, initial_fixtures=[]):
class FakeParallaxClient:
@@ -213,6 +219,9 @@ class FakeAuthManager(object):
return v
return None
+ def get_project(self, pid):
+ return None
+
def get_user_from_access_key(self, key):
return FakeAuthManager.auth_data.get(key, None)
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py
index 1b4031217..0f274bd15 100644
--- a/nova/tests/api/openstack/test_images.py
+++ b/nova/tests/api/openstack/test_images.py
@@ -50,7 +50,7 @@ class BaseImageServiceTests(object):
'updated': None,
'created': None,
'status': None,
- 'serverId': None,
+ 'instance_id': None,
'progress': None}
num_images = len(self.service.index(self.context))
@@ -67,7 +67,7 @@ class BaseImageServiceTests(object):
'updated': None,
'created': None,
'status': None,
- 'serverId': None,
+ 'instance_id': None,
'progress': None}
num_images = len(self.service.index(self.context))
@@ -87,7 +87,7 @@ class BaseImageServiceTests(object):
'updated': None,
'created': None,
'status': None,
- 'serverId': None,
+ 'instance_id': None,
'progress': None}
id = self.service.create(self.context, fixture)
@@ -105,13 +105,13 @@ class BaseImageServiceTests(object):
'updated': None,
'created': None,
'status': None,
- 'serverId': None,
+ 'instance_id': None,
'progress': None},
{'name': 'test image 2',
'updated': None,
'created': None,
'status': None,
- 'serverId': None,
+ 'instance_id': None,
'progress': None}]
num_images = len(self.service.index(self.context))
@@ -155,6 +155,7 @@ class GlanceImageServiceTest(unittest.TestCase,
def setUp(self):
self.stubs = stubout.StubOutForTesting()
fakes.stub_out_glance(self.stubs)
+ fakes.stub_out_compute_api_snapshot(self.stubs)
service_class = 'nova.image.glance.GlanceImageService'
self.service = utils.import_object(service_class)
self.context = context.RequestContext(None, None)
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index 5d23db588..6e611a55d 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -56,8 +56,8 @@ def instance_address(context, instance_id):
def stub_instance(id, user_id=1):
- return Instance(id=int(id) + 123456, state=0, image_id=10, user_id=user_id,
- display_name='server%s' % id, internal_id=id)
+ return Instance(id=id, state=0, image_id=10, user_id=user_id,
+ display_name='server%s' % id)
def fake_compute_api(cls, req, id):
@@ -76,8 +76,7 @@ class ServersTest(unittest.TestCase):
fakes.stub_out_key_pair_funcs(self.stubs)
fakes.stub_out_image_service(self.stubs)
self.stubs.Set(nova.db.api, 'instance_get_all', return_servers)
- self.stubs.Set(nova.db.api, 'instance_get_by_internal_id',
- return_server)
+ self.stubs.Set(nova.db.api, 'instance_get_by_id', return_server)
self.stubs.Set(nova.db.api, 'instance_get_all_by_user',
return_servers)
self.stubs.Set(nova.db.api, 'instance_add_security_group',
@@ -87,14 +86,12 @@ class ServersTest(unittest.TestCase):
instance_address)
self.stubs.Set(nova.db.api, 'instance_get_floating_address',
instance_address)
- self.stubs.Set(nova.compute.api.ComputeAPI, 'pause',
- fake_compute_api)
- self.stubs.Set(nova.compute.api.ComputeAPI, 'unpause',
- fake_compute_api)
- self.stubs.Set(nova.compute.api.ComputeAPI, 'suspend',
- fake_compute_api)
- self.stubs.Set(nova.compute.api.ComputeAPI, 'resume',
- fake_compute_api)
+ self.stubs.Set(nova.compute.API, 'pause', fake_compute_api)
+ self.stubs.Set(nova.compute.API, 'unpause', fake_compute_api)
+ self.stubs.Set(nova.compute.API, 'suspend', fake_compute_api)
+ self.stubs.Set(nova.compute.API, 'resume', fake_compute_api)
+ self.stubs.Set(nova.compute.API, "get_diagnostics", fake_compute_api)
+ self.stubs.Set(nova.compute.API, "get_actions", fake_compute_api)
self.allow_admin = FLAGS.allow_admin_api
def tearDown(self):
@@ -105,7 +102,7 @@ class ServersTest(unittest.TestCase):
req = webob.Request.blank('/v1.0/servers/1')
res = req.get_response(nova.api.API('os'))
res_dict = json.loads(res.body)
- self.assertEqual(res_dict['server']['id'], 1)
+ self.assertEqual(res_dict['server']['id'], '1')
self.assertEqual(res_dict['server']['name'], 'server1')
def test_get_server_list(self):
@@ -122,7 +119,7 @@ class ServersTest(unittest.TestCase):
def test_create_instance(self):
def instance_create(context, inst):
- return {'id': 1, 'internal_id': 1, 'display_name': ''}
+ return {'id': '1', 'display_name': ''}
def server_update(context, id, params):
return instance_create(context, id)
@@ -274,6 +271,18 @@ class ServersTest(unittest.TestCase):
res = req.get_response(nova.api.API('os'))
self.assertEqual(res.status_int, 202)
+ def test_server_diagnostics(self):
+ req = webob.Request.blank("/v1.0/servers/1/diagnostics")
+ req.method = "GET"
+ res = req.get_response(nova.api.API("os"))
+ self.assertEqual(res.status_int, 404)
+
+ def test_server_actions(self):
+ req = webob.Request.blank("/v1.0/servers/1/actions")
+ req.method = "GET"
+ res = req.get_response(nova.api.API("os"))
+ self.assertEqual(res.status_int, 404)
+
def test_server_reboot(self):
body = dict(server=dict(
name='server_test', imageId=2, flavorId=2, metadata={},
diff --git a/nova/tests/hyperv_unittest.py b/nova/tests/hyperv_unittest.py
new file mode 100644
index 000000000..3980ae3cb
--- /dev/null
+++ b/nova/tests/hyperv_unittest.py
@@ -0,0 +1,71 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2010 Cloud.com, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Hyper-V driver
+"""
+
+import random
+
+from nova import context
+from nova import db
+from nova import flags
+from nova import test
+from nova.auth import manager
+from nova.virt import hyperv
+
+FLAGS = flags.FLAGS
+FLAGS.connection_type = 'hyperv'
+
+
+class HyperVTestCase(test.TestCase):
+ """Test cases for the Hyper-V driver"""
+ def setUp(self):
+ super(HyperVTestCase, self).setUp()
+ self.manager = manager.AuthManager()
+ self.user = self.manager.create_user('fake', 'fake', 'fake',
+ admin=True)
+ self.project = self.manager.create_project('fake', 'fake', 'fake')
+ self.context = context.RequestContext(self.user, self.project)
+
+ def test_create_destroy(self):
+ """Create a VM and destroy it"""
+ instance = {'internal_id': random.randint(1, 1000000),
+ 'memory_mb': '1024',
+ 'mac_address': '02:12:34:46:56:67',
+ 'vcpus': 2,
+ 'project_id': 'fake',
+ 'instance_type': 'm1.small'}
+ instance_ref = db.instance_create(self.context, instance)
+
+ conn = hyperv.get_connection(False)
+ conn._create_vm(instance_ref) # pylint: disable-msg=W0212
+ found = [n for n in conn.list_instances()
+ if n == instance_ref['name']]
+ self.assertTrue(len(found) == 1)
+ info = conn.get_info(instance_ref['name'])
+        # Unfortunately, since the VM is not running at this point,
+        # we cannot obtain memory information from get_info.
+ self.assertEquals(info['num_cpu'], instance_ref['vcpus'])
+
+ conn.destroy(instance_ref)
+ found = [n for n in conn.list_instances()
+ if n == instance_ref['name']]
+ self.assertTrue(len(found) == 0)
+
+ def tearDown(self):
+ super(HyperVTestCase, self).tearDown()
+ self.manager.delete_project(self.project)
+ self.manager.delete_user(self.user)
diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index 3adecb729..21d212df7 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -62,7 +62,7 @@ class CloudTestCase(test.TestCase):
self.cloud = cloud.CloudController()
# set up services
- self.compute = service.Service.create(binary='nova-compute', host='host1')
+ self.compute = service.Service.create(binary='nova-compute')
self.compute.start()
self.network = service.Service.create(binary='nova-network')
self.network.start()
@@ -106,7 +106,7 @@ class CloudTestCase(test.TestCase):
self.cloud.allocate_address(self.context)
inst = db.instance_create(self.context, {'host': FLAGS.host})
fixed = self.network.allocate_fixed_ip(self.context, inst['id'])
- ec2_id = cloud.internal_id_to_ec2_id(inst['internal_id'])
+ ec2_id = cloud.id_to_ec2_id(inst['id'])
self.cloud.associate_address(self.context,
instance_id=ec2_id,
public_ip=address)
@@ -127,12 +127,13 @@ class CloudTestCase(test.TestCase):
result = self.cloud.describe_volumes(self.context)
self.assertEqual(len(result['volumeSet']), 2)
result = self.cloud.describe_volumes(self.context,
- volume_id=[vol2['ec2_id']])
+ volume_id=[vol2['id']])
self.assertEqual(len(result['volumeSet']), 1)
- self.assertEqual(result['volumeSet'][0]['volumeId'], vol2['ec2_id'])
+ self.assertEqual(result['volumeSet'][0]['volumeId'], vol2['id'])
db.volume_destroy(self.context, vol1['id'])
db.volume_destroy(self.context, vol2['id'])
+
def test_describe_availability_zones(self):
"""Makes sure describe_availability_zones works and filters results."""
service1 = db.service_create(self.context, {'host': 'host1_describe_zones',
@@ -150,6 +151,7 @@ class CloudTestCase(test.TestCase):
db.service_destroy(self.context, service1['id'])
db.service_destroy(self.context, service2['id'])
+
def test_console_output(self):
image_id = FLAGS.default_image
instance_type = FLAGS.default_instance_type
@@ -157,15 +159,16 @@ class CloudTestCase(test.TestCase):
kwargs = {'image_id': image_id,
'instance_type': instance_type,
'max_count': max_count}
- rv = yield self.cloud.run_instances(self.context, **kwargs)
+ rv = self.cloud.run_instances(self.context, **kwargs)
+ print rv
instance_id = rv['instancesSet'][0]['instanceId']
- output = yield self.cloud.get_console_output(context=self.context,
+ output = self.cloud.get_console_output(context=self.context,
instance_id=[instance_id])
self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE OUTPUT')
# TODO(soren): We need this until we can stop polling in the rpc code
# for unit tests.
greenthread.sleep(0.3)
- rv = yield self.cloud.terminate_instances(self.context, [instance_id])
+ rv = self.cloud.terminate_instances(self.context, [instance_id])
def test_key_generation(self):
result = self._create_key('test')
@@ -203,7 +206,7 @@ class CloudTestCase(test.TestCase):
kwargs = {'image_id': image_id,
'instance_type': instance_type,
'max_count': max_count}
- rv = yield self.cloud.run_instances(self.context, **kwargs)
+ rv = self.cloud.run_instances(self.context, **kwargs)
# TODO: check for proper response
instance_id = rv['reservationSet'][0].keys()[0]
instance = rv['reservationSet'][0][instance_id][0]
@@ -226,12 +229,13 @@ class CloudTestCase(test.TestCase):
for instance in reservations[reservations.keys()[0]]:
instance_id = instance['instance_id']
logging.debug("Terminating instance %s" % instance_id)
- rv = yield self.compute.terminate_instance(instance_id)
+ rv = self.compute.terminate_instance(instance_id)
+
def test_describe_instances(self):
"""Makes sure describe_instances works."""
- instance1 = db.instance_create(self.context, {'hostname': 'host2'})
- service1 = db.service_create(self.context, {'host': 'host1',
+ instance1 = db.instance_create(self.context, {'host': 'host2'})
+ service1 = db.service_create(self.context, {'host': 'host2',
'availability_zone': 'zone1',
'topic': "compute"})
result = self.cloud.describe_instances(self.context)
@@ -241,7 +245,7 @@ class CloudTestCase(test.TestCase):
db.instance_destroy(self.context, instance1['id'])
db.service_destroy(self.context, service1['id'])
-
+
def test_instance_update_state(self):
def instance(num):
return {
@@ -291,6 +295,7 @@ class CloudTestCase(test.TestCase):
# data = self.cloud.get_metadata(instance(i)['private_dns_name'])
# self.assert_(data['meta-data']['ami-id'] == 'ami-%s' % i)
+
@staticmethod
def _fake_set_image_description(ctxt, image_id, description):
from nova.objectstore import handler
@@ -327,7 +332,7 @@ class CloudTestCase(test.TestCase):
def test_update_of_instance_display_fields(self):
inst = db.instance_create(self.context, {})
- ec2_id = cloud.internal_id_to_ec2_id(inst['internal_id'])
+ ec2_id = cloud.id_to_ec2_id(inst['id'])
self.cloud.update_instance(self.context, ec2_id,
display_name='c00l 1m4g3')
inst = db.instance_get(self.context, inst['id'])
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index bcb8a1526..1d527b8f0 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -22,6 +22,7 @@ Tests For Compute
import datetime
import logging
+from nova import compute
from nova import context
from nova import db
from nova import exception
@@ -29,7 +30,6 @@ from nova import flags
from nova import test
from nova import utils
from nova.auth import manager
-from nova.compute import api as compute_api
FLAGS = flags.FLAGS
@@ -44,7 +44,7 @@ class ComputeTestCase(test.TestCase):
stub_network=True,
network_manager='nova.network.manager.FlatManager')
self.compute = utils.import_object(FLAGS.compute_manager)
- self.compute_api = compute_api.ComputeAPI()
+ self.compute_api = compute.API()
self.manager = manager.AuthManager()
self.user = self.manager.create_user('fake', 'fake', 'fake')
self.project = self.manager.create_project('fake', 'fake', 'fake')
@@ -72,7 +72,7 @@ class ComputeTestCase(test.TestCase):
"""Verify that an instance cannot be created without a display_name."""
cases = [dict(), dict(display_name=None)]
for instance in cases:
- ref = self.compute_api.create_instances(self.context,
+ ref = self.compute_api.create(self.context,
FLAGS.default_instance_type, None, **instance)
try:
self.assertNotEqual(ref[0].display_name, None)
@@ -80,13 +80,13 @@ class ComputeTestCase(test.TestCase):
db.instance_destroy(self.context, ref[0]['id'])
def test_create_instance_associates_security_groups(self):
- """Make sure create_instances associates security groups"""
+ """Make sure create associates security groups"""
values = {'name': 'default',
'description': 'default',
'user_id': self.user.id,
'project_id': self.project.id}
group = db.security_group_create(self.context, values)
- ref = self.compute_api.create_instances(self.context,
+ ref = self.compute_api.create(self.context,
FLAGS.default_instance_type, None, security_group=['default'])
try:
self.assertEqual(len(ref[0]['security_groups']), 1)
@@ -151,6 +151,14 @@ class ComputeTestCase(test.TestCase):
self.compute.reboot_instance(self.context, instance_id)
self.compute.terminate_instance(self.context, instance_id)
+ def test_snapshot(self):
+ """Ensure instance can be snapshotted"""
+ instance_id = self._create_instance()
+ name = "myfakesnapshot"
+ self.compute.run_instance(self.context, instance_id)
+ self.compute.snapshot_instance(self.context, instance_id, name)
+ self.compute.terminate_instance(self.context, instance_id)
+
def test_console_output(self):
"""Make sure we can get console output from instance"""
instance_id = self._create_instance()
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
index 8cf2a5e54..b5f9f30ef 100644
--- a/nova/tests/test_quota.py
+++ b/nova/tests/test_quota.py
@@ -27,6 +27,7 @@ from nova import test
from nova import utils
from nova.auth import manager
from nova.api.ec2 import cloud
+from nova.compute import instance_types
FLAGS = flags.FLAGS
@@ -78,14 +79,17 @@ class QuotaTestCase(test.TestCase):
def test_quota_overrides(self):
"""Make sure overriding a projects quotas works"""
- num_instances = quota.allowed_instances(self.context, 100, 'm1.small')
+ num_instances = quota.allowed_instances(self.context, 100,
+ instance_types.INSTANCE_TYPES['m1.small'])
self.assertEqual(num_instances, 2)
db.quota_create(self.context, {'project_id': self.project.id,
'instances': 10})
- num_instances = quota.allowed_instances(self.context, 100, 'm1.small')
+ num_instances = quota.allowed_instances(self.context, 100,
+ instance_types.INSTANCE_TYPES['m1.small'])
self.assertEqual(num_instances, 4)
db.quota_update(self.context, self.project.id, {'cores': 100})
- num_instances = quota.allowed_instances(self.context, 100, 'm1.small')
+ num_instances = quota.allowed_instances(self.context, 100,
+ instance_types.INSTANCE_TYPES['m1.small'])
self.assertEqual(num_instances, 10)
db.quota_destroy(self.context, self.project.id)
diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py
index 188e50aac..127d666e4 100644
--- a/nova/tests/test_scheduler.py
+++ b/nova/tests/test_scheduler.py
@@ -36,6 +36,7 @@ from nova.scheduler import driver
FLAGS = flags.FLAGS
flags.DECLARE('max_cores', 'nova.scheduler.simple')
+flags.DECLARE('stub_network', 'nova.compute.manager')
class TestDriver(driver.Scheduler):
@@ -117,6 +118,48 @@ class ZoneSchedulerTestCase(test.TestCase):
scheduler.run_instance(ctxt, 'compute', instance_id='i-ffffffff', availability_zone='zone1')
+class ZoneSchedulerTestCase(test.TestCase):
+ """Test case for zone scheduler"""
+ def setUp(self):
+ super(ZoneSchedulerTestCase, self).setUp()
+ self.flags(scheduler_driver='nova.scheduler.zone.ZoneScheduler')
+
+ def _create_service_model(self, **kwargs):
+ service = db.sqlalchemy.models.Service()
+ service.host = kwargs['host']
+ service.disabled = False
+ service.deleted = False
+ service.report_count = 0
+ service.binary = 'nova-compute'
+ service.topic = 'compute'
+ service.id = kwargs['id']
+ service.availability_zone = kwargs['zone']
+ service.created_at = datetime.datetime.utcnow()
+ return service
+
+
+ def test_with_two_zones(self):
+ scheduler = manager.SchedulerManager()
+ ctxt = context.get_admin_context()
+ service_list = [
+ self._create_service_model(id=1, host='host1', zone='zone1'),
+ self._create_service_model(id=2, host='host2', zone='zone2'),
+ self._create_service_model(id=3, host='host3', zone='zone2'),
+ self._create_service_model(id=4, host='host4', zone='zone2'),
+ self._create_service_model(id=5, host='host5', zone='zone2')
+ ]
+ self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
+ db.service_get_all_by_topic(IgnoreArg(), IgnoreArg()).AndReturn(service_list)
+ self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
+ rpc.cast(ctxt,
+ 'compute.host1',
+ {'method': 'run_instance',
+ 'args':{'instance_id': 'i-ffffffff',
+ 'availability_zone': 'zone1'}})
+ self.mox.ReplayAll()
+ scheduler.run_instance(ctxt, 'compute', instance_id='i-ffffffff', availability_zone='zone1')
+
+
class SimpleDriverTestCase(test.TestCase):
"""Test case for simple driver"""
def setUp(self):
@@ -138,7 +181,7 @@ class SimpleDriverTestCase(test.TestCase):
self.manager.delete_user(self.user)
self.manager.delete_project(self.project)
- def _create_instance(self):
+ def _create_instance(self, **kwargs):
"""Create a test instance"""
inst = {}
inst['image_id'] = 'ami-test'
@@ -149,6 +192,7 @@ class SimpleDriverTestCase(test.TestCase):
inst['mac_address'] = utils.generate_mac()
inst['ami_launch_index'] = 0
inst['vcpus'] = 1
+ inst['availability_zone'] = kwargs.get('availability_zone', None)
return db.instance_create(self.context, inst)['id']
def _create_volume(self):
@@ -157,9 +201,33 @@ class SimpleDriverTestCase(test.TestCase):
vol['image_id'] = 'ami-test'
vol['reservation_id'] = 'r-fakeres'
vol['size'] = 1
+ vol['availability_zone'] = 'test'
return db.volume_create(self.context, vol)['id']
- def test_hosts_are_up(self):
+ def test_doesnt_report_disabled_hosts_as_up(self):
+ """Ensures driver doesn't find hosts before they are enabled"""
+ # NOTE(vish): constructing service without create method
+ # because we are going to use it without queue
+ compute1 = service.Service('host1',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute1.start()
+ compute2 = service.Service('host2',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute2.start()
+ s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
+ s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')
+ db.service_update(self.context, s1['id'], {'disabled': True})
+ db.service_update(self.context, s2['id'], {'disabled': True})
+ hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
+ self.assertEqual(0, len(hosts))
+ compute1.kill()
+ compute2.kill()
+
+ def test_reports_enabled_hosts_as_up(self):
"""Ensures driver can find the hosts that are up"""
# NOTE(vish): constructing service without create method
# because we are going to use it without queue
@@ -174,7 +242,7 @@ class SimpleDriverTestCase(test.TestCase):
FLAGS.compute_manager)
compute2.start()
hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
- self.assertEqual(len(hosts), 2)
+ self.assertEqual(2, len(hosts))
compute1.kill()
compute2.kill()
@@ -201,6 +269,63 @@ class SimpleDriverTestCase(test.TestCase):
compute1.kill()
compute2.kill()
+ def test_specific_host_gets_instance(self):
+ """Ensures if you set availability_zone it launches on that zone"""
+ compute1 = service.Service('host1',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute1.start()
+ compute2 = service.Service('host2',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute2.start()
+ instance_id1 = self._create_instance()
+ compute1.run_instance(self.context, instance_id1)
+ instance_id2 = self._create_instance(availability_zone='nova:host1')
+ host = self.scheduler.driver.schedule_run_instance(self.context,
+ instance_id2)
+ self.assertEqual('host1', host)
+ compute1.terminate_instance(self.context, instance_id1)
+ db.instance_destroy(self.context, instance_id2)
+ compute1.kill()
+ compute2.kill()
+
+ def test_wont_sechedule_if_specified_host_is_down(self):
+ compute1 = service.Service('host1',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute1.start()
+ s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
+ now = datetime.datetime.utcnow()
+ delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2)
+ past = now - delta
+ db.service_update(self.context, s1['id'], {'updated_at': past})
+ instance_id2 = self._create_instance(availability_zone='nova:host1')
+ self.assertRaises(driver.WillNotSchedule,
+ self.scheduler.driver.schedule_run_instance,
+ self.context,
+ instance_id2)
+ db.instance_destroy(self.context, instance_id2)
+ compute1.kill()
+
+ def test_will_schedule_on_disabled_host_if_specified(self):
+ compute1 = service.Service('host1',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute1.start()
+ s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
+ db.service_update(self.context, s1['id'], {'disabled': True})
+ instance_id2 = self._create_instance(availability_zone='nova:host1')
+ host = self.scheduler.driver.schedule_run_instance(self.context,
+ instance_id2)
+ self.assertEqual('host1', host)
+ db.instance_destroy(self.context, instance_id2)
+ compute1.kill()
+
def test_too_many_cores(self):
"""Ensures we don't go over max cores"""
compute1 = service.Service('host1',
diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py
index 1400b88e5..a67c8d1e8 100644
--- a/nova/tests/test_service.py
+++ b/nova/tests/test_service.py
@@ -22,6 +22,8 @@ Unit Tests for remote procedure calls using queue
import mox
+from nova import context
+from nova import db
from nova import exception
from nova import flags
from nova import rpc
@@ -72,6 +74,30 @@ class ServiceManagerTestCase(test.TestCase):
self.assertEqual(serv.test_method(), 'service')
+class ServiceFlagsTestCase(test.TestCase):
+ def test_service_enabled_on_create_based_on_flag(self):
+ self.flags(enable_new_services=True)
+ host = 'foo'
+ binary = 'nova-fake'
+ app = service.Service.create(host=host, binary=binary)
+ app.start()
+ app.stop()
+ ref = db.service_get(context.get_admin_context(), app.service_id)
+ db.service_destroy(context.get_admin_context(), app.service_id)
+ self.assert_(not ref['disabled'])
+
+ def test_service_disabled_on_create_based_on_flag(self):
+ self.flags(enable_new_services=False)
+ host = 'foo'
+ binary = 'nova-fake'
+ app = service.Service.create(host=host, binary=binary)
+ app.start()
+ app.stop()
+ ref = db.service_get(context.get_admin_context(), app.service_id)
+ db.service_destroy(context.get_admin_context(), app.service_id)
+ self.assert_(ref['disabled'])
+
+
class ServiceTestCase(test.TestCase):
"""Test cases for Services"""
@@ -112,8 +138,7 @@ class ServiceTestCase(test.TestCase):
service_ref = {'host': host,
'binary': binary,
'report_count': 0,
- 'id': 1,
- 'availability_zone': 'nova'}
+ 'id': 1}
service.db.service_get_by_args(mox.IgnoreArg(),
host,
@@ -143,8 +168,8 @@ class ServiceTestCase(test.TestCase):
'binary': binary,
'topic': topic,
'report_count': 0,
- 'id': 1,
- 'availability_zone': 'nova'}
+ 'availability_zone': 'nova',
+ 'id': 1}
service.db.service_get_by_args(mox.IgnoreArg(),
host,
@@ -177,8 +202,8 @@ class ServiceTestCase(test.TestCase):
'binary': binary,
'topic': topic,
'report_count': 0,
- 'id': 1,
- 'availability_zone': 'nova'}
+ 'availability_zone': 'nova',
+ 'id': 1}
service.db.service_get_by_args(mox.IgnoreArg(),
host,
@@ -210,8 +235,8 @@ class ServiceTestCase(test.TestCase):
'binary': binary,
'topic': topic,
'report_count': 0,
- 'id': 1,
- 'availability_zone': 'nova'}
+ 'availability_zone': 'nova',
+ 'id': 1}
service.db.service_get_by_args(mox.IgnoreArg(),
host,
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index ed2e4ffde..ec9462ada 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -29,9 +29,9 @@ from nova.auth import manager
from nova.compute import instance_types
from nova.compute import power_state
from nova.virt import xenapi_conn
-from nova.virt.xenapi import fake
+from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import volume_utils
-from nova.tests.db import fakes
+from nova.tests.db import fakes as db_fakes
from nova.tests.xenapi import stubs
FLAGS = flags.FLAGS
@@ -47,9 +47,9 @@ class XenAPIVolumeTestCase(test.TestCase):
FLAGS.target_host = '127.0.0.1'
FLAGS.xenapi_connection_url = 'test_url'
FLAGS.xenapi_connection_password = 'test_pass'
- fakes.stub_out_db_instance_api(self.stubs)
+ db_fakes.stub_out_db_instance_api(self.stubs)
stubs.stub_out_get_target(self.stubs)
- fake.reset()
+ xenapi_fake.reset()
self.values = {'name': 1, 'id': 1,
'project_id': 'fake',
'user_id': 'fake',
@@ -79,11 +79,11 @@ class XenAPIVolumeTestCase(test.TestCase):
helper = volume_utils.VolumeHelper
helper.XenAPI = session.get_imported_xenapi()
vol = self._create_volume()
- info = helper.parse_volume_info(vol['ec2_id'], '/dev/sdc')
- label = 'SR-%s' % vol['ec2_id']
+ info = helper.parse_volume_info(vol['id'], '/dev/sdc')
+ label = 'SR-%s' % vol['id']
description = 'Test-SR'
sr_ref = helper.create_iscsi_storage(session, info, label, description)
- srs = fake.get_all('SR')
+ srs = xenapi_fake.get_all('SR')
self.assertEqual(sr_ref, srs[0])
db.volume_destroy(context.get_admin_context(), vol['id'])
@@ -97,7 +97,7 @@ class XenAPIVolumeTestCase(test.TestCase):
# oops, wrong mount point!
self.assertRaises(volume_utils.StorageError,
helper.parse_volume_info,
- vol['ec2_id'],
+ vol['id'],
'/dev/sd')
db.volume_destroy(context.get_admin_context(), vol['id'])
@@ -107,17 +107,16 @@ class XenAPIVolumeTestCase(test.TestCase):
conn = xenapi_conn.get_connection(False)
volume = self._create_volume()
instance = db.instance_create(self.values)
- fake.create_vm(instance.name, 'Running')
- result = conn.attach_volume(instance.name, volume['ec2_id'],
- '/dev/sdc')
+ xenapi_fake.create_vm(instance.name, 'Running')
+ result = conn.attach_volume(instance.name, volume['id'], '/dev/sdc')
def check():
# check that the VM has a VBD attached to it
# Get XenAPI reference for the VM
- vms = fake.get_all('VM')
+ vms = xenapi_fake.get_all('VM')
# Get XenAPI record for VBD
- vbds = fake.get_all('VBD')
- vbd = fake.get_record('VBD', vbds[0])
+ vbds = xenapi_fake.get_all('VBD')
+ vbd = xenapi_fake.get_record('VBD', vbds[0])
vm_ref = vbd['VM']
self.assertEqual(vm_ref, vms[0])
@@ -130,11 +129,11 @@ class XenAPIVolumeTestCase(test.TestCase):
conn = xenapi_conn.get_connection(False)
volume = self._create_volume()
instance = db.instance_create(self.values)
- fake.create_vm(instance.name, 'Running')
+ xenapi_fake.create_vm(instance.name, 'Running')
self.assertRaises(Exception,
conn.attach_volume,
instance.name,
- volume['ec2_id'],
+ volume['id'],
'/dev/sdc')
def tearDown(self):
@@ -156,41 +155,70 @@ class XenAPIVMTestCase(test.TestCase):
self.stubs = stubout.StubOutForTesting()
FLAGS.xenapi_connection_url = 'test_url'
FLAGS.xenapi_connection_password = 'test_pass'
- fake.reset()
- fakes.stub_out_db_instance_api(self.stubs)
- fake.create_network('fake', FLAGS.flat_network_bridge)
+ xenapi_fake.reset()
+ db_fakes.stub_out_db_instance_api(self.stubs)
+ xenapi_fake.create_network('fake', FLAGS.flat_network_bridge)
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.get_connection(False)
def test_list_instances_0(self):
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- conn = xenapi_conn.get_connection(False)
- instances = conn.list_instances()
+ instances = self.conn.list_instances()
self.assertEquals(instances, [])
+ def test_get_diagnostics(self):
+ instance = self._create_instance()
+ self.conn.get_diagnostics(instance)
+
+ def test_instance_snapshot(self):
+ stubs.stubout_instance_snapshot(self.stubs)
+ instance = self._create_instance()
+
+ name = "MySnapshot"
+ template_vm_ref = self.conn.snapshot(instance, name)
+
+ def ensure_vm_was_torn_down():
+ vm_labels = []
+ for vm_ref in xenapi_fake.get_all('VM'):
+ vm_rec = xenapi_fake.get_record('VM', vm_ref)
+ if not vm_rec["is_control_domain"]:
+ vm_labels.append(vm_rec["name_label"])
+
+ self.assertEquals(vm_labels, [1])
+
+ def ensure_vbd_was_torn_down():
+ vbd_labels = []
+ for vbd_ref in xenapi_fake.get_all('VBD'):
+ vbd_rec = xenapi_fake.get_record('VBD', vbd_ref)
+ vbd_labels.append(vbd_rec["vm_name_label"])
+
+ self.assertEquals(vbd_labels, [1])
+
+ def ensure_vdi_was_torn_down():
+ for vdi_ref in xenapi_fake.get_all('VDI'):
+ vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
+ name_label = vdi_rec["name_label"]
+ self.assert_(not name_label.endswith('snapshot'))
+
+ def check():
+ ensure_vm_was_torn_down()
+ ensure_vbd_was_torn_down()
+ ensure_vdi_was_torn_down()
+
+ check()
+
def test_spawn(self):
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- values = {'name': 1, 'id': 1,
- 'project_id': self.project.id,
- 'user_id': self.user.id,
- 'image_id': 1,
- 'kernel_id': 2,
- 'ramdisk_id': 3,
- 'instance_type': 'm1.large',
- 'mac_address': 'aa:bb:cc:dd:ee:ff',
- }
- conn = xenapi_conn.get_connection(False)
- instance = db.instance_create(values)
- conn.spawn(instance)
+ instance = self._create_instance()
def check():
- instances = conn.list_instances()
+ instances = self.conn.list_instances()
self.assertEquals(instances, [1])
# Get Nova record for VM
- vm_info = conn.get_info(1)
+ vm_info = self.conn.get_info(1)
# Get XenAPI record for VM
- vms = fake.get_all('VM')
- vm = fake.get_record('VM', vms[0])
+ vms = xenapi_fake.get_all('VM')
+ vm = xenapi_fake.get_record('VM', vms[0])
# Check that m1.large above turned into the right thing.
instance_type = instance_types.INSTANCE_TYPES['m1.large']
@@ -218,3 +246,19 @@ class XenAPIVMTestCase(test.TestCase):
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
self.stubs.UnsetAll()
+
+ def _create_instance(self):
+ """Creates and spawns a test instance"""
+ values = {
+ 'name': 1,
+ 'id': 1,
+ 'project_id': self.project.id,
+ 'user_id': self.user.id,
+ 'image_id': 1,
+ 'kernel_id': 2,
+ 'ramdisk_id': 3,
+ 'instance_type': 'm1.large',
+ 'mac_address': 'aa:bb:cc:dd:ee:ff'}
+ instance = db.instance_create(values)
+ self.conn.spawn(instance)
+ return instance
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index a7e592fee..55f751f11 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -19,6 +19,54 @@
from nova.virt import xenapi_conn
from nova.virt.xenapi import fake
from nova.virt.xenapi import volume_utils
+from nova.virt.xenapi import vm_utils
+
+
+def stubout_instance_snapshot(stubs):
+ @classmethod
+ def fake_fetch_image(cls, session, instance_id, image, user, project,
+ type):
+ # Stubout wait_for_task
+ def fake_wait_for_task(self, id, task):
+ class FakeEvent:
+
+ def send(self, value):
+ self.rv = value
+
+ def wait(self):
+ return self.rv
+
+ done = FakeEvent()
+ self._poll_task(id, task, done)
+ rv = done.wait()
+ return rv
+
+ stubs.Set(xenapi_conn.XenAPISession, 'wait_for_task',
+ fake_wait_for_task)
+
+ from nova.virt.xenapi.fake import create_vdi
+ name_label = "instance-%s" % instance_id
+        # TODO: create fake SR record
+ sr_ref = "fakesr"
+ vdi_ref = create_vdi(name_label=name_label, read_only=False,
+ sr_ref=sr_ref, sharable=False)
+ vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
+ vdi_uuid = vdi_rec['uuid']
+ return vdi_uuid
+
+ stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image)
+
+ def fake_parse_xmlrpc_value(val):
+ return val
+
+ stubs.Set(xenapi_conn, '_parse_xmlrpc_value', fake_parse_xmlrpc_value)
+
+ def fake_wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
+ original_parent_uuid):
+        # TODO(sirp): Should we actually fake out the data here?
+ return "fakeparent"
+
+ stubs.Set(vm_utils, 'wait_for_vhd_coalesce', fake_wait_for_vhd_coalesce)
def stubout_session(stubs, cls):
@@ -63,6 +111,24 @@ class FakeSessionForVMTests(fake.SessionBase):
vm['is_a_template'] = False
vm['is_control_domain'] = False
+ def VM_snapshot(self, session_ref, vm_ref, label):
+ status = "Running"
+ template_vm_ref = fake.create_vm(label, status, is_a_template=True,
+ is_control_domain=False)
+
+ sr_ref = "fakesr"
+ template_vdi_ref = fake.create_vdi(label, read_only=True,
+ sr_ref=sr_ref, sharable=False)
+
+ template_vbd_ref = fake.create_vbd(template_vm_ref, template_vdi_ref)
+ return template_vm_ref
+
+ def VDI_destroy(self, session_ref, vdi_ref):
+ fake.destroy_vdi(vdi_ref)
+
+ def VM_destroy(self, session_ref, vm_ref):
+ fake.destroy_vm(vm_ref)
+
class FakeSessionForVolumeTests(fake.SessionBase):
""" Stubs out a XenAPISession for Volume tests """
diff --git a/nova/virt/connection.py b/nova/virt/connection.py
index 61e99944e..846423afe 100644
--- a/nova/virt/connection.py
+++ b/nova/virt/connection.py
@@ -26,6 +26,7 @@ from nova import flags
from nova.virt import fake
from nova.virt import libvirt_conn
from nova.virt import xenapi_conn
+from nova.virt import hyperv
FLAGS = flags.FLAGS
@@ -62,6 +63,8 @@ def get_connection(read_only=False):
conn = libvirt_conn.get_connection(read_only)
elif t == 'xenapi':
conn = xenapi_conn.get_connection(read_only)
+ elif t == 'hyperv':
+ conn = hyperv.get_connection(read_only)
else:
raise Exception('Unknown connection type "%s"' % t)
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 706888b0d..32541f5b4 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -112,6 +112,20 @@ class FakeConnection(object):
self.instances[instance.name] = fake_instance
fake_instance._state = power_state.RUNNING
+ def snapshot(self, instance, name):
+ """
+ Snapshots the specified instance.
+
+ The given parameter is an instance of nova.compute.service.Instance,
+ and so the instance is being specified as instance.name.
+
+ The second parameter is the name of the snapshot.
+
+ The work will be done asynchronously. This function returns a
+ Deferred that allows the caller to detect when it is complete.
+ """
+ pass
+
def reboot(self, instance):
"""
Reboot the specified instance.
@@ -202,6 +216,9 @@ class FakeConnection(object):
'num_cpu': 2,
'cpu_time': 0}
+ def get_diagnostics(self, instance_name):
+ pass
+
def list_disks(self, instance_name):
"""
Return the IDs of all the virtual disks attached to the specified
diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py
new file mode 100644
index 000000000..4b9f6f946
--- /dev/null
+++ b/nova/virt/hyperv.py
@@ -0,0 +1,459 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Cloud.com, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+A connection to Hyper-V.
+Uses Windows Management Instrumentation (WMI) calls to interact with Hyper-V
+Hyper-V WMI usage:
+ http://msdn.microsoft.com/en-us/library/cc723875%28v=VS.85%29.aspx
+The Hyper-V object model briefly:
+ The physical computer and its hosted virtual machines are each represented
+ by the Msvm_ComputerSystem class.
+
+ Each virtual machine is associated with a
+ Msvm_VirtualSystemGlobalSettingData (vs_gs_data) instance and one or more
+ Msvm_VirtualSystemSettingData (vmsetting) instances. For each vmsetting
+ there is a series of Msvm_ResourceAllocationSettingData (rasd) objects.
+ The rasd objects describe the settings for each device in a VM.
+ Together, the vs_gs_data, vmsettings and rasds describe the configuration
+ of the virtual machine.
+
+ Creating new resources such as disks and nics involves cloning a default
+ rasd object and appropriately modifying the clone and calling the
+ AddVirtualSystemResources WMI method
+ Changing resources such as memory uses the ModifyVirtualSystemResources
+ WMI method
+
+Using the Python WMI library:
+ Tutorial:
+ http://timgolden.me.uk/python/wmi/tutorial.html
+ Hyper-V WMI objects can be retrieved simply by using the class name
+ of the WMI object and optionally specifying a column to filter the
+ result set. More complex filters can be formed using WQL (sql-like)
+ queries.
+ The parameters and return tuples of WMI method calls can be gleaned by
+ examining the doc string. For example:
+ >>> vs_man_svc.ModifyVirtualSystemResources.__doc__
+ ModifyVirtualSystemResources (ComputerSystem, ResourceSettingData[])
+ => (Job, ReturnValue)'
+ When passing setting data (ResourceSettingData) to the WMI method,
+ an XML representation of the data is passed in using GetText_(1).
+ Available methods on a service can be determined using method.keys():
+ >>> vs_man_svc.methods.keys()
+ vmsettings and rasds for a vm can be retrieved using the 'associators'
+ method with the appropriate return class.
+ Long running WMI commands generally return a Job (an instance of
+ Msvm_ConcreteJob) whose state can be polled to determine when it finishes
+
+"""
+
+import os
+import logging
+import time
+
+from nova import exception
+from nova import flags
+from nova.auth import manager
+from nova.compute import power_state
+from nova.virt import images
+
+wmi = None
+
+
+FLAGS = flags.FLAGS
+
+
+HYPERV_POWER_STATE = {
+ 3: power_state.SHUTDOWN,
+ 2: power_state.RUNNING,
+ 32768: power_state.PAUSED,
+}
+
+
+REQ_POWER_STATE = {
+ 'Enabled': 2,
+ 'Disabled': 3,
+ 'Reboot': 10,
+ 'Reset': 11,
+ 'Paused': 32768,
+ 'Suspended': 32769
+}
+
+
+WMI_JOB_STATUS_STARTED = 4096
+WMI_JOB_STATE_RUNNING = 4
+WMI_JOB_STATE_COMPLETED = 7
+
+
+def get_connection(_):
+ global wmi
+ if wmi is None:
+ wmi = __import__('wmi')
+ return HyperVConnection()
+
+
+class HyperVConnection(object):
+ def __init__(self):
+ self._conn = wmi.WMI(moniker='//./root/virtualization')
+ self._cim_conn = wmi.WMI(moniker='//./root/cimv2')
+
+ def init_host(self):
+ #FIXME(chiradeep): implement this
+ logging.debug(_('In init host'))
+ pass
+
+ def list_instances(self):
+ """ Return the names of all the instances known to Hyper-V. """
+ vms = [v.ElementName \
+ for v in self._conn.Msvm_ComputerSystem(['ElementName'])]
+ return vms
+
+ def spawn(self, instance):
+ """ Create a new VM and start it."""
+ vm = self._lookup(instance.name)
+ if vm is not None:
+ raise exception.Duplicate(_('Attempt to create duplicate vm %s') %
+ instance.name)
+
+ user = manager.AuthManager().get_user(instance['user_id'])
+ project = manager.AuthManager().get_project(instance['project_id'])
+ #Fetch the file, assume it is a VHD file.
+ base_vhd_filename = os.path.join(FLAGS.instances_path,
+ instance.name)
+ vhdfile = "%s.vhd" % (base_vhd_filename)
+ images.fetch(instance['image_id'], vhdfile, user, project)
+
+ try:
+ self._create_vm(instance)
+
+ self._create_disk(instance['name'], vhdfile)
+ self._create_nic(instance['name'], instance['mac_address'])
+
+ logging.debug(_('Starting VM %s '), instance.name)
+ self._set_vm_state(instance['name'], 'Enabled')
+ logging.info(_('Started VM %s '), instance.name)
+ except Exception as exn:
+ logging.error(_('spawn vm failed: %s'), exn)
+ self.destroy(instance)
+
+ def _create_vm(self, instance):
+ """Create a VM but don't start it. """
+ vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
+
+ vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new()
+ vs_gs_data.ElementName = instance['name']
+ (job, ret_val) = vs_man_svc.DefineVirtualSystem(
+ [], None, vs_gs_data.GetText_(1))[1:]
+ if ret_val == WMI_JOB_STATUS_STARTED:
+ success = self._check_job_status(job)
+ else:
+ success = (ret_val == 0)
+
+ if not success:
+ raise Exception(_('Failed to create VM %s'), instance.name)
+
+ logging.debug(_('Created VM %s...'), instance.name)
+ vm = self._conn.Msvm_ComputerSystem(ElementName=instance.name)[0]
+
+ vmsettings = vm.associators(
+ wmi_result_class='Msvm_VirtualSystemSettingData')
+ vmsetting = [s for s in vmsettings
+ if s.SettingType == 3][0] # avoid snapshots
+ memsetting = vmsetting.associators(
+ wmi_result_class='Msvm_MemorySettingData')[0]
+ #No Dynamic Memory, so reservation, limit and quantity are identical.
+ mem = long(str(instance['memory_mb']))
+ memsetting.VirtualQuantity = mem
+ memsetting.Reservation = mem
+ memsetting.Limit = mem
+
+ (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
+ vm.path_(), [memsetting.GetText_(1)])
+ logging.debug(_('Set memory for vm %s...'), instance.name)
+ procsetting = vmsetting.associators(
+ wmi_result_class='Msvm_ProcessorSettingData')[0]
+ vcpus = long(instance['vcpus'])
+ procsetting.VirtualQuantity = vcpus
+ procsetting.Reservation = vcpus
+ procsetting.Limit = vcpus
+
+ (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
+ vm.path_(), [procsetting.GetText_(1)])
+ logging.debug(_('Set vcpus for vm %s...'), instance.name)
+
+ def _create_disk(self, vm_name, vhdfile):
+ """Create a disk and attach it to the vm"""
+ logging.debug(_('Creating disk for %s by attaching disk file %s'),
+ vm_name, vhdfile)
+ #Find the IDE controller for the vm.
+ vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
+ vm = vms[0]
+ vmsettings = vm.associators(
+ wmi_result_class='Msvm_VirtualSystemSettingData')
+ rasds = vmsettings[0].associators(
+ wmi_result_class='MSVM_ResourceAllocationSettingData')
+ ctrller = [r for r in rasds
+ if r.ResourceSubType == 'Microsoft Emulated IDE Controller'\
+ and r.Address == "0"]
+ #Find the default disk drive object for the vm and clone it.
+ diskdflt = self._conn.query(
+ "SELECT * FROM Msvm_ResourceAllocationSettingData \
+ WHERE ResourceSubType LIKE 'Microsoft Synthetic Disk Drive'\
+ AND InstanceID LIKE '%Default%'")[0]
+ diskdrive = self._clone_wmi_obj(
+ 'Msvm_ResourceAllocationSettingData', diskdflt)
+ #Set the IDE ctrller as parent.
+ diskdrive.Parent = ctrller[0].path_()
+ diskdrive.Address = 0
+ #Add the cloned disk drive object to the vm.
+ new_resources = self._add_virt_resource(diskdrive, vm)
+ if new_resources is None:
+ raise Exception(_('Failed to add diskdrive to VM %s'),
+ vm_name)
+ diskdrive_path = new_resources[0]
+ logging.debug(_('New disk drive path is %s'), diskdrive_path)
+ #Find the default VHD disk object.
+ vhddefault = self._conn.query(
+ "SELECT * FROM Msvm_ResourceAllocationSettingData \
+ WHERE ResourceSubType LIKE 'Microsoft Virtual Hard Disk' AND \
+ InstanceID LIKE '%Default%' ")[0]
+
+ #Clone the default and point it to the image file.
+ vhddisk = self._clone_wmi_obj(
+ 'Msvm_ResourceAllocationSettingData', vhddefault)
+ #Set the new drive as the parent.
+ vhddisk.Parent = diskdrive_path
+ vhddisk.Connection = [vhdfile]
+
+ #Add the new vhd object as a virtual hard disk to the vm.
+ new_resources = self._add_virt_resource(vhddisk, vm)
+ if new_resources is None:
+ raise Exception(_('Failed to add vhd file to VM %s'),
+ vm_name)
+ logging.info(_('Created disk for %s'), vm_name)
+
+ def _create_nic(self, vm_name, mac):
+ """Create a (emulated) nic and attach it to the vm"""
+ logging.debug(_('Creating nic for %s '), vm_name)
+ #Find the vswitch that is connected to the physical nic.
+ vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
+ extswitch = self._find_external_network()
+ vm = vms[0]
+ switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0]
+ #Find the default nic and clone it to create a new nic for the vm.
+ #Use Msvm_SyntheticEthernetPortSettingData for Windows or Linux with
+ #Linux Integration Components installed.
+ emulatednics_data = self._conn.Msvm_EmulatedEthernetPortSettingData()
+ default_nic_data = [n for n in emulatednics_data
+ if n.InstanceID.rfind('Default') > 0]
+ new_nic_data = self._clone_wmi_obj(
+ 'Msvm_EmulatedEthernetPortSettingData',
+ default_nic_data[0])
+ #Create a port on the vswitch.
+ (new_port, ret_val) = switch_svc.CreateSwitchPort(vm_name, vm_name,
+ "", extswitch.path_())
+ if ret_val != 0:
+ logging.error(_('Failed creating a port on the external vswitch'))
+ raise Exception(_('Failed creating port for %s'),
+ vm_name)
+ logging.debug(_("Created switch port %s on switch %s"),
+ vm_name, extswitch.path_())
+ #Connect the new nic to the new port.
+ new_nic_data.Connection = [new_port]
+ new_nic_data.ElementName = vm_name + ' nic'
+ new_nic_data.Address = ''.join(mac.split(':'))
+ new_nic_data.StaticMacAddress = 'TRUE'
+ #Add the new nic to the vm.
+ new_resources = self._add_virt_resource(new_nic_data, vm)
+ if new_resources is None:
+ raise Exception(_('Failed to add nic to VM %s'),
+ vm_name)
+ logging.info(_("Created nic for %s "), vm_name)
+
+ def _add_virt_resource(self, res_setting_data, target_vm):
+ """Add a new resource (disk/nic) to the VM"""
+ vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
+ (job, new_resources, ret_val) = vs_man_svc.\
+ AddVirtualSystemResources([res_setting_data.GetText_(1)],
+ target_vm.path_())
+ success = True
+ if ret_val == WMI_JOB_STATUS_STARTED:
+ success = self._check_job_status(job)
+ else:
+ success = (ret_val == 0)
+ if success:
+ return new_resources
+ else:
+ return None
+
+ #TODO: use the reactor to poll instead of sleep
+ def _check_job_status(self, jobpath):
+ """Poll WMI job state for completion"""
+ #Jobs have a path of the form:
+ #\\WIN-P5IG7367DAG\root\virtualization:Msvm_ConcreteJob.InstanceID=
+ #"8A496B9C-AF4D-4E98-BD3C-1128CD85320D"
+ inst_id = jobpath.split('=')[1].strip('"')
+ jobs = self._conn.Msvm_ConcreteJob(InstanceID=inst_id)
+ if len(jobs) == 0:
+ return False
+ job = jobs[0]
+ while job.JobState == WMI_JOB_STATE_RUNNING:
+ time.sleep(0.1)
+ job = self._conn.Msvm_ConcreteJob(InstanceID=inst_id)[0]
+ if job.JobState != WMI_JOB_STATE_COMPLETED:
+ logging.debug(_("WMI job failed: %s"), job.ErrorSummaryDescription)
+ return False
+ logging.debug(_("WMI job succeeded: %s, Elapsed=%s "), job.Description,
+ job.ElapsedTime)
+ return True
+
+ def _find_external_network(self):
+ """Find the vswitch that is connected to the physical nic.
+ Assumes only one physical nic on the host
+ """
+ #If there are no physical nics connected to networks, return.
+ bound = self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')
+ if len(bound) == 0:
+ return None
+ return self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')[0]\
+ .associators(wmi_result_class='Msvm_SwitchLANEndpoint')[0]\
+ .associators(wmi_result_class='Msvm_SwitchPort')[0]\
+ .associators(wmi_result_class='Msvm_VirtualSwitch')[0]
+
+ def _clone_wmi_obj(self, wmi_class, wmi_obj):
+ """Clone a WMI object"""
+ cl = self._conn.__getattr__(wmi_class) # get the class
+ newinst = cl.new()
+ #Copy the properties from the original.
+ for prop in wmi_obj._properties:
+ newinst.Properties_.Item(prop).Value =\
+ wmi_obj.Properties_.Item(prop).Value
+ return newinst
+
+ def reboot(self, instance):
+ """Reboot the specified instance."""
+ vm = self._lookup(instance.name)
+ if vm is None:
+ raise exception.NotFound('instance not present %s' % instance.name)
+ self._set_vm_state(instance.name, 'Reboot')
+
+ def destroy(self, instance):
+ """Destroy the VM. Also destroy the associated VHD disk files"""
+ logging.debug(_("Got request to destroy vm %s"), instance.name)
+ vm = self._lookup(instance.name)
+ if vm is None:
+ return
+ vm = self._conn.Msvm_ComputerSystem(ElementName=instance.name)[0]
+ vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
+ #Stop the VM first.
+ self._set_vm_state(instance.name, 'Disabled')
+ vmsettings = vm.associators(
+ wmi_result_class='Msvm_VirtualSystemSettingData')
+ rasds = vmsettings[0].associators(
+ wmi_result_class='MSVM_ResourceAllocationSettingData')
+ disks = [r for r in rasds \
+ if r.ResourceSubType == 'Microsoft Virtual Hard Disk']
+ diskfiles = []
+ #Collect disk file information before destroying the VM.
+ for disk in disks:
+ diskfiles.extend([c for c in disk.Connection])
+ #Nuke the VM. Does not destroy disks.
+ (job, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_())
+ if ret_val == WMI_JOB_STATUS_STARTED:
+ success = self._check_job_status(job)
+ elif ret_val == 0:
+ success = True
+ if not success:
+ raise Exception(_('Failed to destroy vm %s') % instance.name)
+ #Delete associated vhd disk files.
+ for disk in diskfiles:
+ vhdfile = self._cim_conn.CIM_DataFile(Name=disk)
+ for vf in vhdfile:
+ vf.Delete()
+ logging.debug(_("Del: disk %s vm %s"), vhdfile, instance.name)
+
+ def get_info(self, instance_id):
+ """Get information about the VM"""
+ vm = self._lookup(instance_id)
+ if vm is None:
+ raise exception.NotFound('instance not present %s' % instance_id)
+ vm = self._conn.Msvm_ComputerSystem(ElementName=instance_id)[0]
+ vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
+ vmsettings = vm.associators(
+ wmi_result_class='Msvm_VirtualSystemSettingData')
+ settings_paths = [v.path_() for v in vmsettings]
+ #See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx
+ summary_info = vs_man_svc.GetSummaryInformation(
+ [4, 100, 103, 105], settings_paths)[1]
+ info = summary_info[0]
+ logging.debug(_("Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, \
+ cpu_time=%s"), instance_id,
+ str(HYPERV_POWER_STATE[info.EnabledState]),
+ str(info.MemoryUsage),
+ str(info.NumberOfProcessors),
+ str(info.UpTime))
+
+ return {'state': HYPERV_POWER_STATE[info.EnabledState],
+ 'max_mem': info.MemoryUsage,
+ 'mem': info.MemoryUsage,
+ 'num_cpu': info.NumberOfProcessors,
+ 'cpu_time': info.UpTime}
+
+ def _lookup(self, i):
+ vms = self._conn.Msvm_ComputerSystem(ElementName=i)
+ n = len(vms)
+ if n == 0:
+ return None
+ elif n > 1:
+ raise Exception(_('duplicate name found: %s') % i)
+ else:
+ return vms[0].ElementName
+
+ def _set_vm_state(self, vm_name, req_state):
+ """Set the desired state of the VM"""
+ vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
+ if len(vms) == 0:
+ return False
+ (job, ret_val) = vms[0].RequestStateChange(REQ_POWER_STATE[req_state])
+ success = False
+ if ret_val == WMI_JOB_STATUS_STARTED:
+ success = self._check_job_status(job)
+ elif ret_val == 0:
+ success = True
+ elif ret_val == 32775:
+ #Invalid state for current operation. Typically means it is
+ #already in the state requested
+ success = True
+ if success:
+ logging.info(_("Successfully changed vm state of %s to %s"),
+ vm_name, req_state)
+ else:
+ logging.error(_("Failed to change vm state of %s to %s"),
+ vm_name, req_state)
+ raise Exception(_("Failed to change vm state of %s to %s"),
+ vm_name, req_state)
+
+ def attach_volume(self, instance_name, device_path, mountpoint):
+ vm = self._lookup(instance_name)
+ if vm is None:
+ raise exception.NotFound('Cannot attach volume to missing %s vm' %
+ instance_name)
+
+ def detach_volume(self, instance_name, mountpoint):
+ vm = self._lookup(instance_name)
+ if vm is None:
+ raise exception.NotFound('Cannot detach volume from missing %s ' %
+ instance_name)
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 1c9b2e093..2d03da4b4 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -21,8 +21,12 @@
Handling of VM disk images.
"""
+import logging
import os.path
+import shutil
+import sys
import time
+import urllib2
import urlparse
from nova import flags
@@ -45,6 +49,25 @@ def fetch(image, path, user, project):
return f(image, path, user, project)
+def _fetch_image_no_curl(url, path, headers):
+ request = urllib2.Request(url)
+ for (k, v) in headers.iteritems():
+ request.add_header(k, v)
+
+ def urlretrieve(urlfile, fpath):
+ chunk = 1 * 1024 * 1024
+ f = open(fpath, "wb")
+ while 1:
+ data = urlfile.read(chunk)
+ if not data:
+ break
+ f.write(data)
+
+ urlopened = urllib2.urlopen(request)
+ urlretrieve(urlopened, path)
+ logging.debug(_("Finished retreving %s -- placed in %s"), url, path)
+
+
def _fetch_s3_image(image, path, user, project):
url = image_url(image)
@@ -61,18 +84,24 @@ def _fetch_s3_image(image, path, user, project):
url_path)
headers['Authorization'] = 'AWS %s:%s' % (access, signature)
- cmd = ['/usr/bin/curl', '--fail', '--silent', url]
- for (k, v) in headers.iteritems():
- cmd += ['-H', '"%s: %s"' % (k, v)]
+ if sys.platform.startswith('win'):
+ return _fetch_image_no_curl(url, path, headers)
+ else:
+ cmd = ['/usr/bin/curl', '--fail', '--silent', url]
+ for (k, v) in headers.iteritems():
+ cmd += ['-H', '%s: %s' % (k, v)]
- cmd += ['-o', path]
- cmd_out = ' '.join(cmd)
- return utils.execute(cmd_out)
+ cmd += ['-o', path]
+ cmd_out = ' '.join(cmd)
+ return utils.execute(cmd_out)
def _fetch_local_image(image, path, user, project):
- source = _image_path('%s/image' % image)
- return utils.execute('cp %s %s' % (source, path))
+ source = _image_path(os.path.join(image, 'image'))
+ if sys.platform.startswith('win'):
+ return shutil.copy(source, path)
+ else:
+ return utils.execute('cp %s %s' % (source, path))
def _image_path(path):
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index f6a218fa4..00edfbdc8 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -68,6 +68,9 @@ FLAGS = flags.FLAGS
flags.DEFINE_string('rescue_image_id', 'ami-rescue', 'Rescue ami image')
flags.DEFINE_string('rescue_kernel_id', 'aki-rescue', 'Rescue aki image')
flags.DEFINE_string('rescue_ramdisk_id', 'ari-rescue', 'Rescue ari image')
+flags.DEFINE_string('injected_network_template',
+ utils.abspath('virt/interfaces.template'),
+ 'Template file for injected network')
flags.DEFINE_string('libvirt_xml_template',
utils.abspath('virt/libvirt.xml.template'),
'Libvirt XML Template')
@@ -257,6 +260,13 @@ class LibvirtConnection(object):
virt_dom.detachDevice(xml)
@exception.wrap_exception
+ def snapshot(self, instance, name):
+ """ Create snapshot from a running VM instance """
+ raise NotImplementedError(
+ _("Instance snapshotting is not supported for libvirt"
+ "at this time"))
+
+ @exception.wrap_exception
def reboot(self, instance):
self.destroy(instance, False)
xml = self.to_xml(instance)
@@ -577,6 +587,9 @@ class LibvirtConnection(object):
'num_cpu': num_cpu,
'cpu_time': cpu_time}
+ def get_diagnostics(self, instance_name):
+ raise exception.APIError("diagnostics are not supported for libvirt")
+
def get_disks(self, instance_name):
"""
Note that this function takes an instance name, not an Instance, so
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 1eaf31c25..aa4026f97 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -55,6 +55,8 @@ import datetime
import logging
import uuid
+from pprint import pformat
+
from nova import exception
@@ -64,6 +66,10 @@ _CLASSES = ['host', 'network', 'session', 'SR', 'VBD',\
_db_content = {}
+def log_db_contents(msg=None):
+ logging.debug(_("%s: _db_content => %s"), msg or "", pformat(_db_content))
+
+
def reset():
for c in _CLASSES:
_db_content[c] = {}
@@ -93,6 +99,24 @@ def create_vm(name_label, status,
})
+def destroy_vm(vm_ref):
+ vm_rec = _db_content['VM'][vm_ref]
+
+ vbd_refs = vm_rec['VBDs']
+ for vbd_ref in vbd_refs:
+ destroy_vbd(vbd_ref)
+
+ del _db_content['VM'][vm_ref]
+
+
+def destroy_vbd(vbd_ref):
+ del _db_content['VBD'][vbd_ref]
+
+
+def destroy_vdi(vdi_ref):
+ del _db_content['VDI'][vdi_ref]
+
+
def create_vdi(name_label, read_only, sr_ref, sharable):
return _create_object('VDI', {
'name_label': name_label,
@@ -109,6 +133,23 @@ def create_vdi(name_label, read_only, sr_ref, sharable):
})
+def create_vbd(vm_ref, vdi_ref):
+ vbd_rec = {'VM': vm_ref, 'VDI': vdi_ref}
+ vbd_ref = _create_object('VBD', vbd_rec)
+ after_VBD_create(vbd_ref, vbd_rec)
+ return vbd_ref
+
+
+def after_VBD_create(vbd_ref, vbd_rec):
+ """Create backref from VM to VBD when VBD is created"""
+ vm_ref = vbd_rec['VM']
+ vm_rec = _db_content['VM'][vm_ref]
+ vm_rec['VBDs'] = [vbd_ref]
+
+ vm_name_label = _db_content['VM'][vm_ref]['name_label']
+ vbd_rec['vm_name_label'] = vm_name_label
+
+
def create_pbd(config, sr_ref, attached):
return _create_object('PBD', {
'device-config': config,
@@ -277,11 +318,12 @@ class SessionBase(object):
self._check_arg_count(params, 2)
return get_record(cls, params[1])
- if (func == 'get_by_name_label' or
- func == 'get_by_uuid'):
+ if func in ('get_by_name_label', 'get_by_uuid'):
self._check_arg_count(params, 2)
+ return_singleton = (func == 'get_by_uuid')
return self._get_by_field(
- _db_content[cls], func[len('get_by_'):], params[1])
+ _db_content[cls], func[len('get_by_'):], params[1],
+ return_singleton=return_singleton)
if len(params) == 2:
field = func[len('get_'):]
@@ -324,6 +366,13 @@ class SessionBase(object):
(cls, _) = name.split('.')
ref = is_sr_create and \
_create_sr(cls, params) or _create_object(cls, params[1])
+
+ # Call hook to provide any fixups needed (ex. creating backrefs)
+ try:
+ globals()["after_%s_create" % cls](ref, params[1])
+ except KeyError:
+ pass
+
obj = get_record(cls, ref)
# Add RO fields
@@ -359,11 +408,18 @@ class SessionBase(object):
raise Failure(['MESSAGE_PARAMETER_COUNT_MISMATCH',
expected, actual])
- def _get_by_field(self, recs, k, v):
+ def _get_by_field(self, recs, k, v, return_singleton):
result = []
for ref, rec in recs.iteritems():
if rec.get(k) == v:
result.append(ref)
+
+ if return_singleton:
+ try:
+ return result[0]
+ except IndexError:
+ return None
+
return result
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 47fb6db53..9d1b51848 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -20,11 +20,14 @@ their attributes like VDIs, VIFs, as well as their lookup functions.
"""
import logging
+import pickle
import urllib
from xml.dom import minidom
+from eventlet import event
from nova import exception
from nova import flags
+from nova import utils
from nova.auth.manager import AuthManager
from nova.compute import instance_types
from nova.compute import power_state
@@ -204,7 +207,54 @@ class VMHelper(HelperBase):
return vif_ref
@classmethod
- def fetch_image(cls, session, image, user, project, type):
+ def create_snapshot(cls, session, instance_id, vm_ref, label):
+ """ Creates Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
+ Snapshot VHD
+ """
+ #TODO(sirp): Add quiesce and VSS locking support when Windows support
+ # is added
+ logging.debug(_("Snapshotting VM %s with label '%s'..."),
+ vm_ref, label)
+
+ vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref)
+ vm_vdi_uuid = vm_vdi_rec["uuid"]
+ sr_ref = vm_vdi_rec["SR"]
+
+ original_parent_uuid = get_vhd_parent_uuid(session, vm_vdi_ref)
+
+ task = session.call_xenapi('Async.VM.snapshot', vm_ref, label)
+ template_vm_ref = session.wait_for_task(instance_id, task)
+ template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1]
+ template_vdi_uuid = template_vdi_rec["uuid"]
+
+ logging.debug(_('Created snapshot %s from VM %s.'), template_vm_ref,
+ vm_ref)
+
+ parent_uuid = wait_for_vhd_coalesce(
+ session, instance_id, sr_ref, vm_vdi_ref, original_parent_uuid)
+
+ #TODO(sirp): we need to assert only one parent, not parents two deep
+ return template_vm_ref, [template_vdi_uuid, parent_uuid]
+
+ @classmethod
+ def upload_image(cls, session, instance_id, vdi_uuids, image_name):
+ """ Requests that the Glance plugin bundle the specified VDIs and
+ push them into Glance using the specified human-friendly name.
+ """
+ logging.debug(_("Asking xapi to upload %s as '%s'"),
+ vdi_uuids, image_name)
+
+ params = {'vdi_uuids': vdi_uuids,
+ 'image_name': image_name,
+ 'glance_host': FLAGS.glance_host,
+ 'glance_port': FLAGS.glance_port}
+
+ kwargs = {'params': pickle.dumps(params)}
+ task = session.async_call_plugin('glance', 'put_vdis', kwargs)
+ session.wait_for_task(instance_id, task)
+
+ @classmethod
+ def fetch_image(cls, session, instance_id, image, user, project, type):
"""
type is interpreted as an ImageType instance
"""
@@ -223,9 +273,7 @@ class VMHelper(HelperBase):
if type == ImageType.DISK_RAW:
args['raw'] = 'true'
task = session.async_call_plugin('objectstore', fn, args)
- #FIXME(armando): find a solution to missing instance_id
- #with Josh Kearney
- uuid = session.wait_for_task(0, task)
+ uuid = session.wait_for_task(instance_id, task)
return uuid
@classmethod
@@ -299,6 +347,10 @@ class VMHelper(HelperBase):
try:
host = session.get_xenapi_host()
host_ip = session.get_xenapi().host.get_record(host)["address"]
+ except (cls.XenAPI.Failure, KeyError) as e:
+ return {"Unable to retrieve diagnostics": e}
+
+ try:
diags = {}
xml = get_rrd(host_ip, record["uuid"])
if xml:
@@ -325,3 +377,87 @@ def get_rrd(host, uuid):
return xml.read()
except IOError:
return None
+
+
+#TODO(sirp): This code comes from XS5.6 pluginlib.py, we should refactor to
+# use that implementation
+def get_vhd_parent(session, vdi_rec):
+ """
+ Returns the VHD parent of the given VDI record, as a (ref, rec) pair.
+ Returns None if we're at the root of the tree.
+ """
+ if 'vhd-parent' in vdi_rec['sm_config']:
+ parent_uuid = vdi_rec['sm_config']['vhd-parent']
+ #NOTE(sirp): changed xenapi -> get_xenapi()
+ parent_ref = session.get_xenapi().VDI.get_by_uuid(parent_uuid)
+ parent_rec = session.get_xenapi().VDI.get_record(parent_ref)
+ #NOTE(sirp): changed log -> logging
+ logging.debug(_("VHD %s has parent %s"), vdi_rec['uuid'], parent_ref)
+ return parent_ref, parent_rec
+ else:
+ return None
+
+
+def get_vhd_parent_uuid(session, vdi_ref):
+ vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
+ ret = get_vhd_parent(session, vdi_rec)
+ if ret:
+ parent_ref, parent_rec = ret
+ return parent_rec["uuid"]
+ else:
+ return None
+
+
+def scan_sr(session, instance_id, sr_ref):
+ logging.debug(_("Re-scanning SR %s"), sr_ref)
+ task = session.call_xenapi('Async.SR.scan', sr_ref)
+ session.wait_for_task(instance_id, task)
+
+
+def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
+ original_parent_uuid):
+ """ Spin until the parent VHD is coalesced into its parent VHD
+
+ Before coalesce:
+ * original_parent_vhd
+ * parent_vhd
+ snapshot
+
+ After coalesce:
+ * parent_vhd
+ snapshot
+ """
+ #TODO(sirp): we need to timeout this req after a while
+
+ def _poll_vhds():
+ scan_sr(session, instance_id, sr_ref)
+ parent_uuid = get_vhd_parent_uuid(session, vdi_ref)
+ if original_parent_uuid and (parent_uuid != original_parent_uuid):
+ logging.debug(
+ _("Parent %s doesn't match original parent %s, "
+ "waiting for coalesce..."),
+ parent_uuid, original_parent_uuid)
+ else:
+ done.send(parent_uuid)
+
+ done = event.Event()
+ loop = utils.LoopingCall(_poll_vhds)
+ loop.start(FLAGS.xenapi_vhd_coalesce_poll_interval, now=True)
+ parent_uuid = done.wait()
+ loop.stop()
+ return parent_uuid
+
+
+def get_vdi_for_vm_safely(session, vm_ref):
+ vdi_refs = VMHelper.lookup_vm_vdis(session, vm_ref)
+ if vdi_refs is None:
+ raise Exception(_("No VDIs found for VM %s") % vm_ref)
+ else:
+ num_vdis = len(vdi_refs)
+ if num_vdis != 1:
+ raise Exception(_("Unexpected number of VDIs (%s) found for "
+ "VM %s") % (num_vdis, vm_ref))
+
+ vdi_ref = vdi_refs[0]
+ vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
+ return vdi_ref, vdi_rec
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index ba502ffa2..b6d620782 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -1,6 +1,7 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
+# Copyright 2010 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -18,6 +19,7 @@
Management class for VM-related functions (spawn, reboot, etc).
"""
+import json
import logging
from nova import db
@@ -36,7 +38,6 @@ class VMOps(object):
"""
Management class for VM-related tasks
"""
-
def __init__(self, session):
self.XenAPI = session.get_imported_xenapi()
self._session = session
@@ -70,7 +71,7 @@ class VMOps(object):
disk_image_type = ImageType.DISK
else:
disk_image_type = ImageType.DISK_RAW
- vdi_uuid = VMHelper.fetch_image(self._session,
+ vdi_uuid = VMHelper.fetch_image(self._session, instance.id,
instance.image_id, user, project, disk_image_type)
vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
#Have a look at the VDI and see if it has a PV kernel
@@ -79,11 +80,11 @@ class VMOps(object):
pv_kernel = VMHelper.lookup_image(self._session, vdi_ref)
kernel = None
if instance.kernel_id:
- kernel = VMHelper.fetch_image(self._session,
+ kernel = VMHelper.fetch_image(self._session, instance.id,
instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK)
ramdisk = None
if instance.ramdisk_id:
- ramdisk = VMHelper.fetch_image(self._session,
+ ramdisk = VMHelper.fetch_image(self._session, instance.id,
instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK)
vm_ref = VMHelper.create_vm(self._session,
instance, kernel, ramdisk, pv_kernel)
@@ -120,71 +121,124 @@ class VMOps(object):
timer.f = _wait_for_boot
return timer.start(interval=0.5, now=True)
+ def _get_vm_opaque_ref(self, instance_or_vm):
+ """Refactored out the common code of many methods that receive either
+ a vm name or a vm instance, and want a vm instance in return.
+ """
+ try:
+ instance_name = instance_or_vm.name
+ vm = VMHelper.lookup(self._session, instance_name)
+ except AttributeError:
+ # A vm opaque ref was passed
+ vm = instance_or_vm
+ if vm is None:
+ raise Exception(_('Instance not present %s') % instance_name)
+ return vm
+
+ def snapshot(self, instance, name):
+ """ Create snapshot from a running VM instance
+
+ :param instance: instance to be snapshotted
+ :param name: name/label to be given to the snapshot
+
+ Steps involved in a XenServer snapshot:
+
+ 1. XAPI-Snapshot: Snapshotting the instance using XenAPI. This
+ creates: Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
+ Snapshot VHD
+
+ 2. Wait-for-coalesce: The Snapshot VDI and Instance VDI both point to
+ a 'base-copy' VDI. The base_copy is immutable and may be chained
+ with other base_copies. If chained, the base_copies
+ coalesce together, so, we must wait for this coalescing to occur to
+ get a stable representation of the data on disk.
+
+ 3. Push-to-glance: Once coalesced, we call a plugin on the XenServer
+ that will bundle the VHDs together and then push the bundle into
+ Glance.
+ """
+
+ #TODO(sirp): Add quiesce and VSS locking support when Windows support
+ # is added
+
+ logging.debug(_("Starting snapshot for VM %s"), instance)
+ vm_ref = VMHelper.lookup(self._session, instance.name)
+
+ label = "%s-snapshot" % instance.name
+ try:
+ template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot(
+ self._session, instance.id, vm_ref, label)
+ except self.XenAPI.Failure, exc:
+ logging.error(_("Unable to Snapshot %s: %s"), vm_ref, exc)
+ return
+
+ try:
+ # call plugin to ship snapshot off to glance
+ VMHelper.upload_image(
+ self._session, instance.id, template_vdi_uuids, name)
+ finally:
+ self._destroy(instance, template_vm_ref, shutdown=False)
+
+ logging.debug(_("Finished snapshot and upload for VM %s"), instance)
+
def reboot(self, instance):
"""Reboot VM instance"""
- instance_name = instance.name
- vm = VMHelper.lookup(self._session, instance_name)
- if vm is None:
- raise exception.NotFound(_('instance not'
- ' found %s') % instance_name)
+ vm = self._get_vm_opaque_ref(instance)
task = self._session.call_xenapi('Async.VM.clean_reboot', vm)
self._session.wait_for_task(instance.id, task)
def destroy(self, instance):
"""Destroy VM instance"""
vm = VMHelper.lookup(self._session, instance.name)
+ return self._destroy(instance, vm, shutdown=True)
+
+ def _destroy(self, instance, vm, shutdown=True):
+ """ Destroy VM instance """
if vm is None:
# Don't complain, just return. This lets us clean up instances
# that have already disappeared from the underlying platform.
return
# Get the VDIs related to the VM
vdis = VMHelper.lookup_vm_vdis(self._session, vm)
- try:
- task = self._session.call_xenapi('Async.VM.hard_shutdown',
- vm)
- self._session.wait_for_task(instance.id, task)
- except XenAPI.Failure, exc:
- logging.warn(exc)
+ if shutdown:
+ try:
+ task = self._session.call_xenapi('Async.VM.hard_shutdown', vm)
+ self._session.wait_for_task(instance.id, task)
+ except self.XenAPI.Failure, exc:
+ logging.warn(exc)
+
# Disk clean-up
if vdis:
for vdi in vdis:
try:
task = self._session.call_xenapi('Async.VDI.destroy', vdi)
self._session.wait_for_task(instance.id, task)
- except XenAPI.Failure, exc:
+ except self.XenAPI.Failure, exc:
logging.warn(exc)
# VM Destroy
try:
task = self._session.call_xenapi('Async.VM.destroy', vm)
self._session.wait_for_task(instance.id, task)
- except XenAPI.Failure, exc:
+ except self.XenAPI.Failure, exc:
logging.warn(exc)
def _wait_with_callback(self, instance_id, task, callback):
ret = None
try:
ret = self._session.wait_for_task(instance_id, task)
- except XenAPI.Failure, exc:
+ except self.XenAPI.Failure, exc:
logging.warn(exc)
callback(ret)
def pause(self, instance, callback):
"""Pause VM instance"""
- instance_name = instance.name
- vm = VMHelper.lookup(self._session, instance_name)
- if vm is None:
- raise exception.NotFound(_('Instance not'
- ' found %s') % instance_name)
+ vm = self._get_vm_opaque_ref(instance)
task = self._session.call_xenapi('Async.VM.pause', vm)
self._wait_with_callback(instance.id, task, callback)
def unpause(self, instance, callback):
"""Unpause VM instance"""
- instance_name = instance.name
- vm = VMHelper.lookup(self._session, instance_name)
- if vm is None:
- raise exception.NotFound(_('Instance not'
- ' found %s') % instance_name)
+ vm = self._get_vm_opaque_ref(instance)
task = self._session.call_xenapi('Async.VM.unpause', vm)
self._wait_with_callback(instance.id, task, callback)
@@ -217,11 +271,9 @@ class VMOps(object):
rec = self._session.get_xenapi().VM.get_record(vm)
return VMHelper.compile_info(rec)
- def get_diagnostics(self, instance_id):
+ def get_diagnostics(self, instance):
"""Return data about VM diagnostics"""
- vm = VMHelper.lookup(self._session, instance_id)
- if vm is None:
- raise exception.NotFound(_("Instance not found %s") % instance_id)
+ vm = self._get_vm_opaque_ref(instance)
rec = self._session.get_xenapi().VM.get_record(vm)
return VMHelper.compile_diagnostics(self._session, rec)
@@ -229,3 +281,175 @@ class VMOps(object):
"""Return snapshot of console"""
# TODO: implement this to fix pylint!
return 'FAKE CONSOLE OUTPUT of instance'
+
+ def list_from_xenstore(self, vm, path):
+ """Runs the xenstore-ls command to get a listing of all records
+ from 'path' downward. Returns a dict with the sub-paths as keys,
+ and the value stored in those paths as values. If nothing is
+ found at that path, returns None.
+ """
+ ret = self._make_xenstore_call('list_records', vm, path)
+ return json.loads(ret)
+
+ def read_from_xenstore(self, vm, path):
+ """Returns the value stored in the xenstore record for the given VM
+ at the specified location. A XenAPIPlugin.PluginError will be raised
+ if any error is encountered in the read process.
+ """
+ try:
+ ret = self._make_xenstore_call('read_record', vm, path,
+ {'ignore_missing_path': 'True'})
+ except self.XenAPI.Failure, e:
+ return None
+ ret = json.loads(ret)
+ if ret == "None":
+ # Can't marshall None over RPC calls.
+ return None
+ return ret
+
+ def write_to_xenstore(self, vm, path, value):
+ """Writes the passed value to the xenstore record for the given VM
+ at the specified location. A XenAPIPlugin.PluginError will be raised
+ if any error is encountered in the write process.
+ """
+ return self._make_xenstore_call('write_record', vm, path,
+ {'value': json.dumps(value)})
+
+ def clear_xenstore(self, vm, path):
+ """Deletes the VM's xenstore record for the specified path.
+ If there is no such record, the request is ignored.
+ """
+ self._make_xenstore_call('delete_record', vm, path)
+
+ def _make_xenstore_call(self, method, vm, path, addl_args={}):
+ """Handles calls to the xenstore xenapi plugin."""
+ return self._make_plugin_call('xenstore.py', method=method, vm=vm,
+ path=path, addl_args=addl_args)
+
+ def _make_plugin_call(self, plugin, method, vm, path, addl_args={}):
+ """Abstracts out the process of calling a method of a xenapi plugin.
+ Any errors raised by the plugin will in turn raise a RuntimeError here.
+ """
+ vm = self._get_vm_opaque_ref(vm)
+ rec = self._session.get_xenapi().VM.get_record(vm)
+ args = {'dom_id': rec['domid'], 'path': path}
+ args.update(addl_args)
+ # If the 'testing_mode' attribute is set, add that to the args.
+ if getattr(self, 'testing_mode', False):
+ args['testing_mode'] = 'true'
+ try:
+ task = self._session.async_call_plugin(plugin, method, args)
+ ret = self._session.wait_for_task(0, task)
+ except self.XenAPI.Failure, e:
+ raise RuntimeError("%s" % e.details[-1])
+ return ret
+
+ def add_to_xenstore(self, vm, path, key, value):
+ """Adds the passed key/value pair to the xenstore record for
+ the given VM at the specified location. A XenAPIPlugin.PluginError
+ will be raised if any error is encountered in the write process.
+ """
+ current = self.read_from_xenstore(vm, path)
+ if not current:
+ # Nothing at that location
+ current = {key: value}
+ else:
+ current[key] = value
+ self.write_to_xenstore(vm, path, current)
+
+ def remove_from_xenstore(self, vm, path, key_or_keys):
+ """Takes either a single key or a list of keys and removes
+ them from the xenstore record data for the given VM.
+ If the key doesn't exist, the request is ignored.
+ """
+ current = self.list_from_xenstore(vm, path)
+ if not current:
+ return
+ if isinstance(key_or_keys, basestring):
+ keys = [key_or_keys]
+ else:
+ keys = key_or_keys
+ keys.sort(lambda x, y: cmp(y.count('/'), x.count('/')))
+ for key in keys:
+ if path:
+ keypath = "%s/%s" % (path, key)
+ else:
+ keypath = key
+ self._make_xenstore_call('delete_record', vm, keypath)
+
+ ########################################################################
+ ###### The following methods interact with the xenstore parameter
+ ###### record, not the live xenstore. They were created before I
+ ###### knew the difference, and are left in here in case they prove
+ ###### to be useful. They all have '_param' added to their method
+ ###### names to distinguish them. (dabo)
+ ########################################################################
+ def read_partial_from_param_xenstore(self, instance_or_vm, key_prefix):
+ """Returns a dict of all the keys in the xenstore parameter record
+ for the given instance that begin with the key_prefix.
+ """
+ data = self.read_from_param_xenstore(instance_or_vm)
+ badkeys = [k for k in data.keys()
+ if not k.startswith(key_prefix)]
+ for badkey in badkeys:
+ del data[badkey]
+ return data
+
+ def read_from_param_xenstore(self, instance_or_vm, keys=None):
+ """Returns the xenstore parameter record data for the specified VM
+ instance as a dict. Accepts an optional key or list of keys; if a
+ value for 'keys' is passed, the returned dict is filtered to only
+ return the values for those keys.
+ """
+ vm = self._get_vm_opaque_ref(instance_or_vm)
+ data = self._session.call_xenapi_request('VM.get_xenstore_data',
+ (vm, ))
+ ret = {}
+ if keys is None:
+ keys = data.keys()
+ elif isinstance(keys, basestring):
+ keys = [keys]
+ for key in keys:
+ raw = data.get(key)
+ if raw:
+ ret[key] = json.loads(raw)
+ else:
+ ret[key] = raw
+ return ret
+
+ def add_to_param_xenstore(self, instance_or_vm, key, val):
+ """Takes a key/value pair and adds it to the xenstore parameter
+ record for the given vm instance. If the key exists in xenstore,
+ it is overwritten"""
+ vm = self._get_vm_opaque_ref(instance_or_vm)
+ self.remove_from_param_xenstore(instance_or_vm, key)
+ jsonval = json.dumps(val)
+ self._session.call_xenapi_request('VM.add_to_xenstore_data',
+ (vm, key, jsonval))
+
+ def write_to_param_xenstore(self, instance_or_vm, mapping):
+ """Takes a dict and writes each key/value pair to the xenstore
+ parameter record for the given vm instance. Any existing data for
+ those keys is overwritten.
+ """
+ for k, v in mapping.iteritems():
+ self.add_to_param_xenstore(instance_or_vm, k, v)
+
+ def remove_from_param_xenstore(self, instance_or_vm, key_or_keys):
+ """Takes either a single key or a list of keys and removes
+ them from the xenstore parameter record data for the given VM.
+ If the key doesn't exist, the request is ignored.
+ """
+ vm = self._get_vm_opaque_ref(instance_or_vm)
+ if isinstance(key_or_keys, basestring):
+ keys = [key_or_keys]
+ else:
+ keys = key_or_keys
+ for key in keys:
+ self._session.call_xenapi_request('VM.remove_from_xenstore_data',
+ (vm, key))
+
+ def clear_param_xenstore(self, instance_or_vm):
+ """Removes all data from the xenstore parameter record for this VM."""
+ self.write_to_param_xenstore(instance_or_vm, {})
+ ########################################################################
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index 1ca813bcf..4bbc41b03 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -200,15 +200,19 @@ class VolumeHelper(HelperBase):
return -1
-def _get_volume_id(path):
+def _get_volume_id(path_or_id):
"""Retrieve the volume id from device_path"""
+ # If we have the ID and not a path, just return it.
+ if isinstance(path_or_id, int):
+ return path_or_id
# n must contain at least the volume_id
# /vol- is for remote volumes
# -vol- is for local volumes
# see compute/manager->setup_compute_volume
- volume_id = path[path.find('/vol-') + 1:]
- if volume_id == path:
- volume_id = path[path.find('-vol-') + 1:].replace('--', '-')
+ volume_id = path_or_id[path_or_id.find('/vol-') + 1:]
+ if volume_id == path_or_id:
+ volume_id = path_or_id[path_or_id.find('-vol-') + 1:]
+ volume_id = volume_id.replace('--', '-')
return volume_id
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index 7f03d6c2b..c48f5b7cb 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -1,6 +1,7 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
+# Copyright 2010 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -19,15 +20,15 @@ A connection to XenServer or Xen Cloud Platform.
The concurrency model for this class is as follows:
-All XenAPI calls are on a thread (using t.i.t.deferToThread, via the decorator
-deferredToThread). They are remote calls, and so may hang for the usual
-reasons. They should not be allowed to block the reactor thread.
+All XenAPI calls are on a green thread (using eventlet's "tpool"
+thread pool). They are remote calls, and so may hang for the usual
+reasons.
All long-running XenAPI calls (VM.start, VM.reboot, etc) are called async
-(using XenAPI.VM.async_start etc). These return a task, which can then be
-polled for completion. Polling is handled using reactor.callLater.
+(using XenAPI.VM.async_start etc). These return a task, which can then be
+polled for completion.
-This combination of techniques means that we don't block the reactor thread at
+This combination of techniques means that we don't block the main thread at
all, and at the same time we don't hold lots of threads waiting for
long-running operations.
@@ -81,8 +82,12 @@ flags.DEFINE_string('xenapi_connection_password',
flags.DEFINE_float('xenapi_task_poll_interval',
0.5,
'The interval used for polling of remote tasks '
- '(Async.VM.start, etc). Used only if '
+ '(Async.VM.start, etc). Used only if '
'connection_type=xenapi.')
+flags.DEFINE_float('xenapi_vhd_coalesce_poll_interval',
+ 5.0,
+ 'The interval used for polling of coalescing vhds.'
+ ' Used only if connection_type=xenapi.')
flags.DEFINE_string('target_host',
None,
'iSCSI Target Host')
@@ -131,6 +136,10 @@ class XenAPIConnection(object):
"""Create VM instance"""
self._vmops.spawn(instance)
+ def snapshot(self, instance, name):
+ """ Create snapshot from a running VM instance """
+ self._vmops.snapshot(instance, name)
+
def reboot(self, instance):
"""Reboot VM instance"""
self._vmops.reboot(instance)
@@ -159,9 +168,9 @@ class XenAPIConnection(object):
"""Return data about VM instance"""
return self._vmops.get_info(instance_id)
- def get_diagnostics(self, instance_id):
+ def get_diagnostics(self, instance):
"""Return data about VM diagnostics"""
- return self._vmops.get_diagnostics(instance_id)
+ return self._vmops.get_diagnostics(instance)
def get_console_output(self, instance):
"""Return snapshot of console"""
@@ -205,6 +214,14 @@ class XenAPISession(object):
f = f.__getattr__(m)
return tpool.execute(f, *args)
+ def call_xenapi_request(self, method, *args):
+ """Some interactions with dom0, such as interacting with xenstore's
+ param record, require using the xenapi_request method of the session
+ object. This wraps that call on a background thread.
+ """
+ f = self._session.xenapi_request
+ return tpool.execute(f, method, *args)
+
def async_call_plugin(self, plugin, fn, args):
"""Call Async.host.call_plugin on a background thread."""
return tpool.execute(self._unwrap_plugin_exceptions,
@@ -214,7 +231,6 @@ class XenAPISession(object):
def wait_for_task(self, id, task):
"""Return the result of the given task. The task is polled
until it completes."""
-
done = event.Event()
loop = utils.LoopingCall(self._poll_task, id, task, done)
loop.start(FLAGS.xenapi_task_poll_interval, now=True)
@@ -227,14 +243,14 @@ class XenAPISession(object):
return self.XenAPI.Session(url)
def _poll_task(self, id, task, done):
- """Poll the given XenAPI task, and fire the given Deferred if we
+ """Poll the given XenAPI task, and fire the given action if we
get a result."""
try:
name = self._session.xenapi.task.get_name_label(task)
status = self._session.xenapi.task.get_status(task)
action = dict(
- id=int(id),
- action=name,
+ instance_id=int(id),
+ action=name[0:255], # Ensure action is never > 255
error=None)
if status == "pending":
return
@@ -282,7 +298,7 @@ class XenAPISession(object):
def _parse_xmlrpc_value(val):
- """Parse the given value as if it were an XML-RPC value. This is
+ """Parse the given value as if it were an XML-RPC value. This is
sometimes used as the format for the task.result field."""
if not val:
return val
diff --git a/nova/volume/__init__.py b/nova/volume/__init__.py
index d6e944fc0..56ef9332e 100644
--- a/nova/volume/__init__.py
+++ b/nova/volume/__init__.py
@@ -16,16 +16,4 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""
-:mod:`nova.volume` -- Nova Block Storage
-=====================================================
-
-.. automodule:: nova.volume
- :platform: Unix
-.. moduleauthor:: Jesse Andrews <jesse@ansolabs.com>
-.. moduleauthor:: Devin Carlen <devin.carlen@gmail.com>
-.. moduleauthor:: Vishvananda Ishaya <vishvananda@yahoo.com>
-.. moduleauthor:: Joshua McKenty <joshua@cognition.ca>
-.. moduleauthor:: Manish Singh <yosh@gimp.org>
-.. moduleauthor:: Andy Smith <andy@anarkystic.com>
-"""
+from nova.volume.api import API
diff --git a/nova/volume/api.py b/nova/volume/api.py
new file mode 100644
index 000000000..2d7fe3762
--- /dev/null
+++ b/nova/volume/api.py
@@ -0,0 +1,101 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Handles all requests relating to volumes.
+"""
+
+import datetime
+import logging
+
+from nova import db
+from nova import exception
+from nova import flags
+from nova import quota
+from nova import rpc
+from nova.db import base
+
+FLAGS = flags.FLAGS
+flags.DECLARE('storage_availability_zone', 'nova.volume.manager')
+
+
+class API(base.Base):
+ """API for interacting with the volume manager."""
+
+ def create(self, context, size, name, description):
+ if quota.allowed_volumes(context, 1, size) < 1:
+ logging.warn("Quota exceeeded for %s, tried to create %sG volume",
+ context.project_id, size)
+ raise quota.QuotaError("Volume quota exceeded. You cannot "
+ "create a volume of size %s" % size)
+
+ options = {
+ 'size': size,
+ 'user_id': context.user.id,
+ 'project_id': context.project_id,
+ 'availability_zone': FLAGS.storage_availability_zone,
+ 'status': "creating",
+ 'attach_status': "detached",
+ 'display_name': name,
+ 'display_description': description}
+
+ volume = self.db.volume_create(context, options)
+ rpc.cast(context,
+ FLAGS.scheduler_topic,
+ {"method": "create_volume",
+ "args": {"topic": FLAGS.volume_topic,
+ "volume_id": volume['id']}})
+ return volume
+
+ def delete(self, context, volume_id):
+ volume = self.get(context, volume_id)
+ if volume['status'] != "available":
+ raise exception.ApiError(_("Volume status must be available"))
+ now = datetime.datetime.utcnow()
+ self.db.volume_update(context, volume_id, {'status': 'deleting',
+ 'terminated_at': now})
+ host = volume['host']
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.volume_topic, host),
+ {"method": "delete_volume",
+ "args": {"volume_id": volume_id}})
+
+ def update(self, context, volume_id, fields):
+ self.db.volume_update(context, volume_id, fields)
+
+ def get(self, context, volume_id):
+ return self.db.volume_get(context, volume_id)
+
+ def get_all(self, context):
+ if context.user.is_admin():
+ return self.db.volume_get_all(context)
+ return self.db.volume_get_all_by_project(context, context.project_id)
+
+ def check_attach(self, context, volume_id):
+ volume = self.get(context, volume_id)
+ # TODO(vish): abstract status checking?
+ if volume['status'] != "available":
+ raise exception.ApiError(_("Volume status must be available"))
+ if volume['attach_status'] == "attached":
+ raise exception.ApiError(_("Volume is already attached"))
+
+ def check_detach(self, context, volume_id):
+ volume = self.get(context, volume_id)
+ # TODO(vish): abstract status checking?
+ if volume['status'] == "available":
+ raise exception.ApiError(_("Volume is already detached"))
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
new file mode 100644
index 000000000..5e648b970
--- /dev/null
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
@@ -0,0 +1,132 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2010 Citrix Systems, Inc.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+#
+# XenAPI plugin for putting images into glance
+#
+
+import base64
+import errno
+import hmac
+import httplib
+import os
+import os.path
+import pickle
+import sha
+import subprocess
+import time
+import urlparse
+
+import XenAPIPlugin
+
+#FIXME(sirp): should this use pluginlib from 5.6?
+from pluginlib_nova import *
+configure_logging('glance')
+
+CHUNK_SIZE = 8192
+FILE_SR_PATH = '/var/run/sr-mount'
+
+def put_vdis(session, args):
+ params = pickle.loads(exists(args, 'params'))
+ vdi_uuids = params["vdi_uuids"]
+ image_name = params["image_name"]
+ glance_host = params["glance_host"]
+ glance_port = params["glance_port"]
+
+ sr_path = get_sr_path(session)
+ #FIXME(sirp): writing to a temp file until Glance supports chunked-PUTs
+ tmp_file = "%s.tar.gz" % os.path.join('/tmp', image_name)
+ tar_cmd = ['tar', '-zcf', tmp_file, '--directory=%s' % sr_path]
+ paths = [ "%s.vhd" % vdi_uuid for vdi_uuid in vdi_uuids ]
+ tar_cmd.extend(paths)
+ logging.debug("Bundling image with cmd: %s", tar_cmd)
+ subprocess.call(tar_cmd)
+ logging.debug("Writing to test file %s", tmp_file)
+ put_bundle_in_glance(tmp_file, image_name, glance_host, glance_port)
+ return "" # FIXME(sirp): return anything useful here?
+
+
+def put_bundle_in_glance(tmp_file, image_name, glance_host, glance_port):
+ size = os.path.getsize(tmp_file)
+ basename = os.path.basename(tmp_file)
+
+ bundle = open(tmp_file, 'r')
+ try:
+ headers = {
+ 'x-image-meta-store': 'file',
+ 'x-image-meta-is_public': 'True',
+ 'x-image-meta-type': 'raw',
+ 'x-image-meta-name': image_name,
+ 'x-image-meta-size': size,
+ 'content-length': size,
+ 'content-type': 'application/octet-stream',
+ }
+ conn = httplib.HTTPConnection(glance_host, glance_port)
+ #NOTE(sirp): httplib under python2.4 won't accept a file-like object
+ # to request
+ conn.putrequest('POST', '/images')
+
+ for header, value in headers.iteritems():
+ conn.putheader(header, value)
+ conn.endheaders()
+
+ chunk = bundle.read(CHUNK_SIZE)
+ while chunk:
+ conn.send(chunk)
+ chunk = bundle.read(CHUNK_SIZE)
+
+
+ res = conn.getresponse()
+ #FIXME(sirp): should this be 201 Created?
+ if res.status != httplib.OK:
+ raise Exception("Unexpected response from Glance %i" % res.status)
+ finally:
+ bundle.close()
+
+def get_sr_path(session):
+ sr_ref = find_sr(session)
+
+ if sr_ref is None:
+ raise Exception('Cannot find SR to read VDI from')
+
+ sr_rec = session.xenapi.SR.get_record(sr_ref)
+ sr_uuid = sr_rec["uuid"]
+ sr_path = os.path.join(FILE_SR_PATH, sr_uuid)
+ return sr_path
+
+
+#TODO(sirp): both objectstore and glance need this, should this be refactored
+#into common lib
+def find_sr(session):
+ host = get_this_host(session)
+ srs = session.xenapi.SR.get_all()
+ for sr in srs:
+ sr_rec = session.xenapi.SR.get_record(sr)
+ if not ('i18n-key' in sr_rec['other_config'] and
+ sr_rec['other_config']['i18n-key'] == 'local-storage'):
+ continue
+ for pbd in sr_rec['PBDs']:
+ pbd_rec = session.xenapi.PBD.get_record(pbd)
+ if pbd_rec['host'] == host:
+ return sr
+ return None
+
+
+if __name__ == '__main__':
+ XenAPIPlugin.dispatch({'put_vdis': put_vdis})
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py b/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py
index 2d323a016..8e7a829d5 100755
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py
@@ -45,6 +45,7 @@ class PluginError(Exception):
def __init__(self, *args):
Exception.__init__(self, *args)
+
class ArgumentError(PluginError):
"""Raised when required arguments are missing, argument values are invalid,
or incompatible arguments are given.
@@ -67,6 +68,7 @@ def ignore_failure(func, *args, **kwargs):
ARGUMENT_PATTERN = re.compile(r'^[a-zA-Z0-9_:\.\-,]+$')
+
def validate_exists(args, key, default=None):
"""Validates that a string argument to a RPC method call is given, and
matches the shell-safe regex, with an optional default value in case it
@@ -76,20 +78,24 @@ def validate_exists(args, key, default=None):
"""
if key in args:
if len(args[key]) == 0:
- raise ArgumentError('Argument %r value %r is too short.' % (key, args[key]))
+ raise ArgumentError('Argument %r value %r is too short.' %
+ (key, args[key]))
if not ARGUMENT_PATTERN.match(args[key]):
- raise ArgumentError('Argument %r value %r contains invalid characters.' % (key, args[key]))
+ raise ArgumentError('Argument %r value %r contains invalid '
+ 'characters.' % (key, args[key]))
if args[key][0] == '-':
- raise ArgumentError('Argument %r value %r starts with a hyphen.' % (key, args[key]))
+ raise ArgumentError('Argument %r value %r starts with a hyphen.'
+ % (key, args[key]))
return args[key]
elif default is not None:
return default
else:
raise ArgumentError('Argument %s is required.' % key)
+
def validate_bool(args, key, default=None):
- """Validates that a string argument to a RPC method call is a boolean string,
- with an optional default value in case it does not exist.
+ """Validates that a string argument to a RPC method call is a boolean
+ string, with an optional default value in case it does not exist.
Returns the python boolean value.
"""
@@ -99,7 +105,9 @@ def validate_bool(args, key, default=None):
elif value.lower() == 'false':
return False
else:
- raise ArgumentError("Argument %s may not take value %r. Valid values are ['true', 'false']." % (key, value))
+ raise ArgumentError("Argument %s may not take value %r. "
+ "Valid values are ['true', 'false']." % (key, value))
+
def exists(args, key):
"""Validates that a freeform string argument to a RPC method call is given.
@@ -110,6 +118,7 @@ def exists(args, key):
else:
raise ArgumentError('Argument %s is required.' % key)
+
def optional(args, key):
"""If the given key is in args, return the corresponding value, otherwise
return None"""
@@ -122,13 +131,14 @@ def get_this_host(session):
def get_domain_0(session):
this_host_ref = get_this_host(session)
- expr = 'field "is_control_domain" = "true" and field "resident_on" = "%s"' % this_host_ref
+ expr = 'field "is_control_domain" = "true" and field "resident_on" = "%s"'
+ expr = expr % this_host_ref
return session.xenapi.VM.get_all_records_where(expr).keys()[0]
def create_vdi(session, sr_ref, name_label, virtual_size, read_only):
vdi_ref = session.xenapi.VDI.create(
- { 'name_label': name_label,
+ {'name_label': name_label,
'name_description': '',
'SR': sr_ref,
'virtual_size': str(virtual_size),
@@ -138,7 +148,7 @@ def create_vdi(session, sr_ref, name_label, virtual_size, read_only):
'xenstore_data': {},
'other_config': {},
'sm_config': {},
- 'tags': [] })
+ 'tags': []})
logging.debug('Created VDI %s (%s, %s, %s) on %s.', vdi_ref, name_label,
virtual_size, read_only, sr_ref)
return vdi_ref
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py
new file mode 100755
index 000000000..695bf3448
--- /dev/null
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py
@@ -0,0 +1,180 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2010 Citrix Systems, Inc.
+# Copyright 2010 OpenStack LLC.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+#
+# XenAPI plugin for reading/writing information to xenstore
+#
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+import subprocess
+
+import XenAPIPlugin
+
+import pluginlib_nova as pluginlib
+pluginlib.configure_logging("xenstore")
+
+
+def jsonify(fnc):
+ def wrapper(*args, **kwargs):
+ return json.dumps(fnc(*args, **kwargs))
+ return wrapper
+
+
+@jsonify
+def read_record(self, arg_dict):
+ """Returns the value stored at the given path for the given dom_id.
+ These must be encoded as key/value pairs in arg_dict. You can
+ optinally include a key 'ignore_missing_path'; if this is present
+ and boolean True, attempting to read a non-existent path will return
+ the string 'None' instead of raising an exception.
+ """
+ cmd = "xenstore-read /local/domain/%(dom_id)s/%(path)s" % arg_dict
+ try:
+ return _run_command(cmd).rstrip("\n")
+ except pluginlib.PluginError, e:
+ if arg_dict.get("ignore_missing_path", False):
+ cmd = "xenstore-exists /local/domain/%(dom_id)s/%(path)s; echo $?"
+ cmd = cmd % arg_dict
+ ret = _run_command(cmd).strip()
+ # If the path exists, the cmd should return "0"
+ if ret != "0":
+ # No such path, so ignore the error and return the
+ # string 'None', since None can't be marshalled
+ # over RPC.
+ return "None"
+ # Either we shouldn't ignore path errors, or another
+ # error was hit. Re-raise.
+ raise
+
+
+@jsonify
+def write_record(self, arg_dict):
+ """Writes to xenstore at the specified path. If there is information
+ already stored in that location, it is overwritten. As in read_record,
+ the dom_id and path must be specified in the arg_dict; additionally,
+ you must specify a 'value' key, whose value must be a string. Typically,
+ you can json-ify more complex values and store the json output.
+ """
+ cmd = "xenstore-write /local/domain/%(dom_id)s/%(path)s '%(value)s'"
+ cmd = cmd % arg_dict
+ _run_command(cmd)
+ return arg_dict["value"]
+
+
+@jsonify
+def list_records(self, arg_dict):
+ """Returns all the stored data at or below the given path for the
+ given dom_id. The data is returned as a json-ified dict, with the
+ path as the key and the stored value as the value. If the path
+ doesn't exist, an empty dict is returned.
+ """
+ cmd = "xenstore-ls /local/domain/%(dom_id)s/%(path)s" % arg_dict
+ cmd = cmd.rstrip("/")
+ try:
+ recs = _run_command(cmd)
+ except pluginlib.PluginError, e:
+ if "No such file or directory" in "%s" % e:
+ # Path doesn't exist.
+ return {}
+ return str(e)
+ raise
+ base_path = arg_dict["path"]
+ paths = _paths_from_ls(recs)
+ ret = {}
+ for path in paths:
+ if base_path:
+ arg_dict["path"] = "%s/%s" % (base_path, path)
+ else:
+ arg_dict["path"] = path
+ rec = read_record(self, arg_dict)
+ try:
+ val = json.loads(rec)
+ except ValueError:
+ val = rec
+ ret[path] = val
+ return ret
+
+
+@jsonify
+def delete_record(self, arg_dict):
+ """Just like it sounds: it removes the record for the specified
+ VM and the specified path from xenstore.
+ """
+ cmd = "xenstore-rm /local/domain/%(dom_id)s/%(path)s" % arg_dict
+ return _run_command(cmd)
+
+
+def _paths_from_ls(recs):
+ """The xenstore-ls command returns a listing that isn't terribly
+ useful. This method cleans that up into a dict with each path
+ as the key, and the associated string as the value.
+ """
+ ret = {}
+ last_nm = ""
+ level = 0
+ path = []
+ ret = []
+ for ln in recs.splitlines():
+ nm, val = ln.rstrip().split(" = ")
+ barename = nm.lstrip()
+ this_level = len(nm) - len(barename)
+ if this_level == 0:
+ ret.append(barename)
+ level = 0
+ path = []
+ elif this_level == level:
+ # child of same parent
+ ret.append("%s/%s" % ("/".join(path), barename))
+ elif this_level > level:
+ path.append(last_nm)
+ ret.append("%s/%s" % ("/".join(path), barename))
+ level = this_level
+ elif this_level < level:
+ path = path[:this_level]
+ ret.append("%s/%s" % ("/".join(path), barename))
+ level = this_level
+ last_nm = barename
+ return ret
+
+
+def _run_command(cmd):
+ """Abstracts out the basics of issuing system commands. If the command
+ returns anything in stderr, a PluginError is raised with that information.
+ Otherwise, the output from stdout is returned.
+ """
+ pipe = subprocess.PIPE
+ proc = subprocess.Popen([cmd], shell=True, stdin=pipe, stdout=pipe,
+ stderr=pipe, close_fds=True)
+ proc.wait()
+ err = proc.stderr.read()
+ if err:
+ raise pluginlib.PluginError(err)
+ return proc.stdout.read()
+
+
+if __name__ == "__main__":
+ XenAPIPlugin.dispatch(
+ {"read_record": read_record,
+ "write_record": write_record,
+ "list_records": list_records,
+ "delete_record": delete_record})
diff --git a/run_tests.py b/run_tests.py
new file mode 100644
index 000000000..5b8617f63
--- /dev/null
+++ b/run_tests.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+import unittest
+import sys
+
+from nose import config
+from nose import result
+from nose import core
+
+
+class NovaTestResult(result.TextTestResult):
+ def __init__(self, *args, **kw):
+ result.TextTestResult.__init__(self, *args, **kw)
+ self._last_case = None
+
+ def getDescription(self, test):
+ return str(test)
+
+ def startTest(self, test):
+ unittest.TestResult.startTest(self, test)
+ current_case = test.test.__class__.__name__
+
+ if self.showAll:
+ if current_case != self._last_case:
+ self.stream.writeln(current_case)
+ self._last_case = current_case
+
+ self.stream.write(
+ ' %s' % str(test.test._testMethodName).ljust(60))
+ self.stream.flush()
+
+
+class NovaTestRunner(core.TextTestRunner):
+ def _makeResult(self):
+ return NovaTestResult(self.stream,
+ self.descriptions,
+ self.verbosity,
+ self.config)
+
+
+if __name__ == '__main__':
+ c = config.Config(stream=sys.stdout,
+ env=os.environ,
+ verbosity=3)
+
+ runner = NovaTestRunner(stream=c.stream,
+ verbosity=c.verbosity,
+ config=c)
+ sys.exit(not core.run(config=c, testRunner=runner))
diff --git a/run_tests.sh b/run_tests.sh
index ffb0b6295..fe703fece 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -32,16 +32,17 @@ never_venv=0
force=0
noseargs=
+
for arg in "$@"; do
process_option $arg
done
-NOSETESTS="nosetests -v $noseargs"
+NOSETESTS="python run_tests.py $noseargs"
if [ $never_venv -eq 1 ]; then
# Just run the test suites in current environment
rm -f nova.sqlite
- $NOSETESTS
+ $NOSETESTS 2> run_tests.err.log
exit
fi
@@ -53,7 +54,7 @@ fi
if [ -e ${venv} ]; then
${with_venv} rm -f nova.sqlite
- ${with_venv} $NOSETESTS
+ ${with_venv} $NOSETESTS 2> run_tests.err.log
else
if [ $always_venv -eq 1 ]; then
# Automatically install the virtualenv
@@ -66,10 +67,10 @@ else
python tools/install_venv.py
else
rm -f nova.sqlite
- $NOSETESTS
+ $NOSETESTS 2> run_tests.err.log
exit
fi
fi
${with_venv} rm -f nova.sqlite
- ${with_venv} $NOSETESTS
+ ${with_venv} $NOSETESTS 2> run_tests.err.log
fi
diff --git a/tools/pip-requires b/tools/pip-requires
index e9559521b..341043114 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -22,4 +22,6 @@ mox==0.5.0
greenlet==0.3.1
nose
bzr
-Twisted>=10.1.0 \ No newline at end of file
+Twisted>=10.1.0
+PasteDeploy
+paste