summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.bzrignore1
-rw-r--r--.mailmap1
-rw-r--r--Authors6
-rw-r--r--README4
-rw-r--r--babel.cfg2
-rwxr-xr-xbin/nova-ajax-console-proxy137
-rwxr-xr-xbin/nova-api2
-rwxr-xr-xbin/nova-console44
-rwxr-xr-xbin/nova-dhcpbridge17
-rwxr-xr-xbin/nova-instancemonitor7
-rw-r--r--bin/nova-logspool156
-rwxr-xr-xbin/nova-manage22
-rw-r--r--bin/nova-spoolsentry97
-rwxr-xr-xcontrib/nova.sh2
-rw-r--r--doc/.autogenerated97
-rw-r--r--doc/source/adminguide/binaries.rst2
-rw-r--r--doc/source/adminguide/flags.rst2
-rw-r--r--doc/source/adminguide/getting.started.rst2
-rw-r--r--doc/source/adminguide/index.rst2
-rw-r--r--doc/source/adminguide/managing.images.rst2
-rw-r--r--doc/source/adminguide/managing.instances.rst2
-rw-r--r--doc/source/adminguide/managing.networks.rst4
-rw-r--r--doc/source/adminguide/managing.projects.rst2
-rw-r--r--doc/source/adminguide/managingsecurity.rst2
-rw-r--r--doc/source/adminguide/monitoring.rst2
-rw-r--r--doc/source/adminguide/multi.node.install.rst365
-rw-r--r--doc/source/adminguide/network.flat.rst2
-rw-r--r--doc/source/adminguide/network.vlan.rst5
-rw-r--r--doc/source/adminguide/nova.manage.rst2
-rw-r--r--doc/source/api/autoindex.rst99
-rw-r--r--doc/source/api/nova..adminclient.rst6
-rw-r--r--doc/source/api/nova..api.cloud.rst6
-rw-r--r--doc/source/api/nova..api.ec2.admin.rst6
-rw-r--r--doc/source/api/nova..api.ec2.apirequest.rst6
-rw-r--r--doc/source/api/nova..api.ec2.cloud.rst6
-rw-r--r--doc/source/api/nova..api.ec2.images.rst6
-rw-r--r--doc/source/api/nova..api.ec2.metadatarequesthandler.rst6
-rw-r--r--doc/source/api/nova..api.openstack.auth.rst6
-rw-r--r--doc/source/api/nova..api.openstack.backup_schedules.rst6
-rw-r--r--doc/source/api/nova..api.openstack.faults.rst6
-rw-r--r--doc/source/api/nova..api.openstack.flavors.rst6
-rw-r--r--doc/source/api/nova..api.openstack.images.rst6
-rw-r--r--doc/source/api/nova..api.openstack.servers.rst6
-rw-r--r--doc/source/api/nova..api.openstack.sharedipgroups.rst6
-rw-r--r--doc/source/api/nova..auth.dbdriver.rst6
-rw-r--r--doc/source/api/nova..auth.fakeldap.rst6
-rw-r--r--doc/source/api/nova..auth.ldapdriver.rst6
-rw-r--r--doc/source/api/nova..auth.manager.rst6
-rw-r--r--doc/source/api/nova..auth.signer.rst6
-rw-r--r--doc/source/api/nova..cloudpipe.pipelib.rst6
-rw-r--r--doc/source/api/nova..compute.disk.rst6
-rw-r--r--doc/source/api/nova..compute.instance_types.rst6
-rw-r--r--doc/source/api/nova..compute.manager.rst6
-rw-r--r--doc/source/api/nova..compute.monitor.rst6
-rw-r--r--doc/source/api/nova..compute.power_state.rst6
-rw-r--r--doc/source/api/nova..context.rst6
-rw-r--r--doc/source/api/nova..crypto.rst6
-rw-r--r--doc/source/api/nova..db.api.rst6
-rw-r--r--doc/source/api/nova..db.sqlalchemy.api.rst6
-rw-r--r--doc/source/api/nova..db.sqlalchemy.models.rst6
-rw-r--r--doc/source/api/nova..db.sqlalchemy.session.rst6
-rw-r--r--doc/source/api/nova..exception.rst6
-rw-r--r--doc/source/api/nova..fakerabbit.rst6
-rw-r--r--doc/source/api/nova..flags.rst6
-rw-r--r--doc/source/api/nova..image.service.rst6
-rw-r--r--doc/source/api/nova..manager.rst6
-rw-r--r--doc/source/api/nova..network.linux_net.rst6
-rw-r--r--doc/source/api/nova..network.manager.rst6
-rw-r--r--doc/source/api/nova..objectstore.bucket.rst6
-rw-r--r--doc/source/api/nova..objectstore.handler.rst6
-rw-r--r--doc/source/api/nova..objectstore.image.rst6
-rw-r--r--doc/source/api/nova..objectstore.stored.rst6
-rw-r--r--doc/source/api/nova..process.rst6
-rw-r--r--doc/source/api/nova..quota.rst6
-rw-r--r--doc/source/api/nova..rpc.rst6
-rw-r--r--doc/source/api/nova..scheduler.chance.rst6
-rw-r--r--doc/source/api/nova..scheduler.driver.rst6
-rw-r--r--doc/source/api/nova..scheduler.manager.rst6
-rw-r--r--doc/source/api/nova..scheduler.simple.rst6
-rw-r--r--doc/source/api/nova..server.rst6
-rw-r--r--doc/source/api/nova..service.rst6
-rw-r--r--doc/source/api/nova..test.rst6
-rw-r--r--doc/source/api/nova..tests.access_unittest.rst6
-rw-r--r--doc/source/api/nova..tests.api.fakes.rst6
-rw-r--r--doc/source/api/nova..tests.api.openstack.fakes.rst6
-rw-r--r--doc/source/api/nova..tests.api.openstack.test_api.rst6
-rw-r--r--doc/source/api/nova..tests.api.openstack.test_auth.rst6
-rw-r--r--doc/source/api/nova..tests.api.openstack.test_faults.rst6
-rw-r--r--doc/source/api/nova..tests.api.openstack.test_flavors.rst6
-rw-r--r--doc/source/api/nova..tests.api.openstack.test_images.rst6
-rw-r--r--doc/source/api/nova..tests.api.openstack.test_ratelimiting.rst6
-rw-r--r--doc/source/api/nova..tests.api.openstack.test_servers.rst6
-rw-r--r--doc/source/api/nova..tests.api.openstack.test_sharedipgroups.rst6
-rw-r--r--doc/source/api/nova..tests.api.test_wsgi.rst6
-rw-r--r--doc/source/api/nova..tests.api_integration.rst6
-rw-r--r--doc/source/api/nova..tests.api_unittest.rst6
-rw-r--r--doc/source/api/nova..tests.auth_unittest.rst6
-rw-r--r--doc/source/api/nova..tests.cloud_unittest.rst6
-rw-r--r--doc/source/api/nova..tests.compute_unittest.rst6
-rw-r--r--doc/source/api/nova..tests.declare_flags.rst6
-rw-r--r--doc/source/api/nova..tests.fake_flags.rst6
-rw-r--r--doc/source/api/nova..tests.flags_unittest.rst6
-rw-r--r--doc/source/api/nova..tests.network_unittest.rst6
-rw-r--r--doc/source/api/nova..tests.objectstore_unittest.rst6
-rw-r--r--doc/source/api/nova..tests.process_unittest.rst6
-rw-r--r--doc/source/api/nova..tests.quota_unittest.rst6
-rw-r--r--doc/source/api/nova..tests.real_flags.rst6
-rw-r--r--doc/source/api/nova..tests.rpc_unittest.rst6
-rw-r--r--doc/source/api/nova..tests.runtime_flags.rst6
-rw-r--r--doc/source/api/nova..tests.scheduler_unittest.rst6
-rw-r--r--doc/source/api/nova..tests.service_unittest.rst6
-rw-r--r--doc/source/api/nova..tests.twistd_unittest.rst6
-rw-r--r--doc/source/api/nova..tests.validator_unittest.rst6
-rw-r--r--doc/source/api/nova..tests.virt_unittest.rst6
-rw-r--r--doc/source/api/nova..tests.volume_unittest.rst6
-rw-r--r--doc/source/api/nova..twistd.rst6
-rw-r--r--doc/source/api/nova..utils.rst6
-rw-r--r--doc/source/api/nova..validate.rst6
-rw-r--r--doc/source/api/nova..virt.connection.rst6
-rw-r--r--doc/source/api/nova..virt.fake.rst6
-rw-r--r--doc/source/api/nova..virt.images.rst6
-rw-r--r--doc/source/api/nova..virt.libvirt_conn.rst6
-rw-r--r--doc/source/api/nova..virt.xenapi.rst6
-rw-r--r--doc/source/api/nova..volume.driver.rst6
-rw-r--r--doc/source/api/nova..volume.manager.rst6
-rw-r--r--doc/source/api/nova..wsgi.rst6
-rw-r--r--doc/source/cloud101.rst8
-rw-r--r--doc/source/community.rst5
-rw-r--r--doc/source/conf.py8
-rw-r--r--doc/source/devref/addmethod.openstackapi.rst2
-rw-r--r--doc/source/devref/api.rst2
-rw-r--r--doc/source/devref/architecture.rst2
-rw-r--r--doc/source/devref/auth.rst2
-rw-r--r--doc/source/devref/cloudpipe.rst2
-rw-r--r--doc/source/devref/compute.rst2
-rw-r--r--doc/source/devref/database.rst4
-rw-r--r--doc/source/devref/development.environment.rst9
-rw-r--r--doc/source/devref/fakes.rst2
-rw-r--r--doc/source/devref/glance.rst2
-rw-r--r--doc/source/devref/index.rst2
-rw-r--r--doc/source/devref/network.rst2
-rw-r--r--doc/source/devref/nova.rst2
-rw-r--r--doc/source/devref/objectstore.rst2
-rw-r--r--doc/source/devref/rabbit.rst5
-rw-r--r--doc/source/devref/scheduler.rst2
-rw-r--r--doc/source/devref/services.rst2
-rw-r--r--doc/source/devref/volume.rst2
-rw-r--r--doc/source/index.rst2
-rw-r--r--doc/source/livecd.rst17
-rw-r--r--doc/source/nova.concepts.rst8
-rw-r--r--doc/source/object.model.rst26
-rw-r--r--doc/source/quickstart.rst2
-rw-r--r--doc/source/service.architecture.rst17
-rw-r--r--krm_mapping.json.sample3
-rw-r--r--locale/nova.pot2130
-rw-r--r--nova/adminclient.py7
-rw-r--r--nova/api/ec2/__init__.py96
-rw-r--r--nova/api/ec2/admin.py38
-rw-r--r--nova/api/ec2/apirequest.py12
-rw-r--r--nova/api/ec2/cloud.py380
-rw-r--r--nova/api/ec2/metadatarequesthandler.py7
-rw-r--r--nova/api/openstack/__init__.py26
-rw-r--r--nova/api/openstack/backup_schedules.py6
-rw-r--r--nova/api/openstack/common.py24
-rw-r--r--nova/api/openstack/consoles.py96
-rw-r--r--nova/api/openstack/images.py26
-rw-r--r--nova/api/openstack/servers.py130
-rw-r--r--nova/api/openstack/shared_ip_groups.py (renamed from nova/api/openstack/sharedipgroups.py)10
-rw-r--r--nova/auth/dbdriver.py1
-rw-r--r--nova/auth/ldapdriver.py99
-rw-r--r--nova/auth/manager.py75
-rw-r--r--nova/auth/novarc.template4
-rw-r--r--nova/auth/signer.py15
-rw-r--r--nova/cloudpipe/pipelib.py4
-rw-r--r--nova/compute/__init__.py15
-rw-r--r--nova/compute/api.py325
-rw-r--r--nova/compute/disk.py11
-rw-r--r--nova/compute/manager.py221
-rw-r--r--nova/compute/monitor.py39
-rw-r--r--nova/console/__init__.py13
-rw-r--r--nova/console/api.py75
-rw-r--r--nova/console/fake.py58
-rw-r--r--nova/console/manager.py127
-rw-r--r--nova/console/xvp.conf.template16
-rw-r--r--nova/console/xvp.py194
-rw-r--r--nova/crypto.py5
-rw-r--r--nova/db/api.py81
-rw-r--r--nova/db/sqlalchemy/__init__.py8
-rw-r--r--nova/db/sqlalchemy/api.py247
-rw-r--r--nova/db/sqlalchemy/models.py56
-rw-r--r--nova/exception.py7
-rw-r--r--nova/fakerabbit.py20
-rw-r--r--nova/flags.py44
-rw-r--r--nova/image/glance.py23
-rw-r--r--nova/log.py254
-rw-r--r--nova/network/__init__.py15
-rw-r--r--nova/network/api.py87
-rw-r--r--nova/network/linux_net.py23
-rw-r--r--nova/network/manager.py20
-rw-r--r--nova/objectstore/handler.py68
-rw-r--r--nova/quota.py6
-rw-r--r--nova/rpc.py30
-rw-r--r--nova/scheduler/manager.py5
-rw-r--r--nova/scheduler/zone.py56
-rw-r--r--nova/service.py36
-rw-r--r--nova/tests/api/openstack/fakes.py4
-rw-r--r--nova/tests/api/openstack/test_images.py3
-rw-r--r--nova/tests/api/openstack/test_servers.py39
-rw-r--r--nova/tests/api/openstack/test_shared_ip_groups.py (renamed from nova/tests/api/openstack/test_sharedipgroups.py)2
-rw-r--r--nova/tests/hyperv_unittest.py71
-rw-r--r--nova/tests/objectstore_unittest.py2
-rw-r--r--nova/tests/test_auth.py9
-rw-r--r--nova/tests/test_cloud.py104
-rw-r--r--nova/tests/test_compute.py47
-rw-r--r--nova/tests/test_console.py129
-rw-r--r--nova/tests/test_log.py110
-rw-r--r--nova/tests/test_network.py9
-rw-r--r--nova/tests/test_quota.py14
-rw-r--r--nova/tests/test_rpc.py11
-rw-r--r--nova/tests/test_scheduler.py54
-rw-r--r--nova/tests/test_service.py15
-rw-r--r--nova/tests/test_virt.py142
-rw-r--r--nova/tests/test_volume.py6
-rw-r--r--nova/tests/test_xenapi.py11
-rw-r--r--nova/tests/xenapi/stubs.py24
-rw-r--r--nova/twistd.py25
-rw-r--r--nova/utils.py33
-rw-r--r--nova/version.py46
-rw-r--r--nova/virt/connection.py8
-rw-r--r--nova/virt/fake.py8
-rw-r--r--nova/virt/hyperv.py462
-rw-r--r--nova/virt/images.py47
-rw-r--r--nova/virt/libvirt.xml.template13
-rw-r--r--nova/virt/libvirt_conn.py495
-rw-r--r--nova/virt/xenapi/fake.py24
-rw-r--r--nova/virt/xenapi/vm_utils.py72
-rw-r--r--nova/virt/xenapi/vmops.py245
-rw-r--r--nova/virt/xenapi/volume_utils.py56
-rw-r--r--nova/virt/xenapi/volumeops.py31
-rw-r--r--nova/virt/xenapi_conn.py71
-rw-r--r--nova/volume/__init__.py14
-rw-r--r--nova/volume/api.py103
-rw-r--r--nova/volume/driver.py16
-rw-r--r--nova/volume/manager.py21
-rw-r--r--nova/wsgi.py19
-rwxr-xr-xplugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py28
-rwxr-xr-xplugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py180
-rw-r--r--setup.cfg14
-rw-r--r--setup.py28
-rw-r--r--smoketests/admin_smoketests.py9
-rw-r--r--smoketests/user_smoketests.py87
-rw-r--r--tools/ajaxterm/README.txt120
-rw-r--r--tools/ajaxterm/ajaxterm.135
-rw-r--r--tools/ajaxterm/ajaxterm.css64
-rw-r--r--tools/ajaxterm/ajaxterm.html25
-rw-r--r--tools/ajaxterm/ajaxterm.js279
-rwxr-xr-xtools/ajaxterm/ajaxterm.py586
-rwxr-xr-xtools/ajaxterm/configure32
-rw-r--r--tools/ajaxterm/configure.ajaxterm.bin2
-rw-r--r--tools/ajaxterm/configure.initd.debian33
-rw-r--r--tools/ajaxterm/configure.initd.gentoo27
-rw-r--r--tools/ajaxterm/configure.initd.redhat75
-rw-r--r--tools/ajaxterm/configure.makefile20
-rw-r--r--tools/ajaxterm/qweb.py1356
-rw-r--r--tools/ajaxterm/sarissa.js647
-rw-r--r--tools/ajaxterm/sarissa_dhtml.js105
-rwxr-xr-xtools/euca-get-ajax-console164
-rw-r--r--tools/install_venv.py3
268 files changed, 11534 insertions, 2181 deletions
diff --git a/.bzrignore b/.bzrignore
index d81a7d829..b271561a3 100644
--- a/.bzrignore
+++ b/.bzrignore
@@ -12,3 +12,4 @@ CA/openssl.cnf
CA/serial*
CA/newcerts/*.pem
CA/private/cakey.pem
+nova/vcsversion.py
diff --git a/.mailmap b/.mailmap
index 010678569..2af2d7cd9 100644
--- a/.mailmap
+++ b/.mailmap
@@ -30,3 +30,4 @@
<rconradharris@gmail.com> <rick.harris@rackspace.com>
<corywright@gmail.com> <cory.wright@rackspace.com>
<ant@openstack.org> <amesserl@rackspace.com>
+<chiradeep@cloud.com> <chiradeep@chiradeep-lt2>
diff --git a/Authors b/Authors
index 639e68a59..56344957e 100644
--- a/Authors
+++ b/Authors
@@ -3,6 +3,7 @@ Anne Gentle <anne@openstack.org>
Anthony Young <sleepsonthefloor@gmail.com>
Antony Messerli <ant@openstack.org>
Armando Migliaccio <Armando.Migliaccio@eu.citrix.com>
+Chiradeep Vittal <chiradeep@cloud.com>
Chris Behrens <cbehrens@codestud.com>
Chmouel Boudjnah <chmouel@chmouel.com>
Cory Wright <corywright@gmail.com>
@@ -14,6 +15,7 @@ Eldar Nugaev <enugaev@griddynamics.com>
Eric Day <eday@oddments.org>
Ewan Mellor <ewan.mellor@citrix.com>
Hisaki Ohara <hisaki.ohara@intel.com>
+Ilya Alekseyev <ialekseev@griddynamics.com>
Jay Pipes <jaypipes@gmail.com>
Jesse Andrews <anotherjesse@gmail.com>
Joe Heck <heckj@mac.com>
@@ -22,8 +24,11 @@ Jonathan Bryce <jbryce@jbryce.com>
Josh Kearney <josh.kearney@rackspace.com>
Joshua McKenty <jmckenty@gmail.com>
Justin Santa Barbara <justin@fathomdb.com>
+Ken Pepple <ken.pepple@gmail.com>
+Lorin Hochstein <lorin@isi.edu>
Matt Dietz <matt.dietz@rackspace.com>
Michael Gundlach <michael.gundlach@rackspace.com>
+Monsyne Dragon <mdragon@rackspace.com>
Monty Taylor <mordred@inaugust.com>
Paul Voccio <paul@openstack.org>
Rick Clark <rick@openstack.org>
@@ -39,4 +44,3 @@ Trey Morris <trey.morris@rackspace.com>
Vishvananda Ishaya <vishvananda@gmail.com>
Youcef Laribi <Youcef.Laribi@eu.citrix.com>
Zhixue Wu <Zhixue.Wu@citrix.com>
-
diff --git a/README b/README
index 851bca9db..f9334c295 100644
--- a/README
+++ b/README
@@ -1,7 +1,7 @@
The Choose Your Own Adventure README for Nova:
You have come across a cloud computing fabric controller. It has identified
- itself as "Nova." It is apparent that it maintains compatability with
+ itself as "Nova." It is apparent that it maintains compatibility with
the popular Amazon EC2 and S3 APIs.
To monitor it from a distance: follow @novacc on twitter
@@ -10,7 +10,7 @@ To tame it for use in your own cloud: read http://nova.openstack.org/getting.sta
To study its anatomy: read http://nova.openstack.org/architecture.html
-To disect it in detail: visit http://code.launchpad.net/nova
+To dissect it in detail: visit http://code.launchpad.net/nova
To taunt it with its weaknesses: use http://bugs.launchpad.net/nova
diff --git a/babel.cfg b/babel.cfg
new file mode 100644
index 000000000..15cd6cb76
--- /dev/null
+++ b/babel.cfg
@@ -0,0 +1,2 @@
+[python: **.py]
+
diff --git a/bin/nova-ajax-console-proxy b/bin/nova-ajax-console-proxy
new file mode 100755
index 000000000..2bc407658
--- /dev/null
+++ b/bin/nova-ajax-console-proxy
@@ -0,0 +1,137 @@
+#!/usr/bin/env python
+# pylint: disable-msg=C0103
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Ajax Console Proxy Server"""
+
+from eventlet import greenthread
+from eventlet.green import urllib2
+
+import exceptions
+import gettext
+import logging
+import os
+import sys
+import time
+import urlparse
+
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+ os.pardir,
+ os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+ sys.path.insert(0, possible_topdir)
+
+gettext.install('nova', unicode=1)
+
+from nova import flags
+from nova import log as logging
+from nova import rpc
+from nova import utils
+from nova import wsgi
+
+FLAGS = flags.FLAGS
+
+flags.DEFINE_integer('ajax_console_idle_timeout', 300,
+ 'Seconds before idle connection destroyed')
+
+LOG = logging.getLogger('nova.ajax_console_proxy')
+LOG.setLevel(logging.DEBUG)
+LOG.addHandler(logging.StreamHandler())
+
+
+class AjaxConsoleProxy(object):
+ tokens = {}
+
+ def __call__(self, env, start_response):
+ try:
+ req_url = '%s://%s%s?%s' % (env['wsgi.url_scheme'],
+ env['HTTP_HOST'],
+ env['PATH_INFO'],
+ env['QUERY_STRING'])
+ if 'HTTP_REFERER' in env:
+ auth_url = env['HTTP_REFERER']
+ else:
+ auth_url = req_url
+
+ auth_params = urlparse.parse_qs(urlparse.urlparse(auth_url).query)
+ parsed_url = urlparse.urlparse(req_url)
+
+ auth_info = AjaxConsoleProxy.tokens[auth_params['token'][0]]
+ args = auth_info['args']
+ auth_info['last_activity'] = time.time()
+
+ remote_url = ("http://%s:%s%s?token=%s" % (
+ str(args['host']),
+ str(args['port']),
+ parsed_url.path,
+ str(args['token'])))
+
+ opener = urllib2.urlopen(remote_url, env['wsgi.input'].read())
+ body = opener.read()
+ info = opener.info()
+
+ start_response("200 OK", info.dict.items())
+ return body
+ except (exceptions.KeyError):
+ if env['PATH_INFO'] != '/favicon.ico':
+ LOG.audit("Unauthorized request %s, %s"
+ % (req_url, str(env)))
+ start_response("401 NOT AUTHORIZED", [])
+ return "Not Authorized"
+ except Exception:
+ start_response("500 ERROR", [])
+ return "Server Error"
+
+ def register_listeners(self):
+ class Callback:
+ def __call__(self, data, message):
+ if data['method'] == 'authorize_ajax_console':
+ AjaxConsoleProxy.tokens[data['args']['token']] = \
+ {'args': data['args'], 'last_activity': time.time()}
+
+ conn = rpc.Connection.instance(new=True)
+ consumer = rpc.TopicConsumer(
+ connection=conn,
+ topic=FLAGS.ajax_console_proxy_topic)
+ consumer.register_callback(Callback())
+
+ def delete_expired_tokens():
+ now = time.time()
+ to_delete = []
+ for k, v in AjaxConsoleProxy.tokens.items():
+ if now - v['last_activity'] > FLAGS.ajax_console_idle_timeout:
+ to_delete.append(k)
+
+ for k in to_delete:
+ del AjaxConsoleProxy.tokens[k]
+
+ utils.LoopingCall(consumer.fetch, auto_ack=True,
+ enable_callbacks=True).start(0.1)
+ utils.LoopingCall(delete_expired_tokens).start(1)
+
+if __name__ == '__main__':
+ utils.default_flagfile()
+ FLAGS(sys.argv)
+ server = wsgi.Server()
+ acp = AjaxConsoleProxy()
+ acp.register_listeners()
+ server.start(acp, FLAGS.ajax_console_proxy_port, host='0.0.0.0')
+ server.wait()
diff --git a/bin/nova-api b/bin/nova-api
index 6ee833a18..419f0bbdc 100755
--- a/bin/nova-api
+++ b/bin/nova-api
@@ -21,7 +21,6 @@
"""Starter script for Nova API."""
import gettext
-import logging
import os
import sys
@@ -38,6 +37,7 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)
from nova import flags
+from nova import log as logging
from nova import wsgi
LOG = logging.getLogger('nova.api')
diff --git a/bin/nova-console b/bin/nova-console
new file mode 100755
index 000000000..802cc80b6
--- /dev/null
+++ b/bin/nova-console
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Starter script for Nova Console Proxy."""
+
+import eventlet
+eventlet.monkey_patch()
+
+import gettext
+import os
+import sys
+
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+ os.pardir,
+ os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+ sys.path.insert(0, possible_topdir)
+
+gettext.install('nova', unicode=1)
+
+from nova import service
+from nova import utils
+
+if __name__ == '__main__':
+ utils.default_flagfile()
+ service.serve()
+ service.wait()
diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index 828aba3d1..1a994d956 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -22,7 +22,6 @@ Handle lease database updates from DHCP servers.
"""
import gettext
-import logging
import os
import sys
@@ -39,6 +38,7 @@ gettext.install('nova', unicode=1)
from nova import context
from nova import db
from nova import flags
+from nova import log as logging
from nova import rpc
from nova import utils
from nova.network import linux_net
@@ -49,11 +49,13 @@ flags.DECLARE('network_size', 'nova.network.manager')
flags.DECLARE('num_networks', 'nova.network.manager')
flags.DECLARE('update_dhcp_on_disassociate', 'nova.network.manager')
+LOG = logging.getLogger('nova.dhcpbridge')
+
def add_lease(mac, ip_address, _hostname, _interface):
"""Set the IP that was assigned by the DHCP server."""
if FLAGS.fake_rabbit:
- logging.debug("leasing ip")
+ LOG.debug(_("leasing ip"))
network_manager = utils.import_object(FLAGS.network_manager)
network_manager.lease_fixed_ip(context.get_admin_context(),
mac,
@@ -68,14 +70,14 @@ def add_lease(mac, ip_address, _hostname, _interface):
def old_lease(mac, ip_address, hostname, interface):
"""Update just as add lease."""
- logging.debug("Adopted old lease or got a change of mac/hostname")
+ LOG.debug(_("Adopted old lease or got a change of mac/hostname"))
add_lease(mac, ip_address, hostname, interface)
def del_lease(mac, ip_address, _hostname, _interface):
"""Called when a lease expires."""
if FLAGS.fake_rabbit:
- logging.debug("releasing ip")
+ LOG.debug(_("releasing ip"))
network_manager = utils.import_object(FLAGS.network_manager)
network_manager.release_fixed_ip(context.get_admin_context(),
mac,
@@ -100,6 +102,7 @@ def main():
flagfile = os.environ.get('FLAGFILE', FLAGS.dhcpbridge_flagfile)
utils.default_flagfile(flagfile)
argv = FLAGS(sys.argv)
+ logging.basicConfig()
interface = os.environ.get('DNSMASQ_INTERFACE', 'br0')
if int(os.environ.get('TESTING', '0')):
FLAGS.fake_rabbit = True
@@ -117,9 +120,9 @@ def main():
mac = argv[2]
ip = argv[3]
hostname = argv[4]
- logging.debug("Called %s for mac %s with ip %s and "
- "hostname %s on interface %s",
- action, mac, ip, hostname, interface)
+ LOG.debug(_("Called %s for mac %s with ip %s and "
+ "hostname %s on interface %s"),
+ action, mac, ip, hostname, interface)
globals()[action + '_lease'](mac, ip, hostname, interface)
else:
print init_leases(interface)
diff --git a/bin/nova-instancemonitor b/bin/nova-instancemonitor
index 5dac3ffe6..7dca02014 100755
--- a/bin/nova-instancemonitor
+++ b/bin/nova-instancemonitor
@@ -23,7 +23,6 @@
import gettext
import os
-import logging
import sys
from twisted.application import service
@@ -37,19 +36,23 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)
+from nova import log as logging
from nova import utils
from nova import twistd
from nova.compute import monitor
+# TODO(todd): shouldn't this be done with flags? And what about verbose?
logging.getLogger('boto').setLevel(logging.WARN)
+LOG = logging.getLogger('nova.instancemonitor')
+
if __name__ == '__main__':
utils.default_flagfile()
twistd.serve(__file__)
if __name__ == '__builtin__':
- logging.warn('Starting instance monitor')
+ LOG.warn(_('Starting instance monitor'))
# pylint: disable-msg=C0103
monitor = monitor.InstanceMonitor()
diff --git a/bin/nova-logspool b/bin/nova-logspool
new file mode 100644
index 000000000..097459b12
--- /dev/null
+++ b/bin/nova-logspool
@@ -0,0 +1,156 @@
+#!/usr/bin/env python
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Tools for working with logs generated by nova components
+"""
+
+
+import json
+import os
+import re
+import sys
+
+
+class Request(object):
+ def __init__(self):
+ self.time = ""
+ self.host = ""
+ self.logger = ""
+ self.message = ""
+ self.trace = ""
+ self.env = ""
+ self.request_id = ""
+
+ def add_error_line(self, error_line):
+ self.time = " ".join(error_line.split(" ")[:3])
+ self.host = error_line.split(" ")[3]
+ self.logger = error_line.split("(")[1].split(" ")[0]
+ self.request_id = error_line.split("[")[1].split(" ")[0]
+ error_lines = error_line.split("#012")
+ self.message = self.clean_log_line(error_lines.pop(0))
+ self.trace = "\n".join([self.clean_trace(l) for l in error_lines])
+
+ def add_environment_line(self, env_line):
+ self.env = self.clean_env_line(env_line)
+
+ def clean_log_line(self, line):
+ """Remove log format for time, level, etc: split after context"""
+ return line.split('] ')[-1]
+
+ def clean_env_line(self, line):
+ """Also has an 'Environment: ' string in the message"""
+ return re.sub(r'^Environment: ', '', self.clean_log_line(line))
+
+ def clean_trace(self, line):
+ """trace has a different format, so split on TRACE:"""
+ return line.split('TRACE: ')[-1]
+
+ def to_dict(self):
+ return {'traceback': self.trace, 'message': self.message,
+ 'host': self.host, 'env': self.env, 'logger': self.logger,
+ 'request_id': self.request_id}
+
+
+class LogReader(object):
+ def __init__(self, filename):
+ self.filename = filename
+ self._errors = {}
+
+ def process(self, spooldir):
+ with open(self.filename) as f:
+ line = f.readline()
+ while len(line) > 0:
+ parts = line.split(" ")
+ level = (len(parts) < 6) or parts[5]
+ if level == 'ERROR':
+ self.handle_logged_error(line)
+ elif level == '[-]' and self.last_error:
+ # twisted stack trace line
+ clean_line = " ".join(line.split(" ")[6:])
+ self.last_error.trace = self.last_error.trace + clean_line
+ else:
+ self.last_error = None
+ line = f.readline()
+ self.update_spool(spooldir)
+
+ def handle_logged_error(self, line):
+ request_id = re.search(r' \[([A-Z0-9\-/]+)', line)
+ if not request_id:
+ raise Exception("Unable to parse request id from %s" % line)
+ request_id = request_id.group(1)
+ data = self._errors.get(request_id, Request())
+ if self.is_env_line(line):
+ data.add_environment_line(line)
+ elif self.is_error_line(line):
+ data.add_error_line(line)
+ else:
+ # possibly error from twsited
+ data.add_error_line(line)
+ self.last_error = data
+ self._errors[request_id] = data
+
+ def is_env_line(self, line):
+ return re.search('Environment: ', line)
+
+ def is_error_line(self, line):
+ return re.search('raised', line)
+
+ def update_spool(self, directory):
+ processed_dir = "%s/processed" % directory
+ self._ensure_dir_exists(processed_dir)
+ for rid, value in self._errors.iteritems():
+ if not self.has_been_processed(processed_dir, rid):
+ with open("%s/%s" % (directory, rid), "w") as spool:
+ spool.write(json.dumps(value.to_dict()))
+ self.flush_old_processed_spool(processed_dir)
+
+ def _ensure_dir_exists(self, d):
+ mkdir = False
+ try:
+ os.stat(d)
+ except:
+ mkdir = True
+ if mkdir:
+ os.mkdir(d)
+
+ def has_been_processed(self, processed_dir, rid):
+ rv = False
+ try:
+ os.stat("%s/%s" % (processed_dir, rid))
+ rv = True
+ except:
+ pass
+ return rv
+
+ def flush_old_processed_spool(self, processed_dir):
+ keys = self._errors.keys()
+ procs = os.listdir(processed_dir)
+ for p in procs:
+ if p not in keys:
+ # log has rotated and the old error won't be seen again
+ os.unlink("%s/%s" % (processed_dir, p))
+
+if __name__ == '__main__':
+ filename = '/var/log/nova.log'
+ spooldir = '/var/spool/nova'
+ if len(sys.argv) > 1:
+ filename = sys.argv[1]
+ if len(sys.argv) > 2:
+ spooldir = sys.argv[2]
+ LogReader(filename).process(spooldir)
diff --git a/bin/nova-manage b/bin/nova-manage
index 3416c1a52..3f5957190 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -55,8 +55,8 @@
import datetime
import gettext
-import logging
import os
+import re
import sys
import time
@@ -333,6 +333,11 @@ class ProjectCommands(object):
arguments: name project_manager [description]"""
self.manager.create_project(name, project_manager, description)
+ def modify(self, name, project_manager, description=None):
+ """Modifies a project
+ arguments: name project_manager [description]"""
+ self.manager.modify_project(name, project_manager, description)
+
def delete(self, name):
"""Deletes an existing project
arguments: name"""
@@ -499,6 +504,15 @@ class ServiceCommands(object):
db.service_update(ctxt, svc['id'], {'disabled': True})
+class LogCommands(object):
+ def request(self, request_id, logfile='/var/log/nova.log'):
+ """Show all fields in the log for the given request. Assumes you
+ haven't changed the log format too much.
+ ARGS: request_id [logfile]"""
+ lines = utils.execute("cat %s | grep '\[%s '" % (logfile, request_id))
+ print re.sub('#012', "\n", "\n".join(lines))
+
+
CATEGORIES = [
('user', UserCommands),
('project', ProjectCommands),
@@ -507,7 +521,8 @@ CATEGORIES = [
('vpn', VpnCommands),
('floating', FloatingIpCommands),
('network', NetworkCommands),
- ('service', ServiceCommands)]
+ ('service', ServiceCommands),
+ ('log', LogCommands)]
def lazy_match(name, key_value_tuples):
@@ -546,9 +561,6 @@ def main():
utils.default_flagfile()
argv = FLAGS(sys.argv)
- if FLAGS.verbose:
- logging.getLogger().setLevel(logging.DEBUG)
-
script_name = argv.pop(0)
if len(argv) < 1:
print script_name + " category action [<args>]"
diff --git a/bin/nova-spoolsentry b/bin/nova-spoolsentry
new file mode 100644
index 000000000..ab20268a9
--- /dev/null
+++ b/bin/nova-spoolsentry
@@ -0,0 +1,97 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import base64
+import json
+import logging
+import os
+import shutil
+import sys
+import urllib
+import urllib2
+try:
+ import cPickle as pickle
+except:
+ import pickle
+
+
+class SpoolSentry(object):
+ def __init__(self, spool_dir, sentry_url, key=None):
+ self.spool_dir = spool_dir
+ self.sentry_url = sentry_url
+ self.key = key
+
+ def process(self):
+ for fname in os.listdir(self.spool_dir):
+ if fname == "processed":
+ continue
+ try:
+ sourcefile = "%s/%s" % (self.spool_dir, fname)
+ with open(sourcefile) as f:
+ fdata = f.read()
+ data_from_json = json.loads(fdata)
+ data = self.build_data(data_from_json)
+ self.send_data(data)
+ destfile = "%s/processed/%s" % (self.spool_dir, fname)
+ shutil.move(sourcefile, destfile)
+ except:
+ logging.exception("Unable to upload record %s", fname)
+ raise
+
+ def build_data(self, filejson):
+ env = {'SERVER_NAME': 'unknown', 'SERVER_PORT': '0000',
+ 'SCRIPT_NAME': '/unknown/', 'PATH_INFO': 'unknown'}
+ if filejson['env']:
+ env = json.loads(filejson['env'])
+ url = "http://%s:%s%s%s" % (env['SERVER_NAME'], env['SERVER_PORT'],
+ env['SCRIPT_NAME'], env['PATH_INFO'])
+ rv = {'logger': filejson['logger'], 'level': logging.ERROR,
+ 'server_name': filejson['host'], 'url': url,
+ 'message': filejson['message'],
+ 'traceback': filejson['traceback']}
+ rv['data'] = {}
+ if filejson['env']:
+ rv['data']['META'] = env
+ if filejson['request_id']:
+ rv['data']['request_id'] = filejson['request_id']
+ return rv
+
+ def send_data(self, data):
+ data = {
+ 'data': base64.b64encode(pickle.dumps(data).encode('zlib')),
+ 'key': self.key
+ }
+ req = urllib2.Request(self.sentry_url)
+ res = urllib2.urlopen(req, urllib.urlencode(data))
+ if res.getcode() != 200:
+ raise Exception("Bad HTTP code: %s" % res.getcode())
+ txt = res.read()
+
+if __name__ == '__main__':
+ sentryurl = 'http://127.0.0.1/sentry/store/'
+ key = ''
+ spooldir = '/var/spool/nova'
+ if len(sys.argv) > 1:
+ sentryurl = sys.argv[1]
+ if len(sys.argv) > 2:
+ key = sys.argv[2]
+ if len(sys.argv) > 3:
+ spooldir = sys.argv[3]
+ SpoolSentry(spooldir, sentryurl, key).process()
diff --git a/contrib/nova.sh b/contrib/nova.sh
index da1ba030c..e06706295 100755
--- a/contrib/nova.sh
+++ b/contrib/nova.sh
@@ -78,6 +78,7 @@ if [ "$CMD" == "install" ]; then
sudo apt-get install -y user-mode-linux kvm libvirt-bin
sudo apt-get install -y screen euca2ools vlan curl rabbitmq-server
sudo apt-get install -y lvm2 iscsitarget open-iscsi
+ sudo apt-get install -y socat
echo "ISCSITARGET_ENABLE=true" | sudo tee /etc/default/iscsitarget
sudo /etc/init.d/iscsitarget restart
sudo modprobe kvm
@@ -155,6 +156,7 @@ if [ "$CMD" == "run" ]; then
screen_it network "$NOVA_DIR/bin/nova-network"
screen_it scheduler "$NOVA_DIR/bin/nova-scheduler"
screen_it volume "$NOVA_DIR/bin/nova-volume"
+ screen_it ajax_console_proxy "$NOVA_DIR/bin/nova-ajax-console-proxy"
screen_it test ". $NOVA_DIR/novarc"
screen -S nova -x
fi
diff --git a/doc/.autogenerated b/doc/.autogenerated
deleted file mode 100644
index 3a70f8780..000000000
--- a/doc/.autogenerated
+++ /dev/null
@@ -1,97 +0,0 @@
-source/api/nova..adminclient.rst
-source/api/nova..api.cloud.rst
-source/api/nova..api.ec2.admin.rst
-source/api/nova..api.ec2.apirequest.rst
-source/api/nova..api.ec2.cloud.rst
-source/api/nova..api.ec2.images.rst
-source/api/nova..api.ec2.metadatarequesthandler.rst
-source/api/nova..api.openstack.auth.rst
-source/api/nova..api.openstack.backup_schedules.rst
-source/api/nova..api.openstack.faults.rst
-source/api/nova..api.openstack.flavors.rst
-source/api/nova..api.openstack.images.rst
-source/api/nova..api.openstack.servers.rst
-source/api/nova..api.openstack.sharedipgroups.rst
-source/api/nova..auth.dbdriver.rst
-source/api/nova..auth.fakeldap.rst
-source/api/nova..auth.ldapdriver.rst
-source/api/nova..auth.manager.rst
-source/api/nova..auth.signer.rst
-source/api/nova..cloudpipe.pipelib.rst
-source/api/nova..compute.disk.rst
-source/api/nova..compute.instance_types.rst
-source/api/nova..compute.manager.rst
-source/api/nova..compute.monitor.rst
-source/api/nova..compute.power_state.rst
-source/api/nova..context.rst
-source/api/nova..crypto.rst
-source/api/nova..db.api.rst
-source/api/nova..db.sqlalchemy.api.rst
-source/api/nova..db.sqlalchemy.models.rst
-source/api/nova..db.sqlalchemy.session.rst
-source/api/nova..exception.rst
-source/api/nova..fakerabbit.rst
-source/api/nova..flags.rst
-source/api/nova..image.service.rst
-source/api/nova..manager.rst
-source/api/nova..network.linux_net.rst
-source/api/nova..network.manager.rst
-source/api/nova..objectstore.bucket.rst
-source/api/nova..objectstore.handler.rst
-source/api/nova..objectstore.image.rst
-source/api/nova..objectstore.stored.rst
-source/api/nova..process.rst
-source/api/nova..quota.rst
-source/api/nova..rpc.rst
-source/api/nova..scheduler.chance.rst
-source/api/nova..scheduler.driver.rst
-source/api/nova..scheduler.manager.rst
-source/api/nova..scheduler.simple.rst
-source/api/nova..server.rst
-source/api/nova..service.rst
-source/api/nova..test.rst
-source/api/nova..tests.access_unittest.rst
-source/api/nova..tests.api.fakes.rst
-source/api/nova..tests.api.openstack.fakes.rst
-source/api/nova..tests.api.openstack.test_api.rst
-source/api/nova..tests.api.openstack.test_auth.rst
-source/api/nova..tests.api.openstack.test_faults.rst
-source/api/nova..tests.api.openstack.test_flavors.rst
-source/api/nova..tests.api.openstack.test_images.rst
-source/api/nova..tests.api.openstack.test_ratelimiting.rst
-source/api/nova..tests.api.openstack.test_servers.rst
-source/api/nova..tests.api.openstack.test_sharedipgroups.rst
-source/api/nova..tests.api.test_wsgi.rst
-source/api/nova..tests.api_integration.rst
-source/api/nova..tests.api_unittest.rst
-source/api/nova..tests.auth_unittest.rst
-source/api/nova..tests.cloud_unittest.rst
-source/api/nova..tests.compute_unittest.rst
-source/api/nova..tests.declare_flags.rst
-source/api/nova..tests.fake_flags.rst
-source/api/nova..tests.flags_unittest.rst
-source/api/nova..tests.network_unittest.rst
-source/api/nova..tests.objectstore_unittest.rst
-source/api/nova..tests.process_unittest.rst
-source/api/nova..tests.quota_unittest.rst
-source/api/nova..tests.real_flags.rst
-source/api/nova..tests.rpc_unittest.rst
-source/api/nova..tests.runtime_flags.rst
-source/api/nova..tests.scheduler_unittest.rst
-source/api/nova..tests.service_unittest.rst
-source/api/nova..tests.twistd_unittest.rst
-source/api/nova..tests.validator_unittest.rst
-source/api/nova..tests.virt_unittest.rst
-source/api/nova..tests.volume_unittest.rst
-source/api/nova..twistd.rst
-source/api/nova..utils.rst
-source/api/nova..validate.rst
-source/api/nova..virt.connection.rst
-source/api/nova..virt.fake.rst
-source/api/nova..virt.images.rst
-source/api/nova..virt.libvirt_conn.rst
-source/api/nova..virt.xenapi.rst
-source/api/nova..volume.driver.rst
-source/api/nova..volume.manager.rst
-source/api/nova..wsgi.rst
-source/api/autoindex.rst
diff --git a/doc/source/adminguide/binaries.rst b/doc/source/adminguide/binaries.rst
index 25605adf9..5c50a51f1 100644
--- a/doc/source/adminguide/binaries.rst
+++ b/doc/source/adminguide/binaries.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/adminguide/flags.rst b/doc/source/adminguide/flags.rst
index 4c950aa88..072f0a1a5 100644
--- a/doc/source/adminguide/flags.rst
+++ b/doc/source/adminguide/flags.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/adminguide/getting.started.rst b/doc/source/adminguide/getting.started.rst
index 0cadeb45e..675d8e664 100644
--- a/doc/source/adminguide/getting.started.rst
+++ b/doc/source/adminguide/getting.started.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/adminguide/index.rst b/doc/source/adminguide/index.rst
index 736a154b2..e653c9e8b 100644
--- a/doc/source/adminguide/index.rst
+++ b/doc/source/adminguide/index.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/adminguide/managing.images.rst b/doc/source/adminguide/managing.images.rst
index df71db23b..c5d93a6e8 100644
--- a/doc/source/adminguide/managing.images.rst
+++ b/doc/source/adminguide/managing.images.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/adminguide/managing.instances.rst b/doc/source/adminguide/managing.instances.rst
index d97567bb2..e62352017 100644
--- a/doc/source/adminguide/managing.instances.rst
+++ b/doc/source/adminguide/managing.instances.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/adminguide/managing.networks.rst b/doc/source/adminguide/managing.networks.rst
index 38c1cba78..9eea46d70 100644
--- a/doc/source/adminguide/managing.networks.rst
+++ b/doc/source/adminguide/managing.networks.rst
@@ -1,7 +1,7 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
- Overview Sections Copyright 2010 Citrix
+ Overview Sections Copyright 2010-2011 Citrix
All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/doc/source/adminguide/managing.projects.rst b/doc/source/adminguide/managing.projects.rst
index b592e14d7..5dd7f2de9 100644
--- a/doc/source/adminguide/managing.projects.rst
+++ b/doc/source/adminguide/managing.projects.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/adminguide/managingsecurity.rst b/doc/source/adminguide/managingsecurity.rst
index 3b11b181a..7893925e7 100644
--- a/doc/source/adminguide/managingsecurity.rst
+++ b/doc/source/adminguide/managingsecurity.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/adminguide/monitoring.rst b/doc/source/adminguide/monitoring.rst
index e7766a6e7..2c93c71b5 100644
--- a/doc/source/adminguide/monitoring.rst
+++ b/doc/source/adminguide/monitoring.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/adminguide/multi.node.install.rst b/doc/source/adminguide/multi.node.install.rst
index a652e44b7..5918b0d38 100644
--- a/doc/source/adminguide/multi.node.install.rst
+++ b/doc/source/adminguide/multi.node.install.rst
@@ -1,6 +1,7 @@
..
- Copyright 2010 United States Government as represented by the
- Administrator of the National Aeronautics and Space Administration.
+ Copyright 2010-2011 United States Government as represented by the
+ Administrator of the National Aeronautics and Space Administration.
+
All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -17,36 +18,35 @@
Installing Nova on Multiple Servers
===================================
-
+
When you move beyond evaluating the technology and into building an actual
production environment, you will need to know how to configure your datacenter
and how to deploy components across your clusters. This guide should help you
through that process.
-
+
You can install multiple nodes to increase performance and availability of the OpenStack Compute installation.
-
+
This setup is based on an Ubuntu Lucid 10.04 installation with the latest updates. Most of this works around issues that need to be resolved in the installation and configuration scripts as of October 18th 2010. It also needs to eventually be generalized, but the intent here is to get the multi-node configuration bootstrapped so folks can move forward.
-
-
+
+
Requirements for a multi-node installation
------------------------------------------
-
+
* You need a real database, compatible with SQLAlchemy (mysql, postgresql) There's not a specific reason to choose one over another, it basically depends what you know. MySQL is easier to do High Availability (HA) with, but people may already know Postgres. We should document both configurations, though.
* For a recommended HA setup, consider a MySQL master/slave replication, with as many slaves as you like, and probably a heartbeat to kick one of the slaves into being a master if it dies.
* For performance optimization, split reads and writes to the database. MySQL proxy is the easiest way to make this work if running MySQL.
-
+
Assumptions
-^^^^^^^^^^^
-
+-----------
+
* Networking is configured between/through the physical machines on a single subnet.
-* Installation and execution are both performed by root user.
-
-
-
-Step 1 Use apt-get to get the latest code
------------------------------------------
+* Installation and execution are both performed by ROOT user.
+
+
+Step 1 - Use apt-get to get the latest code
+-------------------------------------------
-1. Setup Nova PPA with https://launchpad.net/~nova-core/+archive/trunk.
+1. Setup Nova PPA with https://launchpad.net/~nova-core/+archive/trunk. The ‘python-software-properties’ package is a pre-requisite for setting up the nova package repo:
::
@@ -69,201 +69,260 @@ Step 1 Use apt-get to get the latest code
It is highly likely that there will be errors when the nova services come up since they are not yet configured. Don't worry, you're only at step 1!
Step 2 Setup configuration file (installed in /etc/nova)
----------------------------------------------------------
-
-Note: CC_ADDR=<the external IP address of your cloud controller>
-
-Nova development has consolidated all .conf files to nova.conf as of November 2010. References to specific .conf files may be ignored.
-
-#. These need to be defined in the nova.conf configuration file::
-
- --sql_connection=mysql://root:nova@$CC_ADDR/nova # location of nova sql db
- --s3_host=$CC_ADDR # This is where Nova is hosting the objectstore service, which
- # will contain the VM images and buckets
- --rabbit_host=$CC_ADDR # This is where the rabbit AMQP messaging service is hosted
- --cc_host=$CC_ADDR # This is where the the nova-api service lives
- --verbose # Optional but very helpful during initial setup
- --ec2_url=http://$CC_ADDR:8773/services/Cloud
- --network_manager=nova.network.manager.FlatManager # simple, no-vlan networking type
- --fixed_range=<network/prefix> # ip network to use for VM guests, ex 192.168.2.64/26
- --network_size=<# of addrs> # number of ip addrs to use for VM guests, ex 64
-
-#. Create a nova group::
-
- sudo addgroup nova
-
-The Nova config file should have its owner set to root:nova, and mode set to 0640, since they contain your MySQL server's root password.
+--------------------------------------------------------
+1. Nova development has consolidated all config files to nova.conf as of November 2010. There is a default set of options that are already configured in nova.conf:
+
::
+
+--daemonize=1
+--dhcpbridge_flagfile=/etc/nova/nova.conf
+--dhcpbridge=/usr/bin/nova-dhcpbridge
+--logdir=/var/log/nova
+--state_path=/var/lib/nova
+
+The following items ALSO need to be defined in /etc/nova/nova.conf. I’ve added some explanation of the variables, as comments CANNOT be in nova.conf. There seems to be an issue with nova-manage not processing the comments/whitespace correctly:
- cd /etc/nova
- chown -R root:nova .
+--sql_connection ### Location of Nova SQL DB
-Step 3 Setup the sql db
------------------------
+--s3_host ### This is where Nova is hosting the objectstore service, which will contain the VM images and buckets
-1. First you 'preseed' (using the Quick Start method :doc:`../quickstart`). Run this as root.
+--rabbit_host ### This is where the rabbit AMQP messaging service is hosted
-::
-
- sudo apt-get install bzr git-core
- sudo bash
- export MYSQL_PASS=nova
+--cc_host ### This is where the nova-api service lives
+--verbose ### Optional but very helpful during initial setup
-::
+--ec2_url ### The location to interface nova-api
- cat <<MYSQL_PRESEED | debconf-set-selections
- mysql-server-5.1 mysql-server/root_password password $MYSQL_PASS
- mysql-server-5.1 mysql-server/root_password_again password $MYSQL_PASS
- mysql-server-5.1 mysql-server/start_on_boot boolean true
- MYSQL_PRESEED
+--network_manager ### Many options here, discussed below. This is how your controller will communicate with additional Nova nodes and VMs:
-2. Install mysql
+nova.network.manager.FlatManager # Simple, no-vlan networking type
+nova.network.manager.FlatDHCPManager # Flat networking with DHCP
+nova.network.manager.VlanManager # Vlan networking with DHCP – /DEFAULT/ if no network manager is defined in nova.conf
-::
+--fixed_range=<network/prefix> ### This will be the IP network that ALL the projects for future VM guests will reside on. E.g. 192.168.0.0/12
- sudo apt-get install -y mysql-server
+--network_size=<# of addrs> ### This is the total number of IP Addrs to use for VM guests, of all projects. E.g. 5000
-4. Edit /etc/mysql/my.cnf and set this line: bind-address=0.0.0.0 and then sighup or restart mysql
+The following code can be cut and paste, and edited to your setup:
-5. create nova's db
+Note: CC_ADDR=<the external IP address of your cloud controller>
+Detailed explanation of the following example is available above.
+
::
+
+--sql_connection=mysql://root:nova@<CC_ADDR>/nova
+--s3_host=<CC_ADDR>
+--rabbit_host=<CC_ADDR>
+--cc_host=<CC_ADDR>
+--verbose
+--ec2_url=http://<CC_ADDR>:8773/services/Cloud
+--network_manager=nova.network.manager.VlanManager
+--fixed_range=<network/prefix>
+--network_size=<# of addrs>
+
+2. Create a “nova” group, and set permissions::
+
+ addgroup nova
+
+The Nova config file should have its owner set to root:nova, and mode set to 0640, since they contain your MySQL server's root password. ::
+
+ chown -R root:nova /etc/nova
+ chmod 640 /etc/nova/nova.conf
+
+Step 3 - Setup the SQL DB (MySQL for this setup)
+------------------------------------------------
+
+1. First you 'preseed' to bypass all the installation prompts::
+
+ bash
+ MYSQL_PASS=nova
+ cat <<MYSQL_PRESEED | debconf-set-selections
+ mysql-server-5.1 mysql-server/root_password password $MYSQL_PASS
+ mysql-server-5.1 mysql-server/root_password_again password $MYSQL_PASS
+ mysql-server-5.1 mysql-server/start_on_boot boolean true
+ MYSQL_PRESEED
+
+2. Install MySQL::
+
+ apt-get install -y mysql-server
+
+3. Edit /etc/mysql/my.cnf to change ‘bind-address’ from localhost to any::
+
+ sed -i 's/127.0.0.1/0.0.0.0/g' /etc/mysql/my.cnf
+ service mysql restart
+
+3. Network Configuration
+
+If you use FlatManager (as opposed to VlanManager that we set) as your network manager, there are some additional networking changes you’ll have to make to ensure connectivity between your nodes and VMs. If you chose VlanManager or FlatDHCP, you may skip this section, as it’s set up for you automatically.
+
+Nova defaults to a bridge device named 'br100'. This needs to be created and somehow integrated into YOUR network. To keep things as simple as possible, have all the VM guests on the same network as the VM hosts (the compute nodes). To do so, set the compute node's external IP address to be on the bridge and add eth0 to that bridge. To do this, edit your network interfaces config to look like the following::
- mysql -uroot -pnova -e 'CREATE DATABASE nova;'
-
-
-6. Update the db to include user 'root'@'%'
+ < begin /etc/network/interfaces >
+ # The loopback network interface
+ auto lo
+ iface lo inet loopback
+
+ # Networking for NOVA
+ auto br100
+
+ iface br100 inet dhcp
+ bridge_ports eth0
+ bridge_stp off
+ bridge_maxwait 0
+ bridge_fd 0
+ < end /etc/network/interfaces >
+
+Next, restart networking to apply the changes::
+
+ sudo /etc/init.d/networking restart
+
+4. MySQL DB configuration:
+
+Create NOVA database::
+
+ mysql -uroot -p$MYSQL_PASS -e 'CREATE DATABASE nova;'
+
+Update the DB to include user 'root'@'%' with super user privileges::
+
+ mysql -uroot -p$MYSQL_PASS -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION;"
+
+Set mySQL root password::
+
+ mysql -uroot -p$MYSQL_PASS -e "SET PASSWORD FOR 'root'@'%' = PASSWORD('$MYSQL_PASS');"
+
+Step 4 - Setup Nova environment
+-------------------------------
+
+These are the commands you run to set up a user and project::
+
+ /usr/bin/python /usr/bin/nova-manage user admin <user_name>
+ /usr/bin/python /usr/bin/nova-manage project create <project_name> <user_name>
+ /usr/bin/python /usr/bin/nova-manage network create <project-network> <number-of-networks-in-project> <IPs in project>
+
+Here is an example of what this looks like with real data::
+
+ /usr/bin/python /usr/bin/nova-manage user admin dub
+ /usr/bin/python /usr/bin/nova-manage project create dubproject dub
+ /usr/bin/python /usr/bin/nova-manage network create 192.168.0.0/24 1 255
+
+(I chose a /24 since that falls inside my /12 range I set in ‘fixed-range’ in nova.conf. Currently, there can only be one network, and I am using the max IP’s available in a /24. You can choose to use any valid amount that you would like.)
+
+Note: The nova-manage service assumes that the first IP address is your network (like 192.168.0.0), that the 2nd IP is your gateway (192.168.0.1), and that the broadcast is the very last IP in the range you defined (192.168.0.255). If this is not the case you will need to manually edit the sql db 'networks' table.
+
+On running this command, entries are made in the 'networks' and 'fixed_ips' table. However, one of the networks listed in the 'networks' table needs to be marked as bridge in order for the code to know that a bridge exists. The Network is marked as bridged automatically based on the type of network manager selected. This is ONLY necessary if you chose FlatManager as your network type. More information can be found at the end of this document discussing setting up the bridge device.
+
+
+Step 5 - Create Nova certifications
+-----------------------------------
+
+1. Generate the certs as a zip file. These are the certs you will use to launch instances, bundle images, and all the other assorted api functions.
::
- mysql -u root -p nova
- GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION;
- SET PASSWORD FOR 'root'@'%' = PASSWORD('nova');
-
-7. Branch and install Nova
+ mkdir -p /root/creds
+ /usr/bin/python /usr/bin/nova-manage project zipfile $NOVA_PROJECT $NOVA_PROJECT_USER /root/creds/novacreds.zip
+
+2. Unzip them in your home directory, and add them to your environment.
::
- sudo -i
- cd ~
- export USE_MYSQL=1
- export MYSQL_PASS=nova
- git clone https://github.com/vishvananda/novascript.git
- cd novascript
- ./nova.sh branch
- ./nova.sh install
- ./nova.sh run
+ unzip /root/creds/novacreds.zip -d /root/creds/
+ cat /root/creds/novarc >> ~/.bashrc
+ source ~/.bashrc
-Step 4 Setup Nova environment
------------------------------
+Step 6 - Restart all relevant services
+--------------------------------------
-::
+Restart all six services in total, just to cover the entire spectrum::
+
+ libvirtd restart; service nova-network restart; service nova-compute restart; service nova-api restart; service nova-objectstore restart; service nova-scheduler restart
- /usr/bin/python /usr/bin/nova-manage user admin <user_name>
- /usr/bin/python /usr/bin/nova-manage project create <project_name> <user_name>
- /usr/bin/python /usr/bin/nova-manage project create network
+Step 7 - Closing steps, and cleaning up
+---------------------------------------
-Note: The nova-manage service assumes that the first IP address is your network (like 192.168.0.0), that the 2nd IP is your gateway (192.168.0.1), and that the broadcast is the very last IP in the range you defined (192.168.0.255). If this is not the case you will need to manually edit the sql db 'networks' table.o.
+One of the most commonly missed configuration areas is not allowing the proper access to VMs. Use the 'euca-authorize' command to enable access. Below, you will find the commands to allow 'ping' and 'ssh' to your VMs::
-On running this command, entries are made in the 'networks' and 'fixed_ips' table. However, one of the networks listed in the 'networks' table needs to be marked as bridge in order for the code to know that a bridge exists. The Network is marked as bridged automatically based on the type of network manager selected.
+ euca-authorize -P icmp -t -1:-1 default
+ euca-authorize -P tcp -p 22 default
-More networking details to create a network bridge for flat network
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Another common issue is you cannot ping or SSH your instances after issuing the 'euca-authorize' commands. Something to look at is the amount of 'dnsmasq' processes that are running. If you have a running instance, check to see that TWO 'dnsmasq' processes are running. If not, perform the following::
-Nova defaults to a bridge device named 'br100'. This needs to be created and somehow integrated into YOUR network. In my case, I wanted to keep things as simple as possible and have all the vm guests on the same network as the vm hosts (the compute nodes). Thus, I set the compute node's external IP address to be on the bridge and added eth0 to that bridge. To do this, edit your network interfaces config to look like the following::
+ killall dnsmasq
+ service nova-network restart
- < begin /etc/network/interfaces >
- # The loopback network interface
- auto lo
- iface lo inet loopback
+Step 8 – Testing the installation
+---------------------------------
- # Networking for NOVA
- auto br100
+You can then use `euca2ools` to test some items::
- iface br100 inet dhcp
- bridge_ports eth0
- bridge_stp off
- bridge_maxwait 0
- bridge_fd 0
- < end /etc/network/interfaces >
+ euca-describe-images
+ euca-describe-instances
+
+If you have issues with the API key, you may need to re-source your creds file::
+ . /root/creds/novarc
+
+If you don’t get any immediate errors, you’re successfully making calls to your cloud!
-Next, restart networking to apply the changes::
+Step 9 - Spinning up a VM for testing
+-------------------------------------
- sudo /etc/init.d/networking restart
+(This excerpt is from Thierry Carrez's blog, with reference to http://wiki.openstack.org/GettingImages.)
-Step 5: Create nova certs.
---------------------------
+The image that you will use here will be a ttylinux image, so this is a limited function server. You will be able to ping and SSH to this instance, but it is in no way a full production VM.
-Generate the certs as a zip file::
+Download the image, and publish to your bucket:
- mkdir creds
- sudo /usr/bin/python /usr/bin/nova-manage project zip admin admin creds/nova.zip
+::
-you can get the rc file more easily with::
+ image="ttylinux-uec-amd64-12.1_2.6.35-22_1.tar.gz"
+ wget http://smoser.brickies.net/ubuntu/ttylinux-uec/$image
+ uec-publish-tarball $image mybucket
- sudo /usr/bin/python /usr/bin/nova-manage project env admin admin creds/novarc
+This will output three references, an "emi", an "eri" and an "eki." (Image, ramdisk, and kernel) The emi is the one we use to launch instances, so take note of this.
-unzip them in your home directory, and add them to your environment::
+Create a keypair to SSH to the server:
- unzip creds/nova.zip
- echo ". creds/novarc" >> ~/.bashrc
- ~/.bashrc
+::
-Step 6 Restart all relevant services
-------------------------------------
+ euca-add-keypair mykey > mykey.priv
-Restart Libvirt::
+ chmod 0600 mykey.priv
- sudo /etc/init.d/libvirt-bin restart
+Boot your instance:
-Restart relevant nova services::
+::
- sudo /etc/init.d/nova-compute restart
- sudo /etc/init.d/nova-volume restart
+ euca-run-instances $emi -k mykey -t m1.tiny
+($emi is replaced with the output from the previous command)
-.. todo:: do we still need the content below?
+Checking status, and confirming communication:
-Bare-metal Provisioning Notes
------------------------------
+Once you have booted the instance, you can check the status with the `euca-describe-instances` command. Here you can view the instance ID, IP, and current status of the VM.
-To install the base operating system you can use PXE booting.
+::
-Types of Hosts
---------------
+ euca-describe-instances
-A single machine in your cluster can act as one or more of the following types
-of host:
+Once in a "running" state, you can use your SSH key connect:
-Nova Services
+::
-* Network
-* Compute
-* Volume
-* API
-* Objectstore
+ ssh -i mykey.priv root@$ipaddress
-Other supporting services
+When you are ready to terminate the instance, you may do so with the `euca-terminate-instances` command:
-* Message Queue
-* Database (optional)
-* Authentication database (optional)
+::
-Initial Setup
--------------
+ euca-terminate-instances $instance-id
-* Networking
-* Cloudadmin User Creation
+You can determine the instance-id with `euca-describe-instances`, and the format is "i-" with a series of letters and numbers following, e.g. i-a4g9d.
-Deployment Technologies
------------------------
+For more information on creating your own custom (production ready) instance images, please visit http://wiki.openstack.org/GettingImages for more information!
-Once you have machines with a base operating system installation, you can deploy
-code and configuration with your favorite tools to specify which machines in
-your cluster have which roles:
+Enjoy your new private cloud, and play responsibly!
-* Puppet
-* Chef
diff --git a/doc/source/adminguide/network.flat.rst b/doc/source/adminguide/network.flat.rst
index 1b8661a40..3d8680c6f 100644
--- a/doc/source/adminguide/network.flat.rst
+++ b/doc/source/adminguide/network.flat.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/adminguide/network.vlan.rst b/doc/source/adminguide/network.vlan.rst
index c6c4e7f91..c06ce8e8b 100644
--- a/doc/source/adminguide/network.vlan.rst
+++ b/doc/source/adminguide/network.vlan.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
@@ -91,11 +91,10 @@ These do NOT have IP addresses in the host to protect host access.
Compute nodes have iptables/ebtables entries created per project and
instance to protect against IP/MAC address spoofing and ARP poisoning.
-The network assignment to a project, and IP address assignment to a VM instance, are triggered when a user starts to run a VM instance. When running a VM instance, a user needs to specify a project for the instances, and the security groups (described in Security Groups) when the instance wants to join. If this is the first instance to be created for the project, then Nova (the cloud controller) needs to find a network controller to be the network host for the project; it then sets up a private network by finding an unused VLAN id, an unused subnet, and then the controller assigns them to the project, it also assigns a name to the project's Linux bridge, and allocating a private IP within the project's subnet for the new instance.
+The network assignment to a project, and IP address assignment to a VM instance, are triggered when a user starts to run a VM instance. When running a VM instance, a user needs to specify a project for the instances, and the security groups (described in Security Groups) when the instance wants to join. If this is the first instance to be created for the project, then Nova (the cloud controller) needs to find a network controller to be the network host for the project; it then sets up a private network by finding an unused VLAN id, an unused subnet, and then the controller assigns them to the project, it also assigns a name to the project's Linux bridge (br100 stored in the Nova database), and allocating a private IP within the project's subnet for the new instance.
If the instance the user wants to start is not the project's first, a subnet and a VLAN must have already been assigned to the project; therefore the system needs only to find an available IP address within the subnet and assign it to the new starting instance. If there is no private IP available within the subnet, an exception will be raised to the cloud controller, and the VM creation cannot proceed.
-.. todo:: insert the name of the Linux bridge, is it always named bridge?
External Infrastructure
-----------------------
diff --git a/doc/source/adminguide/nova.manage.rst b/doc/source/adminguide/nova.manage.rst
index 0e5c4e062..0ec67c69c 100644
--- a/doc/source/adminguide/nova.manage.rst
+++ b/doc/source/adminguide/nova.manage.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/api/autoindex.rst b/doc/source/api/autoindex.rst
deleted file mode 100644
index 6265b082b..000000000
--- a/doc/source/api/autoindex.rst
+++ /dev/null
@@ -1,99 +0,0 @@
-.. toctree::
- :maxdepth: 1
-
- nova..adminclient.rst
- nova..api.cloud.rst
- nova..api.ec2.admin.rst
- nova..api.ec2.apirequest.rst
- nova..api.ec2.cloud.rst
- nova..api.ec2.images.rst
- nova..api.ec2.metadatarequesthandler.rst
- nova..api.openstack.auth.rst
- nova..api.openstack.backup_schedules.rst
- nova..api.openstack.faults.rst
- nova..api.openstack.flavors.rst
- nova..api.openstack.images.rst
- nova..api.openstack.servers.rst
- nova..api.openstack.sharedipgroups.rst
- nova..auth.dbdriver.rst
- nova..auth.fakeldap.rst
- nova..auth.ldapdriver.rst
- nova..auth.manager.rst
- nova..auth.signer.rst
- nova..cloudpipe.pipelib.rst
- nova..compute.disk.rst
- nova..compute.instance_types.rst
- nova..compute.manager.rst
- nova..compute.monitor.rst
- nova..compute.power_state.rst
- nova..context.rst
- nova..crypto.rst
- nova..db.api.rst
- nova..db.sqlalchemy.api.rst
- nova..db.sqlalchemy.models.rst
- nova..db.sqlalchemy.session.rst
- nova..exception.rst
- nova..fakerabbit.rst
- nova..flags.rst
- nova..image.service.rst
- nova..manager.rst
- nova..network.linux_net.rst
- nova..network.manager.rst
- nova..objectstore.bucket.rst
- nova..objectstore.handler.rst
- nova..objectstore.image.rst
- nova..objectstore.stored.rst
- nova..process.rst
- nova..quota.rst
- nova..rpc.rst
- nova..scheduler.chance.rst
- nova..scheduler.driver.rst
- nova..scheduler.manager.rst
- nova..scheduler.simple.rst
- nova..server.rst
- nova..service.rst
- nova..test.rst
- nova..tests.access_unittest.rst
- nova..tests.api.fakes.rst
- nova..tests.api.openstack.fakes.rst
- nova..tests.api.openstack.test_api.rst
- nova..tests.api.openstack.test_auth.rst
- nova..tests.api.openstack.test_faults.rst
- nova..tests.api.openstack.test_flavors.rst
- nova..tests.api.openstack.test_images.rst
- nova..tests.api.openstack.test_ratelimiting.rst
- nova..tests.api.openstack.test_servers.rst
- nova..tests.api.openstack.test_sharedipgroups.rst
- nova..tests.api.test_wsgi.rst
- nova..tests.api_integration.rst
- nova..tests.api_unittest.rst
- nova..tests.auth_unittest.rst
- nova..tests.cloud_unittest.rst
- nova..tests.compute_unittest.rst
- nova..tests.declare_flags.rst
- nova..tests.fake_flags.rst
- nova..tests.flags_unittest.rst
- nova..tests.network_unittest.rst
- nova..tests.objectstore_unittest.rst
- nova..tests.process_unittest.rst
- nova..tests.quota_unittest.rst
- nova..tests.real_flags.rst
- nova..tests.rpc_unittest.rst
- nova..tests.runtime_flags.rst
- nova..tests.scheduler_unittest.rst
- nova..tests.service_unittest.rst
- nova..tests.twistd_unittest.rst
- nova..tests.validator_unittest.rst
- nova..tests.virt_unittest.rst
- nova..tests.volume_unittest.rst
- nova..twistd.rst
- nova..utils.rst
- nova..validate.rst
- nova..virt.connection.rst
- nova..virt.fake.rst
- nova..virt.images.rst
- nova..virt.libvirt_conn.rst
- nova..virt.xenapi.rst
- nova..volume.driver.rst
- nova..volume.manager.rst
- nova..wsgi.rst
diff --git a/doc/source/api/nova..adminclient.rst b/doc/source/api/nova..adminclient.rst
deleted file mode 100644
index 35fa839e1..000000000
--- a/doc/source/api/nova..adminclient.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..adminclient` Module
-==============================================================================
-.. automodule:: nova..adminclient
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.cloud.rst b/doc/source/api/nova..api.cloud.rst
deleted file mode 100644
index 413840185..000000000
--- a/doc/source/api/nova..api.cloud.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.cloud` Module
-==============================================================================
-.. automodule:: nova..api.cloud
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.ec2.admin.rst b/doc/source/api/nova..api.ec2.admin.rst
deleted file mode 100644
index 4e9ab308b..000000000
--- a/doc/source/api/nova..api.ec2.admin.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.ec2.admin` Module
-==============================================================================
-.. automodule:: nova..api.ec2.admin
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.ec2.apirequest.rst b/doc/source/api/nova..api.ec2.apirequest.rst
deleted file mode 100644
index c17a2ff3a..000000000
--- a/doc/source/api/nova..api.ec2.apirequest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.ec2.apirequest` Module
-==============================================================================
-.. automodule:: nova..api.ec2.apirequest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.ec2.cloud.rst b/doc/source/api/nova..api.ec2.cloud.rst
deleted file mode 100644
index f6145c217..000000000
--- a/doc/source/api/nova..api.ec2.cloud.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.ec2.cloud` Module
-==============================================================================
-.. automodule:: nova..api.ec2.cloud
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.ec2.images.rst b/doc/source/api/nova..api.ec2.images.rst
deleted file mode 100644
index 012d800e4..000000000
--- a/doc/source/api/nova..api.ec2.images.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.ec2.images` Module
-==============================================================================
-.. automodule:: nova..api.ec2.images
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.ec2.metadatarequesthandler.rst b/doc/source/api/nova..api.ec2.metadatarequesthandler.rst
deleted file mode 100644
index 75f5169e5..000000000
--- a/doc/source/api/nova..api.ec2.metadatarequesthandler.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.ec2.metadatarequesthandler` Module
-==============================================================================
-.. automodule:: nova..api.ec2.metadatarequesthandler
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.auth.rst b/doc/source/api/nova..api.openstack.auth.rst
deleted file mode 100644
index 8c3f8f2da..000000000
--- a/doc/source/api/nova..api.openstack.auth.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.openstack.auth` Module
-==============================================================================
-.. automodule:: nova..api.openstack.auth
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.backup_schedules.rst b/doc/source/api/nova..api.openstack.backup_schedules.rst
deleted file mode 100644
index 6b406f12d..000000000
--- a/doc/source/api/nova..api.openstack.backup_schedules.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.openstack.backup_schedules` Module
-==============================================================================
-.. automodule:: nova..api.openstack.backup_schedules
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.faults.rst b/doc/source/api/nova..api.openstack.faults.rst
deleted file mode 100644
index 7b25561f7..000000000
--- a/doc/source/api/nova..api.openstack.faults.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.openstack.faults` Module
-==============================================================================
-.. automodule:: nova..api.openstack.faults
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.flavors.rst b/doc/source/api/nova..api.openstack.flavors.rst
deleted file mode 100644
index 0deb724de..000000000
--- a/doc/source/api/nova..api.openstack.flavors.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.openstack.flavors` Module
-==============================================================================
-.. automodule:: nova..api.openstack.flavors
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.images.rst b/doc/source/api/nova..api.openstack.images.rst
deleted file mode 100644
index 82bd5f1e8..000000000
--- a/doc/source/api/nova..api.openstack.images.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.openstack.images` Module
-==============================================================================
-.. automodule:: nova..api.openstack.images
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.servers.rst b/doc/source/api/nova..api.openstack.servers.rst
deleted file mode 100644
index c36856ea2..000000000
--- a/doc/source/api/nova..api.openstack.servers.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.openstack.servers` Module
-==============================================================================
-.. automodule:: nova..api.openstack.servers
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..api.openstack.sharedipgroups.rst b/doc/source/api/nova..api.openstack.sharedipgroups.rst
deleted file mode 100644
index 07632acc8..000000000
--- a/doc/source/api/nova..api.openstack.sharedipgroups.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..api.openstack.sharedipgroups` Module
-==============================================================================
-.. automodule:: nova..api.openstack.sharedipgroups
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..auth.dbdriver.rst b/doc/source/api/nova..auth.dbdriver.rst
deleted file mode 100644
index 7de68b6e0..000000000
--- a/doc/source/api/nova..auth.dbdriver.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..auth.dbdriver` Module
-==============================================================================
-.. automodule:: nova..auth.dbdriver
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..auth.fakeldap.rst b/doc/source/api/nova..auth.fakeldap.rst
deleted file mode 100644
index ca8a3ad4d..000000000
--- a/doc/source/api/nova..auth.fakeldap.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..auth.fakeldap` Module
-==============================================================================
-.. automodule:: nova..auth.fakeldap
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..auth.ldapdriver.rst b/doc/source/api/nova..auth.ldapdriver.rst
deleted file mode 100644
index c44463522..000000000
--- a/doc/source/api/nova..auth.ldapdriver.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..auth.ldapdriver` Module
-==============================================================================
-.. automodule:: nova..auth.ldapdriver
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..auth.manager.rst b/doc/source/api/nova..auth.manager.rst
deleted file mode 100644
index bc5ce2ec3..000000000
--- a/doc/source/api/nova..auth.manager.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..auth.manager` Module
-==============================================================================
-.. automodule:: nova..auth.manager
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..auth.signer.rst b/doc/source/api/nova..auth.signer.rst
deleted file mode 100644
index aad824ead..000000000
--- a/doc/source/api/nova..auth.signer.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..auth.signer` Module
-==============================================================================
-.. automodule:: nova..auth.signer
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..cloudpipe.pipelib.rst b/doc/source/api/nova..cloudpipe.pipelib.rst
deleted file mode 100644
index 054aaf484..000000000
--- a/doc/source/api/nova..cloudpipe.pipelib.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..cloudpipe.pipelib` Module
-==============================================================================
-.. automodule:: nova..cloudpipe.pipelib
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..compute.disk.rst b/doc/source/api/nova..compute.disk.rst
deleted file mode 100644
index 6410af6f3..000000000
--- a/doc/source/api/nova..compute.disk.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..compute.disk` Module
-==============================================================================
-.. automodule:: nova..compute.disk
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..compute.instance_types.rst b/doc/source/api/nova..compute.instance_types.rst
deleted file mode 100644
index d206ff3a4..000000000
--- a/doc/source/api/nova..compute.instance_types.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..compute.instance_types` Module
-==============================================================================
-.. automodule:: nova..compute.instance_types
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..compute.manager.rst b/doc/source/api/nova..compute.manager.rst
deleted file mode 100644
index 33a337c39..000000000
--- a/doc/source/api/nova..compute.manager.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..compute.manager` Module
-==============================================================================
-.. automodule:: nova..compute.manager
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..compute.monitor.rst b/doc/source/api/nova..compute.monitor.rst
deleted file mode 100644
index a91169ecd..000000000
--- a/doc/source/api/nova..compute.monitor.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..compute.monitor` Module
-==============================================================================
-.. automodule:: nova..compute.monitor
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..compute.power_state.rst b/doc/source/api/nova..compute.power_state.rst
deleted file mode 100644
index 41b1080e5..000000000
--- a/doc/source/api/nova..compute.power_state.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..compute.power_state` Module
-==============================================================================
-.. automodule:: nova..compute.power_state
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..context.rst b/doc/source/api/nova..context.rst
deleted file mode 100644
index 9de1adb24..000000000
--- a/doc/source/api/nova..context.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..context` Module
-==============================================================================
-.. automodule:: nova..context
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..crypto.rst b/doc/source/api/nova..crypto.rst
deleted file mode 100644
index af9f63634..000000000
--- a/doc/source/api/nova..crypto.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..crypto` Module
-==============================================================================
-.. automodule:: nova..crypto
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..db.api.rst b/doc/source/api/nova..db.api.rst
deleted file mode 100644
index 6d998fbb2..000000000
--- a/doc/source/api/nova..db.api.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..db.api` Module
-==============================================================================
-.. automodule:: nova..db.api
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.api.rst b/doc/source/api/nova..db.sqlalchemy.api.rst
deleted file mode 100644
index 76d0c1bd3..000000000
--- a/doc/source/api/nova..db.sqlalchemy.api.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..db.sqlalchemy.api` Module
-==============================================================================
-.. automodule:: nova..db.sqlalchemy.api
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.models.rst b/doc/source/api/nova..db.sqlalchemy.models.rst
deleted file mode 100644
index 9c795d7f5..000000000
--- a/doc/source/api/nova..db.sqlalchemy.models.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..db.sqlalchemy.models` Module
-==============================================================================
-.. automodule:: nova..db.sqlalchemy.models
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..db.sqlalchemy.session.rst b/doc/source/api/nova..db.sqlalchemy.session.rst
deleted file mode 100644
index cbfd6416a..000000000
--- a/doc/source/api/nova..db.sqlalchemy.session.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..db.sqlalchemy.session` Module
-==============================================================================
-.. automodule:: nova..db.sqlalchemy.session
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..exception.rst b/doc/source/api/nova..exception.rst
deleted file mode 100644
index 97ac6b752..000000000
--- a/doc/source/api/nova..exception.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..exception` Module
-==============================================================================
-.. automodule:: nova..exception
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..fakerabbit.rst b/doc/source/api/nova..fakerabbit.rst
deleted file mode 100644
index f1e27c266..000000000
--- a/doc/source/api/nova..fakerabbit.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..fakerabbit` Module
-==============================================================================
-.. automodule:: nova..fakerabbit
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..flags.rst b/doc/source/api/nova..flags.rst
deleted file mode 100644
index 08165be44..000000000
--- a/doc/source/api/nova..flags.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..flags` Module
-==============================================================================
-.. automodule:: nova..flags
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..image.service.rst b/doc/source/api/nova..image.service.rst
deleted file mode 100644
index 78ef1ecca..000000000
--- a/doc/source/api/nova..image.service.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..image.service` Module
-==============================================================================
-.. automodule:: nova..image.service
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..manager.rst b/doc/source/api/nova..manager.rst
deleted file mode 100644
index 576902491..000000000
--- a/doc/source/api/nova..manager.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..manager` Module
-==============================================================================
-.. automodule:: nova..manager
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..network.linux_net.rst b/doc/source/api/nova..network.linux_net.rst
deleted file mode 100644
index 7af78d5ad..000000000
--- a/doc/source/api/nova..network.linux_net.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..network.linux_net` Module
-==============================================================================
-.. automodule:: nova..network.linux_net
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..network.manager.rst b/doc/source/api/nova..network.manager.rst
deleted file mode 100644
index 0ea705533..000000000
--- a/doc/source/api/nova..network.manager.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..network.manager` Module
-==============================================================================
-.. automodule:: nova..network.manager
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..objectstore.bucket.rst b/doc/source/api/nova..objectstore.bucket.rst
deleted file mode 100644
index 3bfdf639c..000000000
--- a/doc/source/api/nova..objectstore.bucket.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..objectstore.bucket` Module
-==============================================================================
-.. automodule:: nova..objectstore.bucket
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..objectstore.handler.rst b/doc/source/api/nova..objectstore.handler.rst
deleted file mode 100644
index 0eb8c4efb..000000000
--- a/doc/source/api/nova..objectstore.handler.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..objectstore.handler` Module
-==============================================================================
-.. automodule:: nova..objectstore.handler
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..objectstore.image.rst b/doc/source/api/nova..objectstore.image.rst
deleted file mode 100644
index fa4c971f1..000000000
--- a/doc/source/api/nova..objectstore.image.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..objectstore.image` Module
-==============================================================================
-.. automodule:: nova..objectstore.image
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..objectstore.stored.rst b/doc/source/api/nova..objectstore.stored.rst
deleted file mode 100644
index 2b1d997a3..000000000
--- a/doc/source/api/nova..objectstore.stored.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..objectstore.stored` Module
-==============================================================================
-.. automodule:: nova..objectstore.stored
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..process.rst b/doc/source/api/nova..process.rst
deleted file mode 100644
index 91eff8379..000000000
--- a/doc/source/api/nova..process.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..process` Module
-==============================================================================
-.. automodule:: nova..process
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..quota.rst b/doc/source/api/nova..quota.rst
deleted file mode 100644
index 4140d95d6..000000000
--- a/doc/source/api/nova..quota.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..quota` Module
-==============================================================================
-.. automodule:: nova..quota
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..rpc.rst b/doc/source/api/nova..rpc.rst
deleted file mode 100644
index 5b2a9b8e2..000000000
--- a/doc/source/api/nova..rpc.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..rpc` Module
-==============================================================================
-.. automodule:: nova..rpc
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..scheduler.chance.rst b/doc/source/api/nova..scheduler.chance.rst
deleted file mode 100644
index 89c074c8f..000000000
--- a/doc/source/api/nova..scheduler.chance.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..scheduler.chance` Module
-==============================================================================
-.. automodule:: nova..scheduler.chance
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..scheduler.driver.rst b/doc/source/api/nova..scheduler.driver.rst
deleted file mode 100644
index 793ed9c7b..000000000
--- a/doc/source/api/nova..scheduler.driver.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..scheduler.driver` Module
-==============================================================================
-.. automodule:: nova..scheduler.driver
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..scheduler.manager.rst b/doc/source/api/nova..scheduler.manager.rst
deleted file mode 100644
index d0fc7c423..000000000
--- a/doc/source/api/nova..scheduler.manager.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..scheduler.manager` Module
-==============================================================================
-.. automodule:: nova..scheduler.manager
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..scheduler.simple.rst b/doc/source/api/nova..scheduler.simple.rst
deleted file mode 100644
index dacc2cf30..000000000
--- a/doc/source/api/nova..scheduler.simple.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..scheduler.simple` Module
-==============================================================================
-.. automodule:: nova..scheduler.simple
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..server.rst b/doc/source/api/nova..server.rst
deleted file mode 100644
index 7cb2cfa54..000000000
--- a/doc/source/api/nova..server.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..server` Module
-==============================================================================
-.. automodule:: nova..server
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..service.rst b/doc/source/api/nova..service.rst
deleted file mode 100644
index 2d2dfcf2e..000000000
--- a/doc/source/api/nova..service.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..service` Module
-==============================================================================
-.. automodule:: nova..service
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..test.rst b/doc/source/api/nova..test.rst
deleted file mode 100644
index a6bdb6f1f..000000000
--- a/doc/source/api/nova..test.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..test` Module
-==============================================================================
-.. automodule:: nova..test
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.access_unittest.rst b/doc/source/api/nova..tests.access_unittest.rst
deleted file mode 100644
index 89554e430..000000000
--- a/doc/source/api/nova..tests.access_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.access_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.access_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.fakes.rst b/doc/source/api/nova..tests.api.fakes.rst
deleted file mode 100644
index 5728b18f3..000000000
--- a/doc/source/api/nova..tests.api.fakes.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api.fakes` Module
-==============================================================================
-.. automodule:: nova..tests.api.fakes
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.fakes.rst b/doc/source/api/nova..tests.api.openstack.fakes.rst
deleted file mode 100644
index 4a9ff5938..000000000
--- a/doc/source/api/nova..tests.api.openstack.fakes.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api.openstack.fakes` Module
-==============================================================================
-.. automodule:: nova..tests.api.openstack.fakes
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_api.rst b/doc/source/api/nova..tests.api.openstack.test_api.rst
deleted file mode 100644
index 68106d221..000000000
--- a/doc/source/api/nova..tests.api.openstack.test_api.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api.openstack.test_api` Module
-==============================================================================
-.. automodule:: nova..tests.api.openstack.test_api
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_auth.rst b/doc/source/api/nova..tests.api.openstack.test_auth.rst
deleted file mode 100644
index 9f0011669..000000000
--- a/doc/source/api/nova..tests.api.openstack.test_auth.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api.openstack.test_auth` Module
-==============================================================================
-.. automodule:: nova..tests.api.openstack.test_auth
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_faults.rst b/doc/source/api/nova..tests.api.openstack.test_faults.rst
deleted file mode 100644
index b839ae8a3..000000000
--- a/doc/source/api/nova..tests.api.openstack.test_faults.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api.openstack.test_faults` Module
-==============================================================================
-.. automodule:: nova..tests.api.openstack.test_faults
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_flavors.rst b/doc/source/api/nova..tests.api.openstack.test_flavors.rst
deleted file mode 100644
index 471fac56e..000000000
--- a/doc/source/api/nova..tests.api.openstack.test_flavors.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api.openstack.test_flavors` Module
-==============================================================================
-.. automodule:: nova..tests.api.openstack.test_flavors
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_images.rst b/doc/source/api/nova..tests.api.openstack.test_images.rst
deleted file mode 100644
index 57ae93c8c..000000000
--- a/doc/source/api/nova..tests.api.openstack.test_images.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api.openstack.test_images` Module
-==============================================================================
-.. automodule:: nova..tests.api.openstack.test_images
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_ratelimiting.rst b/doc/source/api/nova..tests.api.openstack.test_ratelimiting.rst
deleted file mode 100644
index 9a857f795..000000000
--- a/doc/source/api/nova..tests.api.openstack.test_ratelimiting.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api.openstack.test_ratelimiting` Module
-==============================================================================
-.. automodule:: nova..tests.api.openstack.test_ratelimiting
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_servers.rst b/doc/source/api/nova..tests.api.openstack.test_servers.rst
deleted file mode 100644
index ea602e6ab..000000000
--- a/doc/source/api/nova..tests.api.openstack.test_servers.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api.openstack.test_servers` Module
-==============================================================================
-.. automodule:: nova..tests.api.openstack.test_servers
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.openstack.test_sharedipgroups.rst b/doc/source/api/nova..tests.api.openstack.test_sharedipgroups.rst
deleted file mode 100644
index 1fad49147..000000000
--- a/doc/source/api/nova..tests.api.openstack.test_sharedipgroups.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api.openstack.test_sharedipgroups` Module
-==============================================================================
-.. automodule:: nova..tests.api.openstack.test_sharedipgroups
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api.test_wsgi.rst b/doc/source/api/nova..tests.api.test_wsgi.rst
deleted file mode 100644
index 8e79caa4d..000000000
--- a/doc/source/api/nova..tests.api.test_wsgi.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api.test_wsgi` Module
-==============================================================================
-.. automodule:: nova..tests.api.test_wsgi
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api_integration.rst b/doc/source/api/nova..tests.api_integration.rst
deleted file mode 100644
index fd217acf7..000000000
--- a/doc/source/api/nova..tests.api_integration.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api_integration` Module
-==============================================================================
-.. automodule:: nova..tests.api_integration
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.api_unittest.rst b/doc/source/api/nova..tests.api_unittest.rst
deleted file mode 100644
index 44a65d48c..000000000
--- a/doc/source/api/nova..tests.api_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.api_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.api_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.auth_unittest.rst b/doc/source/api/nova..tests.auth_unittest.rst
deleted file mode 100644
index 5805dcf38..000000000
--- a/doc/source/api/nova..tests.auth_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.auth_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.auth_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.cloud_unittest.rst b/doc/source/api/nova..tests.cloud_unittest.rst
deleted file mode 100644
index d2ca3b013..000000000
--- a/doc/source/api/nova..tests.cloud_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.cloud_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.cloud_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.compute_unittest.rst b/doc/source/api/nova..tests.compute_unittest.rst
deleted file mode 100644
index 6a30bf744..000000000
--- a/doc/source/api/nova..tests.compute_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.compute_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.compute_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.declare_flags.rst b/doc/source/api/nova..tests.declare_flags.rst
deleted file mode 100644
index 524e72e91..000000000
--- a/doc/source/api/nova..tests.declare_flags.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.declare_flags` Module
-==============================================================================
-.. automodule:: nova..tests.declare_flags
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.fake_flags.rst b/doc/source/api/nova..tests.fake_flags.rst
deleted file mode 100644
index a8dc3df36..000000000
--- a/doc/source/api/nova..tests.fake_flags.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.fake_flags` Module
-==============================================================================
-.. automodule:: nova..tests.fake_flags
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.flags_unittest.rst b/doc/source/api/nova..tests.flags_unittest.rst
deleted file mode 100644
index 61087e683..000000000
--- a/doc/source/api/nova..tests.flags_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.flags_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.flags_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.network_unittest.rst b/doc/source/api/nova..tests.network_unittest.rst
deleted file mode 100644
index df057d813..000000000
--- a/doc/source/api/nova..tests.network_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.network_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.network_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.objectstore_unittest.rst b/doc/source/api/nova..tests.objectstore_unittest.rst
deleted file mode 100644
index 0ae252f04..000000000
--- a/doc/source/api/nova..tests.objectstore_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.objectstore_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.objectstore_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.process_unittest.rst b/doc/source/api/nova..tests.process_unittest.rst
deleted file mode 100644
index 30d1e129c..000000000
--- a/doc/source/api/nova..tests.process_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.process_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.process_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.quota_unittest.rst b/doc/source/api/nova..tests.quota_unittest.rst
deleted file mode 100644
index 6ab813104..000000000
--- a/doc/source/api/nova..tests.quota_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.quota_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.quota_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.real_flags.rst b/doc/source/api/nova..tests.real_flags.rst
deleted file mode 100644
index e9c0d1abd..000000000
--- a/doc/source/api/nova..tests.real_flags.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.real_flags` Module
-==============================================================================
-.. automodule:: nova..tests.real_flags
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.rpc_unittest.rst b/doc/source/api/nova..tests.rpc_unittest.rst
deleted file mode 100644
index e6c7ceb2e..000000000
--- a/doc/source/api/nova..tests.rpc_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.rpc_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.rpc_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.runtime_flags.rst b/doc/source/api/nova..tests.runtime_flags.rst
deleted file mode 100644
index 984e21199..000000000
--- a/doc/source/api/nova..tests.runtime_flags.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.runtime_flags` Module
-==============================================================================
-.. automodule:: nova..tests.runtime_flags
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.scheduler_unittest.rst b/doc/source/api/nova..tests.scheduler_unittest.rst
deleted file mode 100644
index ae3a06616..000000000
--- a/doc/source/api/nova..tests.scheduler_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.scheduler_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.scheduler_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.service_unittest.rst b/doc/source/api/nova..tests.service_unittest.rst
deleted file mode 100644
index c7c746d17..000000000
--- a/doc/source/api/nova..tests.service_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.service_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.service_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.twistd_unittest.rst b/doc/source/api/nova..tests.twistd_unittest.rst
deleted file mode 100644
index ce88202e1..000000000
--- a/doc/source/api/nova..tests.twistd_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.twistd_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.twistd_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.validator_unittest.rst b/doc/source/api/nova..tests.validator_unittest.rst
deleted file mode 100644
index 980284327..000000000
--- a/doc/source/api/nova..tests.validator_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.validator_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.validator_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.virt_unittest.rst b/doc/source/api/nova..tests.virt_unittest.rst
deleted file mode 100644
index 2189be41e..000000000
--- a/doc/source/api/nova..tests.virt_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.virt_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.virt_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.volume_unittest.rst b/doc/source/api/nova..tests.volume_unittest.rst
deleted file mode 100644
index 791e192f5..000000000
--- a/doc/source/api/nova..tests.volume_unittest.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.volume_unittest` Module
-==============================================================================
-.. automodule:: nova..tests.volume_unittest
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..twistd.rst b/doc/source/api/nova..twistd.rst
deleted file mode 100644
index d4145396d..000000000
--- a/doc/source/api/nova..twistd.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..twistd` Module
-==============================================================================
-.. automodule:: nova..twistd
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..utils.rst b/doc/source/api/nova..utils.rst
deleted file mode 100644
index 1131d1080..000000000
--- a/doc/source/api/nova..utils.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..utils` Module
-==============================================================================
-.. automodule:: nova..utils
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..validate.rst b/doc/source/api/nova..validate.rst
deleted file mode 100644
index 1d142f103..000000000
--- a/doc/source/api/nova..validate.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..validate` Module
-==============================================================================
-.. automodule:: nova..validate
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..virt.connection.rst b/doc/source/api/nova..virt.connection.rst
deleted file mode 100644
index caf766765..000000000
--- a/doc/source/api/nova..virt.connection.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..virt.connection` Module
-==============================================================================
-.. automodule:: nova..virt.connection
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..virt.fake.rst b/doc/source/api/nova..virt.fake.rst
deleted file mode 100644
index 06ecdbf7d..000000000
--- a/doc/source/api/nova..virt.fake.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..virt.fake` Module
-==============================================================================
-.. automodule:: nova..virt.fake
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..virt.images.rst b/doc/source/api/nova..virt.images.rst
deleted file mode 100644
index 4fdeb7af8..000000000
--- a/doc/source/api/nova..virt.images.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..virt.images` Module
-==============================================================================
-.. automodule:: nova..virt.images
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..virt.libvirt_conn.rst b/doc/source/api/nova..virt.libvirt_conn.rst
deleted file mode 100644
index 7fb8aed5f..000000000
--- a/doc/source/api/nova..virt.libvirt_conn.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..virt.libvirt_conn` Module
-==============================================================================
-.. automodule:: nova..virt.libvirt_conn
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..virt.xenapi.rst b/doc/source/api/nova..virt.xenapi.rst
deleted file mode 100644
index 2e396bf06..000000000
--- a/doc/source/api/nova..virt.xenapi.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..virt.xenapi` Module
-==============================================================================
-.. automodule:: nova..virt.xenapi
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..volume.driver.rst b/doc/source/api/nova..volume.driver.rst
deleted file mode 100644
index 51f5c0729..000000000
--- a/doc/source/api/nova..volume.driver.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..volume.driver` Module
-==============================================================================
-.. automodule:: nova..volume.driver
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..volume.manager.rst b/doc/source/api/nova..volume.manager.rst
deleted file mode 100644
index 91a192a8f..000000000
--- a/doc/source/api/nova..volume.manager.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..volume.manager` Module
-==============================================================================
-.. automodule:: nova..volume.manager
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..wsgi.rst b/doc/source/api/nova..wsgi.rst
deleted file mode 100644
index 0bff1c332..000000000
--- a/doc/source/api/nova..wsgi.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..wsgi` Module
-==============================================================================
-.. automodule:: nova..wsgi
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/cloud101.rst b/doc/source/cloud101.rst
index 7c79d2a70..9902ba502 100644
--- a/doc/source/cloud101.rst
+++ b/doc/source/cloud101.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
@@ -54,7 +54,7 @@ Cloud computing offers different service models depending on the capabilities a
The US-based National Institute of Standards and Technology offers definitions for cloud computing
and the service models that are emerging.
-These definitions are summarized from http://csrc.nist.gov/groups/SNS/cloud-computing/.
+These definitions are summarized from the `U.S. National Institute of Standards and Technology (NIST) cloud computing research group <http://csrc.nist.gov/groups/SNS/cloud-computing/>`_.
SaaS - Software as a Service
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -74,7 +74,6 @@ IaaS - Infrastructure as a Service
Provides infrastructure such as computer instances, network connections, and storage so that people
can run any software or operating system.
-
Types of Cloud Deployments
--------------------------
@@ -87,4 +86,5 @@ A hybrid cloud can be a deployment model, as a composition of both public and pr
Work in the Clouds
------------------
-.. todo:: What people have done/sample projects
+What have people done with cloud computing? Cloud computing can help with large-scale computing needs or can lead to consolidation efforts by virtualizing servers to make more use of existing hardware (and possibly release old hardware from service). People also use cloud computing for collaboration because of the high availability through networked computers. Productivity suites for word processing, number crunching, email communications, and more are also available through cloud computing. Cloud computing also offers additional storage to the cloud user, avoiding the need for additional hard drives on your desktop and enabling access to large data storage capacity online in the cloud.
+
diff --git a/doc/source/community.rst b/doc/source/community.rst
index bfb93414c..4ae32f1eb 100644
--- a/doc/source/community.rst
+++ b/doc/source/community.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
@@ -35,7 +35,8 @@ Contributing Code
To contribute code, sign up for a Launchpad account and sign a contributor license agreement,
available on the `OpenStack Wiki <http://wiki.openstack.org/CLA>`_. Once the CLA is signed you
-can contribute code through the Bazaar version control system which is related to your Launchpad account.
+can contribute code through the Bazaar version control system which is related to your Launchpad
+account. See the :doc:`devref/development.environment` page to get started.
#openstack on Freenode IRC Network
----------------------------------
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 8f1b370cc..996dfb0a7 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -60,10 +60,12 @@ copyright = u'2010, United States Government as represented by the Administrator
# |version| and |release|, also used in various other places throughout the
# built documents.
#
-# The short X.Y version.
-version = '2011.1'
+from nova import version as nova_version
+#import nova.version
# The full version, including alpha/beta/rc tags.
-release = '2011.1-prerelease'
+release = nova_version.version_string()
+# The short X.Y version.
+version = nova_version.canonical_version_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
diff --git a/doc/source/devref/addmethod.openstackapi.rst b/doc/source/devref/addmethod.openstackapi.rst
index 4baa46e20..dde50083b 100644
--- a/doc/source/devref/addmethod.openstackapi.rst
+++ b/doc/source/devref/addmethod.openstackapi.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 OpenStack LLC
+ Copyright 2010-2011 OpenStack LLC
All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/doc/source/devref/api.rst b/doc/source/devref/api.rst
index 14181529a..35abf1ae0 100644
--- a/doc/source/devref/api.rst
+++ b/doc/source/devref/api.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/devref/architecture.rst b/doc/source/devref/architecture.rst
index 1e23e1361..233cd6f08 100644
--- a/doc/source/devref/architecture.rst
+++ b/doc/source/devref/architecture.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/devref/auth.rst b/doc/source/devref/auth.rst
index c3af3f945..830caba67 100644
--- a/doc/source/devref/auth.rst
+++ b/doc/source/devref/auth.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/devref/cloudpipe.rst b/doc/source/devref/cloudpipe.rst
index fb104c160..4f5d91e28 100644
--- a/doc/source/devref/cloudpipe.rst
+++ b/doc/source/devref/cloudpipe.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/devref/compute.rst b/doc/source/devref/compute.rst
index db9ef6f34..31cc2037f 100644
--- a/doc/source/devref/compute.rst
+++ b/doc/source/devref/compute.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/devref/database.rst b/doc/source/devref/database.rst
index 14559aa8c..a26e48705 100644
--- a/doc/source/devref/database.rst
+++ b/doc/source/devref/database.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
@@ -60,4 +60,4 @@ Tests
-----
Tests are lacking for the db api layer and for the sqlalchemy driver.
-Failures in the drivers would be dectected in other test cases, though.
+Failures in the drivers would be detected in other test cases, though.
diff --git a/doc/source/devref/development.environment.rst b/doc/source/devref/development.environment.rst
index 6344c5382..f3c454d64 100644
--- a/doc/source/devref/development.environment.rst
+++ b/doc/source/devref/development.environment.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
@@ -88,7 +88,12 @@ Here's how to get the latest code::
source .nova_venv/bin/activate
./run_tests.sh
-And then you can do cleaning work or hack hack hack with a branched named cleaning::
+Then you can do cleaning work or hack hack hack with a branch named cleaning.
+
+Contributing Your Work
+----------------------
+
+Once your work is complete you may wish to contribute it to the project. Add your name and email address to the `Authors` file, and also to the `.mailmap` file if you use multiple email addresses. Your contributions cannot be merged into trunk unless you are listed in the Authors file. Now, push the branch to Launchpad::
bzr push lp:~launchpaduserid/nova/cleaning
diff --git a/doc/source/devref/fakes.rst b/doc/source/devref/fakes.rst
index 0ba5d6ef2..6073447f0 100644
--- a/doc/source/devref/fakes.rst
+++ b/doc/source/devref/fakes.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/devref/glance.rst b/doc/source/devref/glance.rst
index d18f7fec6..9a1c14d58 100644
--- a/doc/source/devref/glance.rst
+++ b/doc/source/devref/glance.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/devref/index.rst b/doc/source/devref/index.rst
index 589609ace..9613ba990 100644
--- a/doc/source/devref/index.rst
+++ b/doc/source/devref/index.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/devref/network.rst b/doc/source/devref/network.rst
index d9d091494..eaf13e9ba 100644
--- a/doc/source/devref/network.rst
+++ b/doc/source/devref/network.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/devref/nova.rst b/doc/source/devref/nova.rst
index 53ce6f34f..093fbb3ee 100644
--- a/doc/source/devref/nova.rst
+++ b/doc/source/devref/nova.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/devref/objectstore.rst b/doc/source/devref/objectstore.rst
index 3ccfc8566..f140e85e9 100644
--- a/doc/source/devref/objectstore.rst
+++ b/doc/source/devref/objectstore.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/devref/rabbit.rst b/doc/source/devref/rabbit.rst
index ae0bac49d..c17a13222 100644
--- a/doc/source/devref/rabbit.rst
+++ b/doc/source/devref/rabbit.rst
@@ -1,5 +1,6 @@
..
Copyright (c) 2010 Citrix Systems, Inc.
+ All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
@@ -29,7 +30,7 @@ Nova (Austin release) uses both direct and topic-based exchanges. The architectu
..
-Nova implements RPC (both request+response, and one-way, respectively nicknamed 'rpc.call' and 'rpc.cast') over AMQP by providing an adapter class which take cares of marshalling and unmarshalling of messages into function calls. Each Nova service (for example Compute, Volume, etc.) create two queues at the initialization time, one which accepts messages with routing keys 'NODE-TYPE.NODE-ID' (for example compute.hostname) and another, which accepts messages with routing keys as generic 'NODE-TYPE' (for example compute). The former is used specifically when Nova-API needs to redirect commands to a specific node like 'euca-terminate instance'. In this case, only the compute node whose host's hypervisor is running the virtual machine can kill the instance. The API acts as a consumer when RPC calls are request/response, otherwise is acts as publisher only.
+Nova implements RPC (both request+response, and one-way, respectively nicknamed 'rpc.call' and 'rpc.cast') over AMQP by providing an adapter class which takes care of marshaling and unmarshaling of messages into function calls. Each Nova service (for example Compute, Volume, etc.) creates two queues at initialization time, one which accepts messages with routing keys 'NODE-TYPE.NODE-ID' (for example compute.hostname) and another, which accepts messages with routing keys as generic 'NODE-TYPE' (for example compute). The former is used specifically when Nova-API needs to redirect commands to a specific node like 'euca-terminate instance'. In this case, only the compute node whose host's hypervisor is running the virtual machine can kill the instance. The API acts as a consumer when RPC calls are request/response; otherwise it acts as publisher only.
Nova RPC Mappings
-----------------
@@ -39,7 +40,7 @@ The figure below shows the internals of a RabbitMQ node when a single instance i
Figure 2 shows the following internal elements:
* Topic Publisher: a Topic Publisher comes to life when an rpc.call or an rpc.cast operation is executed; this object is instantiated and used to push a message to the queuing system. Every publisher connects always to the same topic-based exchange; its life-cycle is limited to the message delivery.
- * Direct Consumer: a Direct Consumer comes to life if (an only if) a rpc.call operation is executed; this object is instantiated and used to receive a response message from the queuing system; Every consumer connects to a unique direct-based exchange via a unique exclusive queue; its life-cycle is limited to the message delivery; the exchange and queue identifiers are determined by a UUID generator, and are marshalled in the message sent by the Topic Publisher (only rpc.call operations).
+ * Direct Consumer: a Direct Consumer comes to life if (and only if) a rpc.call operation is executed; this object is instantiated and used to receive a response message from the queuing system; Every consumer connects to a unique direct-based exchange via a unique exclusive queue; its life-cycle is limited to the message delivery; the exchange and queue identifiers are determined by a UUID generator, and are marshaled in the message sent by the Topic Publisher (only rpc.call operations).
* Topic Consumer: a Topic Consumer comes to life as soon as a Worker is instantiated and exists throughout its life-cycle; this object is used to receive messages from the queue and it invokes the appropriate action as defined by the Worker role. A Topic Consumer connects to the same topic-based exchange either via a shared queue or via a unique exclusive queue. Every Worker has two topic consumers, one that is addressed only during rpc.cast operations (and it connects to a shared queue whose exchange key is 'topic') and the other that is addressed only during rpc.call operations (and it connects to a unique queue whose exchange key is 'topic.host').
* Direct Publisher: a Direct Publisher comes to life only during rpc.call operations and it is instantiated to return the message required by the request/response operation. The object connects to a direct-based exchange whose identity is dictated by the incoming message.
* Topic Exchange: The Exchange is a routing table that exists in the context of a virtual host (the multi-tenancy mechanism provided by RabbitMQ); its type (such as topic vs. direct) determines the routing policy; a RabbitMQ node will have only one topic-based exchange for every topic in Nova.
diff --git a/doc/source/devref/scheduler.rst b/doc/source/devref/scheduler.rst
index ab74b6ba8..066781514 100644
--- a/doc/source/devref/scheduler.rst
+++ b/doc/source/devref/scheduler.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/devref/services.rst b/doc/source/devref/services.rst
index f5bba5c12..ae237a248 100644
--- a/doc/source/devref/services.rst
+++ b/doc/source/devref/services.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/devref/volume.rst b/doc/source/devref/volume.rst
index 54a2d4f8b..c4dddb9ea 100644
--- a/doc/source/devref/volume.rst
+++ b/doc/source/devref/volume.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/index.rst b/doc/source/index.rst
index b9ba6208a..6eec09acb 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/livecd.rst b/doc/source/livecd.rst
index b355fa180..37c92c8bc 100644
--- a/doc/source/livecd.rst
+++ b/doc/source/livecd.rst
@@ -1,3 +1,20 @@
+..
+ Copyright 2010-2011 OpenStack LLC
+
+ All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+
Installing the Live CD
======================
diff --git a/doc/source/nova.concepts.rst b/doc/source/nova.concepts.rst
index fb3969a43..e9687dc98 100644
--- a/doc/source/nova.concepts.rst
+++ b/doc/source/nova.concepts.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
@@ -105,7 +105,7 @@ It is important to know that there are user-specific (sometimes called global) r
For example: A user can access api commands allowed to the netadmin role (like allocate_address) only if he has the user-specific netadmin role AND the project-specific netadmin role.
-More information about RBAC can be found in the :ref:`auth`.
+More information about RBAC can be found in :ref:`auth`.
Concept: API
------------
@@ -159,12 +159,10 @@ vpn management, and much more.
See :doc:`nova.manage` in the Administration Guide for more details.
-
Concept: Flags
--------------
-Nova uses python-gflags for a distributed command line system, and the flags can either be set when running a command at the command line or within flag files. When you install Nova packages, each Nova service gets its own flag file. For example, nova-network.conf is used for configuring the nova-network service, and so forth.
-
+Nova uses python-gflags for a distributed command line system, and the flags can either be set when running a command at the command line or within a flag file. When you install Nova packages for the Austin release, each nova service gets its own flag file. For example, nova-network.conf is used for configuring the nova-network service, and so forth. In releases beyond Austin, which was released in October 2010, all flags are set in nova.conf.
Concept: Plugins
----------------
diff --git a/doc/source/object.model.rst b/doc/source/object.model.rst
index c8d4df736..d02f151fd 100644
--- a/doc/source/object.model.rst
+++ b/doc/source/object.model.rst
@@ -1,3 +1,20 @@
+..
+ Copyright 2010-2011 OpenStack LLC
+
+ All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+
Object Model
============
@@ -25,29 +42,38 @@ Object Model
Users
-----
+Each Nova User is authorized based on their access key and secret key, assigned per-user. Read more at :doc:`/adminguide/managing.users`.
+
Projects
--------
+For Nova, access to images is based on the project. Read more at :doc:`/adminguide/managing.projects`.
Images
------
+Images are binary files that run the operating system. Read more at :doc:`/adminguide/managing.images`.
Instances
---------
+Instances are running virtual servers. Read more at :doc:`/adminguide/managing.instances`.
Volumes
-------
+.. todo:: Write doc about volumes
Security Groups
---------------
+In Nova, a security group is a named collection of network access rules, like firewall policies. Read more at `Security Groups <http://nova.openstack.org/nova.concepts.html#concept-security-groups>`_.
VLANs
-----
+VLAN is the default network mode for Nova. Read more at :doc:`/adminguide/network.vlan`.
IP Addresses
------------
+Nova enables floating IP management. \ No newline at end of file
diff --git a/doc/source/quickstart.rst b/doc/source/quickstart.rst
index fa5d96738..17c9e10a8 100644
--- a/doc/source/quickstart.rst
+++ b/doc/source/quickstart.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/service.architecture.rst b/doc/source/service.architecture.rst
index 28a32bec6..8fa1e3306 100644
--- a/doc/source/service.architecture.rst
+++ b/doc/source/service.architecture.rst
@@ -1,3 +1,20 @@
+..
+ Copyright 2010-2011 OpenStack LLC
+
+ All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+
Service Architecture
====================
diff --git a/krm_mapping.json.sample b/krm_mapping.json.sample
new file mode 100644
index 000000000..1ecfba635
--- /dev/null
+++ b/krm_mapping.json.sample
@@ -0,0 +1,3 @@
+{
+ "machine" : ["kernel", "ramdisk"]
+}
diff --git a/locale/nova.pot b/locale/nova.pot
new file mode 100644
index 000000000..a96411e33
--- /dev/null
+++ b/locale/nova.pot
@@ -0,0 +1,2130 @@
+# Translations template for nova.
+# Copyright (C) 2011 ORGANIZATION
+# This file is distributed under the same license as the nova project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2011.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: nova 2011.1\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2011-01-10 11:25-0800\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
+"Language-Team: LANGUAGE <LL@li.org>\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 0.9.4\n"
+
+#: nova/crypto.py:46
+msgid "Filename of root CA"
+msgstr ""
+
+#: nova/crypto.py:49
+msgid "Filename of private key"
+msgstr ""
+
+#: nova/crypto.py:51
+msgid "Filename of root Certificate Revokation List"
+msgstr ""
+
+#: nova/crypto.py:53
+msgid "Where we keep our keys"
+msgstr ""
+
+#: nova/crypto.py:55
+msgid "Where we keep our root CA"
+msgstr ""
+
+#: nova/crypto.py:57
+msgid "Should we use a CA for each project?"
+msgstr ""
+
+#: nova/crypto.py:61
+#, python-format
+msgid "Subject for certificate for users, %s for project, user, timestamp"
+msgstr ""
+
+#: nova/crypto.py:66
+#, python-format
+msgid "Subject for certificate for projects, %s for project, timestamp"
+msgstr ""
+
+#: nova/crypto.py:71
+#, python-format
+msgid "Subject for certificate for vpns, %s for project, timestamp"
+msgstr ""
+
+#: nova/crypto.py:258
+#, python-format
+msgid "Flags path: %s"
+msgstr ""
+
+#: nova/exception.py:33
+msgid "Unexpected error while running command."
+msgstr ""
+
+#: nova/exception.py:36
+#, python-format
+msgid ""
+"%s\n"
+"Command: %s\n"
+"Exit code: %s\n"
+"Stdout: %r\n"
+"Stderr: %r"
+msgstr ""
+
+#: nova/exception.py:86
+msgid "Uncaught exception"
+msgstr ""
+
+#: nova/fakerabbit.py:48
+#, python-format
+msgid "(%s) publish (key: %s) %s"
+msgstr ""
+
+#: nova/fakerabbit.py:53
+#, python-format
+msgid "Publishing to route %s"
+msgstr ""
+
+#: nova/fakerabbit.py:83
+#, python-format
+msgid "Declaring queue %s"
+msgstr ""
+
+#: nova/fakerabbit.py:89
+#, python-format
+msgid "Declaring exchange %s"
+msgstr ""
+
+#: nova/fakerabbit.py:95
+#, python-format
+msgid "Binding %s to %s with key %s"
+msgstr ""
+
+#: nova/fakerabbit.py:120
+#, python-format
+msgid "Getting from %s: %s"
+msgstr ""
+
+#: nova/rpc.py:92
+#, python-format
+msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds."
+msgstr ""
+
+#: nova/rpc.py:99
+#, python-format
+msgid "Unable to connect to AMQP server after %d tries. Shutting down."
+msgstr ""
+
+#: nova/rpc.py:118
+msgid "Reconnected to queue"
+msgstr ""
+
+#: nova/rpc.py:125
+msgid "Failed to fetch message from queue"
+msgstr ""
+
+#: nova/rpc.py:155
+#, python-format
+msgid "Initing the Adapter Consumer for %s"
+msgstr ""
+
+#: nova/rpc.py:170
+#, python-format
+msgid "received %s"
+msgstr ""
+
+#: nova/rpc.py:183
+#, python-format
+msgid "no method for message: %s"
+msgstr ""
+
+#: nova/rpc.py:184
+#, python-format
+msgid "No method for message: %s"
+msgstr ""
+
+#: nova/rpc.py:245
+#, python-format
+msgid "Returning exception %s to caller"
+msgstr ""
+
+#: nova/rpc.py:286
+#, python-format
+msgid "unpacked context: %s"
+msgstr ""
+
+#: nova/rpc.py:305
+msgid "Making asynchronous call..."
+msgstr ""
+
+#: nova/rpc.py:308
+#, python-format
+msgid "MSG_ID is %s"
+msgstr ""
+
+#: nova/rpc.py:356
+#, python-format
+msgid "response %s"
+msgstr ""
+
+#: nova/rpc.py:365
+#, python-format
+msgid "topic is %s"
+msgstr ""
+
+#: nova/rpc.py:366
+#, python-format
+msgid "message %s"
+msgstr ""
+
+#: nova/service.py:157
+#, python-format
+msgid "Starting %s node"
+msgstr ""
+
+#: nova/service.py:169
+msgid "Service killed that has no database entry"
+msgstr ""
+
+#: nova/service.py:190
+msgid "The service database object disappeared, Recreating it."
+msgstr ""
+
+#: nova/service.py:202
+msgid "Recovered model server connection!"
+msgstr ""
+
+#: nova/service.py:208
+msgid "model server went away"
+msgstr ""
+
+#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43
+#, python-format
+msgid "Data store %s is unreachable. Trying again in %d seconds."
+msgstr ""
+
+#: nova/service.py:232 nova/twistd.py:232
+#, python-format
+msgid "Serving %s"
+msgstr ""
+
+#: nova/service.py:234 nova/twistd.py:264
+msgid "Full set of FLAGS:"
+msgstr ""
+
+#: nova/twistd.py:211
+#, python-format
+msgid "pidfile %s does not exist. Daemon not running?\n"
+msgstr ""
+
+#: nova/twistd.py:268
+#, python-format
+msgid "Starting %s"
+msgstr ""
+
+#: nova/utils.py:53
+#, python-format
+msgid "Inner Exception: %s"
+msgstr ""
+
+#: nova/utils.py:54
+#, python-format
+msgid "Class %s cannot be found"
+msgstr ""
+
+#: nova/utils.py:113
+#, python-format
+msgid "Fetching %s"
+msgstr ""
+
+#: nova/utils.py:125
+#, python-format
+msgid "Running cmd (subprocess): %s"
+msgstr ""
+
+#: nova/utils.py:138
+#, python-format
+msgid "Result was %s"
+msgstr ""
+
+#: nova/utils.py:171
+#, python-format
+msgid "debug in callback: %s"
+msgstr ""
+
+#: nova/utils.py:176
+#, python-format
+msgid "Running %s"
+msgstr ""
+
+#: nova/utils.py:207
+#, python-format
+msgid "Couldn't get IP, using 127.0.0.1 %s"
+msgstr ""
+
+#: nova/utils.py:289
+#, python-format
+msgid "Invalid backend: %s"
+msgstr ""
+
+#: nova/utils.py:300
+#, python-format
+msgid "backend %s"
+msgstr ""
+
+#: nova/api/ec2/__init__.py:133
+msgid "Too many failed authentications."
+msgstr ""
+
+#: nova/api/ec2/__init__.py:142
+#, python-format
+msgid ""
+"Access key %s has had %d failed authentications and will be locked out "
+"for %d minutes."
+msgstr ""
+
+#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140
+#, python-format
+msgid "Authentication Failure: %s"
+msgstr ""
+
+#: nova/api/ec2/__init__.py:190
+#, python-format
+msgid "Authenticated Request For %s:%s)"
+msgstr ""
+
+#: nova/api/ec2/__init__.py:227
+#, python-format
+msgid "action: %s"
+msgstr ""
+
+#: nova/api/ec2/__init__.py:229
+#, python-format
+msgid "arg: %s\t\tval: %s"
+msgstr ""
+
+#: nova/api/ec2/__init__.py:301
+#, python-format
+msgid "Unauthorized request for controller=%s and action=%s"
+msgstr ""
+
+#: nova/api/ec2/__init__.py:339
+#, python-format
+msgid "NotFound raised: %s"
+msgstr ""
+
+#: nova/api/ec2/__init__.py:342
+#, python-format
+msgid "ApiError raised: %s"
+msgstr ""
+
+#: nova/api/ec2/__init__.py:349
+#, python-format
+msgid "Unexpected error raised: %s"
+msgstr ""
+
+#: nova/api/ec2/__init__.py:354
+msgid "An unknown error has occurred. Please try your request again."
+msgstr ""
+
+#: nova/api/ec2/admin.py:84
+#, python-format
+msgid "Creating new user: %s"
+msgstr ""
+
+#: nova/api/ec2/admin.py:92
+#, python-format
+msgid "Deleting user: %s"
+msgstr ""
+
+#: nova/api/ec2/admin.py:114
+#, python-format
+msgid "Adding role %s to user %s for project %s"
+msgstr ""
+
+#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415
+#, python-format
+msgid "Adding sitewide role %s to user %s"
+msgstr ""
+
+#: nova/api/ec2/admin.py:122
+#, python-format
+msgid "Removing role %s from user %s for project %s"
+msgstr ""
+
+#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441
+#, python-format
+msgid "Removing sitewide role %s from user %s"
+msgstr ""
+
+#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192
+msgid "operation must be add or remove"
+msgstr ""
+
+#: nova/api/ec2/admin.py:142
+#, python-format
+msgid "Getting x509 for user: %s on project: %s"
+msgstr ""
+
+#: nova/api/ec2/admin.py:159
+#, python-format
+msgid "Create project %s managed by %s"
+msgstr ""
+
+#: nova/api/ec2/admin.py:170
+#, python-format
+msgid "Delete project: %s"
+msgstr ""
+
+#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533
+#, python-format
+msgid "Adding user %s to project %s"
+msgstr ""
+
+#: nova/api/ec2/admin.py:188
+#, python-format
+msgid "Removing user %s from project %s"
+msgstr ""
+
+#: nova/api/ec2/apirequest.py:95
+#, python-format
+msgid "Unsupported API request: controller = %s,action = %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:117
+#, python-format
+msgid "Generating root CA: %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:277
+#, python-format
+msgid "Create key pair %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:285
+#, python-format
+msgid "Delete key pair %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:357
+#, python-format
+msgid "%s is not a valid ipProtocol"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:361
+msgid "Invalid port range"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:392
+#, python-format
+msgid "Revoke security group ingress %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414
+msgid "No rule for the specified parameters."
+msgstr ""
+
+#: nova/api/ec2/cloud.py:421
+#, python-format
+msgid "Authorize security group ingress %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:432
+#, python-format
+msgid "This rule already exists in group %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:460
+#, python-format
+msgid "Create Security Group %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:463
+#, python-format
+msgid "group %s already exists"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:475
+#, python-format
+msgid "Delete security group %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452
+#, python-format
+msgid "Get console output for instance %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:543
+#, python-format
+msgid "Create volume of %s GB"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:567
+#, python-format
+msgid "Attach volume %s to instacne %s at %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:579
+#, python-format
+msgid "Detach volume %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:686
+msgid "Allocate address"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:691
+#, python-format
+msgid "Release address %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:696
+#, python-format
+msgid "Associate address %s to instance %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:703
+#, python-format
+msgid "Disassociate address %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:730
+msgid "Going to start terminating instances"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:738
+#, python-format
+msgid "Reboot instance %r"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:775
+#, python-format
+msgid "De-registering image %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:783
+#, python-format
+msgid "Registered image %s with id %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804
+#, python-format
+msgid "attribute not supported: %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:794
+#, python-format
+msgid "invalid id: %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:807
+msgid "user or group not specified"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:809
+msgid "only group \"all\" is supported"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:811
+msgid "operation_type must be add or remove"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:812
+#, python-format
+msgid "Updating image %s publicity"
+msgstr ""
+
+#: nova/api/ec2/metadatarequesthandler.py:75
+#, python-format
+msgid "Failed to get metadata for ip: %s"
+msgstr ""
+
+#: nova/api/openstack/__init__.py:70
+#, python-format
+msgid "Caught error: %s"
+msgstr ""
+
+#: nova/api/openstack/__init__.py:86
+msgid "Including admin operations in API."
+msgstr ""
+
+#: nova/api/openstack/servers.py:184
+#, python-format
+msgid "Compute.api::lock %s"
+msgstr ""
+
+#: nova/api/openstack/servers.py:199
+#, python-format
+msgid "Compute.api::unlock %s"
+msgstr ""
+
+#: nova/api/openstack/servers.py:213
+#, python-format
+msgid "Compute.api::get_lock %s"
+msgstr ""
+
+#: nova/api/openstack/servers.py:224
+#, python-format
+msgid "Compute.api::pause %s"
+msgstr ""
+
+#: nova/api/openstack/servers.py:235
+#, python-format
+msgid "Compute.api::unpause %s"
+msgstr ""
+
+#: nova/api/openstack/servers.py:246
+#, python-format
+msgid "compute.api::suspend %s"
+msgstr ""
+
+#: nova/api/openstack/servers.py:257
+#, python-format
+msgid "compute.api::resume %s"
+msgstr ""
+
+#: nova/auth/dbdriver.py:84
+#, python-format
+msgid "User %s already exists"
+msgstr ""
+
+#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207
+#, python-format
+msgid "Project can't be created because manager %s doesn't exist"
+msgstr ""
+
+#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204
+#, python-format
+msgid "Project can't be created because project %s already exists"
+msgstr ""
+
+#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241
+#, python-format
+msgid "Project can't be modified because manager %s doesn't exist"
+msgstr ""
+
+#: nova/auth/dbdriver.py:245
+#, python-format
+msgid "User \"%s\" not found"
+msgstr ""
+
+#: nova/auth/dbdriver.py:248
+#, python-format
+msgid "Project \"%s\" not found"
+msgstr ""
+
+#: nova/auth/fakeldap.py:33
+msgid "Attempted to instantiate singleton"
+msgstr ""
+
+#: nova/auth/ldapdriver.py:181
+#, python-format
+msgid "LDAP object for %s doesn't exist"
+msgstr ""
+
+#: nova/auth/ldapdriver.py:218
+#, python-format
+msgid "Project can't be created because user %s doesn't exist"
+msgstr ""
+
+#: nova/auth/ldapdriver.py:478
+#, python-format
+msgid "User %s is already a member of the group %s"
+msgstr ""
+
+#: nova/auth/ldapdriver.py:507
+#, python-format
+msgid ""
+"Attempted to remove the last member of a group. Deleting the group at %s "
+"instead."
+msgstr ""
+
+#: nova/auth/ldapdriver.py:528
+#, python-format
+msgid "Group at dn %s doesn't exist"
+msgstr ""
+
+#: nova/auth/manager.py:259
+#, python-format
+msgid "Looking up user: %r"
+msgstr ""
+
+#: nova/auth/manager.py:263
+#, python-format
+msgid "Failed authorization for access key %s"
+msgstr ""
+
+#: nova/auth/manager.py:264
+#, python-format
+msgid "No user found for access key %s"
+msgstr ""
+
+#: nova/auth/manager.py:270
+#, python-format
+msgid "Using project name = user name (%s)"
+msgstr ""
+
+#: nova/auth/manager.py:275
+#, python-format
+msgid "failed authorization: no project named %s (user=%s)"
+msgstr ""
+
+#: nova/auth/manager.py:277
+#, python-format
+msgid "No project called %s could be found"
+msgstr ""
+
+#: nova/auth/manager.py:281
+#, python-format
+msgid "Failed authorization: user %s not admin and not member of project %s"
+msgstr ""
+
+#: nova/auth/manager.py:283
+#, python-format
+msgid "User %s is not a member of project %s"
+msgstr ""
+
+#: nova/auth/manager.py:292 nova/auth/manager.py:303
+#, python-format
+msgid "Invalid signature for user %s"
+msgstr ""
+
+#: nova/auth/manager.py:293 nova/auth/manager.py:304
+msgid "Signature does not match"
+msgstr ""
+
+#: nova/auth/manager.py:374
+msgid "Must specify project"
+msgstr ""
+
+#: nova/auth/manager.py:408
+#, python-format
+msgid "The %s role can not be found"
+msgstr ""
+
+#: nova/auth/manager.py:410
+#, python-format
+msgid "The %s role is global only"
+msgstr ""
+
+#: nova/auth/manager.py:412
+#, python-format
+msgid "Adding role %s to user %s in project %s"
+msgstr ""
+
+#: nova/auth/manager.py:438
+#, python-format
+msgid "Removing role %s from user %s on project %s"
+msgstr ""
+
+#: nova/auth/manager.py:505
+#, python-format
+msgid "Created project %s with manager %s"
+msgstr ""
+
+#: nova/auth/manager.py:523
+#, python-format
+msgid "modifying project %s"
+msgstr ""
+
+#: nova/auth/manager.py:553
+#, python-format
+msgid "Remove user %s from project %s"
+msgstr ""
+
+#: nova/auth/manager.py:581
+#, python-format
+msgid "Deleting project %s"
+msgstr ""
+
+#: nova/auth/manager.py:637
+#, python-format
+msgid "Created user %s (admin: %r)"
+msgstr ""
+
+#: nova/auth/manager.py:645
+#, python-format
+msgid "Deleting user %s"
+msgstr ""
+
+#: nova/auth/manager.py:655
+#, python-format
+msgid "Access Key change for user %s"
+msgstr ""
+
+#: nova/auth/manager.py:657
+#, python-format
+msgid "Secret Key change for user %s"
+msgstr ""
+
+#: nova/auth/manager.py:659
+#, python-format
+msgid "Admin status set to %r for user %s"
+msgstr ""
+
+#: nova/auth/manager.py:708
+#, python-format
+msgid "No vpn data for project %s"
+msgstr ""
+
+#: nova/cloudpipe/pipelib.py:45
+msgid "Template for script to run on cloudpipe instance boot"
+msgstr ""
+
+#: nova/cloudpipe/pipelib.py:48
+msgid "Network to push into openvpn config"
+msgstr ""
+
+#: nova/cloudpipe/pipelib.py:51
+msgid "Netmask to push into openvpn config"
+msgstr ""
+
+#: nova/cloudpipe/pipelib.py:97
+#, python-format
+msgid "Launching VPN for %s"
+msgstr ""
+
+#: nova/compute/api.py:67
+#, python-format
+msgid "Instance %d was not found in get_network_topic"
+msgstr ""
+
+#: nova/compute/api.py:73
+#, python-format
+msgid "Instance %d has no host"
+msgstr ""
+
+#: nova/compute/api.py:92
+#, python-format
+msgid "Quota exceeeded for %s, tried to run %s instances"
+msgstr ""
+
+#: nova/compute/api.py:94
+#, python-format
+msgid "Instance quota exceeded. You can only run %s more instances of this type."
+msgstr ""
+
+#: nova/compute/api.py:109
+msgid "Creating a raw instance"
+msgstr ""
+
+#: nova/compute/api.py:156
+#, python-format
+msgid "Going to run %s instances..."
+msgstr ""
+
+#: nova/compute/api.py:180
+#, python-format
+msgid "Casting to scheduler for %s/%s's instance %s"
+msgstr ""
+
+#: nova/compute/api.py:279
+#, python-format
+msgid "Going to try and terminate %s"
+msgstr ""
+
+#: nova/compute/api.py:283
+#, python-format
+msgid "Instance %d was not found during terminate"
+msgstr ""
+
+#: nova/compute/api.py:288
+#, python-format
+msgid "Instance %d is already being terminated"
+msgstr ""
+
+#: nova/compute/api.py:450
+#, python-format
+msgid "Invalid device specified: %s. Example device: /dev/vdb"
+msgstr ""
+
+#: nova/compute/api.py:465
+msgid "Volume isn't attached to anything!"
+msgstr ""
+
+#: nova/compute/disk.py:71
+#, python-format
+msgid "Input partition size not evenly divisible by sector size: %d / %d"
+msgstr ""
+
+#: nova/compute/disk.py:75
+#, python-format
+msgid "Bytes for local storage not evenly divisible by sector size: %d / %d"
+msgstr ""
+
+#: nova/compute/disk.py:128
+#, python-format
+msgid "Could not attach image to loopback: %s"
+msgstr ""
+
+#: nova/compute/disk.py:136
+#, python-format
+msgid "Failed to load partition: %s"
+msgstr ""
+
+#: nova/compute/disk.py:158
+#, python-format
+msgid "Failed to mount filesystem: %s"
+msgstr ""
+
+#: nova/compute/instance_types.py:41
+#, python-format
+msgid "Unknown instance type: %s"
+msgstr ""
+
+#: nova/compute/manager.py:69
+#, python-format
+msgid "check_instance_lock: decorating: |%s|"
+msgstr ""
+
+#: nova/compute/manager.py:71
+#, python-format
+msgid "check_instance_lock: arguments: |%s| |%s| |%s|"
+msgstr ""
+
+#: nova/compute/manager.py:75
+#, python-format
+msgid "check_instance_lock: locked: |%s|"
+msgstr ""
+
+#: nova/compute/manager.py:77
+#, python-format
+msgid "check_instance_lock: admin: |%s|"
+msgstr ""
+
+#: nova/compute/manager.py:82
+#, python-format
+msgid "check_instance_lock: executing: |%s|"
+msgstr ""
+
+#: nova/compute/manager.py:86
+#, python-format
+msgid "check_instance_lock: not executing |%s|"
+msgstr ""
+
+#: nova/compute/manager.py:157
+msgid "Instance has already been created"
+msgstr ""
+
+#: nova/compute/manager.py:158
+#, python-format
+msgid "instance %s: starting..."
+msgstr ""
+
+#: nova/compute/manager.py:197
+#, python-format
+msgid "instance %s: Failed to spawn"
+msgstr ""
+
+#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228
+#, python-format
+msgid "Terminating instance %s"
+msgstr ""
+
+#: nova/compute/manager.py:217
+#, python-format
+msgid "Disassociating address %s"
+msgstr ""
+
+#: nova/compute/manager.py:230
+#, python-format
+msgid "Deallocating address %s"
+msgstr ""
+
+#: nova/compute/manager.py:243
+#, python-format
+msgid "trying to destroy already destroyed instance: %s"
+msgstr ""
+
+#: nova/compute/manager.py:257
+#, python-format
+msgid "Rebooting instance %s"
+msgstr ""
+
+#: nova/compute/manager.py:260
+#, python-format
+msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)"
+msgstr ""
+
+#: nova/compute/manager.py:286
+#, python-format
+msgid "instance %s: snapshotting"
+msgstr ""
+
+#: nova/compute/manager.py:289
+#, python-format
+msgid "trying to snapshot a non-running instance: %s (state: %s excepted: %s)"
+msgstr ""
+
+#: nova/compute/manager.py:301
+#, python-format
+msgid "instance %s: rescuing"
+msgstr ""
+
+#: nova/compute/manager.py:316
+#, python-format
+msgid "instance %s: unrescuing"
+msgstr ""
+
+#: nova/compute/manager.py:335
+#, python-format
+msgid "instance %s: pausing"
+msgstr ""
+
+#: nova/compute/manager.py:352
+#, python-format
+msgid "instance %s: unpausing"
+msgstr ""
+
+#: nova/compute/manager.py:369
+#, python-format
+msgid "instance %s: retrieving diagnostics"
+msgstr ""
+
+#: nova/compute/manager.py:382
+#, python-format
+msgid "instance %s: suspending"
+msgstr ""
+
+#: nova/compute/manager.py:401
+#, python-format
+msgid "instance %s: resuming"
+msgstr ""
+
+#: nova/compute/manager.py:420
+#, python-format
+msgid "instance %s: locking"
+msgstr ""
+
+#: nova/compute/manager.py:432
+#, python-format
+msgid "instance %s: unlocking"
+msgstr ""
+
+#: nova/compute/manager.py:442
+#, python-format
+msgid "instance %s: getting locked state"
+msgstr ""
+
+#: nova/compute/manager.py:462
+#, python-format
+msgid "instance %s: attaching volume %s to %s"
+msgstr ""
+
+#: nova/compute/manager.py:478
+#, python-format
+msgid "instance %s: attach failed %s, removing"
+msgstr ""
+
+#: nova/compute/manager.py:493
+#, python-format
+msgid "Detach volume %s from mountpoint %s on instance %s"
+msgstr ""
+
+#: nova/compute/manager.py:497
+#, python-format
+msgid "Detaching volume from unknown instance %s"
+msgstr ""
+
+#: nova/compute/monitor.py:259
+#, python-format
+msgid "updating %s..."
+msgstr ""
+
+#: nova/compute/monitor.py:289
+msgid "unexpected error during update"
+msgstr ""
+
+#: nova/compute/monitor.py:355
+#, python-format
+msgid "Cannot get blockstats for \"%s\" on \"%s\""
+msgstr ""
+
+#: nova/compute/monitor.py:377
+#, python-format
+msgid "Cannot get ifstats for \"%s\" on \"%s\""
+msgstr ""
+
+#: nova/compute/monitor.py:412
+msgid "unexpected exception getting connection"
+msgstr ""
+
+#: nova/compute/monitor.py:427
+#, python-format
+msgid "Found instance: %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:43
+msgid "Use of empty request context is deprecated"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:132
+#, python-format
+msgid "No service for id %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:229
+#, python-format
+msgid "No service for %s, %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:574
+#, python-format
+msgid "No floating ip for address %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:668
+#, python-format
+msgid "No instance for id %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598
+#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103
+#, python-format
+msgid "Instance %s not found"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:891
+#, python-format
+msgid "no keypair for user %s, name %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064
+#, python-format
+msgid "No network for id %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:1036
+#, python-format
+msgid "No network for bridge %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:1050
+#, python-format
+msgid "No network for instance %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:1180
+#, python-format
+msgid "Token %s does not exist"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:1205
+#, python-format
+msgid "No quota for project_id %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:1356
+#, python-format
+msgid "No volume for id %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:1401
+#, python-format
+msgid "Volume %s not found"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:1413
+#, python-format
+msgid "No export device found for volume %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:1426
+#, python-format
+msgid "No target id found for volume %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:1471
+#, python-format
+msgid "No security group with id %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:1488
+#, python-format
+msgid "No security group named %s for project: %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:1576
+#, python-format
+msgid "No secuity group rule with id %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:1650
+#, python-format
+msgid "No user for id %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:1666
+#, python-format
+msgid "No user for access key %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:1728
+#, python-format
+msgid "No project with id %s"
+msgstr ""
+
+#: nova/image/glance.py:78
+#, python-format
+msgid "Parallax returned HTTP error %d from request for /images"
+msgstr ""
+
+#: nova/image/glance.py:97
+#, python-format
+msgid "Parallax returned HTTP error %d from request for /images/detail"
+msgstr ""
+
+#: nova/image/s3.py:82
+#, python-format
+msgid "Image %s could not be found"
+msgstr ""
+
+#: nova/network/api.py:39
+#, python-format
+msgid "Quota exceeeded for %s, tried to allocate address"
+msgstr ""
+
+#: nova/network/api.py:42
+msgid "Address quota exceeded. You cannot allocate any more addresses"
+msgstr ""
+
+#: nova/network/linux_net.py:176
+#, python-format
+msgid "Starting VLAN inteface %s"
+msgstr ""
+
+#: nova/network/linux_net.py:186
+#, python-format
+msgid "Starting Bridge interface for %s"
+msgstr ""
+
+#: nova/network/linux_net.py:254
+#, python-format
+msgid "Hupping dnsmasq threw %s"
+msgstr ""
+
+#: nova/network/linux_net.py:256
+#, python-format
+msgid "Pid %d is stale, relaunching dnsmasq"
+msgstr ""
+
+#: nova/network/linux_net.py:334
+#, python-format
+msgid "Killing dnsmasq threw %s"
+msgstr ""
+
+#: nova/network/manager.py:135
+msgid "setting network host"
+msgstr ""
+
+#: nova/network/manager.py:190
+#, python-format
+msgid "Leasing IP %s"
+msgstr ""
+
+#: nova/network/manager.py:194
+#, python-format
+msgid "IP %s leased that isn't associated"
+msgstr ""
+
+#: nova/network/manager.py:197
+#, python-format
+msgid "IP %s leased to bad mac %s vs %s"
+msgstr ""
+
+#: nova/network/manager.py:205
+#, python-format
+msgid "IP %s leased that was already deallocated"
+msgstr ""
+
+#: nova/network/manager.py:214
+#, python-format
+msgid "IP %s released that isn't associated"
+msgstr ""
+
+#: nova/network/manager.py:217
+#, python-format
+msgid "IP %s released from bad mac %s vs %s"
+msgstr ""
+
+#: nova/network/manager.py:220
+#, python-format
+msgid "IP %s released that was not leased"
+msgstr ""
+
+#: nova/network/manager.py:442
+#, python-format
+msgid "Dissassociated %s stale fixed ip(s)"
+msgstr ""
+
+#: nova/objectstore/handler.py:106
+#, python-format
+msgid "Unknown S3 value type %r"
+msgstr ""
+
+#: nova/objectstore/handler.py:137
+msgid "Authenticated request"
+msgstr ""
+
+#: nova/objectstore/handler.py:182
+msgid "List of buckets requested"
+msgstr ""
+
+#: nova/objectstore/handler.py:209
+#, python-format
+msgid "List keys for bucket %s"
+msgstr ""
+
+#: nova/objectstore/handler.py:217
+#, python-format
+msgid "Unauthorized attempt to access bucket %s"
+msgstr ""
+
+#: nova/objectstore/handler.py:235
+#, python-format
+msgid "Creating bucket %s"
+msgstr ""
+
+#: nova/objectstore/handler.py:245
+#, python-format
+msgid "Deleting bucket %s"
+msgstr ""
+
+#: nova/objectstore/handler.py:249
+#, python-format
+msgid "Unauthorized attempt to delete bucket %s"
+msgstr ""
+
+#: nova/objectstore/handler.py:271
+#, python-format
+msgid "Getting object: %s / %s"
+msgstr ""
+
+#: nova/objectstore/handler.py:274
+#, python-format
+msgid "Unauthorized attempt to get object %s from bucket %s"
+msgstr ""
+
+#: nova/objectstore/handler.py:292
+#, python-format
+msgid "Putting object: %s / %s"
+msgstr ""
+
+#: nova/objectstore/handler.py:295
+#, python-format
+msgid "Unauthorized attempt to upload object %s to bucket %s"
+msgstr ""
+
+#: nova/objectstore/handler.py:314
+#, python-format
+msgid "Deleting object: %s / %s"
+msgstr ""
+
+#: nova/objectstore/handler.py:393
+#, python-format
+msgid "Not authorized to upload image: invalid directory %s"
+msgstr ""
+
+#: nova/objectstore/handler.py:401
+#, python-format
+msgid "Not authorized to upload image: unauthorized bucket %s"
+msgstr ""
+
+#: nova/objectstore/handler.py:406
+#, python-format
+msgid "Starting image upload: %s"
+msgstr ""
+
+#: nova/objectstore/handler.py:420
+#, python-format
+msgid "Not authorized to update attributes of image %s"
+msgstr ""
+
+#: nova/objectstore/handler.py:428
+#, python-format
+msgid "Toggling publicity flag of image %s %r"
+msgstr ""
+
+#: nova/objectstore/handler.py:433
+#, python-format
+msgid "Updating user fields on image %s"
+msgstr ""
+
+#: nova/objectstore/handler.py:447
+#, python-format
+msgid "Unauthorized attempt to delete image %s"
+msgstr ""
+
+#: nova/objectstore/handler.py:452
+#, python-format
+msgid "Deleted image: %s"
+msgstr ""
+
+#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73
+#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118
+msgid "No hosts found"
+msgstr ""
+
+#: nova/scheduler/driver.py:66
+msgid "Must implement a fallback schedule"
+msgstr ""
+
+#: nova/scheduler/manager.py:69
+#, python-format
+msgid "Casting to %s %s for %s"
+msgstr ""
+
+#: nova/scheduler/simple.py:63
+msgid "All hosts have too many cores"
+msgstr ""
+
+#: nova/scheduler/simple.py:95
+msgid "All hosts have too many gigabytes"
+msgstr ""
+
+#: nova/scheduler/simple.py:115
+msgid "All hosts have too many networks"
+msgstr ""
+
+#: nova/tests/test_cloud.py:198
+msgid "Can't test instances without a real virtual env."
+msgstr ""
+
+#: nova/tests/test_cloud.py:210
+#, python-format
+msgid "Need to watch instance %s until it's running..."
+msgstr ""
+
+#: nova/tests/test_compute.py:104
+#, python-format
+msgid "Running instances: %s"
+msgstr ""
+
+#: nova/tests/test_compute.py:110
+#, python-format
+msgid "After terminating instances: %s"
+msgstr ""
+
+#: nova/tests/test_rpc.py:89
+#, python-format
+msgid "Nested received %s, %s"
+msgstr ""
+
+#: nova/tests/test_rpc.py:94
+#, python-format
+msgid "Nested return %s"
+msgstr ""
+
+#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125
+#, python-format
+msgid "Received %s"
+msgstr ""
+
+#: nova/tests/test_volume.py:162
+#, python-format
+msgid "Target %s allocated"
+msgstr ""
+
+#: nova/virt/connection.py:73
+msgid "Failed to open connection to the hypervisor"
+msgstr ""
+
+#: nova/virt/fake.py:210
+#, python-format
+msgid "Instance %s Not Found"
+msgstr ""
+
+#: nova/virt/hyperv.py:118
+msgid "In init host"
+msgstr ""
+
+#: nova/virt/hyperv.py:131
+#, python-format
+msgid "Attempt to create duplicate vm %s"
+msgstr ""
+
+#: nova/virt/hyperv.py:148
+#, python-format
+msgid "Starting VM %s "
+msgstr ""
+
+#: nova/virt/hyperv.py:150
+#, python-format
+msgid "Started VM %s "
+msgstr ""
+
+#: nova/virt/hyperv.py:152
+#, python-format
+msgid "spawn vm failed: %s"
+msgstr ""
+
+#: nova/virt/hyperv.py:169
+#, python-format
+msgid "Failed to create VM %s"
+msgstr ""
+
+#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125
+#, python-format
+msgid "Created VM %s..."
+msgstr ""
+
+#: nova/virt/hyperv.py:188
+#, python-format
+msgid "Set memory for vm %s..."
+msgstr ""
+
+#: nova/virt/hyperv.py:198
+#, python-format
+msgid "Set vcpus for vm %s..."
+msgstr ""
+
+#: nova/virt/hyperv.py:202
+#, python-format
+msgid "Creating disk for %s by attaching disk file %s"
+msgstr ""
+
+#: nova/virt/hyperv.py:227
+#, python-format
+msgid "Failed to add diskdrive to VM %s"
+msgstr ""
+
+#: nova/virt/hyperv.py:230
+#, python-format
+msgid "New disk drive path is %s"
+msgstr ""
+
+#: nova/virt/hyperv.py:247
+#, python-format
+msgid "Failed to add vhd file to VM %s"
+msgstr ""
+
+#: nova/virt/hyperv.py:249
+#, python-format
+msgid "Created disk for %s"
+msgstr ""
+
+#: nova/virt/hyperv.py:253
+#, python-format
+msgid "Creating nic for %s "
+msgstr ""
+
+#: nova/virt/hyperv.py:272
+msgid "Failed creating a port on the external vswitch"
+msgstr ""
+
+#: nova/virt/hyperv.py:273
+#, python-format
+msgid "Failed creating port for %s"
+msgstr ""
+
+#: nova/virt/hyperv.py:275
+#, python-format
+msgid "Created switch port %s on switch %s"
+msgstr ""
+
+#: nova/virt/hyperv.py:285
+#, python-format
+msgid "Failed to add nic to VM %s"
+msgstr ""
+
+#: nova/virt/hyperv.py:287
+#, python-format
+msgid "Created nic for %s "
+msgstr ""
+
+#: nova/virt/hyperv.py:320
+#, python-format
+msgid "WMI job failed: %s"
+msgstr ""
+
+#: nova/virt/hyperv.py:322
+#, python-format
+msgid "WMI job succeeded: %s, Elapsed=%s "
+msgstr ""
+
+#: nova/virt/hyperv.py:358
+#, python-format
+msgid "Got request to destroy vm %s"
+msgstr ""
+
+#: nova/virt/hyperv.py:383
+#, python-format
+msgid "Failed to destroy vm %s"
+msgstr ""
+
+#: nova/virt/hyperv.py:389
+#, python-format
+msgid "Del: disk %s vm %s"
+msgstr ""
+
+#: nova/virt/hyperv.py:405
+#, python-format
+msgid ""
+"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, "
+"cpu_time=%s"
+msgstr ""
+
+#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301
+#, python-format
+msgid "duplicate name found: %s"
+msgstr ""
+
+#: nova/virt/hyperv.py:444
+#, python-format
+msgid "Successfully changed vm state of %s to %s"
+msgstr ""
+
+#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449
+#, python-format
+msgid "Failed to change vm state of %s to %s"
+msgstr ""
+
+#: nova/virt/images.py:70
+#, python-format
+msgid "Finished retreving %s -- placed in %s"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:144
+#, python-format
+msgid "Connecting to libvirt: %s"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:157
+msgid "Connection to libvirt broke"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:229
+#, python-format
+msgid "instance %s: deleting instance files %s"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:271
+#, python-format
+msgid "No disk at %s"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:278
+msgid "Instance snapshotting is not supported for libvirtat this time"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:294
+#, python-format
+msgid "instance %s: rebooted"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:297
+#, python-format
+msgid "_wait_for_reboot failed: %s"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:340
+#, python-format
+msgid "instance %s: rescued"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:343
+#, python-format
+msgid "_wait_for_rescue failed: %s"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:370
+#, python-format
+msgid "instance %s: is running"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:381
+#, python-format
+msgid "instance %s: booted"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116
+#, python-format
+msgid "instance %s: failed to boot"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:395
+#, python-format
+msgid "virsh said: %r"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:399
+msgid "cool, it's a device"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:407
+#, python-format
+msgid "data: %r, fpath: %r"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:415
+#, python-format
+msgid "Contents of file %s: %r"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:449
+#, python-format
+msgid "instance %s: Creating image"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:505
+#, python-format
+msgid "instance %s: injecting key into image %s"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:508
+#, python-format
+msgid "instance %s: injecting net into image %s"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:516
+#, python-format
+msgid "instance %s: ignoring error injecting data into image %s (%s)"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547
+#, python-format
+msgid "instance %s: starting toXML method"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:589
+#, python-format
+msgid "instance %s: finished toXML method"
+msgstr ""
+
+#: nova/virt/xenapi_conn.py:113
+msgid ""
+"Must specify xenapi_connection_url, xenapi_connection_username "
+"(optionally), and xenapi_connection_password to use "
+"connection_type=xenapi"
+msgstr ""
+
+#: nova/virt/xenapi_conn.py:263
+#, python-format
+msgid "Task [%s] %s status: success %s"
+msgstr ""
+
+#: nova/virt/xenapi_conn.py:271
+#, python-format
+msgid "Task [%s] %s status: %s %s"
+msgstr ""
+
+#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300
+#, python-format
+msgid "Got exception: %s"
+msgstr ""
+
+#: nova/virt/xenapi/fake.py:72
+#, python-format
+msgid "%s: _db_content => %s"
+msgstr ""
+
+#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338
+#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404
+msgid "Raising NotImplemented"
+msgstr ""
+
+#: nova/virt/xenapi/fake.py:249
+#, python-format
+msgid "xenapi.fake does not have an implementation for %s"
+msgstr ""
+
+#: nova/virt/xenapi/fake.py:283
+#, python-format
+msgid "Calling %s %s"
+msgstr ""
+
+#: nova/virt/xenapi/fake.py:288
+#, python-format
+msgid "Calling getter %s"
+msgstr ""
+
+#: nova/virt/xenapi/fake.py:340
+#, python-format
+msgid ""
+"xenapi.fake does not have an implementation for %s or it has been called "
+"with the wrong number of arguments"
+msgstr ""
+
+#: nova/virt/xenapi/network_utils.py:40
+#, python-format
+msgid "Found non-unique network for bridge %s"
+msgstr ""
+
+#: nova/virt/xenapi/network_utils.py:43
+#, python-format
+msgid "Found no network for bridge %s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:127
+#, python-format
+msgid "Created VM %s as %s."
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:147
+#, python-format
+msgid "Creating VBD for VM %s, VDI %s ... "
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:149
+#, python-format
+msgid "Created VBD %s for VM %s, VDI %s."
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:165
+#, python-format
+msgid "VBD not found in instance %s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:175
+#, python-format
+msgid "Unable to unplug VBD %s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:187
+#, python-format
+msgid "Unable to destroy VBD %s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:202
+#, python-format
+msgid "Creating VIF for VM %s, network %s."
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:205
+#, python-format
+msgid "Created VIF %s for VM %s, network %s."
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:216
+#, python-format
+msgid "Snapshotting VM %s with label '%s'..."
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:229
+#, python-format
+msgid "Created snapshot %s from VM %s."
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:243
+#, python-format
+msgid "Asking xapi to upload %s as '%s'"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:261
+#, python-format
+msgid "Asking xapi to fetch %s as %s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:279
+#, python-format
+msgid "Looking up vdi %s for PV kernel"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:290
+#, python-format
+msgid "PV Kernel in VDI:%d"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:318
+#, python-format
+msgid "VDI %s is still available"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:331
+#, python-format
+msgid "(VM_UTILS) xenserver vm state -> |%s|"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:333
+#, python-format
+msgid "(VM_UTILS) xenapi power_state -> |%s|"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:390
+#, python-format
+msgid "VHD %s has parent %s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:407
+#, python-format
+msgid "Re-scanning SR %s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:431
+#, python-format
+msgid "Parent %s doesn't match original parent %s, waiting for coalesce..."
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:448
+#, python-format
+msgid "No VDIs found for VM %s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:452
+#, python-format
+msgid "Unexpected number of VDIs (%s) found for VM %s"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:62
+#, python-format
+msgid "Attempted to create non-unique name %s"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:99
+#, python-format
+msgid "Starting VM %s..."
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:101
+#, python-format
+msgid "Spawning VM %s created %s."
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:112
+#, python-format
+msgid "Instance %s: booted"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:137
+#, python-format
+msgid "Instance not present %s"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:166
+#, python-format
+msgid "Starting snapshot for VM %s"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:174
+#, python-format
+msgid "Unable to Snapshot %s: %s"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:184
+#, python-format
+msgid "Finished snapshot and upload for VM %s"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:252
+#, python-format
+msgid "suspend: instance not present %s"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:262
+#, python-format
+msgid "resume: instance not present %s"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:271
+#, python-format
+msgid "Instance not found %s"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:57
+#, python-format
+msgid "Introducing %s..."
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:74
+#, python-format
+msgid "Introduced %s as %s."
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:78
+msgid "Unable to create Storage Repository"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:90
+#, python-format
+msgid "Unable to find SR from VBD %s"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:96
+#, python-format
+msgid "Forgetting SR %s ... "
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:101
+#, python-format
+msgid "Ignoring exception %s when getting PBDs for %s"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:107
+#, python-format
+msgid "Ignoring exception %s when unplugging PBD %s"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:111
+#, python-format
+msgid "Forgetting SR %s done."
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:113
+#, python-format
+msgid "Ignoring exception %s when forgetting SR %s"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:123
+#, python-format
+msgid "Unable to introduce VDI on SR %s"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:128
+#, python-format
+msgid "Unable to get record of VDI %s on"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:146
+#, python-format
+msgid "Unable to introduce VDI for SR %s"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:175
+#, python-format
+msgid "Unable to obtain target information %s, %s"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:197
+#, python-format
+msgid "Mountpoint cannot be translated: %s"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:51
+#, python-format
+msgid "Attach_volume: %s, %s, %s"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:69
+#, python-format
+msgid "Unable to create VDI on SR %s for instance %s"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:81
+#, python-format
+msgid "Unable to use SR %s for instance %s"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:93
+#, python-format
+msgid "Unable to attach volume to instance %s"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:95
+#, python-format
+msgid "Mountpoint %s attached to instance %s"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:106
+#, python-format
+msgid "Detach_volume: %s, %s"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:113
+#, python-format
+msgid "Unable to locate volume %s"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:121
+#, python-format
+msgid "Unable to detach volume %s"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:128
+#, python-format
+msgid "Mountpoint %s detached from instance %s"
+msgstr ""
+
+#: nova/volume/api.py:44
+#, python-format
+msgid "Quota exceeeded for %s, tried to create %sG volume"
+msgstr ""
+
+#: nova/volume/api.py:46
+#, python-format
+msgid "Volume quota exceeded. You cannot create a volume of size %s"
+msgstr ""
+
+#: nova/volume/api.py:70 nova/volume/api.py:95
+msgid "Volume status must be available"
+msgstr ""
+
+#: nova/volume/api.py:97
+msgid "Volume is already attached"
+msgstr ""
+
+#: nova/volume/api.py:103
+msgid "Volume is already detached"
+msgstr ""
+
+#: nova/volume/driver.py:76
+#, python-format
+msgid "Recovering from a failed execute. Try number %s"
+msgstr ""
+
+#: nova/volume/driver.py:85
+#, python-format
+msgid "volume group %s doesn't exist"
+msgstr ""
+
+#: nova/volume/driver.py:210
+#, python-format
+msgid "FAKE AOE: %s"
+msgstr ""
+
+#: nova/volume/driver.py:315
+#, python-format
+msgid "FAKE ISCSI: %s"
+msgstr ""
+
+#: nova/volume/manager.py:85
+#, python-format
+msgid "Re-exporting %s volumes"
+msgstr ""
+
+#: nova/volume/manager.py:93
+#, python-format
+msgid "volume %s: creating"
+msgstr ""
+
+#: nova/volume/manager.py:102
+#, python-format
+msgid "volume %s: creating lv of size %sG"
+msgstr ""
+
+#: nova/volume/manager.py:106
+#, python-format
+msgid "volume %s: creating export"
+msgstr ""
+
+#: nova/volume/manager.py:113
+#, python-format
+msgid "volume %s: created successfully"
+msgstr ""
+
+#: nova/volume/manager.py:121
+msgid "Volume is still attached"
+msgstr ""
+
+#: nova/volume/manager.py:123
+msgid "Volume is not local to this node"
+msgstr ""
+
+#: nova/volume/manager.py:124
+#, python-format
+msgid "volume %s: removing export"
+msgstr ""
+
+#: nova/volume/manager.py:126
+#, python-format
+msgid "volume %s: deleting"
+msgstr ""
+
+#: nova/volume/manager.py:129
+#, python-format
+msgid "volume %s: deleted successfully"
+msgstr ""
+
diff --git a/nova/adminclient.py b/nova/adminclient.py
index 6ae9f0c0f..b2609c8c4 100644
--- a/nova/adminclient.py
+++ b/nova/adminclient.py
@@ -23,12 +23,9 @@ import base64
import boto
import httplib
-from nova import flags
from boto.ec2.regioninfo import RegionInfo
-FLAGS = flags.FLAGS
-
DEFAULT_CLC_URL = 'http://127.0.0.1:8773'
DEFAULT_REGION = 'nova'
@@ -199,8 +196,8 @@ class NovaAdminClient(object):
self,
clc_url=DEFAULT_CLC_URL,
region=DEFAULT_REGION,
- access_key=FLAGS.aws_access_key_id,
- secret_key=FLAGS.aws_secret_access_key,
+ access_key=None,
+ secret_key=None,
**kwargs):
parts = self.split_clc_url(clc_url)
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index 0836c3411..093515cb4 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -20,7 +20,8 @@ Starting point for routing EC2 requests.
"""
-import logging
+import datetime
+import routes
import webob
import webob.dec
import webob.exc
@@ -28,6 +29,7 @@ import webob.exc
from nova import context
from nova import exception
from nova import flags
+from nova import log as logging
from nova import utils
from nova import wsgi
from nova.api.ec2 import apirequest
@@ -35,6 +37,7 @@ from nova.auth import manager
FLAGS = flags.FLAGS
+LOG = logging.getLogger("nova.api")
flags.DEFINE_boolean('use_forwarded_for', False,
'Treat X-Forwarded-For as the canonical remote address. '
'Only enable this if you have a sanitizing proxy.')
@@ -48,8 +51,42 @@ flags.DEFINE_list('lockout_memcached_servers', None,
'Memcached servers or None for in process cache.')
-_log = logging.getLogger("api")
-_log.setLevel(logging.DEBUG)
+class RequestLogging(wsgi.Middleware):
+ """Access-Log akin logging for all EC2 API requests."""
+
+ @webob.dec.wsgify
+ def __call__(self, req):
+ rv = req.get_response(self.application)
+ self.log_request_completion(rv, req)
+ return rv
+
+ def log_request_completion(self, response, request):
+ controller = request.environ.get('ec2.controller', None)
+ if controller:
+ controller = controller.__class__.__name__
+ action = request.environ.get('ec2.action', None)
+ ctxt = request.environ.get('ec2.context', None)
+ seconds = 'X'
+ microseconds = 'X'
+ if ctxt:
+ delta = datetime.datetime.utcnow() - \
+ ctxt.timestamp
+ seconds = delta.seconds
+ microseconds = delta.microseconds
+ LOG.info(
+ "%s.%ss %s %s %s %s:%s %s [%s] %s %s",
+ seconds,
+ microseconds,
+ request.remote_addr,
+ request.method,
+ request.path_info,
+ controller,
+ action,
+ response.status_int,
+ request.user_agent,
+ request.content_type,
+ response.content_type,
+ context=ctxt)
class Lockout(wsgi.Middleware):
@@ -85,7 +122,7 @@ class Lockout(wsgi.Middleware):
failures_key = "authfailures-%s" % access_key
failures = int(self.mc.get(failures_key) or 0)
if failures >= FLAGS.lockout_attempts:
- detail = "Too many failed authentications."
+ detail = _("Too many failed authentications.")
raise webob.exc.HTTPForbidden(detail=detail)
res = req.get_response(self.application)
if res.status_int == 403:
@@ -94,9 +131,9 @@ class Lockout(wsgi.Middleware):
# NOTE(vish): To use incr, failures has to be a string.
self.mc.set(failures_key, '1', time=FLAGS.lockout_window * 60)
elif failures >= FLAGS.lockout_attempts:
- _log.warn('Access key %s has had %d failed authentications'
- ' and will be locked out for %d minutes.' %
- (access_key, failures, FLAGS.lockout_minutes))
+ LOG.warn(_('Access key %s has had %d failed authentications'
+ ' and will be locked out for %d minutes.'),
+ access_key, failures, FLAGS.lockout_minutes)
self.mc.set(failures_key, str(failures),
time=FLAGS.lockout_minutes * 60)
return res
@@ -129,8 +166,9 @@ class Authenticate(wsgi.Middleware):
req.method,
req.host,
req.path)
- except exception.Error, ex:
- logging.debug(_("Authentication Failure: %s") % ex)
+ # Be explicit for what exceptions are 403, the rest bubble as 500
+ except (exception.NotFound, exception.NotAuthorized) as ex:
+ LOG.audit(_("Authentication Failure: %s"), str(ex))
raise webob.exc.HTTPForbidden()
# Authenticated!
@@ -141,6 +179,8 @@ class Authenticate(wsgi.Middleware):
project=project,
remote_address=remote_address)
req.environ['ec2.context'] = ctxt
+        LOG.audit(_('Authenticated Request For %s:%s'), user.name,
+                  project.name, context=req.environ['ec2.context'])
return self.application
@@ -163,6 +203,12 @@ class Requestify(wsgi.Middleware):
args.pop(non_arg)
except:
raise webob.exc.HTTPBadRequest()
+
+ LOG.debug(_('action: %s'), action)
+ for key, value in args.items():
+ LOG.debug(_('arg: %s\t\tval: %s'), key, value)
+
+ # Success!
api_request = apirequest.APIRequest(self.controller, action, args)
req.environ['ec2.request'] = api_request
req.environ['ec2.action_args'] = args
@@ -231,6 +277,9 @@ class Authorizer(wsgi.Middleware):
if self._matches_any_role(context, allowed_roles):
return self.application
else:
+ LOG.audit(_("Unauthorized request for controller=%s "
+ "and action=%s"), controller_name, action,
+ context=context)
raise webob.exc.HTTPUnauthorized()
def _matches_any_role(self, context, roles):
@@ -261,14 +310,24 @@ class Executor(wsgi.Application):
result = None
try:
result = api_request.invoke(context)
+ except exception.NotFound as ex:
+ LOG.info(_('NotFound raised: %s'), str(ex), context=context)
+ return self._error(req, context, type(ex).__name__, str(ex))
except exception.ApiError as ex:
+ LOG.exception(_('ApiError raised: %s'), str(ex), context=context)
if ex.code:
- return self._error(req, ex.code, ex.message)
+ return self._error(req, context, ex.code, str(ex))
else:
- return self._error(req, type(ex).__name__, ex.message)
- # TODO(vish): do something more useful with unknown exceptions
+ return self._error(req, context, type(ex).__name__, str(ex))
except Exception as ex:
- return self._error(req, type(ex).__name__, str(ex))
+ extra = {'environment': req.environ}
+ LOG.exception(_('Unexpected error raised: %s'), str(ex),
+ extra=extra, context=context)
+ return self._error(req,
+ context,
+ 'UnknownError',
+ _('An unknown error has occurred. '
+ 'Please try your request again.'))
else:
resp = webob.Response()
resp.status = 200
@@ -276,15 +335,16 @@ class Executor(wsgi.Application):
resp.body = str(result)
return resp
- def _error(self, req, code, message):
- logging.error("%s: %s", code, message)
+ def _error(self, req, context, code, message):
+ LOG.error("%s: %s", code, message, context=context)
resp = webob.Response()
resp.status = 400
resp.headers['Content-Type'] = 'text/xml'
resp.body = str('<?xml version="1.0"?>\n'
- '<Response><Errors><Error><Code>%s</Code>'
- '<Message>%s</Message></Error></Errors>'
- '<RequestID>?</RequestID></Response>' % (code, message))
+ '<Response><Errors><Error><Code>%s</Code>'
+ '<Message>%s</Message></Error></Errors>'
+ '<RequestID>%s</RequestID></Response>' %
+ (code, message, context.request_id))
return resp
diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py
index fac01369e..758b612e8 100644
--- a/nova/api/ec2/admin.py
+++ b/nova/api/ec2/admin.py
@@ -24,9 +24,13 @@ import base64
from nova import db
from nova import exception
+from nova import log as logging
from nova.auth import manager
+LOG = logging.getLogger('nova.api.ec2.admin')
+
+
def user_dict(user, base64_file=None):
"""Convert the user object to a result dict"""
if user:
@@ -75,17 +79,18 @@ class AdminController(object):
return {'userSet':
[user_dict(u) for u in manager.AuthManager().get_users()]}
- def register_user(self, _context, name, **_kwargs):
+ def register_user(self, context, name, **_kwargs):
"""Creates a new user, and returns generated credentials."""
+ LOG.audit(_("Creating new user: %s"), name, context=context)
return user_dict(manager.AuthManager().create_user(name))
- def deregister_user(self, _context, name, **_kwargs):
+ def deregister_user(self, context, name, **_kwargs):
"""Deletes a single user (NOT undoable.)
Should throw an exception if the user has instances,
volumes, or buckets remaining.
"""
+ LOG.audit(_("Deleting user: %s"), name, context=context)
manager.AuthManager().delete_user(name)
-
return True
def describe_roles(self, context, project_roles=True, **kwargs):
@@ -105,15 +110,27 @@ class AdminController(object):
operation='add', **kwargs):
"""Add or remove a role for a user and project."""
if operation == 'add':
+ if project:
+ LOG.audit(_("Adding role %s to user %s for project %s"), role,
+ user, project, context=context)
+ else:
+ LOG.audit(_("Adding sitewide role %s to user %s"), role, user,
+ context=context)
manager.AuthManager().add_role(user, role, project)
elif operation == 'remove':
+ if project:
+ LOG.audit(_("Removing role %s from user %s for project %s"),
+ role, user, project, context=context)
+ else:
+ LOG.audit(_("Removing sitewide role %s from user %s"), role,
+ user, context=context)
manager.AuthManager().remove_role(user, role, project)
else:
- raise exception.ApiError('operation must be add or remove')
+ raise exception.ApiError(_('operation must be add or remove'))
return True
- def generate_x509_for_user(self, _context, name, project=None, **kwargs):
+ def generate_x509_for_user(self, context, name, project=None, **kwargs):
"""Generates and returns an x509 certificate for a single user.
Is usually called from a client that will wrap this with
access and secret key info, and return a zip file.
@@ -122,6 +139,8 @@ class AdminController(object):
project = name
project = manager.AuthManager().get_project(project)
user = manager.AuthManager().get_user(name)
+ LOG.audit(_("Getting x509 for user: %s on project: %s"), name,
+ project, context=context)
return user_dict(user, base64.b64encode(project.get_credentials(user)))
def describe_project(self, context, name, **kwargs):
@@ -137,6 +156,8 @@ class AdminController(object):
def register_project(self, context, name, manager_user, description=None,
member_users=None, **kwargs):
"""Creates a new project"""
+ LOG.audit(_("Create project %s managed by %s"), name, manager_user,
+ context=context)
return project_dict(
manager.AuthManager().create_project(
name,
@@ -146,6 +167,7 @@ class AdminController(object):
def deregister_project(self, context, name):
"""Permanently deletes a project."""
+ LOG.audit(_("Delete project: %s"), name, context=context)
manager.AuthManager().delete_project(name)
return True
@@ -159,11 +181,15 @@ class AdminController(object):
**kwargs):
"""Add or remove a user from a project."""
if operation == 'add':
+ LOG.audit(_("Adding user %s to project %s"), user, project,
+ context=context)
manager.AuthManager().add_to_project(user, project)
elif operation == 'remove':
+ LOG.audit(_("Removing user %s from project %s"), user, project,
+ context=context)
manager.AuthManager().remove_from_project(user, project)
else:
- raise exception.ApiError('operation must be add or remove')
+ raise exception.ApiError(_('operation must be add or remove'))
return True
# FIXME(vish): these host commands don't work yet, perhaps some of the
diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py
index 8a1dd3978..78576470a 100644
--- a/nova/api/ec2/apirequest.py
+++ b/nova/api/ec2/apirequest.py
@@ -20,13 +20,13 @@
APIRequest class
"""
-import logging
import re
# TODO(termie): replace minidom with etree
from xml.dom import minidom
-_log = logging.getLogger("api")
-_log.setLevel(logging.DEBUG)
+from nova import log as logging
+
+LOG = logging.getLogger("nova.api.request")
_c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
@@ -95,7 +95,7 @@ class APIRequest(object):
except AttributeError:
_error = _('Unsupported API request: controller = %s,'
'action = %s') % (self.controller, self.action)
- _log.warning(_error)
+ LOG.exception(_error)
# TODO: Raise custom exception, trap in apiserver,
# and reraise as 400 error.
raise Exception(_error)
@@ -143,7 +143,7 @@ class APIRequest(object):
response = xml.toxml()
xml.unlink()
- _log.debug(response)
+ LOG.debug(response)
return response
def _render_dict(self, xml, el, data):
@@ -152,7 +152,7 @@ class APIRequest(object):
val = data[key]
el.appendChild(self._render_data(xml, key, val))
except:
- _log.debug(data)
+ LOG.debug(data)
raise
def _render_data(self, xml, el_name, data):
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 9fb6307a8..832426b94 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -24,26 +24,27 @@ datastore.
import base64
import datetime
-import logging
-import re
+import IPy
import os
+from nova import compute
from nova import context
-import IPy
-
from nova import crypto
from nova import db
from nova import exception
from nova import flags
-from nova import quota
+from nova import log as logging
+from nova import network
from nova import rpc
from nova import utils
-from nova.compute import api as compute_api
+from nova import volume
from nova.compute import instance_types
FLAGS = flags.FLAGS
-flags.DECLARE('storage_availability_zone', 'nova.volume.manager')
+flags.DECLARE('service_down_time', 'nova.scheduler.driver')
+
+LOG = logging.getLogger("nova.api.cloud")
InvalidInputException = exception.InvalidInputException
@@ -71,16 +72,16 @@ def _gen_key(context, user_id, key_name):
return {'private_key': private_key, 'fingerprint': fingerprint}
-def ec2_id_to_internal_id(ec2_id):
- """Convert an ec2 ID (i-[base 36 number]) to an internal id (int)"""
+def ec2_id_to_id(ec2_id):
+ """Convert an ec2 ID (i-[base 36 number]) to an instance id (int)"""
return int(ec2_id[2:], 36)
-def internal_id_to_ec2_id(internal_id):
- """Convert an internal ID (int) to an ec2 ID (i-[base 36 number])"""
+def id_to_ec2_id(instance_id):
+ """Convert an instance ID (int) to an ec2 ID (i-[base 36 number])"""
digits = []
- while internal_id != 0:
- internal_id, remainder = divmod(internal_id, 36)
+ while instance_id != 0:
+ instance_id, remainder = divmod(instance_id, 36)
digits.append('0123456789abcdefghijklmnopqrstuvwxyz'[remainder])
return "i-%s" % ''.join(reversed(digits))
@@ -91,10 +92,11 @@ class CloudController(object):
sent to the other nodes.
"""
def __init__(self):
- self.network_manager = utils.import_object(FLAGS.network_manager)
self.image_service = utils.import_object(FLAGS.image_service)
- self.compute_api = compute_api.ComputeAPI(self.network_manager,
- self.image_service)
+ self.network_api = network.API()
+ self.volume_api = volume.API()
+ self.compute_api = compute.API(self.image_service, self.network_api,
+ self.volume_api)
self.setup()
def __str__(self):
@@ -118,7 +120,8 @@ class CloudController(object):
def _get_mpi_data(self, context, project_id):
result = {}
- for instance in self.compute_api.get_instances(context, project_id):
+ for instance in self.compute_api.get_all(context,
+ project_id=project_id):
if instance['fixed_ip']:
line = '%s slots=%d' % (instance['fixed_ip']['address'],
instance['vcpus'])
@@ -138,9 +141,15 @@ class CloudController(object):
{"method": "refresh_security_group",
"args": {"security_group_id": security_group.id}})
+ def _get_availability_zone_by_host(self, context, host):
+ services = db.service_get_all_by_host(context, host)
+ if len(services) > 0:
+ return services[0]['availability_zone']
+ return 'unknown zone'
+
def get_metadata(self, address):
ctxt = context.get_admin_context()
- instance_ref = db.fixed_ip_get_instance(ctxt, address)
+ instance_ref = self.compute_api.get_all(ctxt, fixed_ip=address)
if instance_ref is None:
return None
mpi = self._get_mpi_data(ctxt, instance_ref['project_id'])
@@ -150,9 +159,11 @@ class CloudController(object):
else:
keys = ''
hostname = instance_ref['hostname']
+ host = instance_ref['host']
+ availability_zone = self._get_availability_zone_by_host(ctxt, host)
floating_ip = db.instance_get_floating_address(ctxt,
instance_ref['id'])
- ec2_id = internal_id_to_ec2_id(instance_ref['internal_id'])
+ ec2_id = id_to_ec2_id(instance_ref['id'])
data = {
'user-data': base64.b64decode(instance_ref['user_data']),
'meta-data': {
@@ -172,8 +183,7 @@ class CloudController(object):
'local-hostname': hostname,
'local-ipv4': address,
'kernel-id': instance_ref['kernel_id'],
- # TODO(vish): real zone
- 'placement': {'availability-zone': 'nova'},
+ 'placement': {'availability-zone': availability_zone},
'public-hostname': hostname,
'public-ipv4': floating_ip or '',
'public-keys': keys,
@@ -197,15 +207,33 @@ class CloudController(object):
return self._describe_availability_zones(context, **kwargs)
def _describe_availability_zones(self, context, **kwargs):
- return {'availabilityZoneInfo': [{'zoneName': 'nova',
- 'zoneState': 'available'}]}
+ enabled_services = db.service_get_all(context)
+ disabled_services = db.service_get_all(context, True)
+ available_zones = []
+ for zone in [service.availability_zone for service
+ in enabled_services]:
+ if not zone in available_zones:
+ available_zones.append(zone)
+ not_available_zones = []
+ for zone in [service.availability_zone for service in disabled_services
+ if not service['availability_zone'] in available_zones]:
+ if not zone in not_available_zones:
+ not_available_zones.append(zone)
+ result = []
+ for zone in available_zones:
+ result.append({'zoneName': zone,
+ 'zoneState': "available"})
+ for zone in not_available_zones:
+ result.append({'zoneName': zone,
+ 'zoneState': "not available"})
+ return {'availabilityZoneInfo': result}
def _describe_availability_zones_verbose(self, context, **kwargs):
rv = {'availabilityZoneInfo': [{'zoneName': 'nova',
'zoneState': 'available'}]}
services = db.service_get_all(context)
- now = db.get_time()
+ now = datetime.datetime.utcnow()
hosts = []
for host in [service['host'] for service in services]:
if not host in hosts:
@@ -245,6 +273,7 @@ class CloudController(object):
FLAGS.cc_host,
FLAGS.cc_port,
FLAGS.ec2_suffix)}]
+ return {'regionInfo': regions}
def describe_snapshots(self,
context,
@@ -280,6 +309,7 @@ class CloudController(object):
return {'keypairsSet': result}
def create_key_pair(self, context, key_name, **kwargs):
+ LOG.audit(_("Create key pair %s"), key_name, context=context)
data = _gen_key(context, context.user.id, key_name)
return {'keyName': key_name,
'keyFingerprint': data['fingerprint'],
@@ -287,6 +317,7 @@ class CloudController(object):
# TODO(vish): when context is no longer an object, pass it here
def delete_key_pair(self, context, key_name, **kwargs):
+ LOG.audit(_("Delete key pair %s"), key_name, context=context)
try:
db.key_pair_destroy(context, context.user.id, key_name)
except exception.NotFound:
@@ -393,6 +424,8 @@ class CloudController(object):
return False
def revoke_security_group_ingress(self, context, group_name, **kwargs):
+ LOG.audit(_("Revoke security group ingress %s"), group_name,
+ context=context)
self.compute_api.ensure_default_security_group(context)
security_group = db.security_group_get_by_name(context,
context.project_id,
@@ -400,8 +433,8 @@ class CloudController(object):
criteria = self._revoke_rule_args_to_dict(context, **kwargs)
if criteria == None:
- raise exception.ApiError(_("No rule for the specified "
- "parameters."))
+ raise exception.ApiError(_("Not enough parameters to build a "
+ "valid rule."))
for rule in security_group.rules:
match = True
@@ -410,7 +443,8 @@ class CloudController(object):
match = False
if match:
db.security_group_rule_destroy(context, rule['id'])
- self._trigger_refresh_security_group(context, security_group)
+ self.compute_api.trigger_security_group_rules_refresh(context,
+ security_group['id'])
return True
raise exception.ApiError(_("No rule for the specified parameters."))
@@ -419,12 +453,17 @@ class CloudController(object):
# for these operations, so support for newer API versions
# is sketchy.
def authorize_security_group_ingress(self, context, group_name, **kwargs):
+ LOG.audit(_("Authorize security group ingress %s"), group_name,
+ context=context)
self.compute_api.ensure_default_security_group(context)
security_group = db.security_group_get_by_name(context,
context.project_id,
group_name)
values = self._revoke_rule_args_to_dict(context, **kwargs)
+ if values is None:
+ raise exception.ApiError(_("Not enough parameters to build a "
+ "valid rule."))
values['parent_group_id'] = security_group.id
if self._security_group_rule_exists(security_group, values):
@@ -433,7 +472,8 @@ class CloudController(object):
security_group_rule = db.security_group_rule_create(context, values)
- self._trigger_refresh_security_group(context, security_group)
+ self.compute_api.trigger_security_group_rules_refresh(context,
+ security_group['id'])
return True
@@ -455,6 +495,7 @@ class CloudController(object):
return source_project_id
def create_security_group(self, context, group_name, group_description):
+ LOG.audit(_("Create Security Group %s"), group_name, context=context)
self.compute_api.ensure_default_security_group(context)
if db.security_group_exists(context, context.project_id, group_name):
raise exception.ApiError(_('group %s already exists') % group_name)
@@ -469,6 +510,7 @@ class CloudController(object):
group_ref)]}
def delete_security_group(self, context, group_name, **kwargs):
+ LOG.audit(_("Delete security group %s"), group_name, context=context)
security_group = db.security_group_get_by_name(context,
context.project_id,
group_name)
@@ -476,10 +518,12 @@ class CloudController(object):
return True
def get_console_output(self, context, instance_id, **kwargs):
+ LOG.audit(_("Get console output for instance %s"), instance_id,
+ context=context)
# instance_id is passed in as a list of instances
ec2_id = instance_id[0]
- internal_id = ec2_id_to_internal_id(ec2_id)
- instance_ref = self.compute_api.get_instance(context, internal_id)
+ instance_id = ec2_id_to_id(ec2_id)
+ instance_ref = self.compute_api.get(context, instance_id)
output = rpc.call(context,
'%s.%s' % (FLAGS.compute_topic,
instance_ref['host']),
@@ -491,28 +535,28 @@ class CloudController(object):
"Timestamp": now,
"output": base64.b64encode(output)}
- def describe_volumes(self, context, volume_id=None, **kwargs):
- if context.user.is_admin():
- volumes = db.volume_get_all(context)
- else:
- volumes = db.volume_get_all_by_project(context, context.project_id)
+ def get_ajax_console(self, context, instance_id, **kwargs):
+ ec2_id = instance_id[0]
+ internal_id = ec2_id_to_id(ec2_id)
+ return self.compute_api.get_ajax_console(context, internal_id)
+ def describe_volumes(self, context, volume_id=None, **kwargs):
+ volumes = self.volume_api.get_all(context)
# NOTE(vish): volume_id is an optional list of volume ids to filter by.
volumes = [self._format_volume(context, v) for v in volumes
- if volume_id is None or v['ec2_id'] in volume_id]
-
+ if volume_id is None or v['id'] in volume_id]
return {'volumeSet': volumes}
def _format_volume(self, context, volume):
instance_ec2_id = None
instance_data = None
if volume.get('instance', None):
- internal_id = volume['instance']['internal_id']
- instance_ec2_id = internal_id_to_ec2_id(internal_id)
+ instance_id = volume['instance']['id']
+ instance_ec2_id = id_to_ec2_id(instance_id)
instance_data = '%s[%s]' % (instance_ec2_id,
volume['instance']['host'])
v = {}
- v['volumeId'] = volume['ec2_id']
+ v['volumeId'] = volume['id']
v['status'] = volume['status']
v['size'] = volume['size']
v['availabilityZone'] = volume['availability_zone']
@@ -539,88 +583,51 @@ class CloudController(object):
return v
def create_volume(self, context, size, **kwargs):
- # check quota
- if quota.allowed_volumes(context, 1, size) < 1:
- logging.warn("Quota exceeeded for %s, tried to create %sG volume",
- context.project_id, size)
- raise quota.QuotaError("Volume quota exceeded. You cannot "
- "create a volume of size %s" % size)
- vol = {}
- vol['size'] = size
- vol['user_id'] = context.user.id
- vol['project_id'] = context.project_id
- vol['availability_zone'] = FLAGS.storage_availability_zone
- vol['status'] = "creating"
- vol['attach_status'] = "detached"
- vol['display_name'] = kwargs.get('display_name')
- vol['display_description'] = kwargs.get('display_description')
- volume_ref = db.volume_create(context, vol)
-
- rpc.cast(context,
- FLAGS.scheduler_topic,
- {"method": "create_volume",
- "args": {"topic": FLAGS.volume_topic,
- "volume_id": volume_ref['id']}})
-
+ LOG.audit(_("Create volume of %s GB"), size, context=context)
+ volume = self.volume_api.create(context, size,
+ kwargs.get('display_name'),
+ kwargs.get('display_description'))
# TODO(vish): Instance should be None at db layer instead of
# trying to lazy load, but for now we turn it into
# a dict to avoid an error.
-        return {'volumeSet': [self._format_volume(context, dict(volume_ref))]}
+        return {'volumeSet': [self._format_volume(context, dict(volume))]}
+ def delete_volume(self, context, volume_id, **kwargs):
+ self.volume_api.delete(context, volume_id)
+ return True
+
+ def update_volume(self, context, volume_id, **kwargs):
+ updatable_fields = ['display_name', 'display_description']
+ changes = {}
+ for field in updatable_fields:
+ if field in kwargs:
+ changes[field] = kwargs[field]
+ if changes:
+            self.volume_api.update(context, volume_id, changes)
+ return True
+
def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
- volume_ref = db.volume_get_by_ec2_id(context, volume_id)
- if not re.match("^/dev/[a-z]d[a-z]+$", device):
- raise exception.ApiError(_("Invalid device specified: %s. "
- "Example device: /dev/vdb") % device)
- # TODO(vish): abstract status checking?
- if volume_ref['status'] != "available":
- raise exception.ApiError(_("Volume status must be available"))
- if volume_ref['attach_status'] == "attached":
- raise exception.ApiError(_("Volume is already attached"))
- internal_id = ec2_id_to_internal_id(instance_id)
- instance_ref = self.compute_api.get_instance(context, internal_id)
- host = instance_ref['host']
- rpc.cast(context,
- db.queue_get_for(context, FLAGS.compute_topic, host),
- {"method": "attach_volume",
- "args": {"volume_id": volume_ref['id'],
- "instance_id": instance_ref['id'],
- "mountpoint": device}})
- return {'attachTime': volume_ref['attach_time'],
- 'device': volume_ref['mountpoint'],
- 'instanceId': instance_ref['id'],
+        LOG.audit(_("Attach volume %s to instance %s at %s"), volume_id,
+ instance_id, device, context=context)
+ self.compute_api.attach_volume(context, instance_id, volume_id, device)
+ volume = self.volume_api.get(context, volume_id)
+ return {'attachTime': volume['attach_time'],
+ 'device': volume['mountpoint'],
+ 'instanceId': instance_id,
'requestId': context.request_id,
- 'status': volume_ref['attach_status'],
- 'volumeId': volume_ref['id']}
+ 'status': volume['attach_status'],
+ 'volumeId': volume_id}
def detach_volume(self, context, volume_id, **kwargs):
- volume_ref = db.volume_get_by_ec2_id(context, volume_id)
- instance_ref = db.volume_get_instance(context.elevated(),
- volume_ref['id'])
- if not instance_ref:
- raise exception.ApiError(_("Volume isn't attached to anything!"))
- # TODO(vish): abstract status checking?
- if volume_ref['status'] == "available":
- raise exception.ApiError(_("Volume is already detached"))
- try:
- host = instance_ref['host']
- rpc.cast(context,
- db.queue_get_for(context, FLAGS.compute_topic, host),
- {"method": "detach_volume",
- "args": {"instance_id": instance_ref['id'],
- "volume_id": volume_ref['id']}})
- except exception.NotFound:
- # If the instance doesn't exist anymore,
- # then we need to call detach blind
- db.volume_detached(context)
- internal_id = instance_ref['internal_id']
- ec2_id = internal_id_to_ec2_id(internal_id)
- return {'attachTime': volume_ref['attach_time'],
- 'device': volume_ref['mountpoint'],
- 'instanceId': internal_id,
+ LOG.audit(_("Detach volume %s"), volume_id, context=context)
+ volume = self.volume_api.get(context, volume_id)
+ instance = self.compute_api.detach_volume(context, volume_id)
+ return {'attachTime': volume['attach_time'],
+ 'device': volume['mountpoint'],
+ 'instanceId': id_to_ec2_id(instance['id']),
'requestId': context.request_id,
- 'status': volume_ref['attach_status'],
- 'volumeId': volume_ref['id']}
+ 'status': volume['attach_status'],
+ 'volumeId': volume_id}
def _convert_to_set(self, lst, label):
if lst == None or lst == []:
@@ -629,41 +636,32 @@ class CloudController(object):
lst = [lst]
return [{label: x} for x in lst]
- def update_volume(self, context, volume_id, **kwargs):
- updatable_fields = ['display_name', 'display_description']
- changes = {}
- for field in updatable_fields:
- if field in kwargs:
- changes[field] = kwargs[field]
- if changes:
- db.volume_update(context, volume_id, kwargs)
- return True
-
def describe_instances(self, context, **kwargs):
- return self._format_describe_instances(context)
+ return self._format_describe_instances(context, **kwargs)
- def _format_describe_instances(self, context):
- return {'reservationSet': self._format_instances(context)}
+ def _format_describe_instances(self, context, **kwargs):
+ return {'reservationSet': self._format_instances(context, **kwargs)}
def _format_run_instances(self, context, reservation_id):
- i = self._format_instances(context, reservation_id)
+ i = self._format_instances(context, reservation_id=reservation_id)
assert len(i) == 1
return i[0]
- def _format_instances(self, context, reservation_id=None):
+ def _format_instances(self, context, instance_id=None, **kwargs):
reservations = {}
- if reservation_id:
- instances = db.instance_get_all_by_reservation(context,
- reservation_id)
+ # NOTE(vish): instance_id is an optional list of ids to filter by
+ if instance_id:
+ instance_id = [ec2_id_to_id(x) for x in instance_id]
+ instances = [self.compute_api.get(context, x) for x in instance_id]
else:
- instances = self.compute_api.get_instances(context)
+ instances = self.compute_api.get_all(context, **kwargs)
for instance in instances:
if not context.user.is_admin():
if instance['image_id'] == FLAGS.vpn_image_id:
continue
i = {}
- internal_id = instance['internal_id']
- ec2_id = internal_id_to_ec2_id(internal_id)
+ instance_id = instance['id']
+ ec2_id = id_to_ec2_id(instance_id)
i['instanceId'] = ec2_id
i['imageId'] = instance['image_id']
i['instanceState'] = {
@@ -690,6 +688,9 @@ class CloudController(object):
i['amiLaunchIndex'] = instance['launch_index']
i['displayName'] = instance['display_name']
i['displayDescription'] = instance['display_description']
+ host = instance['host']
+ zone = self._get_availability_zone_by_host(context, host)
+ i['placement'] = {'availabilityZone': zone}
if instance['reservation_id'] not in reservations:
r = {}
r['reservationId'] = instance['reservation_id']
@@ -716,8 +717,8 @@ class CloudController(object):
ec2_id = None
if (floating_ip_ref['fixed_ip']
and floating_ip_ref['fixed_ip']['instance']):
- internal_id = floating_ip_ref['fixed_ip']['instance']['ec2_id']
- ec2_id = internal_id_to_ec2_id(internal_id)
+            instance_id = floating_ip_ref['fixed_ip']['instance']['id']
+ ec2_id = id_to_ec2_id(instance_id)
address_rv = {'public_ip': address,
'instance_id': ec2_id}
if context.user.is_admin():
@@ -728,69 +729,30 @@ class CloudController(object):
return {'addressesSet': addresses}
def allocate_address(self, context, **kwargs):
- # check quota
- if quota.allowed_floating_ips(context, 1) < 1:
- logging.warn(_("Quota exceeeded for %s, tried to allocate "
- "address"),
- context.project_id)
- raise quota.QuotaError(_("Address quota exceeded. You cannot "
- "allocate any more addresses"))
- # NOTE(vish): We don't know which network host should get the ip
- # when we allocate, so just send it to any one. This
- # will probably need to move into a network supervisor
- # at some point.
- public_ip = rpc.call(context,
- FLAGS.network_topic,
- {"method": "allocate_floating_ip",
- "args": {"project_id": context.project_id}})
+ LOG.audit(_("Allocate address"), context=context)
+ public_ip = self.network_api.allocate_floating_ip(context)
return {'addressSet': [{'publicIp': public_ip}]}
def release_address(self, context, public_ip, **kwargs):
- floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
- # NOTE(vish): We don't know which network host should get the ip
- # when we deallocate, so just send it to any one. This
- # will probably need to move into a network supervisor
- # at some point.
- rpc.cast(context,
- FLAGS.network_topic,
- {"method": "deallocate_floating_ip",
- "args": {"floating_address": floating_ip_ref['address']}})
+ LOG.audit(_("Release address %s"), public_ip, context=context)
+ self.network_api.release_floating_ip(context, public_ip)
return {'releaseResponse': ["Address released."]}
def associate_address(self, context, instance_id, public_ip, **kwargs):
- internal_id = ec2_id_to_internal_id(instance_id)
- instance_ref = self.compute_api.get_instance(context, internal_id)
- fixed_address = db.instance_get_fixed_address(context,
- instance_ref['id'])
- floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
- # NOTE(vish): Perhaps we should just pass this on to compute and
- # let compute communicate with network.
- network_topic = self.compute_api.get_network_topic(context,
- internal_id)
- rpc.cast(context,
- network_topic,
- {"method": "associate_floating_ip",
- "args": {"floating_address": floating_ip_ref['address'],
- "fixed_address": fixed_address}})
+ LOG.audit(_("Associate address %s to instance %s"), public_ip,
+ instance_id, context=context)
+ instance_id = ec2_id_to_id(instance_id)
+ self.compute_api.associate_floating_ip(context, instance_id, public_ip)
return {'associateResponse': ["Address associated."]}
def disassociate_address(self, context, public_ip, **kwargs):
- floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
- # NOTE(vish): Get the topic from the host name of the network of
- # the associated fixed ip.
- if not floating_ip_ref.get('fixed_ip'):
- raise exception.ApiError('Address is not associated.')
- host = floating_ip_ref['fixed_ip']['network']['host']
- topic = db.queue_get_for(context, FLAGS.network_topic, host)
- rpc.cast(context,
- topic,
- {"method": "disassociate_floating_ip",
- "args": {"floating_address": floating_ip_ref['address']}})
+ LOG.audit(_("Disassociate address %s"), public_ip, context=context)
+ self.network_api.disassociate_floating_ip(context, public_ip)
return {'disassociateResponse': ["Address disassociated."]}
def run_instances(self, context, **kwargs):
max_count = int(kwargs.get('max_count', 1))
- instances = self.compute_api.create_instances(context,
+ instances = self.compute_api.create(context,
instance_types.get_by_type(kwargs.get('instance_type', None)),
kwargs['image_id'],
min_count=int(kwargs.get('min_count', max_count)),
@@ -798,42 +760,43 @@ class CloudController(object):
kernel_id=kwargs.get('kernel_id', None),
ramdisk_id=kwargs.get('ramdisk_id'),
display_name=kwargs.get('display_name'),
- description=kwargs.get('display_description'),
+ display_description=kwargs.get('display_description'),
key_name=kwargs.get('key_name'),
user_data=kwargs.get('user_data'),
security_group=kwargs.get('security_group'),
availability_zone=kwargs.get('placement', {}).get(
'AvailabilityZone'),
- generate_hostname=internal_id_to_ec2_id)
+ generate_hostname=id_to_ec2_id)
return self._format_run_instances(context,
instances[0]['reservation_id'])
def terminate_instances(self, context, instance_id, **kwargs):
"""Terminate each instance in instance_id, which is a list of ec2 ids.
instance_id is a kwarg so its name cannot be modified."""
- logging.debug("Going to start terminating instances")
+ LOG.debug(_("Going to start terminating instances"))
for ec2_id in instance_id:
- internal_id = ec2_id_to_internal_id(ec2_id)
- self.compute_api.delete_instance(context, internal_id)
+ instance_id = ec2_id_to_id(ec2_id)
+ self.compute_api.delete(context, instance_id)
return True
def reboot_instances(self, context, instance_id, **kwargs):
"""instance_id is a list of instance ids"""
+ LOG.audit(_("Reboot instance %r"), instance_id, context=context)
for ec2_id in instance_id:
- internal_id = ec2_id_to_internal_id(ec2_id)
- self.compute_api.reboot(context, internal_id)
+ instance_id = ec2_id_to_id(ec2_id)
+ self.compute_api.reboot(context, instance_id)
return True
def rescue_instance(self, context, instance_id, **kwargs):
"""This is an extension to the normal ec2_api"""
- internal_id = ec2_id_to_internal_id(instance_id)
- self.compute_api.rescue(context, internal_id)
+ instance_id = ec2_id_to_id(instance_id)
+ self.compute_api.rescue(context, instance_id)
return True
def unrescue_instance(self, context, instance_id, **kwargs):
"""This is an extension to the normal ec2_api"""
- internal_id = ec2_id_to_internal_id(instance_id)
- self.compute_api.unrescue(context, internal_id)
+ instance_id = ec2_id_to_id(instance_id)
+ self.compute_api.unrescue(context, instance_id)
return True
def update_instance(self, context, ec2_id, **kwargs):
@@ -843,24 +806,8 @@ class CloudController(object):
if field in kwargs:
changes[field] = kwargs[field]
if changes:
- internal_id = ec2_id_to_internal_id(ec2_id)
- inst = self.compute_api.get_instance(context, internal_id)
- db.instance_update(context, inst['id'], kwargs)
- return True
-
- def delete_volume(self, context, volume_id, **kwargs):
- # TODO: return error if not authorized
- volume_ref = db.volume_get_by_ec2_id(context, volume_id)
- if volume_ref['status'] != "available":
- raise exception.ApiError(_("Volume status must be available"))
- now = datetime.datetime.utcnow()
- db.volume_update(context, volume_ref['id'], {'status': 'deleting',
- 'terminated_at': now})
- host = volume_ref['host']
- rpc.cast(context,
- db.queue_get_for(context, FLAGS.volume_topic, host),
- {"method": "delete_volume",
- "args": {"volume_id": volume_ref['id']}})
+ instance_id = ec2_id_to_id(ec2_id)
+ self.compute_api.update(context, instance_id, **kwargs)
return True
def describe_images(self, context, image_id=None, **kwargs):
@@ -871,6 +818,7 @@ class CloudController(object):
return {'imagesSet': images}
def deregister_image(self, context, image_id, **kwargs):
+ LOG.audit(_("De-registering image %s"), image_id, context=context)
self.image_service.deregister(context, image_id)
return {'imageId': image_id}
@@ -878,7 +826,8 @@ class CloudController(object):
if image_location is None and 'name' in kwargs:
image_location = kwargs['name']
image_id = self.image_service.register(context, image_location)
- logging.debug("Registered %s as %s" % (image_location, image_id))
+ LOG.audit(_("Registered image %s with id %s"), image_location,
+ image_id, context=context)
return {'imageId': image_id}
def describe_image_attribute(self, context, image_id, attribute, **kwargs):
@@ -906,6 +855,7 @@ class CloudController(object):
raise exception.ApiError(_('only group "all" is supported'))
if not operation_type in ['add', 'remove']:
raise exception.ApiError(_('operation_type must be add or remove'))
+ LOG.audit(_("Updating image %s publicity"), image_id, context=context)
return self.image_service.modify(context, image_id, operation_type)
def update_image(self, context, image_id, **kwargs):
diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py
index 9067568a4..6fb441656 100644
--- a/nova/api/ec2/metadatarequesthandler.py
+++ b/nova/api/ec2/metadatarequesthandler.py
@@ -18,16 +18,16 @@
"""Metadata request handler."""
-import logging
-
import webob.dec
import webob.exc
+from nova import log as logging
from nova import flags
from nova import wsgi
from nova.api.ec2 import cloud
+LOG = logging.getLogger('nova.api.ec2.metadata')
FLAGS = flags.FLAGS
@@ -73,8 +73,7 @@ class MetadataRequestHandler(wsgi.Application):
remote_address = req.headers.get('X-Forwarded-For', remote_address)
meta_data = cc.get_metadata(remote_address)
if meta_data is None:
- logging.error(_('Failed to get metadata for ip: %s') %
- remote_address)
+ LOG.error(_('Failed to get metadata for ip: %s'), remote_address)
raise webob.exc.HTTPNotFound()
data = self.lookup(req.path_info, meta_data)
if data is None:
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index 0bf82d839..f2caac483 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -20,24 +20,27 @@
WSGI middleware for OpenStack API controllers.
"""
-import logging
import routes
-import traceback
import webob.dec
import webob.exc
-import webob
from nova import flags
+from nova import log as logging
from nova import wsgi
from nova.api.openstack import faults
from nova.api.openstack import backup_schedules
+from nova.api.openstack import consoles
from nova.api.openstack import flavors
from nova.api.openstack import images
from nova.api.openstack import servers
-from nova.api.openstack import sharedipgroups
+from nova.api.openstack import shared_ip_groups
+LOG = logging.getLogger('nova.api.openstack')
FLAGS = flags.FLAGS
+flags.DEFINE_string('os_krm_mapping_file',
+ 'krm_mapping.json',
+ 'Location of OpenStack Flavor/OS:EC2 Kernel/Ramdisk/Machine JSON file.')
flags.DEFINE_bool('allow_admin_api',
False,
'When True, this API service will accept admin operations.')
@@ -51,8 +54,7 @@ class FaultWrapper(wsgi.Middleware):
try:
return req.get_response(self.application)
except Exception as ex:
- logging.warn(_("Caught error: %s") % str(ex))
- logging.error(traceback.format_exc())
+ LOG.exception(_("Caught error: %s"), str(ex))
exc = webob.exc.HTTPInternalServerError(explanation=str(ex))
return faults.Fault(exc)
@@ -73,7 +75,7 @@ class APIRouter(wsgi.Router):
server_members = {'action': 'POST'}
if FLAGS.allow_admin_api:
- logging.debug("Including admin operations in API.")
+ LOG.debug(_("Including admin operations in API."))
server_members['pause'] = 'POST'
server_members['unpause'] = 'POST'
server_members["diagnostics"] = "GET"
@@ -90,12 +92,18 @@ class APIRouter(wsgi.Router):
parent_resource=dict(member_name='server',
collection_name='servers'))
+ mapper.resource("console", "consoles",
+ controller=consoles.Controller(),
+ parent_resource=dict(member_name='server',
+ collection_name='servers'))
+
mapper.resource("image", "images", controller=images.Controller(),
collection={'detail': 'GET'})
mapper.resource("flavor", "flavors", controller=flavors.Controller(),
collection={'detail': 'GET'})
- mapper.resource("sharedipgroup", "sharedipgroups",
- controller=sharedipgroups.Controller())
+ mapper.resource("shared_ip_group", "shared_ip_groups",
+ collection={'detail': 'GET'},
+ controller=shared_ip_groups.Controller())
super(APIRouter, self).__init__(mapper)
diff --git a/nova/api/openstack/backup_schedules.py b/nova/api/openstack/backup_schedules.py
index fcc07bdd3..197125d86 100644
--- a/nova/api/openstack/backup_schedules.py
+++ b/nova/api/openstack/backup_schedules.py
@@ -15,7 +15,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+import logging
import time
+
from webob import exc
from nova import wsgi
@@ -46,8 +48,8 @@ class Controller(wsgi.Controller):
def create(self, req, server_id):
""" No actual update method required, since the existing API allows
both create and update through a POST """
- return faults.Fault(exc.HTTPNotFound())
+ return faults.Fault(exc.HTTPNotImplemented())
def delete(self, req, server_id, id):
""" Deletes an existing backup schedule """
- return faults.Fault(exc.HTTPNotFound())
+ return faults.Fault(exc.HTTPNotImplemented())
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index ac0572c96..037ed47a0 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -15,6 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+from nova import exception
+
def limited(items, req):
"""Return a slice of items according to requested offset and limit.
@@ -34,3 +36,25 @@ def limited(items, req):
limit = min(1000, limit)
range_end = offset + limit
return items[offset:range_end]
+
+
+def get_image_id_from_image_hash(image_service, context, image_hash):
+ """Given an Image ID Hash, return an objectstore Image ID.
+
+ image_service - reference to objectstore compatible image service.
+ context - security context for image service requests.
+ image_hash - hash of the image ID.
+ """
+
+ # FIXME(sandy): This is terribly inefficient. It pulls all images
+ # from objectstore in order to find the match. ObjectStore
+ # should have a numeric counterpart to the string ID.
+ try:
+ items = image_service.detail(context)
+ except NotImplementedError:
+ items = image_service.index(context)
+ for image in items:
+ image_id = image['imageId']
+ if abs(hash(image_id)) == int(image_hash):
+ return image_id
+ raise exception.NotFound(image_hash)
diff --git a/nova/api/openstack/consoles.py b/nova/api/openstack/consoles.py
new file mode 100644
index 000000000..9ebdbe710
--- /dev/null
+++ b/nova/api/openstack/consoles.py
@@ -0,0 +1,96 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from webob import exc
+
+from nova import console
+from nova import exception
+from nova import wsgi
+from nova.api.openstack import faults
+
+
+def _translate_keys(cons):
+ """Coerces a console instance into proper dictionary format."""
+ pool = cons['pool']
+ info = {'id': cons['id'],
+ 'console_type': pool['console_type']}
+ return dict(console=info)
+
+
+def _translate_detail_keys(cons):
+ """Coerces a console instance into proper dictionary format with
+ correctly mapped attributes."""
+ pool = cons['pool']
+ info = {'id': cons['id'],
+ 'console_type': pool['console_type'],
+ 'password': cons['password'],
+ 'port': cons['port'],
+ 'host': pool['public_hostname']}
+ return dict(console=info)
+
+
+class Controller(wsgi.Controller):
+ """The Consoles Controller for the OpenStack API"""
+
+ _serialization_metadata = {
+ 'application/xml': {
+ 'attributes': {
+ 'console': []}}}
+
+ def __init__(self):
+ self.console_api = console.API()
+ super(Controller, self).__init__()
+
+ def index(self, req, server_id):
+ """Returns a list of consoles for this instance"""
+ consoles = self.console_api.get_consoles(
+ req.environ['nova.context'],
+ int(server_id))
+ return dict(consoles=[_translate_keys(console)
+ for console in consoles])
+
+ def create(self, req, server_id):
+ """Creates a new console"""
+ #info = self._deserialize(req.body, req)
+ self.console_api.create_console(
+ req.environ['nova.context'],
+ int(server_id))
+
+ def show(self, req, server_id, id):
+ """Shows in-depth information on a specific console"""
+ try:
+ console = self.console_api.get_console(
+ req.environ['nova.context'],
+ int(server_id),
+ int(id))
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+ return _translate_detail_keys(console)
+
+ def update(self, req, server_id, id):
+ """You can't update a console"""
+ raise faults.Fault(exc.HTTPNotImplemented())
+
+ def delete(self, req, server_id, id):
+ """Deletes a console"""
+ try:
+ self.console_api.delete_console(req.environ['nova.context'],
+ int(server_id),
+ int(id))
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+ return exc.HTTPAccepted()
diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py
index 867ee5a7e..a5f55a489 100644
--- a/nova/api/openstack/images.py
+++ b/nova/api/openstack/images.py
@@ -15,17 +15,19 @@
# License for the specific language governing permissions and limitations
# under the License.
+import logging
+
from webob import exc
+from nova import compute
from nova import flags
from nova import utils
from nova import wsgi
import nova.api.openstack
-import nova.image.service
-
from nova.api.openstack import common
from nova.api.openstack import faults
-from nova.compute import api as compute_api
+import nova.image.service
+
FLAGS = flags.FLAGS
@@ -89,6 +91,12 @@ def _filter_keys(item, keys):
return dict((k, v) for k, v in item.iteritems() if k in keys)
+def _convert_image_id_to_hash(image):
+ image_id = abs(hash(image['imageId']))
+ image['imageId'] = image_id
+ image['id'] = image_id
+
+
class Controller(wsgi.Controller):
_serialization_metadata = {
@@ -113,6 +121,9 @@ class Controller(wsgi.Controller):
items = self._service.detail(req.environ['nova.context'])
except NotImplementedError:
items = self._service.index(req.environ['nova.context'])
+ for image in items:
+ _convert_image_id_to_hash(image)
+
items = common.limited(items, req)
items = [_translate_keys(item) for item in items]
items = [_translate_status(item) for item in items]
@@ -120,7 +131,12 @@ class Controller(wsgi.Controller):
def show(self, req, id):
"""Return data about the given image id"""
- return dict(image=self._service.show(req.environ['nova.context'], id))
+ image_id = common.get_image_id_from_image_hash(self._service,
+ req.environ['nova.context'], id)
+
+ image = self._service.show(req.environ['nova.context'], image_id)
+ _convert_image_id_to_hash(image)
+ return dict(image=image)
def delete(self, req, id):
# Only public images are supported for now.
@@ -131,7 +147,7 @@ class Controller(wsgi.Controller):
env = self._deserialize(req.body, req)
instance_id = env["image"]["serverId"]
name = env["image"]["name"]
- return compute_api.ComputeAPI().snapshot(context, instance_id, name)
+ return compute.API().snapshot(context, instance_id, name)
def update(self, req, id):
# Users may not modify public images, and that's all that
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index c5cbe21ef..29af82533 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -15,17 +15,20 @@
# License for the specific language governing permissions and limitations
# under the License.
-import logging
+import json
import traceback
from webob import exc
+from nova import compute
from nova import exception
+from nova import flags
+from nova import log as logging
from nova import wsgi
+from nova import utils
from nova.api.openstack import common
from nova.api.openstack import faults
from nova.auth import manager as auth_manager
-from nova.compute import api as compute_api
from nova.compute import instance_types
from nova.compute import power_state
import nova.api.openstack
@@ -35,6 +38,9 @@ LOG = logging.getLogger('server')
LOG.setLevel(logging.DEBUG)
+FLAGS = flags.FLAGS
+
+
def _translate_detail_keys(inst):
""" Coerces into dictionary format, mapping everything to Rackspace-like
attributes for return"""
@@ -44,14 +50,14 @@ def _translate_detail_keys(inst):
power_state.RUNNING: 'active',
power_state.BLOCKED: 'active',
power_state.SUSPENDED: 'suspended',
- power_state.PAUSED: 'error',
+ power_state.PAUSED: 'paused',
power_state.SHUTDOWN: 'active',
power_state.SHUTOFF: 'active',
power_state.CRASHED: 'error'}
inst_dict = {}
mapped_keys = dict(status='state', imageId='image_id',
- flavorId='instance_type', name='display_name', id='internal_id')
+ flavorId='instance_type', name='display_name', id='id')
for k, v in mapped_keys.iteritems():
inst_dict[k] = inst[v]
@@ -67,7 +73,7 @@ def _translate_detail_keys(inst):
def _translate_keys(inst):
""" Coerces into dictionary format, excluding all model attributes
save for id and name """
- return dict(server=dict(id=inst['internal_id'], name=inst['display_name']))
+ return dict(server=dict(id=inst['id'], name=inst['display_name']))
class Controller(wsgi.Controller):
@@ -80,7 +86,8 @@ class Controller(wsgi.Controller):
"status", "progress"]}}}
def __init__(self):
- self.compute_api = compute_api.ComputeAPI()
+ self.compute_api = compute.API()
+ self._image_service = utils.import_object(FLAGS.image_service)
super(Controller, self).__init__()
def index(self, req):
@@ -96,8 +103,7 @@ class Controller(wsgi.Controller):
entity_maker - either _translate_detail_keys or _translate_keys
"""
- instance_list = self.compute_api.get_instances(
- req.environ['nova.context'])
+ instance_list = self.compute_api.get_all(req.environ['nova.context'])
limited_list = common.limited(instance_list, req)
res = [entity_maker(inst)['server'] for inst in limited_list]
return dict(servers=res)
@@ -105,8 +111,7 @@ class Controller(wsgi.Controller):
def show(self, req, id):
""" Returns server details by server id """
try:
- instance = self.compute_api.get_instance(
- req.environ['nova.context'], int(id))
+ instance = self.compute_api.get(req.environ['nova.context'], id)
return _translate_detail_keys(instance)
except exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
@@ -114,12 +119,23 @@ class Controller(wsgi.Controller):
def delete(self, req, id):
""" Destroys a server """
try:
- self.compute_api.delete_instance(req.environ['nova.context'],
- int(id))
+ self.compute_api.delete(req.environ['nova.context'], id)
except exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
return exc.HTTPAccepted()
+ def _get_kernel_ramdisk_from_image(self, image_id):
+ mapping_filename = FLAGS.os_krm_mapping_file
+
+ with open(mapping_filename) as f:
+ mapping = json.load(f)
+ if image_id in mapping:
+ return mapping[image_id]
+
+ raise exception.NotFound(
+ _("No entry for image '%s' in mapping file '%s'") %
+ (image_id, mapping_filename))
+
def create(self, req):
""" Creates a new server for a given user """
env = self._deserialize(req.body, req)
@@ -128,12 +144,17 @@ class Controller(wsgi.Controller):
key_pair = auth_manager.AuthManager.get_key_pairs(
req.environ['nova.context'])[0]
- instances = self.compute_api.create_instances(
+ image_id = common.get_image_id_from_image_hash(self._image_service,
+ req.environ['nova.context'], env['server']['imageId'])
+ kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(image_id)
+ instances = self.compute_api.create(
req.environ['nova.context'],
instance_types.get_by_flavor_id(env['server']['flavorId']),
- env['server']['imageId'],
+ image_id,
+ kernel_id=kernel_id,
+ ramdisk_id=ramdisk_id,
display_name=env['server']['name'],
- description=env['server']['name'],
+ display_description=env['server']['name'],
key_name=key_pair['name'],
key_data=key_pair['public_key'])
return _translate_keys(instances[0])
@@ -151,10 +172,8 @@ class Controller(wsgi.Controller):
update_dict['display_name'] = inst_dict['server']['name']
try:
- ctxt = req.environ['nova.context']
- self.compute_api.update_instance(ctxt,
- id,
- **update_dict)
+ self.compute_api.update(req.environ['nova.context'], id,
+ **update_dict)
except exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
return exc.HTTPNoContent()
@@ -163,6 +182,7 @@ class Controller(wsgi.Controller):
""" Multi-purpose method used to reboot, rebuild, and
resize a server """
input_dict = self._deserialize(req.body, req)
+ # TODO(sandy): rebuild/resize not supported.
try:
reboot_type = input_dict['reboot']['type']
except Exception:
@@ -175,6 +195,50 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
+ def lock(self, req, id):
+ """
+ Lock the instance with the given id.
+ Admin-only operation.
+
+ """
+ context = req.environ['nova.context']
+ try:
+ self.compute_api.lock(context, id)
+ except:
+ readable = traceback.format_exc()
+ LOG.exception(_("Compute.api::lock %s"), readable)
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+ return exc.HTTPAccepted()
+
+ def unlock(self, req, id):
+ """
+ Unlock the instance with the given id.
+ Admin-only operation.
+
+ """
+ context = req.environ['nova.context']
+ try:
+ self.compute_api.unlock(context, id)
+ except:
+ readable = traceback.format_exc()
+ LOG.exception(_("Compute.api::unlock %s"), readable)
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+ return exc.HTTPAccepted()
+
+ def get_lock(self, req, id):
+ """
+ Return the boolean lock state of the instance with the given id.
+
+ """
+ context = req.environ['nova.context']
+ try:
+ self.compute_api.get_lock(context, id)
+ except:
+ readable = traceback.format_exc()
+ LOG.exception(_("Compute.api::get_lock %s"), readable)
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+ return exc.HTTPAccepted()
+
def pause(self, req, id):
""" Permit Admins to Pause the server. """
ctxt = req.environ['nova.context']
@@ -182,7 +246,7 @@ class Controller(wsgi.Controller):
self.compute_api.pause(ctxt, id)
except:
readable = traceback.format_exc()
- logging.error(_("Compute.api::pause %s"), readable)
+ LOG.exception(_("Compute.api::pause %s"), readable)
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
@@ -193,7 +257,7 @@ class Controller(wsgi.Controller):
self.compute_api.unpause(ctxt, id)
except:
readable = traceback.format_exc()
- logging.error(_("Compute.api::unpause %s"), readable)
+ LOG.exception(_("Compute.api::unpause %s"), readable)
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
@@ -204,7 +268,7 @@ class Controller(wsgi.Controller):
self.compute_api.suspend(context, id)
except:
readable = traceback.format_exc()
- logging.error(_("compute.api::suspend %s"), readable)
+ LOG.exception(_("compute.api::suspend %s"), readable)
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
@@ -215,10 +279,19 @@ class Controller(wsgi.Controller):
self.compute_api.resume(context, id)
except:
readable = traceback.format_exc()
- logging.error(_("compute.api::resume %s"), readable)
+ LOG.exception(_("compute.api::resume %s"), readable)
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
+ def get_ajax_console(self, req, id):
+ """ Returns a URL to an instance's ajaxterm console. """
+ try:
+ self.compute_api.get_ajax_console(req.environ['nova.context'],
+ int(id))
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+ return exc.HTTPAccepted()
+
def diagnostics(self, req, id):
"""Permit Admins to retrieve server diagnostics."""
ctxt = req.environ["nova.context"]
@@ -227,4 +300,13 @@ class Controller(wsgi.Controller):
def actions(self, req, id):
"""Permit Admins to retrieve server actions."""
ctxt = req.environ["nova.context"]
- return self.compute_api.get_actions(ctxt, id)
+ items = self.compute_api.get_actions(ctxt, id)
+ actions = []
+ # TODO(jk0): Do not do pre-serialization here once the default
+ # serializer is updated
+ for item in items:
+ actions.append(dict(
+ created_at=str(item.created_at),
+ action=item.action,
+ error=item.error))
+ return dict(actions=actions)
diff --git a/nova/api/openstack/sharedipgroups.py b/nova/api/openstack/shared_ip_groups.py
index 845f5bead..bd3cc23a8 100644
--- a/nova/api/openstack/sharedipgroups.py
+++ b/nova/api/openstack/shared_ip_groups.py
@@ -15,6 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import logging
+
from webob import exc
from nova import wsgi
@@ -29,7 +31,7 @@ def _translate_keys(inst):
def _translate_detail_keys(inst):
""" Coerces a shared IP group instance into proper dictionary format with
correctly mapped attributes """
- return dict(sharedIpGroup=inst)
+ return dict(sharedIpGroups=inst)
class Controller(wsgi.Controller):
@@ -54,12 +56,12 @@ class Controller(wsgi.Controller):
def delete(self, req, id):
""" Deletes a Shared IP Group """
- raise faults.Fault(exc.HTTPNotFound())
+ raise faults.Fault(exc.HTTPNotImplemented())
- def detail(self, req, id):
+ def detail(self, req):
""" Returns a complete list of Shared IP Groups """
return _translate_detail_keys({})
def create(self, req):
""" Creates a new Shared IP group """
- raise faults.Fault(exc.HTTPNotFound())
+ raise faults.Fault(exc.HTTPNotImplemented())
diff --git a/nova/auth/dbdriver.py b/nova/auth/dbdriver.py
index 47e435cb6..0eb6fe588 100644
--- a/nova/auth/dbdriver.py
+++ b/nova/auth/dbdriver.py
@@ -20,7 +20,6 @@
Auth driver using the DB as its backend.
"""
-import logging
import sys
from nova import context
diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py
index 7616ff112..bc53e0ec6 100644
--- a/nova/auth/ldapdriver.py
+++ b/nova/auth/ldapdriver.py
@@ -24,11 +24,11 @@ other backends by creating another class that exposes the same
public methods.
"""
-import logging
import sys
from nova import exception
from nova import flags
+from nova import log as logging
FLAGS = flags.FLAGS
@@ -65,6 +65,8 @@ flags.DEFINE_string('ldap_netadmin',
flags.DEFINE_string('ldap_developer',
'cn=developers,ou=Groups,dc=example,dc=com', 'cn for Developers')
+LOG = logging.getLogger("nova.ldapdriver")
+
# TODO(vish): make an abstract base class with the same public methods
# to define a set interface for AuthDrivers. I'm delaying
@@ -117,8 +119,7 @@ class LdapDriver(object):
def get_project(self, pid):
"""Retrieve project by id"""
- dn = 'cn=%s,%s' % (pid,
- FLAGS.ldap_project_subtree)
+ dn = self.__project_to_dn(pid)
attr = self.__find_object(dn, LdapDriver.project_pattern)
return self.__to_project(attr)
@@ -226,7 +227,8 @@ class LdapDriver(object):
('description', [description]),
(LdapDriver.project_attribute, [manager_dn]),
('member', members)]
- self.conn.add_s('cn=%s,%s' % (name, FLAGS.ldap_project_subtree), attr)
+ dn = self.__project_to_dn(name, search=False)
+ self.conn.add_s(dn, attr)
return self.__to_project(dict(attr))
def modify_project(self, project_id, manager_uid=None, description=None):
@@ -244,23 +246,22 @@ class LdapDriver(object):
manager_dn))
if description:
attr.append((self.ldap.MOD_REPLACE, 'description', description))
- self.conn.modify_s('cn=%s,%s' % (project_id,
- FLAGS.ldap_project_subtree),
- attr)
+ dn = self.__project_to_dn(project_id)
+ self.conn.modify_s(dn, attr)
def add_to_project(self, uid, project_id):
"""Add user to project"""
- dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
+ dn = self.__project_to_dn(project_id)
return self.__add_to_group(uid, dn)
def remove_from_project(self, uid, project_id):
"""Remove user from project"""
- dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
+ dn = self.__project_to_dn(project_id)
return self.__remove_from_group(uid, dn)
def is_in_project(self, uid, project_id):
"""Check if user is in project"""
- dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
+ dn = self.__project_to_dn(project_id)
return self.__is_in_group(uid, dn)
def has_role(self, uid, role, project_id=None):
@@ -300,7 +301,7 @@ class LdapDriver(object):
roles.append(role)
return roles
else:
- project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
+ project_dn = self.__project_to_dn(project_id)
query = ('(&(&(objectclass=groupOfNames)(!%s))(member=%s))' %
(LdapDriver.project_pattern, self.__uid_to_dn(uid)))
roles = self.__find_objects(project_dn, query)
@@ -333,7 +334,7 @@ class LdapDriver(object):
def delete_project(self, project_id):
"""Delete a project"""
- project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
+ project_dn = self.__project_to_dn(project_id)
self.__delete_roles(project_dn)
self.__delete_group(project_dn)
@@ -365,9 +366,10 @@ class LdapDriver(object):
def __get_ldap_user(self, uid):
"""Retrieve LDAP user entry by id"""
- attr = self.__find_object(self.__uid_to_dn(uid),
- '(objectclass=novaUser)')
- return attr
+ dn = FLAGS.ldap_user_subtree
+ query = ('(&(%s=%s)(objectclass=novaUser))' %
+ (FLAGS.ldap_user_id_attribute, uid))
+ return self.__find_object(dn, query)
def __find_object(self, dn, query=None, scope=None):
"""Find an object by dn and query"""
@@ -418,15 +420,13 @@ class LdapDriver(object):
query = '(objectclass=groupOfNames)'
return self.__find_object(dn, query) is not None
- @staticmethod
- def __role_to_dn(role, project_id=None):
+ def __role_to_dn(self, role, project_id=None):
"""Convert role to corresponding dn"""
if project_id is None:
return FLAGS.__getitem__("ldap_%s" % role).value
else:
- return 'cn=%s,cn=%s,%s' % (role,
- project_id,
- FLAGS.ldap_project_subtree)
+ project_dn = self.__project_to_dn(project_id)
+ return 'cn=%s,%s' % (role, project_dn)
def __create_group(self, group_dn, name, uid,
description, member_uids=None):
@@ -502,8 +502,8 @@ class LdapDriver(object):
try:
self.conn.modify_s(group_dn, attr)
except self.ldap.OBJECT_CLASS_VIOLATION:
- logging.debug(_("Attempted to remove the last member of a group. "
- "Deleting the group at %s instead."), group_dn)
+ LOG.debug(_("Attempted to remove the last member of a group. "
+ "Deleting the group at %s instead."), group_dn)
self.__delete_group(group_dn)
def __remove_from_all(self, uid):
@@ -532,6 +532,42 @@ class LdapDriver(object):
for role_dn in self.__find_role_dns(project_dn):
self.__delete_group(role_dn)
+ def __to_project(self, attr):
+ """Convert ldap attributes to Project object"""
+ if attr is None:
+ return None
+ member_dns = attr.get('member', [])
+ return {
+ 'id': attr['cn'][0],
+ 'name': attr['cn'][0],
+ 'project_manager_id':
+ self.__dn_to_uid(attr[LdapDriver.project_attribute][0]),
+ 'description': attr.get('description', [None])[0],
+ 'member_ids': [self.__dn_to_uid(x) for x in member_dns]}
+
+ def __uid_to_dn(self, uid, search=True):
+ """Convert uid to dn"""
+ # By default return a generated DN
+ userdn = (FLAGS.ldap_user_id_attribute + '=%s,%s'
+ % (uid, FLAGS.ldap_user_subtree))
+ if search:
+ query = ('%s=%s' % (FLAGS.ldap_user_id_attribute, uid))
+ user = self.__find_dns(FLAGS.ldap_user_subtree, query)
+ if len(user) > 0:
+ userdn = user[0]
+ return userdn
+
+ def __project_to_dn(self, pid, search=True):
+ """Convert pid to dn"""
+ # By default return a generated DN
+ projectdn = ('cn=%s,%s' % (pid, FLAGS.ldap_project_subtree))
+ if search:
+ query = ('(&(cn=%s)%s)' % (pid, LdapDriver.project_pattern))
+ project = self.__find_dns(FLAGS.ldap_project_subtree, query)
+ if len(project) > 0:
+ projectdn = project[0]
+ return projectdn
+
@staticmethod
def __to_user(attr):
"""Convert ldap attributes to User object"""
@@ -548,30 +584,11 @@ class LdapDriver(object):
else:
return None
- def __to_project(self, attr):
- """Convert ldap attributes to Project object"""
- if attr is None:
- return None
- member_dns = attr.get('member', [])
- return {
- 'id': attr['cn'][0],
- 'name': attr['cn'][0],
- 'project_manager_id':
- self.__dn_to_uid(attr[LdapDriver.project_attribute][0]),
- 'description': attr.get('description', [None])[0],
- 'member_ids': [self.__dn_to_uid(x) for x in member_dns]}
-
@staticmethod
def __dn_to_uid(dn):
"""Convert user dn to uid"""
return dn.split(',')[0].split('=')[1]
- @staticmethod
- def __uid_to_dn(uid):
- """Convert uid to dn"""
- return (FLAGS.ldap_user_id_attribute + '=%s,%s'
- % (uid, FLAGS.ldap_user_subtree))
-
class FakeLdapDriver(LdapDriver):
"""Fake Ldap Auth driver"""
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index d3e266952..89f02998d 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -20,7 +20,6 @@
Nova authentication management
"""
-import logging
import os
import shutil
import string # pylint: disable-msg=W0402
@@ -33,6 +32,7 @@ from nova import crypto
from nova import db
from nova import exception
from nova import flags
+from nova import log as logging
from nova import utils
from nova.auth import signer
@@ -70,6 +70,8 @@ flags.DEFINE_string('credential_rc_file', '%src',
flags.DEFINE_string('auth_driver', 'nova.auth.dbdriver.DbDriver',
'Driver that auth manager uses')
+LOG = logging.getLogger('nova.auth.manager')
+
class AuthBase(object):
"""Base class for objects relating to auth
@@ -254,43 +256,51 @@ class AuthManager(object):
# TODO(vish): check for valid timestamp
(access_key, _sep, project_id) = access.partition(':')
- logging.info(_('Looking up user: %r'), access_key)
+ LOG.debug(_('Looking up user: %r'), access_key)
user = self.get_user_from_access_key(access_key)
- logging.info('user: %r', user)
+ LOG.debug('user: %r', user)
if user == None:
+ LOG.audit(_("Failed authorization for access key %s"), access_key)
raise exception.NotFound(_('No user found for access key %s')
% access_key)
# NOTE(vish): if we stop using project name as id we need better
# logic to find a default project for user
if project_id == '':
+ LOG.debug(_("Using project name = user name (%s)"), user.name)
project_id = user.name
project = self.get_project(project_id)
if project == None:
+ LOG.audit(_("failed authorization: no project named %s (user=%s)"),
+ project_id, user.name)
raise exception.NotFound(_('No project called %s could be found')
% project_id)
if not self.is_admin(user) and not self.is_project_member(user,
project):
+ LOG.audit(_("Failed authorization: user %s not admin and not "
+ "member of project %s"), user.name, project.name)
raise exception.NotFound(_('User %s is not a member of project %s')
% (user.id, project.id))
if check_type == 's3':
sign = signer.Signer(user.secret.encode())
expected_signature = sign.s3_authorization(headers, verb, path)
- logging.debug('user.secret: %s', user.secret)
- logging.debug('expected_signature: %s', expected_signature)
- logging.debug('signature: %s', signature)
+ LOG.debug('user.secret: %s', user.secret)
+ LOG.debug('expected_signature: %s', expected_signature)
+ LOG.debug('signature: %s', signature)
if signature != expected_signature:
+ LOG.audit(_("Invalid signature for user %s"), user.name)
raise exception.NotAuthorized(_('Signature does not match'))
elif check_type == 'ec2':
# NOTE(vish): hmac can't handle unicode, so encode ensures that
# secret isn't unicode
expected_signature = signer.Signer(user.secret.encode()).generate(
params, verb, server_string, path)
- logging.debug('user.secret: %s', user.secret)
- logging.debug('expected_signature: %s', expected_signature)
- logging.debug('signature: %s', signature)
+ LOG.debug('user.secret: %s', user.secret)
+ LOG.debug('expected_signature: %s', expected_signature)
+ LOG.debug('signature: %s', signature)
if signature != expected_signature:
+ LOG.audit(_("Invalid signature for user %s"), user.name)
raise exception.NotAuthorized(_('Signature does not match'))
return (user, project)
@@ -398,6 +408,12 @@ class AuthManager(object):
raise exception.NotFound(_("The %s role can not be found") % role)
if project is not None and role in FLAGS.global_roles:
raise exception.NotFound(_("The %s role is global only") % role)
+ if project:
+ LOG.audit(_("Adding role %s to user %s in project %s"), role,
+ User.safe_id(user), Project.safe_id(project))
+ else:
+ LOG.audit(_("Adding sitewide role %s to user %s"), role,
+ User.safe_id(user))
with self.driver() as drv:
drv.add_role(User.safe_id(user), role, Project.safe_id(project))
@@ -418,6 +434,12 @@ class AuthManager(object):
@type project: Project or project_id
@param project: Project in which to remove local role.
"""
+ if project:
+ LOG.audit(_("Removing role %s from user %s on project %s"),
+ role, User.safe_id(user), Project.safe_id(project))
+ else:
+ LOG.audit(_("Removing sitewide role %s from user %s"), role,
+ User.safe_id(user))
with self.driver() as drv:
drv.remove_role(User.safe_id(user), role, Project.safe_id(project))
@@ -480,6 +502,8 @@ class AuthManager(object):
description,
member_users)
if project_dict:
+ LOG.audit(_("Created project %s with manager %s"), name,
+ manager_user)
project = Project(**project_dict)
return project
@@ -496,6 +520,7 @@ class AuthManager(object):
@param project: This will be the new description of the project.
"""
+ LOG.audit(_("modifying project %s"), Project.safe_id(project))
if manager_user:
manager_user = User.safe_id(manager_user)
with self.driver() as drv:
@@ -505,6 +530,8 @@ class AuthManager(object):
def add_to_project(self, user, project):
"""Add user to project"""
+ LOG.audit(_("Adding user %s to project %s"), User.safe_id(user),
+ Project.safe_id(project))
with self.driver() as drv:
return drv.add_to_project(User.safe_id(user),
Project.safe_id(project))
@@ -523,6 +550,8 @@ class AuthManager(object):
def remove_from_project(self, user, project):
"""Removes a user from a project"""
+ LOG.audit(_("Remove user %s from project %s"), User.safe_id(user),
+ Project.safe_id(project))
with self.driver() as drv:
return drv.remove_from_project(User.safe_id(user),
Project.safe_id(project))
@@ -549,6 +578,7 @@ class AuthManager(object):
def delete_project(self, project):
"""Deletes a project"""
+ LOG.audit(_("Deleting project %s"), Project.safe_id(project))
with self.driver() as drv:
drv.delete_project(Project.safe_id(project))
@@ -603,13 +633,16 @@ class AuthManager(object):
with self.driver() as drv:
user_dict = drv.create_user(name, access, secret, admin)
if user_dict:
- return User(**user_dict)
+ rv = User(**user_dict)
+ LOG.audit(_("Created user %s (admin: %r)"), rv.name, rv.admin)
+ return rv
def delete_user(self, user):
"""Deletes a user
Additionally deletes all users key_pairs"""
uid = User.safe_id(user)
+ LOG.audit(_("Deleting user %s"), uid)
db.key_pair_destroy_all_by_user(context.get_admin_context(),
uid)
with self.driver() as drv:
@@ -618,6 +651,12 @@ class AuthManager(object):
def modify_user(self, user, access_key=None, secret_key=None, admin=None):
"""Modify credentials for a user"""
uid = User.safe_id(user)
+ if access_key:
+ LOG.audit(_("Access Key change for user %s"), uid)
+ if secret_key:
+ LOG.audit(_("Secret Key change for user %s"), uid)
+ if admin is not None:
+ LOG.audit(_("Admin status set to %r for user %s"), admin, uid)
with self.driver() as drv:
drv.modify_user(uid, access_key, secret_key, admin)
@@ -645,8 +684,7 @@ class AuthManager(object):
else:
regions = {'nova': FLAGS.cc_host}
for region, host in regions.iteritems():
- rc = self.__generate_rc(user.access,
- user.secret,
+ rc = self.__generate_rc(user,
pid,
use_dmz,
host)
@@ -666,7 +704,7 @@ class AuthManager(object):
port=vpn_port)
zippy.writestr(FLAGS.credential_vpn_file, config)
else:
- logging.warn(_("No vpn data for project %s"), pid)
+ LOG.warn(_("No vpn data for project %s"), pid)
zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(pid))
zippy.close()
@@ -686,7 +724,7 @@ class AuthManager(object):
-        return self.__generate_rc(user.access, user.secret, pid, use_dmz)
+        return self.__generate_rc(user, pid, use_dmz)
@staticmethod
- def __generate_rc(access, secret, pid, use_dmz=True, host=None):
+ def __generate_rc(user, pid, use_dmz=True, host=None):
"""Generate rc file for user"""
if use_dmz:
cc_host = FLAGS.cc_dmz
@@ -699,14 +737,19 @@ class AuthManager(object):
s3_host = host
cc_host = host
rc = open(FLAGS.credentials_template).read()
- rc = rc % {'access': access,
+ rc = rc % {'access': user.access,
'project': pid,
- 'secret': secret,
+ 'secret': user.secret,
'ec2': '%s://%s:%s%s' % (FLAGS.ec2_prefix,
cc_host,
FLAGS.cc_port,
FLAGS.ec2_suffix),
's3': 'http://%s:%s' % (s3_host, FLAGS.s3_port),
+ 'os': '%s://%s:%s%s' % (FLAGS.os_prefix,
+ cc_host,
+ FLAGS.cc_port,
+ FLAGS.os_suffix),
+ 'user': user.name,
'nova': FLAGS.ca_file,
'cert': FLAGS.credential_cert_file,
'key': FLAGS.credential_key_file}
diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template
index 1b8ecb173..c53a4acdc 100644
--- a/nova/auth/novarc.template
+++ b/nova/auth/novarc.template
@@ -10,3 +10,7 @@ export NOVA_CERT=${NOVA_KEY_DIR}/%(nova)s
export EUCALYPTUS_CERT=${NOVA_CERT} # euca-bundle-image seems to require this set
alias ec2-bundle-image="ec2-bundle-image --cert ${EC2_CERT} --privatekey ${EC2_PRIVATE_KEY} --user 42 --ec2cert ${NOVA_CERT}"
alias ec2-upload-bundle="ec2-upload-bundle -a ${EC2_ACCESS_KEY} -s ${EC2_SECRET_KEY} --url ${S3_URL} --ec2cert ${NOVA_CERT}"
+export CLOUD_SERVERS_API_KEY="%(access)s"
+export CLOUD_SERVERS_USERNAME="%(user)s"
+export CLOUD_SERVERS_URL="%(os)s"
+
diff --git a/nova/auth/signer.py b/nova/auth/signer.py
index f7d29f534..744e315d4 100644
--- a/nova/auth/signer.py
+++ b/nova/auth/signer.py
@@ -46,7 +46,6 @@ Utility class for parsing signed AMI manifests.
import base64
import hashlib
import hmac
-import logging
import urllib
# NOTE(vish): for new boto
@@ -54,9 +53,13 @@ import boto
# NOTE(vish): for old boto
import boto.utils
+from nova import log as logging
from nova.exception import Error
+LOG = logging.getLogger('nova.signer')
+
+
class Signer(object):
"""Hacked up code from boto/connection.py"""
@@ -120,7 +123,7 @@ class Signer(object):
def _calc_signature_2(self, params, verb, server_string, path):
"""Generate AWS signature version 2 string."""
- logging.debug('using _calc_signature_2')
+ LOG.debug('using _calc_signature_2')
string_to_sign = '%s\n%s\n%s\n' % (verb, server_string, path)
if self.hmac_256:
current_hmac = self.hmac_256
@@ -136,13 +139,13 @@ class Signer(object):
val = urllib.quote(val, safe='-_~')
pairs.append(urllib.quote(key, safe='') + '=' + val)
qs = '&'.join(pairs)
- logging.debug('query string: %s', qs)
+ LOG.debug('query string: %s', qs)
string_to_sign += qs
- logging.debug('string_to_sign: %s', string_to_sign)
+ LOG.debug('string_to_sign: %s', string_to_sign)
current_hmac.update(string_to_sign)
b64 = base64.b64encode(current_hmac.digest())
- logging.debug('len(b64)=%d', len(b64))
- logging.debug('base64 encoded digest: %s', b64)
+ LOG.debug('len(b64)=%d', len(b64))
+ LOG.debug('base64 encoded digest: %s', b64)
return b64
diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py
index 09361828d..8aefd341f 100644
--- a/nova/cloudpipe/pipelib.py
+++ b/nova/cloudpipe/pipelib.py
@@ -22,7 +22,6 @@ an instance with it.
"""
-import logging
import os
import string
import tempfile
@@ -33,6 +32,7 @@ from nova import crypto
from nova import db
from nova import exception
from nova import flags
+from nova import log as logging
from nova import utils
from nova.auth import manager
# TODO(eday): Eventually changes these to something not ec2-specific
@@ -51,7 +51,7 @@ flags.DEFINE_string('dmz_mask',
_('Netmask to push into openvpn config'))
-LOG = logging.getLogger('nova-cloudpipe')
+LOG = logging.getLogger('nova.cloudpipe')
class CloudPipe(object):
diff --git a/nova/compute/__init__.py b/nova/compute/__init__.py
index a5df2ec1a..b94f971d1 100644
--- a/nova/compute/__init__.py
+++ b/nova/compute/__init__.py
@@ -16,17 +16,4 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""
-:mod:`nova.compute` -- Compute Nodes using LibVirt
-=====================================================
-
-.. automodule:: nova.compute
- :platform: Unix
- :synopsis: Thin wrapper around libvirt for VM mgmt.
-.. moduleauthor:: Jesse Andrews <jesse@ansolabs.com>
-.. moduleauthor:: Devin Carlen <devin.carlen@gmail.com>
-.. moduleauthor:: Vishvananda Ishaya <vishvananda@yahoo.com>
-.. moduleauthor:: Joshua McKenty <joshua@cognition.ca>
-.. moduleauthor:: Manish Singh <yosh@gimp.org>
-.. moduleauthor:: Andy Smith <andy@anarkystic.com>
-"""
+from nova.compute.api import API
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 07c69bd31..bf921aa00 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -17,76 +17,83 @@
 # under the License.
 """
-Handles all API requests relating to instances (guest vms).
+Handles all requests relating to instances (guest vms).
 """
 import datetime
-import logging
+import re
 import time
from nova import db
from nova import exception
from nova import flags
+from nova import log as logging
+from nova import network
from nova import quota
from nova import rpc
from nova import utils
+from nova import volume
from nova.compute import instance_types
from nova.db import base
FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.compute.api')
-def generate_default_hostname(internal_id):
+def generate_default_hostname(instance_id):
"""Default function to generate a hostname given an instance reference."""
- return str(internal_id)
+ return str(instance_id)
-class ComputeAPI(base.Base):
+class API(base.Base):
"""API for interacting with the compute manager."""
- def __init__(self, network_manager=None, image_service=None, **kwargs):
- if not network_manager:
- network_manager = utils.import_object(FLAGS.network_manager)
- self.network_manager = network_manager
+ def __init__(self, image_service=None, network_api=None, volume_api=None,
+ **kwargs):
if not image_service:
image_service = utils.import_object(FLAGS.image_service)
self.image_service = image_service
- super(ComputeAPI, self).__init__(**kwargs)
+ if not network_api:
+ network_api = network.API()
+ self.network_api = network_api
+ if not volume_api:
+ volume_api = volume.API()
+ self.volume_api = volume_api
+ super(API, self).__init__(**kwargs)
def get_network_topic(self, context, instance_id):
try:
- instance = self.db.instance_get_by_internal_id(context,
- instance_id)
+ instance = self.get(context, instance_id)
except exception.NotFound as e:
- logging.warning("Instance %d was not found in get_network_topic",
- instance_id)
+ LOG.warning(_("Instance %d was not found in get_network_topic"),
+ instance_id)
raise e
host = instance['host']
if not host:
- raise exception.Error("Instance %d has no host" % instance_id)
+ raise exception.Error(_("Instance %d has no host") % instance_id)
topic = self.db.queue_get_for(context, FLAGS.compute_topic, host)
return rpc.call(context,
topic,
{"method": "get_network_topic", "args": {'fake': 1}})
- def create_instances(self, context, instance_type, image_id, min_count=1,
- max_count=1, kernel_id=None, ramdisk_id=None,
- display_name='', description='', key_name=None,
- key_data=None, security_group='default',
- availability_zone=None,
- user_data=None,
- generate_hostname=generate_default_hostname):
- """Create the number of instances requested if quote and
+ def create(self, context, instance_type,
+ image_id, kernel_id=None, ramdisk_id=None,
+ min_count=1, max_count=1,
+ display_name='', display_description='',
+ key_name=None, key_data=None, security_group='default',
+ availability_zone=None, user_data=None,
+ generate_hostname=generate_default_hostname):
+ """Create the number of instances requested if quota and
other arguments check out ok."""
- num_instances = quota.allowed_instances(context, max_count,
- instance_type)
+ type_data = instance_types.INSTANCE_TYPES[instance_type]
+ num_instances = quota.allowed_instances(context, max_count, type_data)
if num_instances < min_count:
- logging.warn("Quota exceeeded for %s, tried to run %s instances",
- context.project_id, min_count)
- raise quota.QuotaError("Instance quota exceeded. You can only "
- "run %s more instances of this type." %
+ LOG.warn(_("Quota exceeeded for %s, tried to run %s instances"),
+ context.project_id, min_count)
+ raise quota.QuotaError(_("Instance quota exceeded. You can only "
+ "run %s more instances of this type.") %
num_instances, "InstanceLimitExceeded")
is_vpn = image_id == FLAGS.vpn_image_id
@@ -96,12 +102,14 @@ class ComputeAPI(base.Base):
kernel_id = image.get('kernelId', None)
if ramdisk_id is None:
ramdisk_id = image.get('ramdiskId', None)
- #No kernel and ramdisk for raw images
+ # No kernel and ramdisk for raw images
if kernel_id == str(FLAGS.null_kernel):
kernel_id = None
ramdisk_id = None
- logging.debug("Creating a raw instance")
+ LOG.debug(_("Creating a raw instance"))
# Make sure we have access to kernel and ramdisk (if not raw)
+        LOG.debug(_("Using Kernel=%s, Ramdisk=%s"),
+                  kernel_id, ramdisk_id)
if kernel_id:
self.image_service.show(context, kernel_id)
if ramdisk_id:
@@ -124,7 +132,6 @@ class ComputeAPI(base.Base):
key_pair = db.key_pair_get(context, context.user_id, key_name)
key_data = key_pair['public_key']
- type_data = instance_types.INSTANCE_TYPES[instance_type]
base_options = {
'reservation_id': utils.generate_uid('r'),
'image_id': image_id,
@@ -139,22 +146,22 @@ class ComputeAPI(base.Base):
'vcpus': type_data['vcpus'],
'local_gb': type_data['local_gb'],
'display_name': display_name,
- 'display_description': description,
+ 'display_description': display_description,
'user_data': user_data or '',
'key_name': key_name,
'key_data': key_data,
+ 'locked': False,
'availability_zone': availability_zone}
elevated = context.elevated()
instances = []
- logging.debug(_("Going to run %s instances..."), num_instances)
+ LOG.debug(_("Going to run %s instances..."), num_instances)
for num in range(num_instances):
instance = dict(mac_address=utils.generate_mac(),
launch_index=num,
**base_options)
instance = self.db.instance_create(context, instance)
instance_id = instance['id']
- internal_id = instance['internal_id']
elevated = context.elevated()
if not security_groups:
@@ -165,20 +172,25 @@ class ComputeAPI(base.Base):
security_group_id)
# Set sane defaults if not specified
- updates = dict(hostname=generate_hostname(internal_id))
- if 'display_name' not in instance:
- updates['display_name'] = "Server %s" % internal_id
+ updates = dict(hostname=generate_hostname(instance_id))
+ if (not hasattr(instance, 'display_name')) or \
+ instance.display_name == None:
+ updates['display_name'] = "Server %s" % instance_id
- instance = self.update_instance(context, instance_id, **updates)
+ instance = self.update(context, instance_id, **updates)
instances.append(instance)
- logging.debug(_("Casting to scheduler for %s/%s's instance %s"),
+ LOG.debug(_("Casting to scheduler for %s/%s's instance %s"),
context.project_id, context.user_id, instance_id)
rpc.cast(context,
FLAGS.scheduler_topic,
{"method": "run_instance",
"args": {"topic": FLAGS.compute_topic,
- "instance_id": instance_id}})
+ "instance_id": instance_id,
+ "availability_zone": availability_zone}})
+
+ for group_id in security_groups:
+ self.trigger_security_group_members_refresh(elevated, group_id)
return instances
@@ -199,7 +211,61 @@ class ComputeAPI(base.Base):
'project_id': context.project_id}
db.security_group_create(context, values)
- def update_instance(self, context, instance_id, **kwargs):
+ def trigger_security_group_rules_refresh(self, context, security_group_id):
+ """Called when a rule is added to or removed from a security_group"""
+
+ security_group = self.db.security_group_get(context, security_group_id)
+
+ hosts = set()
+ for instance in security_group['instances']:
+ if instance['host'] is not None:
+ hosts.add(instance['host'])
+
+ for host in hosts:
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "refresh_security_group_rules",
+ "args": {"security_group_id": security_group.id}})
+
+ def trigger_security_group_members_refresh(self, context, group_id):
+ """Called when a security group gains a new or loses a member
+
+ Sends an update request to each compute node for whom this is
+ relevant."""
+
+ # First, we get the security group rules that reference this group as
+ # the grantee..
+ security_group_rules = \
+ self.db.security_group_rule_get_by_security_group_grantee(
+ context,
+ group_id)
+
+        # ..then we distill the security groups to which they belong..
+        security_group_ids = set()
+        for rule in security_group_rules:
+            security_group_ids.add(rule['parent_group_id'])
+
+        # ..then we find the instances that are members of these groups..
+        instances = []
+        for security_group_id in security_group_ids:
+            security_group = self.db.security_group_get(context,
+                                                        security_group_id)
+            instances.extend(security_group['instances'])
+        # ...then we find the hosts where they live...
+        hosts = set()
+        for instance in instances:
+            if instance['host']:
+                hosts.add(instance['host'])
+
+ # ...and finally we tell these nodes to refresh their view of this
+ # particular security group.
+ for host in hosts:
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "refresh_security_group_members",
+ "args": {"security_group_id": group_id}})
+
+ def update(self, context, instance_id, **kwargs):
"""Updates the instance in the datastore.
:param context: The security context
@@ -213,134 +279,223 @@ class ComputeAPI(base.Base):
"""
return self.db.instance_update(context, instance_id, kwargs)
- def delete_instance(self, context, instance_id):
- logging.debug("Going to try and terminate %d" % instance_id)
+ def delete(self, context, instance_id):
+ LOG.debug(_("Going to try and terminate %s"), instance_id)
try:
- instance = self.db.instance_get_by_internal_id(context,
- instance_id)
+ instance = self.get(context, instance_id)
except exception.NotFound as e:
- logging.warning(_("Instance %d was not found during terminate"),
- instance_id)
+ LOG.warning(_("Instance %d was not found during terminate"),
+ instance_id)
raise e
if (instance['state_description'] == 'terminating'):
- logging.warning(_("Instance %d is already being terminated"),
- instance_id)
+ LOG.warning(_("Instance %d is already being terminated"),
+ instance_id)
return
- self.update_instance(context,
- instance['id'],
- state_description='terminating',
- state=0,
- terminated_at=datetime.datetime.utcnow())
+ self.update(context,
+ instance['id'],
+ state_description='terminating',
+ state=0,
+ terminated_at=datetime.datetime.utcnow())
host = instance['host']
if host:
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "terminate_instance",
- "args": {"instance_id": instance['id']}})
+ "args": {"instance_id": instance_id}})
else:
- self.db.instance_destroy(context, instance['id'])
-
- def get_instances(self, context, project_id=None):
- """Get all instances, possibly filtered by project ID or
- user ID. If there is no filter and the context is an admin,
- it will retreive all instances in the system."""
+ self.db.instance_destroy(context, instance_id)
+
+ def get(self, context, instance_id):
+ """Get a single instance with the given ID."""
+ return self.db.instance_get_by_id(context, instance_id)
+
+ def get_all(self, context, project_id=None, reservation_id=None,
+ fixed_ip=None):
+ """Get all instances, possibly filtered by one of the
+ given parameters. If there is no filter and the context is
+ an admin, it will retreive all instances in the system."""
+ if reservation_id is not None:
+ return self.db.instance_get_all_by_reservation(context,
+ reservation_id)
+ if fixed_ip is not None:
+ return self.db.fixed_ip_get_instance(context, fixed_ip)
if project_id or not context.is_admin:
if not context.project:
return self.db.instance_get_all_by_user(context,
context.user_id)
if project_id is None:
project_id = context.project_id
- return self.db.instance_get_all_by_project(context, project_id)
+ return self.db.instance_get_all_by_project(context,
+ project_id)
return self.db.instance_get_all(context)
- def get_instance(self, context, instance_id):
- return self.db.instance_get_by_internal_id(context, instance_id)
-
def snapshot(self, context, instance_id, name):
"""Snapshot the given instance."""
- instance = self.db.instance_get_by_internal_id(context, instance_id)
+ instance = self.get(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "snapshot_instance",
- "args": {"instance_id": instance['id'], "name": name}})
+ "args": {"instance_id": instance_id, "name": name}})
def reboot(self, context, instance_id):
"""Reboot the given instance."""
- instance = self.db.instance_get_by_internal_id(context, instance_id)
+ instance = self.get(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "reboot_instance",
- "args": {"instance_id": instance['id']}})
+ "args": {"instance_id": instance_id}})
def pause(self, context, instance_id):
"""Pause the given instance."""
- instance = self.db.instance_get_by_internal_id(context, instance_id)
+ instance = self.get(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "pause_instance",
- "args": {"instance_id": instance['id']}})
+ "args": {"instance_id": instance_id}})
def unpause(self, context, instance_id):
"""Unpause the given instance."""
- instance = self.db.instance_get_by_internal_id(context, instance_id)
+ instance = self.get(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "unpause_instance",
- "args": {"instance_id": instance['id']}})
+ "args": {"instance_id": instance_id}})
def get_diagnostics(self, context, instance_id):
"""Retrieve diagnostics for the given instance."""
- instance = self.db.instance_get_by_internal_id(context, instance_id)
+ instance = self.get(context, instance_id)
host = instance["host"]
return rpc.call(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "get_diagnostics",
- "args": {"instance_id": instance["id"]}})
+ "args": {"instance_id": instance_id}})
def get_actions(self, context, instance_id):
"""Retrieve actions for the given instance."""
- instance = self.db.instance_get_by_internal_id(context, instance_id)
- return self.db.instance_get_actions(context, instance["id"])
+ return self.db.instance_get_actions(context, instance_id)
def suspend(self, context, instance_id):
"""suspend the instance with instance_id"""
- instance = self.db.instance_get_by_internal_id(context, instance_id)
+ instance = self.get(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "suspend_instance",
- "args": {"instance_id": instance['id']}})
+ "args": {"instance_id": instance_id}})
def resume(self, context, instance_id):
"""resume the instance with instance_id"""
- instance = self.db.instance_get_by_internal_id(context, instance_id)
+ instance = self.get(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "resume_instance",
- "args": {"instance_id": instance['id']}})
+ "args": {"instance_id": instance_id}})
def rescue(self, context, instance_id):
"""Rescue the given instance."""
- instance = self.db.instance_get_by_internal_id(context, instance_id)
+ instance = self.get(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "rescue_instance",
- "args": {"instance_id": instance['id']}})
+ "args": {"instance_id": instance_id}})
def unrescue(self, context, instance_id):
"""Unrescue the given instance."""
- instance = self.db.instance_get_by_internal_id(context, instance_id)
+ instance = self.get(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "unrescue_instance",
"args": {"instance_id": instance['id']}})
+
+ def get_ajax_console(self, context, instance_id):
+ """Get a url to an AJAX Console"""
+
+ instance = self.get(context, instance_id)
+
+ output = rpc.call(context,
+ '%s.%s' % (FLAGS.compute_topic,
+ instance['host']),
+ {'method': 'get_ajax_console',
+ 'args': {'instance_id': instance['id']}})
+
+ rpc.cast(context, '%s' % FLAGS.ajax_console_proxy_topic,
+ {'method': 'authorize_ajax_console',
+ 'args': {'token': output['token'], 'host': output['host'],
+ 'port': output['port']}})
+
+ return {'url': '%s?token=%s' % (FLAGS.ajax_console_proxy_url,
+ output['token'])}
+
+ def lock(self, context, instance_id):
+ """
+ lock the instance with instance_id
+
+ """
+        instance = self.get(context, instance_id)
+ host = instance['host']
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "lock_instance",
+ "args": {"instance_id": instance['id']}})
+
+ def unlock(self, context, instance_id):
+ """
+ unlock the instance with instance_id
+
+ """
+        instance = self.get(context, instance_id)
+ host = instance['host']
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "unlock_instance",
+ "args": {"instance_id": instance['id']}})
+
+ def get_lock(self, context, instance_id):
+ """
+ return the boolean state of (instance with instance_id)'s lock
+
+ """
+        instance = self.get(context, instance_id)
+ return instance['locked']
+
+ def attach_volume(self, context, instance_id, volume_id, device):
+ if not re.match("^/dev/[a-z]d[a-z]+$", device):
+ raise exception.ApiError(_("Invalid device specified: %s. "
+ "Example device: /dev/vdb") % device)
+ self.volume_api.check_attach(context, volume_id)
+ instance = self.get(context, instance_id)
+ host = instance['host']
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "attach_volume",
+ "args": {"volume_id": volume_id,
+ "instance_id": instance_id,
+ "mountpoint": device}})
+
+ def detach_volume(self, context, volume_id):
+ instance = self.db.volume_get_instance(context.elevated(), volume_id)
+ if not instance:
+ raise exception.ApiError(_("Volume isn't attached to anything!"))
+ self.volume_api.check_detach(context, volume_id)
+ host = instance['host']
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "detach_volume",
+ "args": {"instance_id": instance['id'],
+ "volume_id": volume_id}})
+ return instance
+
+ def associate_floating_ip(self, context, instance_id, address):
+ instance = self.get(context, instance_id)
+ self.network_api.associate_floating_ip(context, address,
+ instance['fixed_ip'])
diff --git a/nova/compute/disk.py b/nova/compute/disk.py
index 814a258cd..741499294 100644
--- a/nova/compute/disk.py
+++ b/nova/compute/disk.py
@@ -22,14 +22,15 @@ Includes injection of SSH PGP keys into authorized_keys file.
"""
-import logging
import os
import tempfile
from nova import exception
from nova import flags
+from nova import log as logging
+LOG = logging.getLogger('nova.compute.disk')
FLAGS = flags.FLAGS
flags.DEFINE_integer('minimum_root_size', 1024 * 1024 * 1024 * 10,
'minimum size in bytes of root partition')
@@ -67,12 +68,12 @@ def partition(infile, outfile, local_bytes=0, resize=True,
execute('resize2fs %s' % infile)
file_size = FLAGS.minimum_root_size
elif file_size % sector_size != 0:
- logging.warn(_("Input partition size not evenly divisible by"
- " sector size: %d / %d"), file_size, sector_size)
+ LOG.warn(_("Input partition size not evenly divisible by"
+ " sector size: %d / %d"), file_size, sector_size)
primary_sectors = file_size / sector_size
if local_bytes % sector_size != 0:
- logging.warn(_("Bytes for local storage not evenly divisible"
- " by sector size: %d / %d"), local_bytes, sector_size)
+ LOG.warn(_("Bytes for local storage not evenly divisible"
+ " by sector size: %d / %d"), local_bytes, sector_size)
local_sectors = local_bytes / sector_size
mbr_last = 62 # a
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 6e8f34347..6b2fc4adb 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -36,9 +36,12 @@ terminating it.
import datetime
import logging
+import socket
+import functools
from nova import exception
from nova import flags
+from nova import log as logging
from nova import manager
from nova import rpc
from nova import utils
@@ -51,6 +54,45 @@ flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection',
'Driver to use for controlling virtualization')
flags.DEFINE_string('stub_network', False,
'Stub network related code')
+flags.DEFINE_string('console_host', socket.gethostname(),
+ 'Console proxy host to use to connect to instances on'
+ 'this host.')
+
+LOG = logging.getLogger('nova.compute.manager')
+
+
+def checks_instance_lock(function):
+ """
+ decorator used for preventing action against locked instances
+ unless, of course, you happen to be admin
+
+ """
+
+ @functools.wraps(function)
+ def decorated_function(self, context, instance_id, *args, **kwargs):
+
+ LOG.info(_("check_instance_lock: decorating: |%s|"), function,
+ context=context)
+ LOG.info(_("check_instance_lock: arguments: |%s| |%s| |%s|"),
+ self, context, instance_id, context=context)
+ locked = self.get_lock(context, instance_id)
+ admin = context.is_admin
+ LOG.info(_("check_instance_lock: locked: |%s|"), locked,
+ context=context)
+ LOG.info(_("check_instance_lock: admin: |%s|"), admin,
+ context=context)
+
+ # if admin or unlocked call function otherwise log error
+ if admin or not locked:
+ LOG.info(_("check_instance_lock: executing: |%s|"), function,
+ context=context)
+            return function(self, context, instance_id, *args, **kwargs)
+ else:
+ LOG.error(_("check_instance_lock: not executing |%s|"),
+ function, context=context)
+ return False
+
+ return decorated_function
class ComputeManager(manager.Manager):
@@ -85,6 +127,15 @@ class ComputeManager(manager.Manager):
state = power_state.NOSTATE
self.db.instance_set_state(context, instance_id, state)
+ def get_console_topic(self, context, **_kwargs):
+ """Retrieves the console host for a project on this host
+ Currently this is just set in the flags for each compute
+ host."""
+ #TODO(mdragon): perhaps make this variable by console_type?
+ return self.db.queue_get_for(context,
+ FLAGS.console_topic,
+ FLAGS.console_host)
+
def get_network_topic(self, context, **_kwargs):
"""Retrieves the network host for a project on this host"""
# TODO(vish): This method should be memoized. This will make
@@ -99,10 +150,20 @@ class ComputeManager(manager.Manager):
FLAGS.network_topic,
host)
+ def get_console_pool_info(self, context, console_type):
+ return self.driver.get_console_pool_info(console_type)
+
+ @exception.wrap_exception
+ def refresh_security_group_rules(self, context,
+ security_group_id, **_kwargs):
+ """This call passes straight through to the virtualization driver."""
+ return self.driver.refresh_security_group_rules(security_group_id)
+
@exception.wrap_exception
- def refresh_security_group(self, context, security_group_id, **_kwargs):
- """This call passes stright through to the virtualization driver."""
- self.driver.refresh_security_group(security_group_id)
+ def refresh_security_group_members(self, context,
+ security_group_id, **_kwargs):
+ """This call passes straight through to the virtualization driver."""
+ return self.driver.refresh_security_group_members(security_group_id)
@exception.wrap_exception
def run_instance(self, context, instance_id, **_kwargs):
@@ -111,7 +172,8 @@ class ComputeManager(manager.Manager):
instance_ref = self.db.instance_get(context, instance_id)
if instance_ref['name'] in self.driver.list_instances():
raise exception.Error(_("Instance has already been created"))
- logging.debug(_("instance %s: starting..."), instance_id)
+ LOG.audit(_("instance %s: starting..."), instance_id,
+ context=context)
self.db.instance_update(context,
instance_id,
{'host': self.host})
@@ -149,8 +211,8 @@ class ComputeManager(manager.Manager):
instance_id,
{'launched_at': now})
except Exception: # pylint: disable-msg=W0702
- logging.exception(_("instance %s: Failed to spawn"),
- instance_ref['name'])
+ LOG.exception(_("instance %s: Failed to spawn"), instance_id,
+ context=context)
self.db.instance_set_state(context,
instance_id,
power_state.SHUTDOWN)
@@ -158,17 +220,19 @@ class ComputeManager(manager.Manager):
self._update_state(context, instance_id)
@exception.wrap_exception
+ @checks_instance_lock
def terminate_instance(self, context, instance_id):
"""Terminate an instance on this machine."""
context = context.elevated()
-
instance_ref = self.db.instance_get(context, instance_id)
+ LOG.audit(_("Terminating instance %s"), instance_id, context=context)
if not FLAGS.stub_network:
address = self.db.instance_get_floating_address(context,
instance_ref['id'])
if address:
- logging.debug(_("Disassociating address %s") % address)
+ LOG.debug(_("Disassociating address %s"), address,
+ context=context)
# NOTE(vish): Right now we don't really care if the ip is
# disassociated. We may need to worry about
# checking this later.
@@ -180,15 +244,14 @@ class ComputeManager(manager.Manager):
address = self.db.instance_get_fixed_address(context,
instance_ref['id'])
if address:
- logging.debug(_("Deallocating address %s") % address)
+ LOG.debug(_("Deallocating address %s"), address,
+ context=context)
# NOTE(vish): Currently, nothing needs to be done on the
# network node until release. If this changes,
# we will need to cast here.
self.network_manager.deallocate_fixed_ip(context.elevated(),
address)
- logging.debug(_("instance %s: terminating"), instance_id)
-
volumes = instance_ref.get('volumes', []) or []
for volume in volumes:
self.detach_volume(context, instance_id, volume['id'])
@@ -202,20 +265,22 @@ class ComputeManager(manager.Manager):
self.db.instance_destroy(context, instance_id)
@exception.wrap_exception
+ @checks_instance_lock
def reboot_instance(self, context, instance_id):
"""Reboot an instance on this server."""
context = context.elevated()
self._update_state(context, instance_id)
instance_ref = self.db.instance_get(context, instance_id)
+ LOG.audit(_("Rebooting instance %s"), instance_id, context=context)
if instance_ref['state'] != power_state.RUNNING:
- logging.warn(_('trying to reboot a non-running '
- 'instance: %s (state: %s excepted: %s)'),
- instance_ref['internal_id'],
- instance_ref['state'],
- power_state.RUNNING)
+ LOG.warn(_('trying to reboot a non-running '
+ 'instance: %s (state: %s expected: %s)'),
+ instance_id,
+ instance_ref['state'],
+ power_state.RUNNING,
+ context=context)
- logging.debug(_('instance %s: rebooting'), instance_ref['name'])
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
@@ -235,24 +300,22 @@ class ComputeManager(manager.Manager):
# potentially?
self._update_state(context, instance_id)
- logging.debug(_('instance %s: snapshotting'), instance_ref['name'])
+ LOG.audit(_('instance %s: snapshotting'), instance_id,
+ context=context)
if instance_ref['state'] != power_state.RUNNING:
- logging.warn(_('trying to snapshot a non-running '
- 'instance: %s (state: %s excepted: %s)'),
- instance_ref['internal_id'],
- instance_ref['state'],
- power_state.RUNNING)
+ LOG.warn(_('trying to snapshot a non-running '
+ 'instance: %s (state: %s expected: %s)'),
+ instance_id, instance_ref['state'], power_state.RUNNING)
self.driver.snapshot(instance_ref, name)
@exception.wrap_exception
+ @checks_instance_lock
def rescue_instance(self, context, instance_id):
"""Rescue an instance on this server."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
-
- logging.debug(_('instance %s: rescuing'),
- instance_ref['internal_id'])
+ LOG.audit(_('instance %s: rescuing'), instance_id, context=context)
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
@@ -262,13 +325,12 @@ class ComputeManager(manager.Manager):
self._update_state(context, instance_id)
@exception.wrap_exception
+ @checks_instance_lock
def unrescue_instance(self, context, instance_id):
"""Rescue an instance on this server."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
-
- logging.debug(_('instance %s: unrescuing'),
- instance_ref['internal_id'])
+ LOG.audit(_('instance %s: unrescuing'), instance_id, context=context)
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
@@ -282,13 +344,12 @@ class ComputeManager(manager.Manager):
self._update_state(context, instance_id)
@exception.wrap_exception
+ @checks_instance_lock
def pause_instance(self, context, instance_id):
"""Pause an instance on this server."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
-
- logging.debug('instance %s: pausing',
- instance_ref['internal_id'])
+ LOG.audit(_('instance %s: pausing'), instance_id, context=context)
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
@@ -300,13 +361,12 @@ class ComputeManager(manager.Manager):
result))
@exception.wrap_exception
+ @checks_instance_lock
def unpause_instance(self, context, instance_id):
"""Unpause a paused instance on this server."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
-
- logging.debug('instance %s: unpausing',
- instance_ref['internal_id'])
+ LOG.audit(_('instance %s: unpausing'), instance_id, context=context)
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
@@ -323,18 +383,20 @@ class ComputeManager(manager.Manager):
instance_ref = self.db.instance_get(context, instance_id)
if instance_ref["state"] == power_state.RUNNING:
- logging.debug(_("instance %s: retrieving diagnostics"),
- instance_ref["internal_id"])
+ LOG.audit(_("instance %s: retrieving diagnostics"), instance_id,
+ context=context)
return self.driver.get_diagnostics(instance_ref)
@exception.wrap_exception
+ @checks_instance_lock
def suspend_instance(self, context, instance_id):
- """suspend the instance with instance_id"""
+ """
+ suspend the instance with instance_id
+
+ """
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
-
- logging.debug(_('instance %s: suspending'),
- instance_ref['internal_id'])
+ LOG.audit(_('instance %s: suspending'), instance_id, context=context)
self.db.instance_set_state(context, instance_id,
power_state.NOSTATE,
'suspending')
@@ -345,12 +407,15 @@ class ComputeManager(manager.Manager):
result))
@exception.wrap_exception
+ @checks_instance_lock
def resume_instance(self, context, instance_id):
- """resume the suspended instance with instance_id"""
+ """
+ resume the suspended instance with instance_id
+
+ """
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
-
- logging.debug(_('instance %s: resuming'), instance_ref['internal_id'])
+ LOG.audit(_('instance %s: resuming'), instance_id, context=context)
self.db.instance_set_state(context, instance_id,
power_state.NOSTATE,
'resuming')
@@ -361,21 +426,66 @@ class ComputeManager(manager.Manager):
result))
@exception.wrap_exception
+ def lock_instance(self, context, instance_id):
+ """
+ lock the instance with instance_id
+
+ """
+ context = context.elevated()
+ instance_ref = self.db.instance_get(context, instance_id)
+
+ LOG.debug(_('instance %s: locking'), instance_id, context=context)
+ self.db.instance_update(context, instance_id, {'locked': True})
+
+ @exception.wrap_exception
+ def unlock_instance(self, context, instance_id):
+ """
+ unlock the instance with instance_id
+
+ """
+ context = context.elevated()
+ instance_ref = self.db.instance_get(context, instance_id)
+
+ LOG.debug(_('instance %s: unlocking'), instance_id, context=context)
+ self.db.instance_update(context, instance_id, {'locked': False})
+
+ @exception.wrap_exception
+ def get_lock(self, context, instance_id):
+ """
+ return the boolean state of (instance with instance_id)'s lock
+
+ """
+ context = context.elevated()
+ LOG.debug(_('instance %s: getting locked state'), instance_id,
+ context=context)
+ instance_ref = self.db.instance_get(context, instance_id)
+ return instance_ref['locked']
+
+ @exception.wrap_exception
def get_console_output(self, context, instance_id):
"""Send the console output for an instance."""
context = context.elevated()
- logging.debug(_("instance %s: getting console output"), instance_id)
instance_ref = self.db.instance_get(context, instance_id)
-
+ LOG.audit(_("Get console output for instance %s"), instance_id,
+ context=context)
return self.driver.get_console_output(instance_ref)
@exception.wrap_exception
+ def get_ajax_console(self, context, instance_id):
+ """Return connection information for an ajax console"""
+ context = context.elevated()
+ LOG.debug(_("instance %s: getting ajax console"), instance_id)
+ instance_ref = self.db.instance_get(context, instance_id)
+
+ return self.driver.get_ajax_console(instance_ref)
+
+ @checks_instance_lock
def attach_volume(self, context, instance_id, volume_id, mountpoint):
"""Attach a volume to an instance."""
context = context.elevated()
- logging.debug(_("instance %s: attaching volume %s to %s"), instance_id,
- volume_id, mountpoint)
instance_ref = self.db.instance_get(context, instance_id)
+ LOG.audit(_("instance %s: attaching volume %s to %s"), instance_id,
+ volume_id, mountpoint, context=context)
dev_path = self.volume_manager.setup_compute_volume(context,
volume_id)
try:
@@ -390,8 +500,8 @@ class ComputeManager(manager.Manager):
# NOTE(vish): The inline callback eats the exception info so we
# log the traceback here and reraise the same
# ecxception below.
- logging.exception(_("instance %s: attach failed %s, removing"),
- instance_id, mountpoint)
+ LOG.exception(_("instance %s: attach failed %s, removing"),
+ instance_id, mountpoint, context=context)
self.volume_manager.remove_compute_volume(context,
volume_id)
raise exc
@@ -399,17 +509,18 @@ class ComputeManager(manager.Manager):
return True
@exception.wrap_exception
+ @checks_instance_lock
def detach_volume(self, context, instance_id, volume_id):
"""Detach a volume from an instance."""
context = context.elevated()
- logging.debug(_("instance %s: detaching volume %s"),
- instance_id,
- volume_id)
instance_ref = self.db.instance_get(context, instance_id)
volume_ref = self.db.volume_get(context, volume_id)
+ LOG.audit(_("Detach volume %s from mountpoint %s on instance %s"),
+ volume_id, volume_ref['mountpoint'], instance_id,
+ context=context)
if instance_ref['name'] not in self.driver.list_instances():
- logging.warn(_("Detaching volume from unknown instance %s"),
- instance_ref['name'])
+ LOG.warn(_("Detaching volume from unknown instance %s"),
+ instance_id, context=context)
else:
self.driver.detach_volume(instance_ref['name'],
volume_ref['mountpoint'])
diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py
index 60c347a5e..14d0e8ca1 100644
--- a/nova/compute/monitor.py
+++ b/nova/compute/monitor.py
@@ -25,19 +25,17 @@ Instance Monitoring:
"""
import datetime
-import logging
import os
-import sys
import time
import boto
import boto.s3
import rrdtool
-from twisted.internet import defer
from twisted.internet import task
from twisted.application import service
from nova import flags
+from nova import log as logging
from nova.virt import connection as virt_connection
@@ -91,6 +89,9 @@ RRD_VALUES = {
utcnow = datetime.datetime.utcnow
+LOG = logging.getLogger('nova.compute.monitor')
+
+
def update_rrd(instance, name, data):
"""
Updates the specified RRD file.
@@ -255,20 +256,20 @@ class Instance(object):
Updates the instances statistics and stores the resulting graphs
in the internal object store on the cloud controller.
"""
- logging.debug(_('updating %s...'), self.instance_id)
+ LOG.debug(_('updating %s...'), self.instance_id)
try:
data = self.fetch_cpu_stats()
if data != None:
- logging.debug('CPU: %s', data)
+ LOG.debug('CPU: %s', data)
update_rrd(self, 'cpu', data)
data = self.fetch_net_stats()
- logging.debug('NET: %s', data)
+ LOG.debug('NET: %s', data)
update_rrd(self, 'net', data)
data = self.fetch_disk_stats()
- logging.debug('DISK: %s', data)
+ LOG.debug('DISK: %s', data)
update_rrd(self, 'disk', data)
# TODO(devcamcar): Turn these into pool.ProcessPool.execute() calls
@@ -285,7 +286,7 @@ class Instance(object):
graph_disk(self, '1w')
graph_disk(self, '1m')
except Exception:
- logging.exception(_('unexpected error during update'))
+ LOG.exception(_('unexpected error during update'))
self.last_updated = utcnow()
@@ -309,7 +310,7 @@ class Instance(object):
self.cputime = float(info['cpu_time'])
self.cputime_last_updated = utcnow()
- logging.debug('CPU: %d', self.cputime)
+ LOG.debug('CPU: %d', self.cputime)
# Skip calculation on first pass. Need delta to get a meaningful value.
if cputime_last_updated == None:
@@ -319,17 +320,17 @@ class Instance(object):
d = self.cputime_last_updated - cputime_last_updated
t = d.days * 86400 + d.seconds
- logging.debug('t = %d', t)
+ LOG.debug('t = %d', t)
# Calculate change over time in number of nanoseconds of CPU time used.
cputime_delta = self.cputime - cputime_last
- logging.debug('cputime_delta = %s', cputime_delta)
+ LOG.debug('cputime_delta = %s', cputime_delta)
# Get the number of virtual cpus in this domain.
vcpus = int(info['num_cpu'])
- logging.debug('vcpus = %d', vcpus)
+ LOG.debug('vcpus = %d', vcpus)
# Calculate CPU % used and cap at 100.
return min(cputime_delta / (t * vcpus * 1.0e9) * 100, 100)
@@ -351,8 +352,8 @@ class Instance(object):
rd += rd_bytes
wr += wr_bytes
except TypeError:
- logging.error(_('Cannot get blockstats for "%s" on "%s"'),
- disk, self.instance_id)
+ LOG.error(_('Cannot get blockstats for "%s" on "%s"'),
+ disk, self.instance_id)
raise
return '%d:%d' % (rd, wr)
@@ -373,8 +374,8 @@ class Instance(object):
rx += stats[0]
tx += stats[4]
except TypeError:
- logging.error(_('Cannot get ifstats for "%s" on "%s"'),
- interface, self.instance_id)
+ LOG.error(_('Cannot get ifstats for "%s" on "%s"'),
+ interface, self.instance_id)
raise
return '%d:%d' % (rx, tx)
@@ -408,7 +409,7 @@ class InstanceMonitor(object, service.Service):
try:
conn = virt_connection.get_connection(read_only=True)
except Exception, exn:
- logging.exception(_('unexpected exception getting connection'))
+ LOG.exception(_('unexpected exception getting connection'))
time.sleep(FLAGS.monitoring_instances_delay)
return
@@ -416,14 +417,14 @@ class InstanceMonitor(object, service.Service):
try:
self.updateInstances_(conn, domain_ids)
except Exception, exn:
- logging.exception('updateInstances_')
+ LOG.exception('updateInstances_')
def updateInstances_(self, conn, domain_ids):
for domain_id in domain_ids:
if not domain_id in self._instances:
instance = Instance(conn, domain_id)
self._instances[domain_id] = instance
- logging.debug(_('Found instance: %s'), domain_id)
+ LOG.debug(_('Found instance: %s'), domain_id)
for key in self._instances.keys():
instance = self._instances[key]
diff --git a/nova/console/__init__.py b/nova/console/__init__.py
new file mode 100644
index 000000000..dfc72cd61
--- /dev/null
+++ b/nova/console/__init__.py
@@ -0,0 +1,13 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+"""
+:mod:`nova.console` -- Console Proxy to set up VM console access (i.e. with xvp)
+================================================================================
+
+.. automodule:: nova.console
+ :platform: Unix
+ :synopsis: Wrapper around console proxies such as xvp to set up
+ multitenant VM console access
+.. moduleauthor:: Monsyne Dragon <mdragon@rackspace.com>
+"""
+from nova.console.api import API
diff --git a/nova/console/api.py b/nova/console/api.py
new file mode 100644
index 000000000..3850d2c44
--- /dev/null
+++ b/nova/console/api.py
@@ -0,0 +1,75 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Handles ConsoleProxy API requests
+"""
+
+from nova import exception
+from nova.db import base
+
+
+from nova import flags
+from nova import rpc
+
+
+FLAGS = flags.FLAGS
+
+
+class API(base.Base):
+ """API for spinning up or down console proxy connections"""
+
+ def __init__(self, **kwargs):
+ super(API, self).__init__(**kwargs)
+
+ def get_consoles(self, context, instance_id):
+ return self.db.console_get_all_by_instance(context, instance_id)
+
+ def get_console(self, context, instance_id, console_id):
+ return self.db.console_get(context, console_id, instance_id)
+
+ def delete_console(self, context, instance_id, console_id):
+ console = self.db.console_get(context,
+ console_id,
+ instance_id)
+ pool = console['pool']
+ rpc.cast(context,
+ self.db.queue_get_for(context,
+ FLAGS.console_topic,
+ pool['host']),
+ {"method": "remove_console",
+ "args": {"console_id": console['id']}})
+
+ def create_console(self, context, instance_id):
+ instance = self.db.instance_get(context, instance_id)
+ #NOTE(mdragon): If we wanted to return the console info
+ # here, we would need to do a call.
+ # They can just do an index later to fetch
+ # console info. I am not sure which is better
+ # here.
+ rpc.cast(context,
+ self._get_console_topic(context, instance['host']),
+ {"method": "add_console",
+ "args": {"instance_id": instance_id}})
+
+ def _get_console_topic(self, context, instance_host):
+ topic = self.db.queue_get_for(context,
+ FLAGS.compute_topic,
+ instance_host)
+ return rpc.call(context,
+ topic,
+ {"method": "get_console_topic", "args": {'fake': 1}})
diff --git a/nova/console/fake.py b/nova/console/fake.py
new file mode 100644
index 000000000..7a90d5221
--- /dev/null
+++ b/nova/console/fake.py
@@ -0,0 +1,58 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Fake ConsoleProxy driver for tests.
+"""
+
+from nova import exception
+
+
+class FakeConsoleProxy(object):
+ """Fake ConsoleProxy driver."""
+
+ @property
+ def console_type(self):
+ return "fake"
+
+ def setup_console(self, context, console):
+ """Sets up actual proxies"""
+ pass
+
+ def teardown_console(self, context, console):
+ """Tears down actual proxies"""
+ pass
+
+ def init_host(self):
+ """Start up any config'ed consoles on start"""
+ pass
+
+ def generate_password(self, length=8):
+ """Returns random console password"""
+ return "fakepass"
+
+ def get_port(self, context):
+ """get available port for consoles that need one"""
+ return 5999
+
+ def fix_pool_password(self, password):
+ """Trim password to length, and any other massaging"""
+ return password
+
+ def fix_console_password(self, password):
+ """Trim password to length, and any other massaging"""
+ return password
diff --git a/nova/console/manager.py b/nova/console/manager.py
new file mode 100644
index 000000000..c55ca8e8f
--- /dev/null
+++ b/nova/console/manager.py
@@ -0,0 +1,127 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Console Proxy Service
+"""
+
+import functools
+import logging
+import socket
+
+from nova import exception
+from nova import flags
+from nova import manager
+from nova import rpc
+from nova import utils
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('console_driver',
+ 'nova.console.xvp.XVPConsoleProxy',
+ 'Driver to use for the console proxy')
+flags.DEFINE_boolean('stub_compute', False,
+ 'Stub calls to compute worker for tests')
+flags.DEFINE_string('console_public_hostname',
+ socket.gethostname(),
+ 'Publicly visible name for this console host')
+
+
+class ConsoleProxyManager(manager.Manager):
+
+ """ Sets up and tears down any proxy connections needed for accessing
+ instance consoles securely"""
+
+ def __init__(self, console_driver=None, *args, **kwargs):
+ if not console_driver:
+ console_driver = FLAGS.console_driver
+ self.driver = utils.import_object(console_driver)
+ super(ConsoleProxyManager, self).__init__(*args, **kwargs)
+ self.driver.host = self.host
+
+ def init_host(self):
+ self.driver.init_host()
+
+ @exception.wrap_exception
+ def add_console(self, context, instance_id, password=None,
+ port=None, **kwargs):
+ instance = self.db.instance_get(context, instance_id)
+ host = instance['host']
+ name = instance['name']
+ pool = self.get_pool_for_instance_host(context, host)
+ try:
+ console = self.db.console_get_by_pool_instance(context,
+ pool['id'],
+ instance_id)
+ except exception.NotFound:
+ logging.debug("Adding console")
+ if not password:
+ password = self.driver.generate_password()
+ if not port:
+ port = self.driver.get_port(context)
+ console_data = {'instance_name': name,
+ 'instance_id': instance_id,
+ 'password': password,
+ 'pool_id': pool['id']}
+ if port:
+ console_data['port'] = port
+ console = self.db.console_create(context, console_data)
+ self.driver.setup_console(context, console)
+ return console['id']
+
+ @exception.wrap_exception
+ def remove_console(self, context, console_id, **_kwargs):
+ try:
+ console = self.db.console_get(context, console_id)
+ except exception.NotFound:
+ logging.debug(_('Tried to remove non-existent console '
+ '%(console_id)s.') %
+ {'console_id': console_id})
+ return
+ self.db.console_delete(context, console_id)
+ self.driver.teardown_console(context, console)
+
+ def get_pool_for_instance_host(self, context, instance_host):
+ context = context.elevated()
+ console_type = self.driver.console_type
+ try:
+ pool = self.db.console_pool_get_by_host_type(context,
+ instance_host,
+ self.host,
+ console_type)
+ except exception.NotFound:
+ #NOTE(mdragon): Right now, the only place this info exists is the
+ # compute worker's flagfile, at least for
+ # xenserver. Thus we need to ask.
+ if FLAGS.stub_compute:
+ pool_info = {'address': '127.0.0.1',
+ 'username': 'test',
+ 'password': '1234pass'}
+ else:
+ pool_info = rpc.call(context,
+ self.db.queue_get_for(context,
+ FLAGS.compute_topic,
+ instance_host),
+ {"method": "get_console_pool_info",
+ "args": {"console_type": console_type}})
+ pool_info['password'] = self.driver.fix_pool_password(
+ pool_info['password'])
+ pool_info['host'] = self.host
+ pool_info['public_hostname'] = FLAGS.console_public_hostname
+ pool_info['console_type'] = self.driver.console_type
+ pool_info['compute_host'] = instance_host
+ pool = self.db.console_pool_create(context, pool_info)
+ return pool
diff --git a/nova/console/xvp.conf.template b/nova/console/xvp.conf.template
new file mode 100644
index 000000000..695ddbe96
--- /dev/null
+++ b/nova/console/xvp.conf.template
@@ -0,0 +1,16 @@
+# One time password use with time window
+OTP ALLOW IPCHECK HTTP 60
+#if $multiplex_port
+MULTIPLEX $multiplex_port
+#end if
+
+#for $pool in $pools
+POOL $pool.address
+ DOMAIN $pool.address
+ MANAGER root $pool.password
+ HOST $pool.address
+ VM - dummy 0123456789ABCDEF
+ #for $console in $pool.consoles
+ VM #if $multiplex_port then '-' else $console.port # $console.instance_name $pass_encode($console.password)
+ #end for
+#end for
diff --git a/nova/console/xvp.py b/nova/console/xvp.py
new file mode 100644
index 000000000..2a76223da
--- /dev/null
+++ b/nova/console/xvp.py
@@ -0,0 +1,194 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+XVP (Xenserver VNC Proxy) driver.
+"""
+
+import fcntl
+import logging
+import os
+import signal
+import subprocess
+
+from Cheetah.Template import Template
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import utils
+
+flags.DEFINE_string('console_xvp_conf_template',
+ utils.abspath('console/xvp.conf.template'),
+ 'XVP conf template')
+flags.DEFINE_string('console_xvp_conf',
+ '/etc/xvp.conf',
+ 'generated XVP conf file')
+flags.DEFINE_string('console_xvp_pid',
+ '/var/run/xvp.pid',
+ 'XVP master process pid file')
+flags.DEFINE_string('console_xvp_log',
+ '/var/log/xvp.log',
+ 'XVP log file')
+flags.DEFINE_integer('console_xvp_multiplex_port',
+ 5900,
+ "port for XVP to multiplex VNC connections on")
+FLAGS = flags.FLAGS
+
+
+class XVPConsoleProxy(object):
+ """Sets up XVP config, and manages xvp daemon"""
+
+ def __init__(self):
+ self.xvpconf_template = open(FLAGS.console_xvp_conf_template).read()
+ self.host = FLAGS.host # default, set by manager.
+ super(XVPConsoleProxy, self).__init__()
+
+ @property
+ def console_type(self):
+ return "vnc+xvp"
+
+ def get_port(self, context):
+ """get available port for consoles that need one"""
+ #TODO(mdragon): implement port selection for non multiplex ports,
+ # we are not using that, but someone else may want
+ # it.
+ return FLAGS.console_xvp_multiplex_port
+
+ def setup_console(self, context, console):
+ """Sets up actual proxies"""
+ self._rebuild_xvp_conf(context.elevated())
+
+ def teardown_console(self, context, console):
+ """Tears down actual proxies"""
+ self._rebuild_xvp_conf(context.elevated())
+
+ def init_host(self):
+ """Start up any config'ed consoles on start"""
+ ctxt = context.get_admin_context()
+ self._rebuild_xvp_conf(ctxt)
+
+ def fix_pool_password(self, password):
+ """Trim password to length, and encode"""
+ return self._xvp_encrypt(password, is_pool_password=True)
+
+ def fix_console_password(self, password):
+ """Trim password to length, and encode"""
+ return self._xvp_encrypt(password)
+
+ def generate_password(self, length=8):
+ """Returns random console password"""
+ return os.urandom(length * 2).encode('base64')[:length]
+
+ def _rebuild_xvp_conf(self, context):
+ logging.debug("Rebuilding xvp conf")
+ pools = [pool for pool in
+ db.console_pool_get_all_by_host_type(context, self.host,
+ self.console_type)
+ if pool['consoles']]
+ if not pools:
+ logging.debug("No console pools!")
+ self._xvp_stop()
+ return
+ conf_data = {'multiplex_port': FLAGS.console_xvp_multiplex_port,
+ 'pools': pools,
+ 'pass_encode': self.fix_console_password}
+ config = str(Template(self.xvpconf_template, searchList=[conf_data]))
+ self._write_conf(config)
+ self._xvp_restart()
+
+ def _write_conf(self, config):
+ logging.debug('Re-wrote %s' % FLAGS.console_xvp_conf)
+ with open(FLAGS.console_xvp_conf, 'w') as cfile:
+ cfile.write(config)
+
+ def _xvp_stop(self):
+ logging.debug("Stopping xvp")
+ pid = self._xvp_pid()
+ if not pid:
+ return
+ try:
+ os.kill(pid, signal.SIGTERM)
+ except OSError:
+ #if it's already not running, no problem.
+ pass
+
+ def _xvp_start(self):
+ if self._xvp_check_running():
+ return
+ logging.debug("Starting xvp")
+ try:
+ utils.execute('xvp -p %s -c %s -l %s' %
+ (FLAGS.console_xvp_pid,
+ FLAGS.console_xvp_conf,
+ FLAGS.console_xvp_log))
+ except exception.ProcessExecutionError, err:
+ logging.error("Error starting xvp: %s" % err)
+
+ def _xvp_restart(self):
+ logging.debug("Restarting xvp")
+ if not self._xvp_check_running():
+ logging.debug("xvp not running...")
+ self._xvp_start()
+ else:
+ pid = self._xvp_pid()
+ os.kill(pid, signal.SIGUSR1)
+
+ def _xvp_pid(self):
+ try:
+ with open(FLAGS.console_xvp_pid, 'r') as pidfile:
+ pid = int(pidfile.read())
+ except IOError:
+ return None
+ except ValueError:
+ return None
+ return pid
+
+ def _xvp_check_running(self):
+ pid = self._xvp_pid()
+ if not pid:
+ return False
+ try:
+ os.kill(pid, 0)
+ except OSError:
+ return False
+ return True
+
+ def _xvp_encrypt(self, password, is_pool_password=False):
+ """Call xvp to obfuscate passwords for config file.
+
+ Args:
+ - password: the password to encode, max 8 char for vm passwords,
+ and 16 chars for pool passwords. passwords will
+ be trimmed to max len before encoding.
+ - is_pool_password: True if this is the XenServer api password
+ False if it's a VM console password
+ (xvp uses different keys and max lengths for pool passwords)
+
+ Note that xvp's obfuscation should not be considered 'real' encryption.
+ It simply DES encrypts the passwords with static keys plainly viewable
+ in the xvp source code."""
+ maxlen = 8
+ flag = '-e'
+ if is_pool_password:
+ maxlen = 16
+ flag = '-x'
+ #xvp will blow up on passwords that are too long (mdragon)
+ password = password[:maxlen]
+ out, err = utils.execute('xvp %s' % flag, process_input=password)
+ return out.strip()
diff --git a/nova/crypto.py b/nova/crypto.py
index b8405552d..a34b940f5 100644
--- a/nova/crypto.py
+++ b/nova/crypto.py
@@ -24,7 +24,6 @@ Includes root and intermediate CAs, SSH key_pairs and x509 certificates.
import base64
import gettext
import hashlib
-import logging
import os
import shutil
import struct
@@ -39,8 +38,10 @@ gettext.install('nova', unicode=1)
from nova import context
from nova import db
from nova import flags
+from nova import log as logging
+LOG = logging.getLogger("nova.crypto")
FLAGS = flags.FLAGS
flags.DEFINE_string('ca_file', 'cacert.pem', _('Filename of root CA'))
flags.DEFINE_string('key_file',
@@ -254,7 +255,7 @@ def _sign_csr(csr_text, ca_folder):
csrfile = open(inbound, "w")
csrfile.write(csr_text)
csrfile.close()
- logging.debug(_("Flags path: %s") % ca_folder)
+ LOG.debug(_("Flags path: %s"), ca_folder)
start = os.getcwd()
# Change working dir to CA
os.chdir(ca_folder)
diff --git a/nova/db/api.py b/nova/db/api.py
index bab698d9d..1f81ef145 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -81,11 +81,21 @@ def service_get(context, service_id):
return IMPL.service_get(context, service_id)
+def service_get_all(context, disabled=False):
+ """Get all service."""
+ return IMPL.service_get_all(context, None, disabled)
+
+
def service_get_all_by_topic(context, topic):
- """Get all compute services for a given topic."""
+ """Get all services for a given topic."""
return IMPL.service_get_all_by_topic(context, topic)
+def service_get_all_by_host(context, host):
+ """Get all services for a given host."""
+ return IMPL.service_get_all_by_host(context, host)
+
+
def service_get_all_compute_sorted(context):
"""Get all compute services sorted by instance count.
@@ -353,9 +363,9 @@ def instance_get_project_vpn(context, project_id):
return IMPL.instance_get_project_vpn(context, project_id)
-def instance_get_by_internal_id(context, internal_id):
- """Get an instance by internal id."""
- return IMPL.instance_get_by_internal_id(context, internal_id)
+def instance_get_by_id(context, instance_id):
+ """Get an instance by id."""
+ return IMPL.instance_get_by_id(context, instance_id)
def instance_is_vpn(context, instance_id):
@@ -719,7 +729,7 @@ def security_group_get_all(context):
def security_group_get(context, security_group_id):
- """Get security group by its internal id."""
+ """Get security group by its id."""
return IMPL.security_group_get(context, security_group_id)
@@ -772,6 +782,13 @@ def security_group_rule_get_by_security_group(context, security_group_id):
security_group_id)
+def security_group_rule_get_by_security_group_grantee(context,
+ security_group_id):
+ """Get all rules that grant access to the given security group."""
+ return IMPL.security_group_rule_get_by_security_group_grantee(context,
+ security_group_id)
+
+
def security_group_rule_destroy(context, security_group_rule_id):
"""Deletes a security group rule."""
return IMPL.security_group_rule_destroy(context, security_group_rule_id)
@@ -894,3 +911,57 @@ def host_get_networks(context, host):
"""
return IMPL.host_get_networks(context, host)
+
+
+##################
+
+
+def console_pool_create(context, values):
+ """Create console pool."""
+ return IMPL.console_pool_create(context, values)
+
+
+def console_pool_get(context, pool_id):
+ """Get a console pool."""
+ return IMPL.console_pool_get(context, pool_id)
+
+
+def console_pool_get_by_host_type(context, compute_host, proxy_host,
+ console_type):
+ """Fetch a console pool for a given proxy host, compute host, and type."""
+ return IMPL.console_pool_get_by_host_type(context,
+ compute_host,
+ proxy_host,
+ console_type)
+
+
+def console_pool_get_all_by_host_type(context, host, console_type):
+ """Fetch all pools for given proxy host and type."""
+ return IMPL.console_pool_get_all_by_host_type(context,
+ host,
+ console_type)
+
+
+def console_create(context, values):
+ """Create a console."""
+ return IMPL.console_create(context, values)
+
+
+def console_delete(context, console_id):
+ """Delete a console."""
+ return IMPL.console_delete(context, console_id)
+
+
+def console_get_by_pool_instance(context, pool_id, instance_id):
+ """Get console entry for a given instance and pool."""
+ return IMPL.console_get_by_pool_instance(context, pool_id, instance_id)
+
+
+def console_get_all_by_instance(context, instance_id):
+ """Get consoles for a given instance."""
+ return IMPL.console_get_all_by_instance(context, instance_id)
+
+
+def console_get(context, console_id, instance_id=None):
+ """Get a specific console (possibly on a given instance)."""
+ return IMPL.console_get(context, console_id, instance_id)
diff --git a/nova/db/sqlalchemy/__init__.py b/nova/db/sqlalchemy/__init__.py
index 22aa1cfe6..501373942 100644
--- a/nova/db/sqlalchemy/__init__.py
+++ b/nova/db/sqlalchemy/__init__.py
@@ -19,16 +19,17 @@
"""
SQLAlchemy database backend
"""
-import logging
import time
from sqlalchemy.exc import OperationalError
from nova import flags
+from nova import log as logging
from nova.db.sqlalchemy import models
FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.db.sqlalchemy')
for i in xrange(FLAGS.sql_max_retries):
@@ -39,5 +40,6 @@ for i in xrange(FLAGS.sql_max_retries):
models.register_models()
break
except OperationalError:
- logging.exception(_("Data store is unreachable."
- " Trying again in %d seconds.") % FLAGS.sql_retry_interval)
+ LOG.exception(_("Data store %s is unreachable."
+ " Trying again in %d seconds."),
+ FLAGS.sql_connection, FLAGS.sql_retry_interval)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 72369d187..2e4f8fc39 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -19,7 +19,6 @@
Implementation of SQLAlchemy backend.
"""
-import random
import warnings
from nova import db
@@ -136,6 +135,18 @@ def service_get(context, service_id, session=None):
@require_admin_context
+def service_get_all(context, session=None, disabled=False):
+ if not session:
+ session = get_session()
+
+ result = session.query(models.Service).\
+ filter_by(deleted=can_read_deleted(context)).\
+ filter_by(disabled=disabled).\
+ all()
+ return result
+
+
+@require_admin_context
def service_get_all_by_topic(context, topic):
session = get_session()
return session.query(models.Service).\
@@ -146,6 +157,15 @@ def service_get_all_by_topic(context, topic):
@require_admin_context
+def service_get_all_by_host(context, host):
+ session = get_session()
+ return session.query(models.Service).\
+ filter_by(deleted=False).\
+ filter_by(host=host).\
+ all()
+
+
+@require_admin_context
def _service_get_all_topic_subquery(context, session, topic, subq, label):
sort_value = getattr(subq.c, label)
return session.query(models.Service, func.coalesce(sort_value, 0)).\
@@ -606,30 +626,18 @@ def fixed_ip_update(context, address, values):
###################
-#TODO(gundlach): instance_create and volume_create are nearly identical
-#and should be refactored. I expect there are other copy-and-paste
-#functions between the two of them as well.
-
-
@require_context
def instance_create(context, values):
"""Create a new Instance record in the database.
context - request context object
values - dict containing column values.
- 'internal_id' is auto-generated and should not be specified.
"""
instance_ref = models.Instance()
instance_ref.update(values)
session = get_session()
with session.begin():
- while instance_ref.internal_id == None:
- # Instances have integer internal ids.
- internal_id = random.randint(0, 2 ** 31 - 1)
- if not instance_internal_id_exists(context, internal_id,
- session=session):
- instance_ref.internal_id = internal_id
instance_ref.save(session=session)
return instance_ref
@@ -663,7 +671,7 @@ def instance_get(context, instance_id, session=None):
if is_admin_context(context):
result = session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
- options(joinedload('security_groups')).\
+ options(joinedload_all('security_groups.rules')).\
options(joinedload('volumes')).\
filter_by(id=instance_id).\
filter_by(deleted=can_read_deleted(context)).\
@@ -671,7 +679,7 @@ def instance_get(context, instance_id, session=None):
elif is_user_context(context):
result = session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
- options(joinedload('security_groups')).\
+ options(joinedload_all('security_groups.rules')).\
options(joinedload('volumes')).\
filter_by(project_id=context.project_id).\
filter_by(id=instance_id).\
@@ -751,38 +759,29 @@ def instance_get_project_vpn(context, project_id):
@require_context
-def instance_get_by_internal_id(context, internal_id):
+def instance_get_by_id(context, instance_id):
session = get_session()
if is_admin_context(context):
result = session.query(models.Instance).\
options(joinedload('security_groups')).\
- filter_by(internal_id=internal_id).\
+ filter_by(id=instance_id).\
filter_by(deleted=can_read_deleted(context)).\
first()
elif is_user_context(context):
result = session.query(models.Instance).\
options(joinedload('security_groups')).\
filter_by(project_id=context.project_id).\
- filter_by(internal_id=internal_id).\
+ filter_by(id=instance_id).\
filter_by(deleted=False).\
first()
if not result:
- raise exception.NotFound(_('Instance %s not found') % (internal_id))
+ raise exception.NotFound(_('Instance %s not found') % (instance_id))
return result
@require_context
-def instance_internal_id_exists(context, internal_id, session=None):
- if not session:
- session = get_session()
- return session.query(exists().\
- where(models.Instance.internal_id == internal_id)).\
- one()[0]
-
-
-@require_context
def instance_get_fixed_address(context, instance_id):
session = get_session()
with session.begin():
@@ -862,12 +861,9 @@ def instance_action_create(context, values):
def instance_get_actions(context, instance_id):
"""Return the actions associated to the given instance id"""
session = get_session()
- actions = {}
- for action in session.query(models.InstanceActions).\
+ return session.query(models.InstanceActions).\
filter_by(instance_id=instance_id).\
- all():
- actions[action.action] = action.error
- return actions
+ all()
###################
@@ -1317,10 +1313,6 @@ def volume_create(context, values):
session = get_session()
with session.begin():
- while volume_ref.ec2_id == None:
- ec2_id = utils.generate_uid('vol')
- if not volume_ec2_id_exists(context, ec2_id, session=session):
- volume_ref.ec2_id = ec2_id
volume_ref.save(session=session)
return volume_ref
@@ -1418,41 +1410,6 @@ def volume_get_all_by_project(context, project_id):
all()
-@require_context
-def volume_get_by_ec2_id(context, ec2_id):
- session = get_session()
- result = None
-
- if is_admin_context(context):
- result = session.query(models.Volume).\
- filter_by(ec2_id=ec2_id).\
- filter_by(deleted=can_read_deleted(context)).\
- first()
- elif is_user_context(context):
- result = session.query(models.Volume).\
- filter_by(project_id=context.project_id).\
- filter_by(ec2_id=ec2_id).\
- filter_by(deleted=False).\
- first()
- else:
- raise exception.NotAuthorized()
-
- if not result:
- raise exception.NotFound(_('Volume %s not found') % ec2_id)
-
- return result
-
-
-@require_context
-def volume_ec2_id_exists(context, ec2_id, session=None):
- if not session:
- session = get_session()
-
- return session.query(exists().\
- where(models.Volume.id == ec2_id)).\
- one()[0]
-
-
@require_admin_context
def volume_get_instance(context, volume_id):
session = get_session()
@@ -1643,6 +1600,44 @@ def security_group_rule_get(context, security_group_rule_id, session=None):
@require_context
+def security_group_rule_get_by_security_group(context, security_group_id,
+ session=None):
+ if not session:
+ session = get_session()
+ if is_admin_context(context):
+ result = session.query(models.SecurityGroupIngressRule).\
+ filter_by(deleted=can_read_deleted(context)).\
+ filter_by(parent_group_id=security_group_id).\
+ all()
+ else:
+ # TODO(vish): Join to group and check for project_id
+ result = session.query(models.SecurityGroupIngressRule).\
+ filter_by(deleted=False).\
+ filter_by(parent_group_id=security_group_id).\
+ all()
+ return result
+
+
+@require_context
+def security_group_rule_get_by_security_group_grantee(context,
+ security_group_id,
+ session=None):
+ if not session:
+ session = get_session()
+ if is_admin_context(context):
+ result = session.query(models.SecurityGroupIngressRule).\
+ filter_by(deleted=can_read_deleted(context)).\
+ filter_by(group_id=security_group_id).\
+ all()
+ else:
+ result = session.query(models.SecurityGroupIngressRule).\
+ filter_by(deleted=False).\
+ filter_by(group_id=security_group_id).\
+ all()
+ return result
+
+
+@require_context
def security_group_rule_create(context, values):
security_group_rule_ref = models.SecurityGroupIngressRule()
security_group_rule_ref.update(values)
@@ -1877,3 +1872,111 @@ def host_get_networks(context, host):
filter_by(deleted=False).\
filter_by(host=host).\
all()
+
+
+##################
+
+
+def console_pool_create(context, values):
+ pool = models.ConsolePool()
+ pool.update(values)
+ pool.save()
+ return pool
+
+
+def console_pool_get(context, pool_id):
+ session = get_session()
+ result = session.query(models.ConsolePool).\
+ filter_by(deleted=False).\
+ filter_by(id=pool_id).\
+ first()
+ if not result:
+ raise exception.NotFound(_("No console pool with id %(pool_id)s") %
+ {'pool_id': pool_id})
+
+ return result
+
+
+def console_pool_get_by_host_type(context, compute_host, host,
+ console_type):
+ session = get_session()
+ result = session.query(models.ConsolePool).\
+ filter_by(host=host).\
+ filter_by(console_type=console_type).\
+ filter_by(compute_host=compute_host).\
+ filter_by(deleted=False).\
+ options(joinedload('consoles')).\
+ first()
+ if not result:
+ raise exception.NotFound(_('No console pool of type %(type)s '
+ 'for compute host %(compute_host)s '
+ 'on proxy host %(host)s') %
+ {'type': console_type,
+ 'compute_host': compute_host,
+ 'host': host})
+ return result
+
+
+def console_pool_get_all_by_host_type(context, host, console_type):
+ session = get_session()
+ return session.query(models.ConsolePool).\
+ filter_by(host=host).\
+ filter_by(console_type=console_type).\
+ filter_by(deleted=False).\
+ options(joinedload('consoles')).\
+ all()
+
+
+def console_create(context, values):
+ console = models.Console()
+ console.update(values)
+ console.save()
+ return console
+
+
+def console_delete(context, console_id):
+ session = get_session()
+ with session.begin():
+ # consoles are meant to be transient. (mdragon)
+ session.execute('delete from consoles '
+ 'where id=:id', {'id': console_id})
+
+
+def console_get_by_pool_instance(context, pool_id, instance_id):
+ session = get_session()
+ result = session.query(models.Console).\
+ filter_by(pool_id=pool_id).\
+ filter_by(instance_id=instance_id).\
+ options(joinedload('pool')).\
+ first()
+ if not result:
+ raise exception.NotFound(_('No console for instance %(instance_id)s '
+ 'in pool %(pool_id)s') %
+ {'instance_id': instance_id,
+ 'pool_id': pool_id})
+ return result
+
+
+def console_get_all_by_instance(context, instance_id):
+ session = get_session()
+ results = session.query(models.Console).\
+ filter_by(instance_id=instance_id).\
+ options(joinedload('pool')).\
+ all()
+ return results
+
+
+def console_get(context, console_id, instance_id=None):
+ session = get_session()
+ query = session.query(models.Console).\
+ filter_by(id=console_id)
+ if instance_id:
+ query = query.filter_by(instance_id=instance_id)
+ result = query.options(joinedload('pool')).first()
+ if not result:
+ idesc = (_("on instance %s") % instance_id) if instance_id else ""
+ raise exception.NotFound(_("No console with id %(console_id)s"
+ " %(instance)s") %
+ {'instance': idesc,
+ 'console_id': console_id})
+ return result
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 0c9c387fc..1dc46fe78 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -149,6 +149,7 @@ class Service(BASE, NovaBase):
topic = Column(String(255))
report_count = Column(Integer, nullable=False, default=0)
disabled = Column(Boolean, default=False)
+ availability_zone = Column(String(255), default='nova')
class Certificate(BASE, NovaBase):
@@ -164,11 +165,13 @@ class Certificate(BASE, NovaBase):
class Instance(BASE, NovaBase):
"""Represents a guest vm."""
__tablename__ = 'instances'
- id = Column(Integer, primary_key=True)
- internal_id = Column(Integer, unique=True)
+ id = Column(Integer, primary_key=True, autoincrement=True)
- admin_pass = Column(String(255))
+ @property
+ def name(self):
+ return "instance-%08x" % self.id
+ admin_pass = Column(String(255))
user_id = Column(String(255))
project_id = Column(String(255))
@@ -180,10 +183,6 @@ class Instance(BASE, NovaBase):
def project(self):
return auth.manager.AuthManager().get_project(self.project_id)
- @property
- def name(self):
- return "instance-%d" % self.internal_id
-
image_id = Column(String(255))
kernel_id = Column(String(255))
ramdisk_id = Column(String(255))
@@ -226,6 +225,8 @@ class Instance(BASE, NovaBase):
display_name = Column(String(255))
display_description = Column(String(255))
+ locked = Column(Boolean)
+
# TODO(vish): see Ewan's email about state improvements, probably
# should be in a driver base class or some such
# vmstate_state = running, halted, suspended, paused
@@ -251,8 +252,11 @@ class InstanceActions(BASE, NovaBase):
class Volume(BASE, NovaBase):
"""Represents a block storage device that can be attached to a vm."""
__tablename__ = 'volumes'
- id = Column(Integer, primary_key=True)
- ec2_id = Column(String(12), unique=True)
+ id = Column(Integer, primary_key=True, autoincrement=True)
+
+ @property
+ def name(self):
+ return "volume-%08x" % self.id
user_id = Column(String(255))
project_id = Column(String(255))
@@ -278,10 +282,6 @@ class Volume(BASE, NovaBase):
display_name = Column(String(255))
display_description = Column(String(255))
- @property
- def name(self):
- return self.ec2_id
-
class Quota(BASE, NovaBase):
"""Represents quota overrides for a project."""
@@ -541,18 +541,44 @@ class FloatingIp(BASE, NovaBase):
host = Column(String(255)) # , ForeignKey('hosts.id'))
+class ConsolePool(BASE, NovaBase):
+ """Represents pool of consoles on the same physical node."""
+ __tablename__ = 'console_pools'
+ id = Column(Integer, primary_key=True)
+ address = Column(String(255))
+ username = Column(String(255))
+ password = Column(String(255))
+ console_type = Column(String(255))
+ public_hostname = Column(String(255))
+ host = Column(String(255))
+ compute_host = Column(String(255))
+
+
+class Console(BASE, NovaBase):
+ """Represents a console session for an instance."""
+ __tablename__ = 'consoles'
+ id = Column(Integer, primary_key=True)
+ instance_name = Column(String(255))
+ instance_id = Column(Integer)
+ password = Column(String(255))
+ port = Column(Integer, nullable=True)
+ pool_id = Column(Integer, ForeignKey('console_pools.id'))
+ pool = relationship(ConsolePool, backref=backref('consoles'))
+
+
def register_models():
"""Register Models and create metadata.
Called from nova.db.sqlalchemy.__init__ as part of loading the driver,
- it will never need to be called explicitly elsewhere.
+ it will never need to be called explicitly elsewhere unless the
+ connection is lost and needs to be reestablished.
"""
from sqlalchemy import create_engine
models = (Service, Instance, InstanceActions,
Volume, ExportDevice, IscsiTarget, FixedIp, FloatingIp,
Network, SecurityGroup, SecurityGroupIngressRule,
SecurityGroupInstanceAssociation, AuthToken, User,
- Project, Certificate) # , Image, Host
+ Project, Certificate, ConsolePool, Console) # , Image, Host
engine = create_engine(FLAGS.sql_connection, echo=False)
for model in models:
model.metadata.create_all(engine)
diff --git a/nova/exception.py b/nova/exception.py
index 277033e0f..7680e534a 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -21,9 +21,8 @@ Nova base exception handling, including decorator for re-raising
Nova-type exceptions. SHOULD include dedicated exception logging.
"""
-import logging
-import sys
-import traceback
+from nova import log as logging
+LOG = logging.getLogger('nova.exception')
class ProcessExecutionError(IOError):
@@ -84,7 +83,7 @@ def wrap_exception(f):
except Exception, e:
if not isinstance(e, Error):
#exc_type, exc_value, exc_traceback = sys.exc_info()
- logging.exception(_('Uncaught exception'))
+ LOG.exception(_('Uncaught exception'))
#logging.error(traceback.extract_stack(exc_traceback))
raise Error(str(e))
raise
diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py
index 79d8b894d..7c2d7177b 100644
--- a/nova/fakerabbit.py
+++ b/nova/fakerabbit.py
@@ -18,12 +18,16 @@
"""Based a bit on the carrot.backeds.queue backend... but a lot better."""
-import logging
import Queue as queue
from carrot.backends import base
from eventlet import greenthread
+from nova import log as logging
+
+
+LOG = logging.getLogger("nova.fakerabbit")
+
EXCHANGES = {}
QUEUES = {}
@@ -41,12 +45,12 @@ class Exchange(object):
self._routes = {}
def publish(self, message, routing_key=None):
- logging.debug(_('(%s) publish (key: %s) %s'),
- self.name, routing_key, message)
+ LOG.debug(_('(%s) publish (key: %s) %s'),
+ self.name, routing_key, message)
routing_key = routing_key.split('.')[0]
if routing_key in self._routes:
for f in self._routes[routing_key]:
- logging.debug(_('Publishing to route %s'), f)
+ LOG.debug(_('Publishing to route %s'), f)
f(message, routing_key=routing_key)
def bind(self, callback, routing_key):
@@ -76,19 +80,19 @@ class Backend(base.BaseBackend):
def queue_declare(self, queue, **kwargs):
global QUEUES
if queue not in QUEUES:
- logging.debug(_('Declaring queue %s'), queue)
+ LOG.debug(_('Declaring queue %s'), queue)
QUEUES[queue] = Queue(queue)
def exchange_declare(self, exchange, type, *args, **kwargs):
global EXCHANGES
if exchange not in EXCHANGES:
- logging.debug(_('Declaring exchange %s'), exchange)
+ LOG.debug(_('Declaring exchange %s'), exchange)
EXCHANGES[exchange] = Exchange(exchange, type)
def queue_bind(self, queue, exchange, routing_key, **kwargs):
global EXCHANGES
global QUEUES
- logging.debug(_('Binding %s to %s with key %s'),
+ LOG.debug(_('Binding %s to %s with key %s'),
queue, exchange, routing_key)
EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key)
@@ -113,7 +117,7 @@ class Backend(base.BaseBackend):
content_type=content_type,
content_encoding=content_encoding)
message.result = True
- logging.debug(_('Getting from %s: %s'), queue, message)
+ LOG.debug(_('Getting from %s: %s'), queue, message)
return message
def prepare_message(self, message_data, delivery_mode,
diff --git a/nova/flags.py b/nova/flags.py
index 4b7334927..fdcba6c72 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -29,8 +29,6 @@ import sys
import gflags
-from nova import utils
-
class FlagValues(gflags.FlagValues):
"""Extension of gflags.FlagValues that allows undefined and runtime flags.
@@ -202,10 +200,22 @@ def DECLARE(name, module_string, flag_values=FLAGS):
"%s not defined by %s" % (name, module_string))
+def _get_my_ip():
+ """Returns the actual ip of the local machine."""
+ try:
+ csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ csock.connect(('8.8.8.8', 80))
+ (addr, port) = csock.getsockname()
+ csock.close()
+ return addr
+ except socket.gaierror as ex:
+ return "127.0.0.1"
+
+
# __GLOBAL FLAGS ONLY__
# Define any app-specific flags in their own files, docs at:
-# http://code.google.com/p/python-gflags/source/browse/trunk/gflags.py#39
-
+# http://code.google.com/p/python-gflags/source/browse/trunk/gflags.py#a9
+DEFINE_string('my_ip', _get_my_ip(), 'host ip address')
DEFINE_list('region_list',
[],
'list of region=url pairs separated by commas')
@@ -213,16 +223,25 @@ DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake')
DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID')
DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key')
DEFINE_integer('glance_port', 9292, 'glance port')
-DEFINE_string('glance_host', utils.get_my_ip(), 'glance host')
+DEFINE_string('glance_host', '$my_ip', 'glance host')
DEFINE_integer('s3_port', 3333, 's3 port')
-DEFINE_string('s3_host', utils.get_my_ip(), 's3 host (for infrastructure)')
-DEFINE_string('s3_dmz', utils.get_my_ip(), 's3 dmz ip (for instances)')
+DEFINE_string('s3_host', '$my_ip', 's3 host (for infrastructure)')
+DEFINE_string('s3_dmz', '$my_ip', 's3 dmz ip (for instances)')
DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on')
+DEFINE_string('console_topic', 'console',
+ 'the topic console proxy nodes listen on')
DEFINE_string('scheduler_topic', 'scheduler',
'the topic scheduler nodes listen on')
DEFINE_string('volume_topic', 'volume', 'the topic volume nodes listen on')
DEFINE_string('network_topic', 'network', 'the topic network nodes listen on')
-
+DEFINE_string('ajax_console_proxy_topic', 'ajax_proxy',
+ 'the topic ajax proxy nodes listen on')
+DEFINE_string('ajax_console_proxy_url',
+ 'http://127.0.0.1:8000',
+ 'location of ajax console proxy, \
+ in the form "http://127.0.0.1:8000"')
+DEFINE_string('ajax_console_proxy_port',
+ 8000, 'port that ajax_console_proxy binds')
DEFINE_bool('verbose', False, 'show debug output')
DEFINE_boolean('fake_rabbit', False, 'use a fake rabbit')
DEFINE_bool('fake_network', False,
@@ -236,10 +255,12 @@ DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval')
DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts')
DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to')
DEFINE_string('ec2_prefix', 'http', 'prefix for ec2')
-DEFINE_string('cc_host', utils.get_my_ip(), 'ip of api server')
-DEFINE_string('cc_dmz', utils.get_my_ip(), 'internal ip of api server')
+DEFINE_string('os_prefix', 'http', 'prefix for openstack')
+DEFINE_string('cc_host', '$my_ip', 'ip of api server')
+DEFINE_string('cc_dmz', '$my_ip', 'internal ip of api server')
DEFINE_integer('cc_port', 8773, 'cloud controller port')
DEFINE_string('ec2_suffix', '/services/Cloud', 'suffix for ec2')
+DEFINE_string('os_suffix', '/v1.0/', 'suffix for openstack')
DEFINE_string('default_project', 'openstack', 'default project for openstack')
DEFINE_string('default_image', 'ami-11111',
@@ -271,6 +292,8 @@ DEFINE_integer('sql_retry_interval', 10, 'sql connection retry interval')
DEFINE_string('compute_manager', 'nova.compute.manager.ComputeManager',
'Manager for compute')
+DEFINE_string('console_manager', 'nova.console.manager.ConsoleProxyManager',
+ 'Manager for console proxy')
DEFINE_string('network_manager', 'nova.network.manager.VlanManager',
'Manager for network')
DEFINE_string('volume_manager', 'nova.volume.manager.VolumeManager',
@@ -285,6 +308,5 @@ DEFINE_string('image_service', 'nova.image.s3.S3ImageService',
DEFINE_string('host', socket.gethostname(),
'name of this node')
-# UNUSED
DEFINE_string('node_availability_zone', 'nova',
'availability zone of this node')
diff --git a/nova/image/glance.py b/nova/image/glance.py
index cc3192e7c..a3a2f4308 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -19,20 +19,17 @@
import httplib
import json
-import logging
import urlparse
-import webob.exc
-
-from nova.compute import api as compute_api
-from nova import utils
-from nova import flags
from nova import exception
-import nova.image.service
+from nova import flags
+from nova import log as logging
+from nova.image import service
-FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.image.glance')
+FLAGS = flags.FLAGS
flags.DEFINE_string('glance_teller_address', 'http://127.0.0.1',
'IP address or URL where Glance\'s Teller service resides')
flags.DEFINE_string('glance_teller_port', '9191',
@@ -78,8 +75,8 @@ class ParallaxClient(object):
data = json.loads(res.read())['images']
return data
else:
- logging.warn(_("Parallax returned HTTP error %d from "
- "request for /images"), res.status_int)
+ LOG.warn(_("Parallax returned HTTP error %d from "
+ "request for /images"), res.status_int)
return []
finally:
c.close()
@@ -97,8 +94,8 @@ class ParallaxClient(object):
data = json.loads(res.read())['images']
return data
else:
- logging.warn(_("Parallax returned HTTP error %d from "
- "request for /images/detail"), res.status_int)
+ LOG.warn(_("Parallax returned HTTP error %d from "
+ "request for /images/detail"), res.status_int)
return []
finally:
c.close()
@@ -166,7 +163,7 @@ class ParallaxClient(object):
c.close()
-class GlanceImageService(nova.image.service.BaseImageService):
+class GlanceImageService(service.BaseImageService):
"""Provides storage and retrieval of disk image objects within Glance."""
def __init__(self):
diff --git a/nova/log.py b/nova/log.py
new file mode 100644
index 000000000..c1428c051
--- /dev/null
+++ b/nova/log.py
@@ -0,0 +1,254 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Nova logging handler.
+
+This module adds to logging functionality by adding the option to specify
+a context object when calling the various log methods. If the context object
+is not specified, default formatting is used.
+
+It also allows setting of formatting information through flags.
+"""
+
+
+import cStringIO
+import json
+import logging
+import logging.handlers
+import traceback
+
+from nova import flags
+from nova import version
+
+
+FLAGS = flags.FLAGS
+
+flags.DEFINE_string('logging_context_format_string',
+ '(%(name)s %(nova_version)s): %(levelname)s '
+ '[%(request_id)s %(user)s '
+ '%(project)s] %(message)s',
+ 'format string to use for log messages')
+
+flags.DEFINE_string('logging_default_format_string',
+ '(%(name)s %(nova_version)s): %(levelname)s [N/A] '
+ '%(message)s',
+ 'format string to use for log messages')
+
+flags.DEFINE_string('logging_debug_format_suffix',
+ 'from %(processName)s (pid=%(process)d) %(funcName)s'
+ ' %(pathname)s:%(lineno)d',
+ 'data to append to log format when level is DEBUG')
+
+flags.DEFINE_string('logging_exception_prefix',
+ '(%(name)s): TRACE: ',
+ 'prefix each line of exception output with this format')
+
+flags.DEFINE_list('default_log_levels',
+ ['amqplib=WARN',
+ 'sqlalchemy=WARN',
+ 'eventlet.wsgi.server=WARN'],
+ 'list of logger=LEVEL pairs')
+
+flags.DEFINE_bool('use_syslog', False, 'output to syslog')
+flags.DEFINE_string('logfile', None, 'output to named file')
+
+
+# A list of things we want to replicate from logging.
+# levels
+CRITICAL = logging.CRITICAL
+FATAL = logging.FATAL
+ERROR = logging.ERROR
+WARNING = logging.WARNING
+WARN = logging.WARN
+INFO = logging.INFO
+DEBUG = logging.DEBUG
+NOTSET = logging.NOTSET
+# methods
+getLogger = logging.getLogger
+debug = logging.debug
+info = logging.info
+warning = logging.warning
+warn = logging.warn
+error = logging.error
+exception = logging.exception
+critical = logging.critical
+log = logging.log
+# handlers
+StreamHandler = logging.StreamHandler
+FileHandler = logging.FileHandler
+# logging.SysLogHandler is nicer than logging.handlers.SysLogHandler.
+SysLogHandler = logging.handlers.SysLogHandler
+
+
+# our new audit level
+AUDIT = logging.INFO + 1
+logging.addLevelName(AUDIT, 'AUDIT')
+
+
+def _dictify_context(context):
+ if context == None:
+ return None
+ if not isinstance(context, dict) \
+ and getattr(context, 'to_dict', None):
+ context = context.to_dict()
+ return context
+
+
+def basicConfig():
+ logging.basicConfig()
+ for handler in logging.root.handlers:
+ handler.setFormatter(_formatter)
+ if FLAGS.verbose:
+ logging.root.setLevel(logging.DEBUG)
+ if FLAGS.use_syslog:
+ syslog = SysLogHandler(address='/dev/log')
+ syslog.setFormatter(_formatter)
+ logging.root.addHandler(syslog)
+ if FLAGS.logfile:
+ logfile = FileHandler(FLAGS.logfile)
+ logfile.setFormatter(_formatter)
+ logging.root.addHandler(logfile)
+
+
+class NovaLogger(logging.Logger):
+ """
+ NovaLogger manages request context and formatting.
+
+ This becomes the class that is instantiated by logging.getLogger.
+ """
+ def __init__(self, name, level=NOTSET):
+ level_name = self._get_level_from_flags(name, FLAGS)
+ level = globals()[level_name]
+ logging.Logger.__init__(self, name, level)
+
+ def _get_level_from_flags(self, name, FLAGS):
+ # if exactly "nova", or a child logger, honor the verbose flag
+ if (name == "nova" or name.startswith("nova.")) and FLAGS.verbose:
+ return 'DEBUG'
+ for pair in FLAGS.default_log_levels:
+ logger, _sep, level = pair.partition('=')
+ # NOTE(todd): if we set a.b, we want a.b.c to have the same level
+ # (but not a.bc, so we check the dot)
+ if name == logger:
+ return level
+ if name.startswith(logger) and name[len(logger)] == '.':
+ return level
+ return 'INFO'
+
+ def _log(self, level, msg, args, exc_info=None, extra=None, context=None):
+ """Extract context from any log call"""
+ if not extra:
+ extra = {}
+ if context:
+ extra.update(_dictify_context(context))
+ extra.update({"nova_version": version.version_string_with_vcs()})
+ logging.Logger._log(self, level, msg, args, exc_info, extra)
+
+ def addHandler(self, handler):
+ """Each handler gets our custom formatter"""
+ handler.setFormatter(_formatter)
+ logging.Logger.addHandler(self, handler)
+
+ def audit(self, msg, *args, **kwargs):
+ """Shortcut for our AUDIT level"""
+ if self.isEnabledFor(AUDIT):
+ self._log(AUDIT, msg, args, **kwargs)
+
+ def exception(self, msg, *args, **kwargs):
+ """Logging.exception doesn't handle kwargs, so breaks context"""
+ if not kwargs.get('exc_info'):
+ kwargs['exc_info'] = 1
+ self.error(msg, *args, **kwargs)
+ # NOTE(todd): does this really go here, or in _log ?
+ extra = kwargs.get('extra')
+ if not extra:
+ return
+ env = extra.get('environment')
+ if env:
+ env = env.copy()
+ for k in env.keys():
+ if not isinstance(env[k], str):
+ env.pop(k)
+ message = "Environment: %s" % json.dumps(env)
+ kwargs.pop('exc_info')
+ self.error(message, **kwargs)
+
+logging.setLoggerClass(NovaLogger)
+
+
+class NovaRootLogger(NovaLogger):
+ pass
+
+if not isinstance(logging.root, NovaRootLogger):
+ logging.root = NovaRootLogger("nova.root", WARNING)
+ NovaLogger.root = logging.root
+ NovaLogger.manager.root = logging.root
+
+
+class NovaFormatter(logging.Formatter):
+ """
+ A nova.context.RequestContext aware formatter configured through flags.
+
+ The flags used to set format strings are: logging_context_format_string
+ and logging_default_format_string. You can also specify
+ logging_debug_format_suffix to append extra formatting if the log level is
+ debug.
+
+ For information about what variables are available for the formatter see:
+ http://docs.python.org/library/logging.html#formatter
+ """
+
+ def format(self, record):
+ """Uses contextstring if request_id is set, otherwise default"""
+ if record.__dict__.get('request_id', None):
+ self._fmt = FLAGS.logging_context_format_string
+ else:
+ self._fmt = FLAGS.logging_default_format_string
+ if record.levelno == logging.DEBUG \
+ and FLAGS.logging_debug_format_suffix:
+ self._fmt += " " + FLAGS.logging_debug_format_suffix
+ # Cache this on the record, Logger will respect our formatted copy
+ if record.exc_info:
+ record.exc_text = self.formatException(record.exc_info, record)
+ return logging.Formatter.format(self, record)
+
+ def formatException(self, exc_info, record=None):
+ """Format exception output with FLAGS.logging_exception_prefix"""
+ if not record:
+ return logging.Formatter.formatException(self, exc_info)
+ stringbuffer = cStringIO.StringIO()
+ traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
+ None, stringbuffer)
+ lines = stringbuffer.getvalue().split("\n")
+ stringbuffer.close()
+ formatted_lines = []
+ for line in lines:
+ pl = FLAGS.logging_exception_prefix % record.__dict__
+ fl = "%s%s" % (pl, line)
+ formatted_lines.append(fl)
+ return "\n".join(formatted_lines)
+
+_formatter = NovaFormatter()
+
+
+def audit(msg, *args, **kwargs):
+ """Shortcut for logging to root log with severity 'AUDIT'."""
+ if len(logging.root.handlers) == 0:
+ basicConfig()
+ logging.root.log(AUDIT, msg, *args, **kwargs)
diff --git a/nova/network/__init__.py b/nova/network/__init__.py
index dcc54db09..6eb3e3ef6 100644
--- a/nova/network/__init__.py
+++ b/nova/network/__init__.py
@@ -16,17 +16,4 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""
-:mod:`nova.network` -- Network Nodes
-=====================================================
-
-.. automodule:: nova.network
- :platform: Unix
- :synopsis: Network is responsible for managing networking
-.. moduleauthor:: Jesse Andrews <jesse@ansolabs.com>
-.. moduleauthor:: Devin Carlen <devin.carlen@gmail.com>
-.. moduleauthor:: Vishvananda Ishaya <vishvananda@yahoo.com>
-.. moduleauthor:: Joshua McKenty <joshua@cognition.ca>
-.. moduleauthor:: Manish Singh <yosh@gimp.org>
-.. moduleauthor:: Andy Smith <andy@anarkystic.com>
-"""
+from nova.network.api import API
diff --git a/nova/network/api.py b/nova/network/api.py
new file mode 100644
index 000000000..bf43acb51
--- /dev/null
+++ b/nova/network/api.py
@@ -0,0 +1,88 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Handles all requests relating to the network (e.g. floating ips).
+"""
+
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import quota
+from nova import rpc
+from nova.db import base
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.network')
+
+
+class API(base.Base):
+ """API for interacting with the network manager."""
+
+ def allocate_floating_ip(self, context):
+ if quota.allowed_floating_ips(context, 1) < 1:
+ LOG.warn(_("Quota exceeeded for %s, tried to allocate "
+ "address"),
+ context.project_id)
+ raise quota.QuotaError(_("Address quota exceeded. You cannot "
+ "allocate any more addresses"))
+ # NOTE(vish): We don't know which network host should get the ip
+ # when we allocate, so just send it to any one. This
+ # will probably need to move into a network supervisor
+ # at some point.
+ return rpc.call(context,
+ FLAGS.network_topic,
+ {"method": "allocate_floating_ip",
+ "args": {"project_id": context.project_id}})
+
+ def release_floating_ip(self, context, address):
+ floating_ip = self.db.floating_ip_get_by_address(context, address)
+ # NOTE(vish): We don't know which network host should get the ip
+ # when we deallocate, so just send it to any one. This
+ # will probably need to move into a network supervisor
+ # at some point.
+ rpc.cast(context,
+ FLAGS.network_topic,
+ {"method": "deallocate_floating_ip",
+ "args": {"floating_address": floating_ip['address']}})
+
+ def associate_floating_ip(self, context, floating_ip, fixed_ip):
+ if isinstance(fixed_ip, str) or isinstance(fixed_ip, unicode):
+ fixed_ip = self.db.fixed_ip_get_by_address(context, fixed_ip)
+ floating_ip = self.db.floating_ip_get_by_address(context, floating_ip)
+ # NOTE(vish): Perhaps we should just pass this on to compute and
+ # let compute communicate with network.
+ host = fixed_ip['network']['host']
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.network_topic, host),
+ {"method": "associate_floating_ip",
+ "args": {"floating_address": floating_ip['address'],
+ "fixed_address": fixed_ip['address']}})
+
+ def disassociate_floating_ip(self, context, address):
+ floating_ip = self.db.floating_ip_get_by_address(context, address)
+ if not floating_ip.get('fixed_ip'):
+ raise exception.ApiError('Address is not associated.')
+ # NOTE(vish): Get the topic from the host name of the network of
+ # the associated fixed ip.
+ host = floating_ip['fixed_ip']['network']['host']
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.network_topic, host),
+ {"method": "disassociate_floating_ip",
+ "args": {"floating_address": floating_ip['address']}})
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index 931a89554..3743fc7e8 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -17,16 +17,17 @@
Implements vlans, bridges, and iptables rules using linux utilities.
"""
-import logging
import os
-# TODO(ja): does the definition of network_path belong here?
-
from nova import db
from nova import flags
+from nova import log as logging
from nova import utils
+LOG = logging.getLogger("nova.linux_net")
+
+
def _bin_file(script):
"""Return the absolute path to scipt in the bin directory"""
return os.path.abspath(os.path.join(__file__, "../../../bin", script))
@@ -45,7 +46,7 @@ flags.DEFINE_string('vlan_interface', 'eth0',
'network device for vlans')
flags.DEFINE_string('dhcpbridge', _bin_file('nova-dhcpbridge'),
'location of nova-dhcpbridge')
-flags.DEFINE_string('routing_source_ip', utils.get_my_ip(),
+flags.DEFINE_string('routing_source_ip', '$my_ip',
'Public IP of network host')
flags.DEFINE_bool('use_nova_chains', False,
'use the nova_ routing chains instead of default')
@@ -172,7 +173,7 @@ def ensure_vlan(vlan_num):
"""Create a vlan unless it already exists"""
interface = "vlan%s" % vlan_num
if not _device_exists(interface):
- logging.debug(_("Starting VLAN inteface %s"), interface)
+ LOG.debug(_("Starting VLAN inteface %s"), interface)
_execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD")
_execute("sudo vconfig add %s %s" % (FLAGS.vlan_interface, vlan_num))
_execute("sudo ifconfig %s up" % interface)
@@ -182,7 +183,7 @@ def ensure_vlan(vlan_num):
def ensure_bridge(bridge, interface, net_attrs=None):
"""Create a bridge unless it already exists"""
if not _device_exists(bridge):
- logging.debug(_("Starting Bridge interface for %s"), interface)
+ LOG.debug(_("Starting Bridge interface for %s"), interface)
_execute("sudo brctl addbr %s" % bridge)
_execute("sudo brctl setfd %s 0" % bridge)
# _execute("sudo brctl setageing %s 10" % bridge)
@@ -208,6 +209,8 @@ def ensure_bridge(bridge, interface, net_attrs=None):
_confirm_rule("FORWARD", "--in-interface %s -j ACCEPT" % bridge)
_confirm_rule("FORWARD", "--out-interface %s -j ACCEPT" % bridge)
+ _execute("sudo iptables -N nova-local", check_exit_code=False)
+ _confirm_rule("FORWARD", "-j nova-local")
def get_dhcp_hosts(context, network_id):
@@ -248,9 +251,9 @@ def update_dhcp(context, network_id):
_execute('sudo kill -HUP %d' % pid)
return
except Exception as exc: # pylint: disable-msg=W0703
- logging.debug(_("Hupping dnsmasq threw %s"), exc)
+ LOG.debug(_("Hupping dnsmasq threw %s"), exc)
else:
- logging.debug(_("Pid %d is stale, relaunching dnsmasq"), pid)
+ LOG.debug(_("Pid %d is stale, relaunching dnsmasq"), pid)
# FLAGFILE and DNSMASQ_INTERFACE in env
env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile,
@@ -270,7 +273,7 @@ def _host_dhcp(fixed_ip_ref):
def _execute(cmd, *args, **kwargs):
"""Wrapper around utils._execute for fake_network"""
if FLAGS.fake_network:
- logging.debug("FAKE NET: %s", cmd)
+ LOG.debug("FAKE NET: %s", cmd)
return "fake", 0
else:
return utils.execute(cmd, *args, **kwargs)
@@ -328,7 +331,7 @@ def _stop_dnsmasq(network):
try:
_execute('sudo kill -TERM %d' % pid)
except Exception as exc: # pylint: disable-msg=W0703
- logging.debug(_("Killing dnsmasq threw %s"), exc)
+ LOG.debug(_("Killing dnsmasq threw %s"), exc)
def _dhcp_file(bridge, kind):
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 16aa8f895..c75ecc671 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -45,7 +45,6 @@ topologies. All of the network commands are issued to a subclass of
"""
import datetime
-import logging
import math
import socket
@@ -55,11 +54,13 @@ from nova import context
from nova import db
from nova import exception
from nova import flags
+from nova import log as logging
from nova import manager
from nova import utils
from nova import rpc
+LOG = logging.getLogger("nova.network.manager")
FLAGS = flags.FLAGS
flags.DEFINE_string('flat_network_bridge', 'br100',
'Bridge for simple network instances')
@@ -73,7 +74,7 @@ flags.DEFINE_string('flat_network_dhcp_start', '10.0.0.2',
'Dhcp start for FlatDhcp')
flags.DEFINE_integer('vlan_start', 100, 'First VLAN for private networks')
flags.DEFINE_integer('num_networks', 1000, 'Number of networks to support')
-flags.DEFINE_string('vpn_ip', utils.get_my_ip(),
+flags.DEFINE_string('vpn_ip', '$my_ip',
'Public IP for the cloudpipe VPN servers')
flags.DEFINE_integer('vpn_start', 1000, 'First Vpn port for private networks')
flags.DEFINE_integer('network_size', 256,
@@ -131,7 +132,7 @@ class NetworkManager(manager.Manager):
def set_network_host(self, context, network_id):
"""Safely sets the host of the network."""
- logging.debug(_("setting network host"))
+ LOG.debug(_("setting network host"), context=context)
host = self.db.network_set_host(context,
network_id,
self.host)
@@ -186,7 +187,7 @@ class NetworkManager(manager.Manager):
def lease_fixed_ip(self, context, mac, address):
"""Called by dhcp-bridge when ip is leased."""
- logging.debug("Leasing IP %s", address)
+ LOG.debug(_("Leasing IP %s"), address, context=context)
fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
instance_ref = fixed_ip_ref['instance']
if not instance_ref:
@@ -201,12 +202,12 @@ class NetworkManager(manager.Manager):
{'leased': True,
'updated_at': now})
if not fixed_ip_ref['allocated']:
- logging.warn(_("IP %s leased that was already deallocated"),
- address)
+ LOG.warn(_("IP %s leased that was already deallocated"), address,
+ context=context)
def release_fixed_ip(self, context, mac, address):
"""Called by dhcp-bridge when ip is released."""
- logging.debug("Releasing IP %s", address)
+ LOG.debug("Releasing IP %s", address, context=context)
fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
instance_ref = fixed_ip_ref['instance']
if not instance_ref:
@@ -216,7 +217,8 @@ class NetworkManager(manager.Manager):
raise exception.Error(_("IP %s released from bad mac %s vs %s") %
(address, instance_ref['mac_address'], mac))
if not fixed_ip_ref['leased']:
- logging.warn(_("IP %s released that was not leased"), address)
+ LOG.warn(_("IP %s released that was not leased"), address,
+ context=context)
self.db.fixed_ip_update(context,
fixed_ip_ref['address'],
{'leased': False})
@@ -437,7 +439,7 @@ class VlanManager(NetworkManager):
self.host,
time)
if num:
- logging.debug(_("Dissassociated %s stale fixed ip(s)"), num)
+ LOG.debug(_("Dissassociated %s stale fixed ip(s)"), num)
def init_host(self):
"""Do any initialization that needs to be run if this is a
diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py
index 52257f69f..bc26fd3c5 100644
--- a/nova/objectstore/handler.py
+++ b/nova/objectstore/handler.py
@@ -39,7 +39,6 @@ S3 client with this module::
import datetime
import json
-import logging
import multiprocessing
import os
import urllib
@@ -54,12 +53,14 @@ from twisted.web import static
from nova import context
from nova import exception
from nova import flags
+from nova import log as logging
from nova import utils
from nova.auth import manager
from nova.objectstore import bucket
from nova.objectstore import image
+LOG = logging.getLogger('nova.objectstore.handler')
FLAGS = flags.FLAGS
flags.DEFINE_string('s3_listen_host', '', 'Host to listen on.')
@@ -132,9 +133,11 @@ def get_context(request):
request.uri,
headers=request.getAllHeaders(),
check_type='s3')
- return context.RequestContext(user, project)
+ rv = context.RequestContext(user, project)
+ LOG.audit(_("Authenticated request"), context=rv)
+ return rv
except exception.Error as ex:
- logging.debug(_("Authentication Failure: %s"), ex)
+ LOG.debug(_("Authentication Failure: %s"), ex)
raise exception.NotAuthorized()
@@ -176,7 +179,7 @@ class S3(ErrorHandlingResource):
def render_GET(self, request): # pylint: disable-msg=R0201
"""Renders the GET request for a list of buckets as XML"""
- logging.debug('List of buckets requested')
+ LOG.debug(_('List of buckets requested'), context=request.context)
buckets = [b for b in bucket.Bucket.all() \
if b.is_authorized(request.context)]
@@ -203,7 +206,7 @@ class BucketResource(ErrorHandlingResource):
def render_GET(self, request):
"Returns the keys for the bucket resource"""
- logging.debug("List keys for bucket %s", self.name)
+ LOG.debug(_("List keys for bucket %s"), self.name)
try:
bucket_object = bucket.Bucket(self.name)
@@ -211,6 +214,8 @@ class BucketResource(ErrorHandlingResource):
return error.NoResource(message="No such bucket").render(request)
if not bucket_object.is_authorized(request.context):
+ LOG.audit(_("Unauthorized attempt to access bucket %s"),
+ self.name, context=request.context)
raise exception.NotAuthorized()
prefix = get_argument(request, "prefix", u"")
@@ -227,8 +232,8 @@ class BucketResource(ErrorHandlingResource):
def render_PUT(self, request):
"Creates the bucket resource"""
- logging.debug(_("Creating bucket %s"), self.name)
- logging.debug("calling bucket.Bucket.create(%r, %r)",
+ LOG.debug(_("Creating bucket %s"), self.name)
+ LOG.debug("calling bucket.Bucket.create(%r, %r)",
self.name,
request.context)
bucket.Bucket.create(self.name, request.context)
@@ -237,10 +242,12 @@ class BucketResource(ErrorHandlingResource):
def render_DELETE(self, request):
"""Deletes the bucket resource"""
- logging.debug(_("Deleting bucket %s"), self.name)
+ LOG.debug(_("Deleting bucket %s"), self.name)
bucket_object = bucket.Bucket(self.name)
if not bucket_object.is_authorized(request.context):
+ LOG.audit(_("Unauthorized attempt to delete bucket %s"),
+ self.name, context=request.context)
raise exception.NotAuthorized()
bucket_object.delete()
@@ -261,11 +268,12 @@ class ObjectResource(ErrorHandlingResource):
Raises NotAuthorized if user in request context is not
authorized to delete the object.
"""
- logging.debug(_("Getting object: %s / %s"),
- self.bucket.name,
- self.name)
+ LOG.debug(_("Getting object: %s / %s"), self.bucket.name, self.name)
if not self.bucket.is_authorized(request.context):
+ LOG.audit(_("Unauthorized attempt to get object %s from bucket "
+ "%s"), self.name, self.bucket.name,
+ context=request.context)
raise exception.NotAuthorized()
obj = self.bucket[urllib.unquote(self.name)]
@@ -281,11 +289,12 @@ class ObjectResource(ErrorHandlingResource):
Raises NotAuthorized if user in request context is not
authorized to delete the object.
"""
- logging.debug(_("Putting object: %s / %s"),
- self.bucket.name,
- self.name)
+ LOG.debug(_("Putting object: %s / %s"), self.bucket.name, self.name)
if not self.bucket.is_authorized(request.context):
+ LOG.audit(_("Unauthorized attempt to upload object %s to bucket "
+ "%s"),
+ self.name, self.bucket.name, context=request.context)
raise exception.NotAuthorized()
key = urllib.unquote(self.name)
@@ -302,11 +311,13 @@ class ObjectResource(ErrorHandlingResource):
authorized to delete the object.
"""
- logging.debug(_("Deleting object: %s / %s"),
- self.bucket.name,
- self.name)
+ LOG.debug(_("Deleting object: %s / %s"), self.bucket.name, self.name,
+ context=request.context)
if not self.bucket.is_authorized(request.context):
+ LOG.audit("Unauthorized attempt to delete object %s from "
+ "bucket %s", self.name, self.bucket.name,
+ context=request.context)
raise exception.NotAuthorized()
del self.bucket[urllib.unquote(self.name)]
@@ -379,13 +390,21 @@ class ImagesResource(resource.Resource):
image_path = os.path.join(FLAGS.images_path, image_id)
if not image_path.startswith(FLAGS.images_path) or \
os.path.exists(image_path):
+ LOG.audit(_("Not authorized to upload image: invalid directory "
+ "%s"),
+ image_path, context=request.context)
raise exception.NotAuthorized()
bucket_object = bucket.Bucket(image_location.split("/")[0])
if not bucket_object.is_authorized(request.context):
+ LOG.audit(_("Not authorized to upload image: unauthorized "
+ "bucket %s"), bucket_object.name,
+ context=request.context)
raise exception.NotAuthorized()
+ LOG.audit(_("Starting image upload: %s"), image_id,
+ context=request.context)
p = multiprocessing.Process(target=image.Image.register_aws_image,
args=(image_id, image_location, request.context))
p.start()
@@ -398,17 +417,21 @@ class ImagesResource(resource.Resource):
image_id = get_argument(request, 'image_id', u'')
image_object = image.Image(image_id)
if not image_object.is_authorized(request.context):
- logging.debug(_("not authorized for render_POST in images"))
+ LOG.audit(_("Not authorized to update attributes of image %s"),
+ image_id, context=request.context)
raise exception.NotAuthorized()
operation = get_argument(request, 'operation', u'')
if operation:
# operation implies publicity toggle
- logging.debug(_("handling publicity toggle"))
- image_object.set_public(operation == 'add')
+ newstatus = (operation == 'add')
+ LOG.audit(_("Toggling publicity flag of image %s %r"), image_id,
+ newstatus, context=request.context)
+ image_object.set_public(newstatus)
else:
# other attributes imply update
- logging.debug(_("update user fields"))
+ LOG.audit(_("Updating user fields on image %s"), image_id,
+ context=request.context)
clean_args = {}
for arg in request.args.keys():
clean_args[arg] = request.args[arg][0]
@@ -421,9 +444,12 @@ class ImagesResource(resource.Resource):
image_object = image.Image(image_id)
if not image_object.is_authorized(request.context):
+ LOG.audit(_("Unauthorized attempt to delete image %s"),
+ image_id, context=request.context)
raise exception.NotAuthorized()
image_object.delete()
+ LOG.audit(_("Deleted image: %s"), image_id, context=request.context)
request.setResponseCode(204)
return ''
diff --git a/nova/quota.py b/nova/quota.py
index f6ca9f77c..3884eb308 100644
--- a/nova/quota.py
+++ b/nova/quota.py
@@ -22,7 +22,6 @@ Quotas for instances, volumes, and floating ips
from nova import db
from nova import exception
from nova import flags
-from nova.compute import instance_types
FLAGS = flags.FLAGS
@@ -63,10 +62,9 @@ def allowed_instances(context, num_instances, instance_type):
quota = get_quota(context, project_id)
allowed_instances = quota['instances'] - used_instances
allowed_cores = quota['cores'] - used_cores
- type_cores = instance_types.INSTANCE_TYPES[instance_type]['vcpus']
- num_cores = num_instances * type_cores
+ num_cores = num_instances * instance_type['vcpus']
allowed_instances = min(allowed_instances,
- int(allowed_cores // type_cores))
+ int(allowed_cores // instance_type['vcpus']))
return min(num_instances, allowed_instances)
diff --git a/nova/rpc.py b/nova/rpc.py
index 844088348..49b11602b 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -22,7 +22,6 @@ No fan-out support yet.
"""
import json
-import logging
import sys
import time
import traceback
@@ -36,13 +35,12 @@ from nova import context
from nova import exception
from nova import fakerabbit
from nova import flags
+from nova import log as logging
from nova import utils
FLAGS = flags.FLAGS
-
-LOG = logging.getLogger('amqplib')
-LOG.setLevel(logging.DEBUG)
+LOG = logging.getLogger('nova.rpc')
class Connection(carrot_connection.BrokerConnection):
@@ -91,15 +89,16 @@ class Consumer(messaging.Consumer):
self.failed_connection = False
break
except: # Catching all because carrot sucks
- logging.exception(_("AMQP server on %s:%d is unreachable."
- " Trying again in %d seconds.") % (
- FLAGS.rabbit_host,
- FLAGS.rabbit_port,
- FLAGS.rabbit_retry_interval))
+ LOG.exception(_("AMQP server on %s:%d is unreachable."
+ " Trying again in %d seconds.") % (
+ FLAGS.rabbit_host,
+ FLAGS.rabbit_port,
+ FLAGS.rabbit_retry_interval))
self.failed_connection = True
if self.failed_connection:
- logging.exception(_("Unable to connect to AMQP server"
- " after %d tries. Shutting down.") % FLAGS.rabbit_max_retries)
+ LOG.exception(_("Unable to connect to AMQP server "
+ "after %d tries. Shutting down."),
+ FLAGS.rabbit_max_retries)
sys.exit(1)
def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False):
@@ -116,14 +115,14 @@ class Consumer(messaging.Consumer):
self.declare()
super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks)
if self.failed_connection:
- logging.error(_("Reconnected to queue"))
+ LOG.error(_("Reconnected to queue"))
self.failed_connection = False
# NOTE(vish): This is catching all errors because we really don't
# exceptions to be logged 10 times a second if some
# persistent failure occurs.
except Exception: # pylint: disable-msg=W0703
if not self.failed_connection:
- logging.exception(_("Failed to fetch message from queue"))
+ LOG.exception(_("Failed to fetch message from queue"))
self.failed_connection = True
def attach_to_eventlet(self):
@@ -193,6 +192,7 @@ class AdapterConsumer(TopicConsumer):
if msg_id:
msg_reply(msg_id, rval, None)
except Exception as e:
+ logging.exception("Exception during message handling")
if msg_id:
msg_reply(msg_id, None, sys.exc_info())
return
@@ -242,8 +242,8 @@ def msg_reply(msg_id, reply=None, failure=None):
if failure:
message = str(failure[1])
tb = traceback.format_exception(*failure)
- logging.error(_("Returning exception %s to caller"), message)
- logging.error(tb)
+ LOG.error(_("Returning exception %s to caller"), message)
+ LOG.error(tb)
failure = (failure[0].__name__, str(failure[1]), tb)
conn = Connection.instance(True)
publisher = DirectPublisher(connection=conn, msg_id=msg_id)
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 44e21f2fd..a4d6dd574 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -21,15 +21,16 @@
Scheduler Service
"""
-import logging
import functools
from nova import db
from nova import flags
+from nova import log as logging
from nova import manager
from nova import rpc
from nova import utils
+LOG = logging.getLogger('nova.scheduler.manager')
FLAGS = flags.FLAGS
flags.DEFINE_string('scheduler_driver',
'nova.scheduler.chance.ChanceScheduler',
@@ -65,4 +66,4 @@ class SchedulerManager(manager.Manager):
db.queue_get_for(context, topic, host),
{"method": method,
"args": kwargs})
- logging.debug(_("Casting to %s %s for %s"), topic, host, method)
+ LOG.debug(_("Casting to %s %s for %s"), topic, host, method)
diff --git a/nova/scheduler/zone.py b/nova/scheduler/zone.py
new file mode 100644
index 000000000..49786cd32
--- /dev/null
+++ b/nova/scheduler/zone.py
@@ -0,0 +1,56 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Openstack, LLC.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Availability Zone Scheduler implementation
+"""
+
+import random
+
+from nova.scheduler import driver
+from nova import db
+
+
+class ZoneScheduler(driver.Scheduler):
+ """Implements Scheduler as a random node selector."""
+
+ def hosts_up_with_zone(self, context, topic, zone):
+ """Return the list of hosts that have a running service
+ for topic and availability zone (if defined).
+ """
+
+ if zone is None:
+ return self.hosts_up(context, topic)
+
+ services = db.service_get_all_by_topic(context, topic)
+ return [service.host
+ for service in services
+ if self.service_is_up(service)
+ and service.availability_zone == zone]
+
+ def schedule(self, context, topic, *_args, **_kwargs):
+ """Picks a host that is up at random in selected
+ availability zone (if defined).
+ """
+
+ zone = _kwargs.get('availability_zone')
+ hosts = self.hosts_up_with_zone(context, topic, zone)
+ if not hosts:
+ raise driver.NoValidHost(_("No hosts found"))
+ return hosts[int(random.random() * len(hosts))]
diff --git a/nova/service.py b/nova/service.py
index f1f90742f..8b2a22ce0 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -21,17 +21,20 @@ Generic Node baseclass for all workers that run on hosts
"""
import inspect
-import logging
import os
import sys
+import time
from eventlet import event
from eventlet import greenthread
from eventlet import greenpool
+from sqlalchemy.exc import OperationalError
+
from nova import context
from nova import db
from nova import exception
+from nova import log as logging
from nova import flags
from nova import rpc
from nova import utils
@@ -110,11 +113,13 @@ class Service(object):
self.timers.append(periodic)
def _create_service_ref(self, context):
+ zone = FLAGS.node_availability_zone
service_ref = db.service_create(context,
{'host': self.host,
'binary': self.binary,
'topic': self.topic,
- 'report_count': 0})
+ 'report_count': 0,
+ 'availability_zone': zone})
self.service_id = service_ref['id']
def __getattr__(self, key):
@@ -151,7 +156,7 @@ class Service(object):
report_interval = FLAGS.report_interval
if not periodic_interval:
periodic_interval = FLAGS.periodic_interval
- logging.warn(_("Starting %s node"), topic)
+ logging.audit(_("Starting %s node"), topic)
service_obj = cls(host, binary, topic, manager,
report_interval, periodic_interval)
@@ -204,22 +209,29 @@ class Service(object):
self.model_disconnected = True
logging.exception(_("model server went away"))
+ try:
+ # NOTE(vish): This is late-loaded to make sure that the
+ # database is not created before flags have
+ # been loaded.
+ from nova.db.sqlalchemy import models
+ models.register_models()
+ except OperationalError:
+ logging.exception(_("Data store %s is unreachable."
+ " Trying again in %d seconds.") %
+ (FLAGS.sql_connection,
+ FLAGS.sql_retry_interval))
+ time.sleep(FLAGS.sql_retry_interval)
+
def serve(*services):
- argv = FLAGS(sys.argv)
+ FLAGS(sys.argv)
+ logging.basicConfig()
if not services:
services = [Service.create()]
name = '_'.join(x.binary for x in services)
- logging.debug("Serving %s" % name)
-
- logging.getLogger('amqplib').setLevel(logging.WARN)
-
- if FLAGS.verbose:
- logging.getLogger().setLevel(logging.DEBUG)
- else:
- logging.getLogger().setLevel(logging.WARNING)
+ logging.debug(_("Serving %s"), name)
logging.debug(_("Full set of FLAGS:"))
for flag in FLAGS:
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index 2028024bb..9a7a7adc7 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -120,13 +120,13 @@ def stub_out_rate_limiting(stubs):
def stub_out_networking(stubs):
def get_my_ip():
return '127.0.0.1'
- stubs.Set(nova.utils, 'get_my_ip', get_my_ip)
+ stubs.Set(nova.flags, '_get_my_ip', get_my_ip)
def stub_out_compute_api_snapshot(stubs):
def snapshot(self, context, instance_id, name):
return 123
- stubs.Set(nova.compute.api.ComputeAPI, 'snapshot', snapshot)
+ stubs.Set(nova.compute.API, 'snapshot', snapshot)
def stub_out_glance(stubs, initial_fixtures=[]):
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py
index ced881375..5d9ddefbe 100644
--- a/nova/tests/api/openstack/test_images.py
+++ b/nova/tests/api/openstack/test_images.py
@@ -22,7 +22,6 @@ and as a WSGI layer
import json
import datetime
-import logging
import unittest
import stubout
@@ -173,6 +172,7 @@ class ImageControllerWithGlanceServiceTest(unittest.TestCase):
IMAGE_FIXTURES = [
{'id': '23g2ogk23k4hhkk4k42l',
+ 'imageId': '23g2ogk23k4hhkk4k42l',
'name': 'public image #1',
'created_at': str(datetime.datetime.utcnow()),
'updated_at': str(datetime.datetime.utcnow()),
@@ -182,6 +182,7 @@ class ImageControllerWithGlanceServiceTest(unittest.TestCase):
'status': 'available',
'image_type': 'kernel'},
{'id': 'slkduhfas73kkaskgdas',
+ 'imageId': 'slkduhfas73kkaskgdas',
'name': 'public image #2',
'created_at': str(datetime.datetime.utcnow()),
'updated_at': str(datetime.datetime.utcnow()),
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index 3c95ceda6..29883e7c8 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -56,8 +56,8 @@ def instance_address(context, instance_id):
def stub_instance(id, user_id=1):
- return Instance(id=int(id) + 123456, state=0, image_id=10, user_id=user_id,
- display_name='server%s' % id, internal_id=id)
+ return Instance(id=id, state=0, image_id=10, user_id=user_id,
+ display_name='server%s' % id)
def fake_compute_api(cls, req, id):
@@ -76,8 +76,7 @@ class ServersTest(unittest.TestCase):
fakes.stub_out_key_pair_funcs(self.stubs)
fakes.stub_out_image_service(self.stubs)
self.stubs.Set(nova.db.api, 'instance_get_all', return_servers)
- self.stubs.Set(nova.db.api, 'instance_get_by_internal_id',
- return_server)
+ self.stubs.Set(nova.db.api, 'instance_get_by_id', return_server)
self.stubs.Set(nova.db.api, 'instance_get_all_by_user',
return_servers)
self.stubs.Set(nova.db.api, 'instance_add_security_group',
@@ -87,18 +86,12 @@ class ServersTest(unittest.TestCase):
instance_address)
self.stubs.Set(nova.db.api, 'instance_get_floating_address',
instance_address)
- self.stubs.Set(nova.compute.api.ComputeAPI, 'pause',
- fake_compute_api)
- self.stubs.Set(nova.compute.api.ComputeAPI, 'unpause',
- fake_compute_api)
- self.stubs.Set(nova.compute.api.ComputeAPI, 'suspend',
- fake_compute_api)
- self.stubs.Set(nova.compute.api.ComputeAPI, 'resume',
- fake_compute_api)
- self.stubs.Set(nova.compute.api.ComputeAPI, "get_diagnostics",
- fake_compute_api)
- self.stubs.Set(nova.compute.api.ComputeAPI, "get_actions",
- fake_compute_api)
+ self.stubs.Set(nova.compute.API, 'pause', fake_compute_api)
+ self.stubs.Set(nova.compute.API, 'unpause', fake_compute_api)
+ self.stubs.Set(nova.compute.API, 'suspend', fake_compute_api)
+ self.stubs.Set(nova.compute.API, 'resume', fake_compute_api)
+ self.stubs.Set(nova.compute.API, "get_diagnostics", fake_compute_api)
+ self.stubs.Set(nova.compute.API, "get_actions", fake_compute_api)
self.allow_admin = FLAGS.allow_admin_api
def tearDown(self):
@@ -109,7 +102,7 @@ class ServersTest(unittest.TestCase):
req = webob.Request.blank('/v1.0/servers/1')
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
- self.assertEqual(res_dict['server']['id'], 1)
+ self.assertEqual(res_dict['server']['id'], '1')
self.assertEqual(res_dict['server']['name'], 'server1')
def test_get_server_list(self):
@@ -126,7 +119,7 @@ class ServersTest(unittest.TestCase):
def test_create_instance(self):
def instance_create(context, inst):
- return {'id': 1, 'internal_id': 1, 'display_name': ''}
+ return {'id': '1', 'display_name': ''}
def server_update(context, id, params):
return instance_create(context, id)
@@ -140,6 +133,12 @@ class ServersTest(unittest.TestCase):
def queue_get_for(context, *args):
return 'network_topic'
+ def kernel_ramdisk_mapping(*args, **kwargs):
+ return (1, 1)
+
+ def image_id_from_hash(*args, **kwargs):
+ return 2
+
self.stubs.Set(nova.db.api, 'project_get_network', project_get_network)
self.stubs.Set(nova.db.api, 'instance_create', instance_create)
self.stubs.Set(nova.rpc, 'cast', fake_method)
@@ -149,6 +148,10 @@ class ServersTest(unittest.TestCase):
self.stubs.Set(nova.db.api, 'queue_get_for', queue_get_for)
self.stubs.Set(nova.network.manager.VlanManager, 'allocate_fixed_ip',
fake_method)
+ self.stubs.Set(nova.api.openstack.servers.Controller,
+ "_get_kernel_ramdisk_from_image", kernel_ramdisk_mapping)
+ self.stubs.Set(nova.api.openstack.common,
+ "get_image_id_from_image_hash", image_id_from_hash)
body = dict(server=dict(
name='server_test', imageId=2, flavorId=2, metadata={},
diff --git a/nova/tests/api/openstack/test_sharedipgroups.py b/nova/tests/api/openstack/test_shared_ip_groups.py
index d199951d8..c2fc3a203 100644
--- a/nova/tests/api/openstack/test_sharedipgroups.py
+++ b/nova/tests/api/openstack/test_shared_ip_groups.py
@@ -19,7 +19,7 @@ import unittest
import stubout
-from nova.api.openstack import sharedipgroups
+from nova.api.openstack import shared_ip_groups
class SharedIpGroupsTest(unittest.TestCase):
diff --git a/nova/tests/hyperv_unittest.py b/nova/tests/hyperv_unittest.py
new file mode 100644
index 000000000..3980ae3cb
--- /dev/null
+++ b/nova/tests/hyperv_unittest.py
@@ -0,0 +1,71 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2010 Cloud.com, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Hyper-V driver
+"""
+
+import random
+
+from nova import context
+from nova import db
+from nova import flags
+from nova import test
+from nova.auth import manager
+from nova.virt import hyperv
+
+FLAGS = flags.FLAGS
+FLAGS.connection_type = 'hyperv'
+
+
+class HyperVTestCase(test.TestCase):
+ """Test cases for the Hyper-V driver"""
+ def setUp(self):
+ super(HyperVTestCase, self).setUp()
+ self.manager = manager.AuthManager()
+ self.user = self.manager.create_user('fake', 'fake', 'fake',
+ admin=True)
+ self.project = self.manager.create_project('fake', 'fake', 'fake')
+ self.context = context.RequestContext(self.user, self.project)
+
+ def test_create_destroy(self):
+ """Create a VM and destroy it"""
+ instance = {'internal_id': random.randint(1, 1000000),
+ 'memory_mb': '1024',
+ 'mac_address': '02:12:34:46:56:67',
+ 'vcpus': 2,
+ 'project_id': 'fake',
+ 'instance_type': 'm1.small'}
+ instance_ref = db.instance_create(self.context, instance)
+
+ conn = hyperv.get_connection(False)
+ conn._create_vm(instance_ref) # pylint: disable-msg=W0212
+ found = [n for n in conn.list_instances()
+ if n == instance_ref['name']]
+ self.assertTrue(len(found) == 1)
+ info = conn.get_info(instance_ref['name'])
+ # Unfortunately since the vm is not running at this point,
+ # we cannot obtain memory information from get_info
+ self.assertEquals(info['num_cpu'], instance_ref['vcpus'])
+
+ conn.destroy(instance_ref)
+ found = [n for n in conn.list_instances()
+ if n == instance_ref['name']]
+ self.assertTrue(len(found) == 0)
+
+ def tearDown(self):
+ super(HyperVTestCase, self).tearDown()
+ self.manager.delete_project(self.project)
+ self.manager.delete_user(self.user)
diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py
index ceac17adb..da86e6e11 100644
--- a/nova/tests/objectstore_unittest.py
+++ b/nova/tests/objectstore_unittest.py
@@ -23,7 +23,6 @@ Unittests for S3 objectstore clone.
import boto
import glob
import hashlib
-import logging
import os
import shutil
import tempfile
@@ -63,7 +62,6 @@ class ObjectStoreTestCase(test.TestCase):
self.flags(buckets_path=os.path.join(OSS_TEMPDIR, 'buckets'),
images_path=os.path.join(OSS_TEMPDIR, 'images'),
ca_path=os.path.join(os.path.dirname(__file__), 'CA'))
- logging.getLogger().setLevel(logging.DEBUG)
self.auth_manager = manager.AuthManager()
self.auth_manager.create_user('user1')
diff --git a/nova/tests/test_auth.py b/nova/tests/test_auth.py
index 15d40bc53..35ffffb67 100644
--- a/nova/tests/test_auth.py
+++ b/nova/tests/test_auth.py
@@ -16,17 +16,18 @@
# License for the specific language governing permissions and limitations
# under the License.
-import logging
from M2Crypto import X509
import unittest
from nova import crypto
from nova import flags
+from nova import log as logging
from nova import test
from nova.auth import manager
from nova.api.ec2 import cloud
FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.auth_unittest')
class user_generator(object):
@@ -211,12 +212,12 @@ class AuthManagerTestCase(object):
# NOTE(vish): Setup runs genroot.sh if it hasn't been run
cloud.CloudController().setup()
_key, cert_str = crypto.generate_x509_cert(user.id, project.id)
- logging.debug(cert_str)
+ LOG.debug(cert_str)
full_chain = crypto.fetch_ca(project_id=project.id, chain=True)
int_cert = crypto.fetch_ca(project_id=project.id, chain=False)
cloud_cert = crypto.fetch_ca()
- logging.debug("CA chain:\n\n =====\n%s\n\n=====" % full_chain)
+ LOG.debug("CA chain:\n\n =====\n%s\n\n=====", full_chain)
signed_cert = X509.load_cert_string(cert_str)
chain_cert = X509.load_cert_string(full_chain)
int_cert = X509.load_cert_string(int_cert)
@@ -331,7 +332,7 @@ class AuthManagerLdapTestCase(AuthManagerTestCase, test.TestCase):
test.TestCase.__init__(self, *args, **kwargs)
import nova.auth.fakeldap as fakeldap
if FLAGS.flush_db:
- logging.info("Flushing datastore")
+ LOG.info("Flushing datastore")
r = fakeldap.Store.instance()
r.flushdb()
diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index 70d2c44da..fdacb04f6 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -18,7 +18,6 @@
from base64 import b64decode
import json
-import logging
from M2Crypto import BIO
from M2Crypto import RSA
import os
@@ -31,6 +30,7 @@ from nova import context
from nova import crypto
from nova import db
from nova import flags
+from nova import log as logging
from nova import rpc
from nova import service
from nova import test
@@ -41,6 +41,7 @@ from nova.objectstore import image
FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.cloud')
# Temp dirs for working with image attributes through the cloud controller
# (stole this from objectstore_unittest.py)
@@ -56,7 +57,6 @@ class CloudTestCase(test.TestCase):
images_path=IMAGES_PATH)
self.conn = rpc.Connection.instance()
- logging.getLogger().setLevel(logging.DEBUG)
# set up our cloud
self.cloud = cloud.CloudController()
@@ -106,7 +106,7 @@ class CloudTestCase(test.TestCase):
self.cloud.allocate_address(self.context)
inst = db.instance_create(self.context, {'host': FLAGS.host})
fixed = self.network.allocate_fixed_ip(self.context, inst['id'])
- ec2_id = cloud.internal_id_to_ec2_id(inst['internal_id'])
+ ec2_id = cloud.id_to_ec2_id(inst['id'])
self.cloud.associate_address(self.context,
instance_id=ec2_id,
public_ip=address)
@@ -127,12 +127,58 @@ class CloudTestCase(test.TestCase):
result = self.cloud.describe_volumes(self.context)
self.assertEqual(len(result['volumeSet']), 2)
result = self.cloud.describe_volumes(self.context,
- volume_id=[vol2['ec2_id']])
+ volume_id=[vol2['id']])
self.assertEqual(len(result['volumeSet']), 1)
- self.assertEqual(result['volumeSet'][0]['volumeId'], vol2['ec2_id'])
+ self.assertEqual(result['volumeSet'][0]['volumeId'], vol2['id'])
db.volume_destroy(self.context, vol1['id'])
db.volume_destroy(self.context, vol2['id'])
+ def test_describe_availability_zones(self):
+ """Makes sure describe_availability_zones works and filters results."""
+ service1 = db.service_create(self.context, {'host': 'host1_zones',
+ 'binary': "nova-compute",
+ 'topic': 'compute',
+ 'report_count': 0,
+ 'availability_zone': "zone1"})
+ service2 = db.service_create(self.context, {'host': 'host2_zones',
+ 'binary': "nova-compute",
+ 'topic': 'compute',
+ 'report_count': 0,
+ 'availability_zone': "zone2"})
+ result = self.cloud.describe_availability_zones(self.context)
+ self.assertEqual(len(result['availabilityZoneInfo']), 3)
+ db.service_destroy(self.context, service1['id'])
+ db.service_destroy(self.context, service2['id'])
+
+ def test_describe_instances(self):
+ """Makes sure describe_instances works and filters results."""
+ inst1 = db.instance_create(self.context, {'reservation_id': 'a',
+ 'host': 'host1'})
+ inst2 = db.instance_create(self.context, {'reservation_id': 'a',
+ 'host': 'host2'})
+ comp1 = db.service_create(self.context, {'host': 'host1',
+ 'availability_zone': 'zone1',
+ 'topic': "compute"})
+ comp2 = db.service_create(self.context, {'host': 'host2',
+ 'availability_zone': 'zone2',
+ 'topic': "compute"})
+ result = self.cloud.describe_instances(self.context)
+ result = result['reservationSet'][0]
+ self.assertEqual(len(result['instancesSet']), 2)
+ instance_id = cloud.id_to_ec2_id(inst2['id'])
+ result = self.cloud.describe_instances(self.context,
+ instance_id=[instance_id])
+ result = result['reservationSet'][0]
+ self.assertEqual(len(result['instancesSet']), 1)
+ self.assertEqual(result['instancesSet'][0]['instanceId'],
+ instance_id)
+ self.assertEqual(result['instancesSet'][0]
+ ['placement']['availabilityZone'], 'zone2')
+ db.instance_destroy(self.context, inst1['id'])
+ db.instance_destroy(self.context, inst2['id'])
+ db.service_destroy(self.context, comp1['id'])
+ db.service_destroy(self.context, comp2['id'])
+
def test_console_output(self):
image_id = FLAGS.default_image
instance_type = FLAGS.default_instance_type
@@ -140,11 +186,24 @@ class CloudTestCase(test.TestCase):
kwargs = {'image_id': image_id,
'instance_type': instance_type,
'max_count': max_count}
+ rv = self.cloud.run_instances(self.context, **kwargs)
+ instance_id = rv['instancesSet'][0]['instanceId']
+ output = self.cloud.get_console_output(context=self.context,
+ instance_id=[instance_id])
+ self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE OUTPUT')
+ # TODO(soren): We need this until we can stop polling in the rpc code
+ # for unit tests.
+ greenthread.sleep(0.3)
+ rv = self.cloud.terminate_instances(self.context, [instance_id])
+
+ def test_ajax_console(self):
+ kwargs = {'image_id': image_id}
rv = yield self.cloud.run_instances(self.context, **kwargs)
instance_id = rv['instancesSet'][0]['instanceId']
output = yield self.cloud.get_console_output(context=self.context,
instance_id=[instance_id])
- self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE OUTPUT')
+ self.assertEquals(b64decode(output['output']),
+ 'http://fakeajaxconsole.com/?token=FAKETOKEN')
# TODO(soren): We need this until we can stop polling in the rpc code
# for unit tests.
greenthread.sleep(0.3)
@@ -178,7 +237,7 @@ class CloudTestCase(test.TestCase):
def test_run_instances(self):
if FLAGS.connection_type == 'fake':
- logging.debug("Can't test instances without a real virtual env.")
+ LOG.debug(_("Can't test instances without a real virtual env."))
return
image_id = FLAGS.default_image
instance_type = FLAGS.default_instance_type
@@ -186,30 +245,43 @@ class CloudTestCase(test.TestCase):
kwargs = {'image_id': image_id,
'instance_type': instance_type,
'max_count': max_count}
- rv = yield self.cloud.run_instances(self.context, **kwargs)
+ rv = self.cloud.run_instances(self.context, **kwargs)
# TODO: check for proper response
instance_id = rv['reservationSet'][0].keys()[0]
instance = rv['reservationSet'][0][instance_id][0]
- logging.debug("Need to watch instance %s until it's running..." %
- instance['instance_id'])
+ LOG.debug(_("Need to watch instance %s until it's running..."),
+ instance['instance_id'])
while True:
greenthread.sleep(1)
info = self.cloud._get_instance(instance['instance_id'])
- logging.debug(info['state'])
+ LOG.debug(info['state'])
if info['state'] == power_state.RUNNING:
break
self.assert_(rv)
- if connection_type != 'fake':
+ if FLAGS.connection_type != 'fake':
time.sleep(45) # Should use boto for polling here
for reservations in rv['reservationSet']:
# for res_id in reservations.keys():
- # logging.debug(reservations[res_id])
+ # LOG.debug(reservations[res_id])
# for instance in reservations[res_id]:
for instance in reservations[reservations.keys()[0]]:
instance_id = instance['instance_id']
- logging.debug("Terminating instance %s" % instance_id)
- rv = yield self.compute.terminate_instance(instance_id)
+ LOG.debug(_("Terminating instance %s"), instance_id)
+ rv = self.compute.terminate_instance(instance_id)
+
+ def test_describe_instances(self):
+ """Makes sure describe_instances works."""
+ instance1 = db.instance_create(self.context, {'host': 'host2'})
+ comp1 = db.service_create(self.context, {'host': 'host2',
+ 'availability_zone': 'zone1',
+ 'topic': "compute"})
+ result = self.cloud.describe_instances(self.context)
+ self.assertEqual(result['reservationSet'][0]
+ ['instancesSet'][0]
+ ['placement']['availabilityZone'], 'zone1')
+ db.instance_destroy(self.context, instance1['id'])
+ db.service_destroy(self.context, comp1['id'])
def test_instance_update_state(self):
def instance(num):
@@ -296,7 +368,7 @@ class CloudTestCase(test.TestCase):
def test_update_of_instance_display_fields(self):
inst = db.instance_create(self.context, {})
- ec2_id = cloud.internal_id_to_ec2_id(inst['internal_id'])
+ ec2_id = cloud.id_to_ec2_id(inst['id'])
self.cloud.update_instance(self.context, ec2_id,
display_name='c00l 1m4g3')
inst = db.instance_get(self.context, inst['id'])
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index e6a0ffa20..113fbd5a3 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -20,18 +20,19 @@ Tests For Compute
"""
import datetime
-import logging
+from nova import compute
from nova import context
from nova import db
from nova import exception
from nova import flags
+from nova import log as logging
from nova import test
from nova import utils
from nova.auth import manager
-from nova.compute import api as compute_api
+LOG = logging.getLogger('nova.tests.compute')
FLAGS = flags.FLAGS
flags.DECLARE('stub_network', 'nova.compute.manager')
@@ -39,13 +40,12 @@ flags.DECLARE('stub_network', 'nova.compute.manager')
class ComputeTestCase(test.TestCase):
"""Test case for compute"""
def setUp(self):
- logging.getLogger().setLevel(logging.DEBUG)
super(ComputeTestCase, self).setUp()
self.flags(connection_type='fake',
stub_network=True,
network_manager='nova.network.manager.FlatManager')
self.compute = utils.import_object(FLAGS.compute_manager)
- self.compute_api = compute_api.ComputeAPI()
+ self.compute_api = compute.API()
self.manager = manager.AuthManager()
self.user = self.manager.create_user('fake', 'fake', 'fake')
self.project = self.manager.create_project('fake', 'fake', 'fake')
@@ -73,7 +73,7 @@ class ComputeTestCase(test.TestCase):
"""Verify that an instance cannot be created without a display_name."""
cases = [dict(), dict(display_name=None)]
for instance in cases:
- ref = self.compute_api.create_instances(self.context,
+ ref = self.compute_api.create(self.context,
FLAGS.default_instance_type, None, **instance)
try:
self.assertNotEqual(ref[0].display_name, None)
@@ -81,13 +81,13 @@ class ComputeTestCase(test.TestCase):
db.instance_destroy(self.context, ref[0]['id'])
def test_create_instance_associates_security_groups(self):
- """Make sure create_instances associates security groups"""
+ """Make sure create associates security groups"""
values = {'name': 'default',
'description': 'default',
'user_id': self.user.id,
'project_id': self.project.id}
group = db.security_group_create(self.context, values)
- ref = self.compute_api.create_instances(self.context,
+ ref = self.compute_api.create(self.context,
FLAGS.default_instance_type, None, security_group=['default'])
try:
self.assertEqual(len(ref[0]['security_groups']), 1)
@@ -102,13 +102,13 @@ class ComputeTestCase(test.TestCase):
self.compute.run_instance(self.context, instance_id)
instances = db.instance_get_all(context.get_admin_context())
- logging.info(_("Running instances: %s"), instances)
+ LOG.info(_("Running instances: %s"), instances)
self.assertEqual(len(instances), 1)
self.compute.terminate_instance(self.context, instance_id)
instances = db.instance_get_all(context.get_admin_context())
- logging.info(_("After terminating instances: %s"), instances)
+ LOG.info(_("After terminating instances: %s"), instances)
self.assertEqual(len(instances), 0)
def test_run_terminate_timestamps(self):
@@ -170,6 +170,16 @@ class ComputeTestCase(test.TestCase):
self.assert_(console)
self.compute.terminate_instance(self.context, instance_id)
+ def test_ajax_console(self):
+ """Make sure we can get console output from instance"""
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+
+ console = self.compute.get_ajax_console(self.context,
+ instance_id)
+ self.assert_(console)
+ self.compute.terminate_instance(self.context, instance_id)
+
def test_run_instance_existing(self):
"""Ensure failure when running an instance that already exists"""
instance_id = self._create_instance()
@@ -179,3 +189,22 @@ class ComputeTestCase(test.TestCase):
self.context,
instance_id)
self.compute.terminate_instance(self.context, instance_id)
+
+ def test_lock(self):
+ """ensure locked instance cannot be changed"""
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+
+ non_admin_context = context.RequestContext(None, None, False, False)
+
+ # decorator should return False (fail) with locked nonadmin context
+ self.compute.lock_instance(self.context, instance_id)
+ ret_val = self.compute.reboot_instance(non_admin_context, instance_id)
+ self.assertEqual(ret_val, False)
+
+ # decorator should return None (success) with unlocked nonadmin context
+ self.compute.unlock_instance(self.context, instance_id)
+ ret_val = self.compute.reboot_instance(non_admin_context, instance_id)
+ self.assertEqual(ret_val, None)
+
+ self.compute.terminate_instance(self.context, instance_id)
diff --git a/nova/tests/test_console.py b/nova/tests/test_console.py
new file mode 100644
index 000000000..31b5ca79c
--- /dev/null
+++ b/nova/tests/test_console.py
@@ -0,0 +1,129 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Openstack, LLC.
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Tests For Console proxy.
+"""
+
+import datetime
+import logging
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import test
+from nova import utils
+from nova.auth import manager
+from nova.console import manager as console_manager
+
+FLAGS = flags.FLAGS
+
+
+class ConsoleTestCase(test.TestCase):
+ """Test case for console proxy"""
+ def setUp(self):
+ logging.getLogger().setLevel(logging.DEBUG)
+ super(ConsoleTestCase, self).setUp()
+ self.flags(console_driver='nova.console.fake.FakeConsoleProxy',
+ stub_compute=True)
+ self.console = utils.import_object(FLAGS.console_manager)
+ self.manager = manager.AuthManager()
+ self.user = self.manager.create_user('fake', 'fake', 'fake')
+ self.project = self.manager.create_project('fake', 'fake', 'fake')
+ self.context = context.get_admin_context()
+ self.host = 'test_compute_host'
+
+ def tearDown(self):
+ self.manager.delete_user(self.user)
+ self.manager.delete_project(self.project)
+ super(ConsoleTestCase, self).tearDown()
+
+ def _create_instance(self):
+ """Create a test instance"""
+ inst = {}
+ #inst['host'] = self.host
+ #inst['name'] = 'instance-1234'
+ inst['image_id'] = 'ami-test'
+ inst['reservation_id'] = 'r-fakeres'
+ inst['launch_time'] = '10'
+ inst['user_id'] = self.user.id
+ inst['project_id'] = self.project.id
+ inst['instance_type'] = 'm1.tiny'
+ inst['mac_address'] = utils.generate_mac()
+ inst['ami_launch_index'] = 0
+ return db.instance_create(self.context, inst)['id']
+
+ def test_get_pool_for_instance_host(self):
+ pool = self.console.get_pool_for_instance_host(self.context, self.host)
+ self.assertEqual(pool['compute_host'], self.host)
+
+ def test_get_pool_creates_new_pool_if_needed(self):
+ self.assertRaises(exception.NotFound,
+ db.console_pool_get_by_host_type,
+ self.context,
+ self.host,
+ self.console.host,
+ self.console.driver.console_type)
+ pool = self.console.get_pool_for_instance_host(self.context,
+ self.host)
+ pool2 = db.console_pool_get_by_host_type(self.context,
+ self.host,
+ self.console.host,
+ self.console.driver.console_type)
+ self.assertEqual(pool['id'], pool2['id'])
+
+ def test_get_pool_does_not_create_new_pool_if_exists(self):
+ pool_info = {'address': '127.0.0.1',
+ 'username': 'test',
+ 'password': '1234pass',
+ 'host': self.console.host,
+ 'console_type': self.console.driver.console_type,
+ 'compute_host': 'sometesthostname'}
+ new_pool = db.console_pool_create(self.context, pool_info)
+ pool = self.console.get_pool_for_instance_host(self.context,
+ 'sometesthostname')
+ self.assertEqual(pool['id'], new_pool['id'])
+
+ def test_add_console(self):
+ instance_id = self._create_instance()
+ self.console.add_console(self.context, instance_id)
+ instance = db.instance_get(self.context, instance_id)
+ pool = db.console_pool_get_by_host_type(self.context,
+ instance['host'],
+ self.console.host,
+ self.console.driver.console_type)
+
+ console_instances = [con['instance_id'] for con in pool.consoles]
+ self.assert_(instance_id in console_instances)
+
+ def test_add_console_does_not_duplicate(self):
+ instance_id = self._create_instance()
+ cons1 = self.console.add_console(self.context, instance_id)
+ cons2 = self.console.add_console(self.context, instance_id)
+ self.assertEqual(cons1, cons2)
+
+ def test_remove_console(self):
+ instance_id = self._create_instance()
+ console_id = self.console.add_console(self.context, instance_id)
+ self.console.remove_console(self.context, console_id)
+
+ self.assertRaises(exception.NotFound,
+ db.console_get,
+ self.context,
+ console_id)
diff --git a/nova/tests/test_log.py b/nova/tests/test_log.py
new file mode 100644
index 000000000..beb1d97cf
--- /dev/null
+++ b/nova/tests/test_log.py
@@ -0,0 +1,110 @@
+import cStringIO
+
+from nova import context
+from nova import log
+from nova import test
+
+
+def _fake_context():
+ return context.RequestContext(1, 1)
+
+
+class RootLoggerTestCase(test.TrialTestCase):
+ def setUp(self):
+ super(RootLoggerTestCase, self).setUp()
+ self.log = log.logging.root
+
+ def tearDown(self):
+ super(RootLoggerTestCase, self).tearDown()
+ log.NovaLogger.manager.loggerDict = {}
+
+ def test_is_nova_instance(self):
+ self.assert_(isinstance(self.log, log.NovaLogger))
+
+ def test_name_is_nova_root(self):
+ self.assertEqual("nova.root", self.log.name)
+
+ def test_handlers_have_nova_formatter(self):
+ formatters = []
+ for h in self.log.handlers:
+ f = h.formatter
+ if isinstance(f, log.NovaFormatter):
+ formatters.append(f)
+ self.assert_(formatters)
+ self.assertEqual(len(formatters), len(self.log.handlers))
+
+ def test_handles_context_kwarg(self):
+ self.log.info("foo", context=_fake_context())
+ self.assert_(True) # didn't raise exception
+
+ def test_module_level_methods_handle_context_arg(self):
+ log.info("foo", context=_fake_context())
+ self.assert_(True) # didn't raise exception
+
+ def test_module_level_audit_handles_context_arg(self):
+ log.audit("foo", context=_fake_context())
+ self.assert_(True) # didn't raise exception
+
+
+class NovaFormatterTestCase(test.TrialTestCase):
+ def setUp(self):
+ super(NovaFormatterTestCase, self).setUp()
+ self.flags(logging_context_format_string="HAS CONTEXT "\
+ "[%(request_id)s]: %(message)s",
+ logging_default_format_string="NOCTXT: %(message)s",
+ logging_debug_format_suffix="--DBG")
+ self.log = log.logging.root
+ self.stream = cStringIO.StringIO()
+ handler = log.StreamHandler(self.stream)
+ self.log.addHandler(handler)
+ self.log.setLevel(log.DEBUG)
+
+ def tearDown(self):
+ super(NovaFormatterTestCase, self).tearDown()
+ log.NovaLogger.manager.loggerDict = {}
+
+ def test_uncontextualized_log(self):
+ self.log.info("foo")
+ self.assertEqual("NOCTXT: foo\n", self.stream.getvalue())
+
+ def test_contextualized_log(self):
+ ctxt = _fake_context()
+ self.log.info("bar", context=ctxt)
+ expected = "HAS CONTEXT [%s]: bar\n" % ctxt.request_id
+ self.assertEqual(expected, self.stream.getvalue())
+
+ def test_debugging_log(self):
+ self.log.debug("baz")
+ self.assertEqual("NOCTXT: baz --DBG\n", self.stream.getvalue())
+
+
+class NovaLoggerTestCase(test.TrialTestCase):
+ def setUp(self):
+ super(NovaLoggerTestCase, self).setUp()
+ self.flags(default_log_levels=["nova-test=AUDIT"], verbose=False)
+ self.log = log.getLogger('nova-test')
+
+ def tearDown(self):
+ super(NovaLoggerTestCase, self).tearDown()
+ log.NovaLogger.manager.loggerDict = {}
+
+ def test_has_level_from_flags(self):
+ self.assertEqual(log.AUDIT, self.log.level)
+
+ def test_child_log_has_level_of_parent_flag(self):
+ l = log.getLogger('nova-test.foo')
+ self.assertEqual(log.AUDIT, l.level)
+
+
+class VerboseLoggerTestCase(test.TrialTestCase):
+ def setUp(self):
+ super(VerboseLoggerTestCase, self).setUp()
+ self.flags(default_log_levels=["nova.test=AUDIT"], verbose=True)
+ self.log = log.getLogger('nova.test')
+
+ def tearDown(self):
+ super(VerboseLoggerTestCase, self).tearDown()
+ log.NovaLogger.manager.loggerDict = {}
+
+ def test_will_be_verbose_if_named_nova_and_verbose_flag_set(self):
+ self.assertEqual(log.DEBUG, self.log.level)
diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py
index 96473ac7c..349e20f84 100644
--- a/nova/tests/test_network.py
+++ b/nova/tests/test_network.py
@@ -20,18 +20,18 @@ Unit Tests for network code
"""
import IPy
import os
-import logging
from nova import context
from nova import db
from nova import exception
from nova import flags
-from nova import service
+from nova import log as logging
from nova import test
from nova import utils
from nova.auth import manager
FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.network')
class NetworkTestCase(test.TestCase):
@@ -45,7 +45,6 @@ class NetworkTestCase(test.TestCase):
fake_network=True,
network_size=16,
num_networks=5)
- logging.getLogger().setLevel(logging.DEBUG)
self.manager = manager.AuthManager()
self.user = self.manager.create_user('netuser', 'netuser', 'netuser')
self.projects = []
@@ -328,7 +327,7 @@ def lease_ip(private_ip):
'TESTING': '1',
'FLAGFILE': FLAGS.dhcpbridge_flagfile}
(out, err) = utils.execute(cmd, addl_env=env)
- logging.debug("ISSUE_IP: %s, %s ", out, err)
+ LOG.debug("ISSUE_IP: %s, %s ", out, err)
def release_ip(private_ip):
@@ -344,4 +343,4 @@ def release_ip(private_ip):
'TESTING': '1',
'FLAGFILE': FLAGS.dhcpbridge_flagfile}
(out, err) = utils.execute(cmd, addl_env=env)
- logging.debug("RELEASE_IP: %s, %s ", out, err)
+ LOG.debug("RELEASE_IP: %s, %s ", out, err)
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
index 8cf2a5e54..9548a8c13 100644
--- a/nova/tests/test_quota.py
+++ b/nova/tests/test_quota.py
@@ -16,17 +16,15 @@
# License for the specific language governing permissions and limitations
# under the License.
-import logging
-
from nova import context
from nova import db
-from nova import exception
from nova import flags
from nova import quota
from nova import test
from nova import utils
from nova.auth import manager
from nova.api.ec2 import cloud
+from nova.compute import instance_types
FLAGS = flags.FLAGS
@@ -34,7 +32,6 @@ FLAGS = flags.FLAGS
class QuotaTestCase(test.TestCase):
def setUp(self):
- logging.getLogger().setLevel(logging.DEBUG)
super(QuotaTestCase, self).setUp()
self.flags(connection_type='fake',
quota_instances=2,
@@ -78,14 +75,17 @@ class QuotaTestCase(test.TestCase):
def test_quota_overrides(self):
"""Make sure overriding a projects quotas works"""
- num_instances = quota.allowed_instances(self.context, 100, 'm1.small')
+ num_instances = quota.allowed_instances(self.context, 100,
+ instance_types.INSTANCE_TYPES['m1.small'])
self.assertEqual(num_instances, 2)
db.quota_create(self.context, {'project_id': self.project.id,
'instances': 10})
- num_instances = quota.allowed_instances(self.context, 100, 'm1.small')
+ num_instances = quota.allowed_instances(self.context, 100,
+ instance_types.INSTANCE_TYPES['m1.small'])
self.assertEqual(num_instances, 4)
db.quota_update(self.context, self.project.id, {'cores': 100})
- num_instances = quota.allowed_instances(self.context, 100, 'm1.small')
+ num_instances = quota.allowed_instances(self.context, 100,
+ instance_types.INSTANCE_TYPES['m1.small'])
self.assertEqual(num_instances, 10)
db.quota_destroy(self.context, self.project.id)
diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py
index 6ea2edcab..85593ab46 100644
--- a/nova/tests/test_rpc.py
+++ b/nova/tests/test_rpc.py
@@ -18,15 +18,16 @@
"""
Unit Tests for remote procedure calls using queue
"""
-import logging
from nova import context
from nova import flags
+from nova import log as logging
from nova import rpc
from nova import test
FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.rpc')
class RpcTestCase(test.TestCase):
@@ -85,12 +86,12 @@ class RpcTestCase(test.TestCase):
@staticmethod
def echo(context, queue, value):
"""Calls echo in the passed queue"""
- logging.debug("Nested received %s, %s", queue, value)
+ LOG.debug(_("Nested received %s, %s"), queue, value)
ret = rpc.call(context,
queue,
{"method": "echo",
"args": {"value": value}})
- logging.debug("Nested return %s", ret)
+ LOG.debug(_("Nested return %s"), ret)
return value
nested = Nested()
@@ -115,13 +116,13 @@ class TestReceiver(object):
@staticmethod
def echo(context, value):
"""Simply returns whatever value is sent in"""
- logging.debug("Received %s", value)
+ LOG.debug(_("Received %s"), value)
return value
@staticmethod
def context(context, value):
"""Returns dictionary version of context"""
- logging.debug("Received %s", context)
+ LOG.debug(_("Received %s"), context)
return context.to_dict()
@staticmethod
diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py
index a9937d797..9d458244b 100644
--- a/nova/tests/test_scheduler.py
+++ b/nova/tests/test_scheduler.py
@@ -21,6 +21,7 @@ Tests For Scheduler
import datetime
+from mox import IgnoreArg
from nova import context
from nova import db
from nova import flags
@@ -76,6 +77,59 @@ class SchedulerTestCase(test.TestCase):
scheduler.named_method(ctxt, 'topic', num=7)
+class ZoneSchedulerTestCase(test.TestCase):
+ """Test case for zone scheduler"""
+ def setUp(self):
+ super(ZoneSchedulerTestCase, self).setUp()
+ self.flags(scheduler_driver='nova.scheduler.zone.ZoneScheduler')
+
+ def _create_service_model(self, **kwargs):
+ service = db.sqlalchemy.models.Service()
+ service.host = kwargs['host']
+ service.disabled = False
+ service.deleted = False
+ service.report_count = 0
+ service.binary = 'nova-compute'
+ service.topic = 'compute'
+ service.id = kwargs['id']
+ service.availability_zone = kwargs['zone']
+ service.created_at = datetime.datetime.utcnow()
+ return service
+
+ def test_with_two_zones(self):
+ scheduler = manager.SchedulerManager()
+ ctxt = context.get_admin_context()
+ service_list = [self._create_service_model(id=1,
+ host='host1',
+ zone='zone1'),
+ self._create_service_model(id=2,
+ host='host2',
+ zone='zone2'),
+ self._create_service_model(id=3,
+ host='host3',
+ zone='zone2'),
+ self._create_service_model(id=4,
+ host='host4',
+ zone='zone2'),
+ self._create_service_model(id=5,
+ host='host5',
+ zone='zone2')]
+ self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
+ arg = IgnoreArg()
+ db.service_get_all_by_topic(arg, arg).AndReturn(service_list)
+ self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
+ rpc.cast(ctxt,
+ 'compute.host1',
+ {'method': 'run_instance',
+ 'args': {'instance_id': 'i-ffffffff',
+ 'availability_zone': 'zone1'}})
+ self.mox.ReplayAll()
+ scheduler.run_instance(ctxt,
+ 'compute',
+ instance_id='i-ffffffff',
+ availability_zone='zone1')
+
+
class SimpleDriverTestCase(test.TestCase):
"""Test case for simple driver"""
def setUp(self):
diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py
index 9f1a181a0..a67c8d1e8 100644
--- a/nova/tests/test_service.py
+++ b/nova/tests/test_service.py
@@ -133,7 +133,8 @@ class ServiceTestCase(test.TestCase):
service_create = {'host': host,
'binary': binary,
'topic': topic,
- 'report_count': 0}
+ 'report_count': 0,
+ 'availability_zone': 'nova'}
service_ref = {'host': host,
'binary': binary,
'report_count': 0,
@@ -161,11 +162,13 @@ class ServiceTestCase(test.TestCase):
service_create = {'host': host,
'binary': binary,
'topic': topic,
- 'report_count': 0}
+ 'report_count': 0,
+ 'availability_zone': 'nova'}
service_ref = {'host': host,
'binary': binary,
'topic': topic,
'report_count': 0,
+ 'availability_zone': 'nova',
'id': 1}
service.db.service_get_by_args(mox.IgnoreArg(),
@@ -193,11 +196,13 @@ class ServiceTestCase(test.TestCase):
service_create = {'host': host,
'binary': binary,
'topic': topic,
- 'report_count': 0}
+ 'report_count': 0,
+ 'availability_zone': 'nova'}
service_ref = {'host': host,
'binary': binary,
'topic': topic,
'report_count': 0,
+ 'availability_zone': 'nova',
'id': 1}
service.db.service_get_by_args(mox.IgnoreArg(),
@@ -224,11 +229,13 @@ class ServiceTestCase(test.TestCase):
service_create = {'host': host,
'binary': binary,
'topic': topic,
- 'report_count': 0}
+ 'report_count': 0,
+ 'availability_zone': 'nova'}
service_ref = {'host': host,
'binary': binary,
'topic': topic,
'report_count': 0,
+ 'availability_zone': 'nova',
'id': 1}
service.db.service_get_by_args(mox.IgnoreArg(),
diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py
index 4aa489d08..afdc89ba2 100644
--- a/nova/tests/test_virt.py
+++ b/nova/tests/test_virt.py
@@ -208,8 +208,141 @@ class LibvirtConnTestCase(test.TestCase):
self.manager.delete_user(self.user)
-class NWFilterTestCase(test.TestCase):
+class IptablesFirewallTestCase(test.TestCase):
+ def setUp(self):
+ super(IptablesFirewallTestCase, self).setUp()
+ self.manager = manager.AuthManager()
+ self.user = self.manager.create_user('fake', 'fake', 'fake',
+ admin=True)
+ self.project = self.manager.create_project('fake', 'fake', 'fake')
+ self.context = context.RequestContext('fake', 'fake')
+ self.network = utils.import_object(FLAGS.network_manager)
+ self.fw = libvirt_conn.IptablesFirewallDriver()
+
+ def tearDown(self):
+ self.manager.delete_project(self.project)
+ self.manager.delete_user(self.user)
+ super(IptablesFirewallTestCase, self).tearDown()
+
+ def _p(self, *args, **kwargs):
+ if 'iptables-restore' in args:
+ print ' '.join(args), kwargs['stdin']
+ if 'iptables-save' in args:
+ return
+
+ in_rules = [
+ '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
+ '*filter',
+ ':INPUT ACCEPT [969615:281627771]',
+ ':FORWARD ACCEPT [0:0]',
+ ':OUTPUT ACCEPT [915599:63811649]',
+ ':nova-block-ipv4 - [0:0]',
+ '-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT ',
+ '-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT ',
+ '-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT ',
+ '-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
+ '-A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
+ ',ESTABLISHED -j ACCEPT ',
+ '-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
+ '-A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
+ '-A FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable ',
+ '-A FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable ',
+ 'COMMIT',
+ '# Completed on Mon Dec 6 11:54:13 2010',
+ ]
+
+ def test_static_filters(self):
+ self.fw.execute = self._p
+ instance_ref = db.instance_create(self.context,
+ {'user_id': 'fake',
+ 'project_id': 'fake'})
+ ip = '10.11.12.13'
+
+ network_ref = db.project_get_network(self.context,
+ 'fake')
+
+ fixed_ip = {'address': ip,
+ 'network_id': network_ref['id']}
+
+ admin_ctxt = context.get_admin_context()
+ db.fixed_ip_create(admin_ctxt, fixed_ip)
+ db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
+ 'instance_id': instance_ref['id']})
+
+ secgroup = db.security_group_create(admin_ctxt,
+ {'user_id': 'fake',
+ 'project_id': 'fake',
+ 'name': 'testgroup',
+ 'description': 'test group'})
+
+ db.security_group_rule_create(admin_ctxt,
+ {'parent_group_id': secgroup['id'],
+ 'protocol': 'icmp',
+ 'from_port': -1,
+ 'to_port': -1,
+ 'cidr': '192.168.11.0/24'})
+
+ db.security_group_rule_create(admin_ctxt,
+ {'parent_group_id': secgroup['id'],
+ 'protocol': 'icmp',
+ 'from_port': 8,
+ 'to_port': -1,
+ 'cidr': '192.168.11.0/24'})
+
+ db.security_group_rule_create(admin_ctxt,
+ {'parent_group_id': secgroup['id'],
+ 'protocol': 'tcp',
+ 'from_port': 80,
+ 'to_port': 81,
+ 'cidr': '192.168.10.0/24'})
+
+ db.instance_add_security_group(admin_ctxt, instance_ref['id'],
+ secgroup['id'])
+ instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
+
+ self.fw.add_instance(instance_ref)
+
+ out_rules = self.fw.modify_rules(self.in_rules)
+
+ in_rules = filter(lambda l: not l.startswith('#'), self.in_rules)
+ for rule in in_rules:
+ if not 'nova' in rule:
+ self.assertTrue(rule in out_rules,
+ 'Rule went missing: %s' % rule)
+
+ instance_chain = None
+ for rule in out_rules:
+ # This is pretty crude, but it'll do for now
+ if '-d 10.11.12.13 -j' in rule:
+ instance_chain = rule.split(' ')[-1]
+ break
+ self.assertTrue(instance_chain, "The instance chain wasn't added")
+
+ security_group_chain = None
+ for rule in out_rules:
+ # This is pretty crude, but it'll do for now
+ if '-A %s -j' % instance_chain in rule:
+ security_group_chain = rule.split(' ')[-1]
+ break
+ self.assertTrue(security_group_chain,
+ "The security group chain wasn't added")
+
+ self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -j ACCEPT' % \
+ security_group_chain in out_rules,
+ "ICMP acceptance rule wasn't added")
+
+ self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -m icmp --icmp-type'
+ ' 8 -j ACCEPT' % security_group_chain in out_rules,
+ "ICMP Echo Request acceptance rule wasn't added")
+
+ self.assertTrue('-A %s -p tcp -s 192.168.10.0/24 -m multiport '
+ '--dports 80:81 -j ACCEPT' % security_group_chain \
+ in out_rules,
+ "TCP port 80/81 acceptance rule wasn't added")
+
+
+class NWFilterTestCase(test.TestCase):
def setUp(self):
super(NWFilterTestCase, self).setUp()
@@ -224,7 +357,8 @@ class NWFilterTestCase(test.TestCase):
self.fake_libvirt_connection = Mock()
- self.fw = libvirt_conn.NWFilterFirewall(self.fake_libvirt_connection)
+ self.fw = libvirt_conn.NWFilterFirewall(
+ lambda: self.fake_libvirt_connection)
def tearDown(self):
self.manager.delete_project(self.project)
@@ -337,7 +471,7 @@ class NWFilterTestCase(test.TestCase):
self.security_group.id)
instance = db.instance_get(self.context, inst_id)
- self.fw.setup_base_nwfilters()
- self.fw.setup_nwfilters_for_instance(instance)
+ self.fw.setup_basic_filtering(instance)
+ self.fw.prepare_instance_filter(instance)
_ensure_all_called()
self.teardown_security_group()
diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py
index b13455fb0..b40ca004b 100644
--- a/nova/tests/test_volume.py
+++ b/nova/tests/test_volume.py
@@ -19,23 +19,23 @@
Tests for Volume Code.
"""
-import logging
from nova import context
from nova import exception
from nova import db
from nova import flags
+from nova import log as logging
from nova import test
from nova import utils
FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.volume')
class VolumeTestCase(test.TestCase):
"""Test Case for volumes."""
def setUp(self):
- logging.getLogger().setLevel(logging.DEBUG)
super(VolumeTestCase, self).setUp()
self.compute = utils.import_object(FLAGS.compute_manager)
self.flags(connection_type='fake')
@@ -159,7 +159,7 @@ class VolumeTestCase(test.TestCase):
volume_id)
self.assert_(iscsi_target not in targets)
targets.append(iscsi_target)
- logging.debug("Target %s allocated", iscsi_target)
+ LOG.debug(_("Target %s allocated"), iscsi_target)
total_slots = FLAGS.iscsi_num_targets
for _index in xrange(total_slots):
volume_id = self._create_volume()
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 33571dad0..ec9462ada 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -79,8 +79,8 @@ class XenAPIVolumeTestCase(test.TestCase):
helper = volume_utils.VolumeHelper
helper.XenAPI = session.get_imported_xenapi()
vol = self._create_volume()
- info = helper.parse_volume_info(vol['ec2_id'], '/dev/sdc')
- label = 'SR-%s' % vol['ec2_id']
+ info = helper.parse_volume_info(vol['id'], '/dev/sdc')
+ label = 'SR-%s' % vol['id']
description = 'Test-SR'
sr_ref = helper.create_iscsi_storage(session, info, label, description)
srs = xenapi_fake.get_all('SR')
@@ -97,7 +97,7 @@ class XenAPIVolumeTestCase(test.TestCase):
# oops, wrong mount point!
self.assertRaises(volume_utils.StorageError,
helper.parse_volume_info,
- vol['ec2_id'],
+ vol['id'],
'/dev/sd')
db.volume_destroy(context.get_admin_context(), vol['id'])
@@ -108,8 +108,7 @@ class XenAPIVolumeTestCase(test.TestCase):
volume = self._create_volume()
instance = db.instance_create(self.values)
xenapi_fake.create_vm(instance.name, 'Running')
- result = conn.attach_volume(instance.name, volume['ec2_id'],
- '/dev/sdc')
+ result = conn.attach_volume(instance.name, volume['id'], '/dev/sdc')
def check():
# check that the VM has a VBD attached to it
@@ -134,7 +133,7 @@ class XenAPIVolumeTestCase(test.TestCase):
self.assertRaises(Exception,
conn.attach_volume,
instance.name,
- volume['ec2_id'],
+ volume['id'],
'/dev/sdc')
def tearDown(self):
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index 55f751f11..292bd9ba9 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -41,9 +41,33 @@ def stubout_instance_snapshot(stubs):
rv = done.wait()
return rv
+ def fake_loop(self):
+ pass
+
stubs.Set(xenapi_conn.XenAPISession, 'wait_for_task',
fake_wait_for_task)
+ stubs.Set(xenapi_conn.XenAPISession, '_stop_loop', fake_loop)
+
+ from nova.virt.xenapi.fake import create_vdi
+ name_label = "instance-%s" % instance_id
+ #TODO: create fake SR record
+ sr_ref = "fakesr"
+ vdi_ref = create_vdi(name_label=name_label, read_only=False,
+ sr_ref=sr_ref, sharable=False)
+ vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
+ vdi_uuid = vdi_rec['uuid']
+ return vdi_uuid
+
+ stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image)
+
+ def fake_parse_xmlrpc_value(val):
+ return val
+
+ stubs.Set(xenapi_conn, '_parse_xmlrpc_value', fake_parse_xmlrpc_value)
+
+ def fake_wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
+ original_parent_uuid):
from nova.virt.xenapi.fake import create_vdi
name_label = "instance-%s" % instance_id
#TODO: create fake SR record
diff --git a/nova/twistd.py b/nova/twistd.py
index 29be9c4e1..556271999 100644
--- a/nova/twistd.py
+++ b/nova/twistd.py
@@ -22,7 +22,6 @@ manage pid files and support syslogging.
"""
import gflags
-import logging
import os
import signal
import sys
@@ -34,6 +33,7 @@ from twisted.python import runtime
from twisted.python import usage
from nova import flags
+from nova import log as logging
if runtime.platformType == "win32":
@@ -234,22 +234,12 @@ def serve(filename):
OptionsClass = WrapTwistedOptions(TwistdServerOptions)
options = OptionsClass()
argv = options.parseOptions()
- logging.getLogger('amqplib').setLevel(logging.WARN)
FLAGS.python = filename
FLAGS.no_save = True
if not FLAGS.pidfile:
FLAGS.pidfile = '%s.pid' % name
elif FLAGS.pidfile.endswith('twistd.pid'):
FLAGS.pidfile = FLAGS.pidfile.replace('twistd.pid', '%s.pid' % name)
- # NOTE(vish): if we're running nodaemon, redirect the log to stdout
- if FLAGS.nodaemon and not FLAGS.logfile:
- FLAGS.logfile = "-"
- if not FLAGS.logfile:
- FLAGS.logfile = '%s.log' % name
- elif FLAGS.logfile.endswith('twistd.log'):
- FLAGS.logfile = FLAGS.logfile.replace('twistd.log', '%s.log' % name)
- if FLAGS.logdir:
- FLAGS.logfile = os.path.join(FLAGS.logdir, FLAGS.logfile)
if not FLAGS.prefix:
FLAGS.prefix = name
elif FLAGS.prefix.endswith('twisted'):
@@ -270,19 +260,10 @@ def serve(filename):
print 'usage: %s [options] [start|stop|restart]' % argv[0]
sys.exit(1)
- formatter = logging.Formatter(
- '(%(name)s): %(levelname)s %(message)s')
- handler = logging.StreamHandler(log.StdioOnnaStick())
- handler.setFormatter(formatter)
- logging.getLogger().addHandler(handler)
-
- if FLAGS.verbose:
- logging.getLogger().setLevel(logging.DEBUG)
- else:
- logging.getLogger().setLevel(logging.WARNING)
-
+ logging.basicConfig()
logging.debug(_("Full set of FLAGS:"))
for flag in FLAGS:
logging.debug("%s : %s" % (flag, FLAGS.get(flag, None)))
+ logging.audit(_("Starting %s"), name)
twistd.runApp(options)
diff --git a/nova/utils.py b/nova/utils.py
index 15112faa2..45adb7b38 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -22,7 +22,6 @@ System-level utilities and helper functions.
import datetime
import inspect
-import logging
import os
import random
import subprocess
@@ -37,8 +36,10 @@ from eventlet import greenthread
from nova import exception
from nova.exception import ProcessExecutionError
+from nova import log as logging
+LOG = logging.getLogger("nova.utils")
TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
@@ -109,7 +110,7 @@ def vpn_ping(address, port, timeout=0.05, session_id=None):
def fetchfile(url, target):
- logging.debug(_("Fetching %s") % url)
+ LOG.debug(_("Fetching %s") % url)
# c = pycurl.Curl()
# fp = open(target, "wb")
# c.setopt(c.URL, url)
@@ -121,7 +122,7 @@ def fetchfile(url, target):
def execute(cmd, process_input=None, addl_env=None, check_exit_code=True):
- logging.debug(_("Running cmd (subprocess): %s"), cmd)
+ LOG.debug(_("Running cmd (subprocess): %s"), cmd)
env = os.environ.copy()
if addl_env:
env.update(addl_env)
@@ -134,7 +135,7 @@ def execute(cmd, process_input=None, addl_env=None, check_exit_code=True):
result = obj.communicate()
obj.stdin.close()
if obj.returncode:
- logging.debug(_("Result was %s") % (obj.returncode))
+ LOG.debug(_("Result was %s") % (obj.returncode))
if check_exit_code and obj.returncode != 0:
(stdout, stderr) = result
raise ProcessExecutionError(exit_code=obj.returncode,
@@ -152,6 +153,11 @@ def abspath(s):
return os.path.join(os.path.dirname(__file__), s)
+def novadir():
+ import nova
+ return os.path.abspath(nova.__file__).split('nova/__init__.pyc')[0]
+
+
def default_flagfile(filename='nova.conf'):
for arg in sys.argv:
if arg.find('flagfile') != -1:
@@ -167,12 +173,12 @@ def default_flagfile(filename='nova.conf'):
def debug(arg):
- logging.debug('debug in callback: %s', arg)
+ LOG.debug(_('debug in callback: %s'), arg)
return arg
def runthis(prompt, cmd, check_exit_code=True):
- logging.debug(_("Running %s") % (cmd))
+ LOG.debug(_("Running %s"), (cmd))
rv, err = execute(cmd, check_exit_code=check_exit_code)
@@ -194,19 +200,6 @@ def last_octet(address):
return int(address.split(".")[-1])
-def get_my_ip():
- """Returns the actual ip of the local machine."""
- try:
- csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- csock.connect(('8.8.8.8', 80))
- (addr, port) = csock.getsockname()
- csock.close()
- return addr
- except socket.gaierror as ex:
- logging.warn(_("Couldn't get IP, using 127.0.0.1 %s"), ex)
- return "127.0.0.1"
-
-
def utcnow():
"""Overridable version of datetime.datetime.utcnow."""
if utcnow.override_time:
@@ -296,7 +289,7 @@ class LazyPluggable(object):
fromlist = backend
self.__backend = __import__(name, None, None, fromlist)
- logging.info('backend %s', self.__backend)
+ LOG.debug(_('backend %s'), self.__backend)
return self.__backend
def __getattr__(self, key):
diff --git a/nova/version.py b/nova/version.py
new file mode 100644
index 000000000..7b27acb6a
--- /dev/null
+++ b/nova/version.py
@@ -0,0 +1,46 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+try:
+ from nova.vcsversion import version_info
+except ImportError:
+ version_info = {'branch_nick': u'LOCALBRANCH',
+ 'revision_id': 'LOCALREVISION',
+ 'revno': 0}
+
+NOVA_VERSION = ['2011', '1']
+YEAR, COUNT = NOVA_VERSION
+
+FINAL = False # This becomes true at Release Candidate time
+
+
+def canonical_version_string():
+ return '.'.join([YEAR, COUNT])
+
+
+def version_string():
+ if FINAL:
+ return canonical_version_string()
+ else:
+ return '%s-dev' % (canonical_version_string(),)
+
+
+def vcs_version_string():
+ return "%s:%s" % (version_info['branch_nick'], version_info['revision_id'])
+
+
+def version_string_with_vcs():
+ return "%s-%s" % (canonical_version_string(), vcs_version_string())
diff --git a/nova/virt/connection.py b/nova/virt/connection.py
index 61e99944e..13181b730 100644
--- a/nova/virt/connection.py
+++ b/nova/virt/connection.py
@@ -19,15 +19,17 @@
"""Abstraction of the underlying virtualization API."""
-import logging
import sys
from nova import flags
+from nova import log as logging
from nova.virt import fake
from nova.virt import libvirt_conn
from nova.virt import xenapi_conn
+from nova.virt import hyperv
+LOG = logging.getLogger("nova.virt.connection")
FLAGS = flags.FLAGS
@@ -62,10 +64,12 @@ def get_connection(read_only=False):
conn = libvirt_conn.get_connection(read_only)
elif t == 'xenapi':
conn = xenapi_conn.get_connection(read_only)
+ elif t == 'hyperv':
+ conn = hyperv.get_connection(read_only)
else:
raise Exception('Unknown connection type "%s"' % t)
if conn is None:
- logging.error(_('Failed to open connection to the hypervisor'))
+ LOG.error(_('Failed to open connection to the hypervisor'))
sys.exit(1)
return conn
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 32541f5b4..9186d885e 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -289,6 +289,14 @@ class FakeConnection(object):
def get_console_output(self, instance):
return 'FAKE CONSOLE OUTPUT'
+ def get_ajax_console(self, instance):
+ return 'http://fakeajaxconsole.com/?token=FAKETOKEN'
+
+ def get_console_pool_info(self, console_type):
+ return {'address': '127.0.0.1',
+ 'username': 'fakeuser',
+ 'password': 'fakepassword'}
+
class FakeInstance(object):
diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py
new file mode 100644
index 000000000..30dc1c79b
--- /dev/null
+++ b/nova/virt/hyperv.py
@@ -0,0 +1,462 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Cloud.com, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+A connection to Hyper-V.
+Uses Windows Management Instrumentation (WMI) calls to interact with Hyper-V
+Hyper-V WMI usage:
+ http://msdn.microsoft.com/en-us/library/cc723875%28v=VS.85%29.aspx
+The Hyper-V object model briefly:
+ The physical computer and its hosted virtual machines are each represented
+ by the Msvm_ComputerSystem class.
+
+ Each virtual machine is associated with a
+ Msvm_VirtualSystemGlobalSettingData (vs_gs_data) instance and one or more
+ Msvm_VirtualSystemSettingData (vmsetting) instances. For each vmsetting
+ there is a series of Msvm_ResourceAllocationSettingData (rasd) objects.
+ The rasd objects describe the settings for each device in a VM.
+ Together, the vs_gs_data, vmsettings and rasds describe the configuration
+ of the virtual machine.
+
+ Creating new resources such as disks and nics involves cloning a default
+ rasd object and appropriately modifying the clone and calling the
+ AddVirtualSystemResources WMI method
+ Changing resources such as memory uses the ModifyVirtualSystemResources
+ WMI method
+
+Using the Python WMI library:
+ Tutorial:
+ http://timgolden.me.uk/python/wmi/tutorial.html
+ Hyper-V WMI objects can be retrieved simply by using the class name
+ of the WMI object and optionally specifying a column to filter the
+ result set. More complex filters can be formed using WQL (sql-like)
+ queries.
+ The parameters and return tuples of WMI method calls can be gleaned by
+ examining the doc string. For example:
+ >>> vs_man_svc.ModifyVirtualSystemResources.__doc__
+ ModifyVirtualSystemResources (ComputerSystem, ResourceSettingData[])
+ => (Job, ReturnValue)'
+ When passing setting data (ResourceSettingData) to the WMI method,
+ an XML representation of the data is passed in using GetText_(1).
+ Available methods on a service can be determined using method.keys():
+ >>> vs_man_svc.methods.keys()
+ vmsettings and rasds for a vm can be retrieved using the 'associators'
+ method with the appropriate return class.
+ Long running WMI commands generally return a Job (an instance of
+ Msvm_ConcreteJob) whose state can be polled to determine when it finishes
+
+"""
+
+import os
+import time
+
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova.auth import manager
+from nova.compute import power_state
+from nova.virt import images
+
+wmi = None
+
+
+FLAGS = flags.FLAGS
+
+
+LOG = logging.getLogger('nova.virt.hyperv')
+
+
+HYPERV_POWER_STATE = {
+ 3: power_state.SHUTDOWN,
+ 2: power_state.RUNNING,
+ 32768: power_state.PAUSED,
+}
+
+
+REQ_POWER_STATE = {
+ 'Enabled': 2,
+ 'Disabled': 3,
+ 'Reboot': 10,
+ 'Reset': 11,
+ 'Paused': 32768,
+ 'Suspended': 32769,
+}
+
+
+WMI_JOB_STATUS_STARTED = 4096
+WMI_JOB_STATE_RUNNING = 4
+WMI_JOB_STATE_COMPLETED = 7
+
+
+def get_connection(_):
+ global wmi
+ if wmi is None:
+ wmi = __import__('wmi')
+ return HyperVConnection()
+
+
+class HyperVConnection(object):
+ def __init__(self):
+ self._conn = wmi.WMI(moniker='//./root/virtualization')
+ self._cim_conn = wmi.WMI(moniker='//./root/cimv2')
+
+ def init_host(self):
+ #FIXME(chiradeep): implement this
+ LOG.debug(_('In init host'))
+ pass
+
+ def list_instances(self):
+ """ Return the names of all the instances known to Hyper-V. """
+ vms = [v.ElementName \
+ for v in self._conn.Msvm_ComputerSystem(['ElementName'])]
+ return vms
+
+ def spawn(self, instance):
+ """ Create a new VM and start it."""
+ vm = self._lookup(instance.name)
+ if vm is not None:
+ raise exception.Duplicate(_('Attempt to create duplicate vm %s') %
+ instance.name)
+
+ user = manager.AuthManager().get_user(instance['user_id'])
+ project = manager.AuthManager().get_project(instance['project_id'])
+ #Fetch the file, assume it is a VHD file.
+ base_vhd_filename = os.path.join(FLAGS.instances_path,
+ instance.name)
+ vhdfile = "%s.vhd" % (base_vhd_filename)
+ images.fetch(instance['image_id'], vhdfile, user, project)
+
+ try:
+ self._create_vm(instance)
+
+ self._create_disk(instance['name'], vhdfile)
+ self._create_nic(instance['name'], instance['mac_address'])
+
+ LOG.debug(_('Starting VM %s '), instance.name)
+ self._set_vm_state(instance['name'], 'Enabled')
+ LOG.info(_('Started VM %s '), instance.name)
+ except Exception as exn:
+ LOG.exception(_('spawn vm failed: %s'), exn)
+ self.destroy(instance)
+
+ def _create_vm(self, instance):
+ """Create a VM but don't start it. """
+ vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
+
+ vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new()
+ vs_gs_data.ElementName = instance['name']
+ (job, ret_val) = vs_man_svc.DefineVirtualSystem(
+ [], None, vs_gs_data.GetText_(1))[1:]
+ if ret_val == WMI_JOB_STATUS_STARTED:
+ success = self._check_job_status(job)
+ else:
+ success = (ret_val == 0)
+
+ if not success:
+ raise Exception(_('Failed to create VM %s'), instance.name)
+
+ LOG.debug(_('Created VM %s...'), instance.name)
+ vm = self._conn.Msvm_ComputerSystem(ElementName=instance.name)[0]
+
+ vmsettings = vm.associators(
+ wmi_result_class='Msvm_VirtualSystemSettingData')
+ vmsetting = [s for s in vmsettings
+ if s.SettingType == 3][0] # avoid snapshots
+ memsetting = vmsetting.associators(
+ wmi_result_class='Msvm_MemorySettingData')[0]
+ #No Dynamic Memory, so reservation, limit and quantity are identical.
+ mem = long(str(instance['memory_mb']))
+ memsetting.VirtualQuantity = mem
+ memsetting.Reservation = mem
+ memsetting.Limit = mem
+
+ (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
+ vm.path_(), [memsetting.GetText_(1)])
+ LOG.debug(_('Set memory for vm %s...'), instance.name)
+ procsetting = vmsetting.associators(
+ wmi_result_class='Msvm_ProcessorSettingData')[0]
+ vcpus = long(instance['vcpus'])
+ procsetting.VirtualQuantity = vcpus
+ procsetting.Reservation = vcpus
+ procsetting.Limit = vcpus
+
+ (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
+ vm.path_(), [procsetting.GetText_(1)])
+ LOG.debug(_('Set vcpus for vm %s...'), instance.name)
+
+ def _create_disk(self, vm_name, vhdfile):
+ """Create a disk and attach it to the vm"""
+ LOG.debug(_('Creating disk for %s by attaching disk file %s'),
+ vm_name, vhdfile)
+ #Find the IDE controller for the vm.
+ vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
+ vm = vms[0]
+ vmsettings = vm.associators(
+ wmi_result_class='Msvm_VirtualSystemSettingData')
+ rasds = vmsettings[0].associators(
+ wmi_result_class='MSVM_ResourceAllocationSettingData')
+ ctrller = [r for r in rasds
+ if r.ResourceSubType == 'Microsoft Emulated IDE Controller'\
+ and r.Address == "0"]
+ #Find the default disk drive object for the vm and clone it.
+ diskdflt = self._conn.query(
+ "SELECT * FROM Msvm_ResourceAllocationSettingData \
+ WHERE ResourceSubType LIKE 'Microsoft Synthetic Disk Drive'\
+ AND InstanceID LIKE '%Default%'")[0]
+ diskdrive = self._clone_wmi_obj(
+ 'Msvm_ResourceAllocationSettingData', diskdflt)
+ #Set the IDE ctrller as parent.
+ diskdrive.Parent = ctrller[0].path_()
+ diskdrive.Address = 0
+ #Add the cloned disk drive object to the vm.
+ new_resources = self._add_virt_resource(diskdrive, vm)
+ if new_resources is None:
+ raise Exception(_('Failed to add diskdrive to VM %s'),
+ vm_name)
+ diskdrive_path = new_resources[0]
+ LOG.debug(_('New disk drive path is %s'), diskdrive_path)
+ #Find the default VHD disk object.
+ vhddefault = self._conn.query(
+ "SELECT * FROM Msvm_ResourceAllocationSettingData \
+ WHERE ResourceSubType LIKE 'Microsoft Virtual Hard Disk' AND \
+ InstanceID LIKE '%Default%' ")[0]
+
+ #Clone the default and point it to the image file.
+ vhddisk = self._clone_wmi_obj(
+ 'Msvm_ResourceAllocationSettingData', vhddefault)
+ #Set the new drive as the parent.
+ vhddisk.Parent = diskdrive_path
+ vhddisk.Connection = [vhdfile]
+
+ #Add the new vhd object as a virtual hard disk to the vm.
+ new_resources = self._add_virt_resource(vhddisk, vm)
+ if new_resources is None:
+ raise Exception(_('Failed to add vhd file to VM %s'),
+ vm_name)
+ LOG.info(_('Created disk for %s'), vm_name)
+
+ def _create_nic(self, vm_name, mac):
+ """Create a (emulated) nic and attach it to the vm"""
+ LOG.debug(_('Creating nic for %s '), vm_name)
+ #Find the vswitch that is connected to the physical nic.
+ vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
+ extswitch = self._find_external_network()
+ vm = vms[0]
+ switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0]
+ #Find the default nic and clone it to create a new nic for the vm.
+ #Use Msvm_SyntheticEthernetPortSettingData for Windows or Linux with
+ #Linux Integration Components installed.
+ emulatednics_data = self._conn.Msvm_EmulatedEthernetPortSettingData()
+ default_nic_data = [n for n in emulatednics_data
+ if n.InstanceID.rfind('Default') > 0]
+ new_nic_data = self._clone_wmi_obj(
+ 'Msvm_EmulatedEthernetPortSettingData',
+ default_nic_data[0])
+ #Create a port on the vswitch.
+ (new_port, ret_val) = switch_svc.CreateSwitchPort(vm_name, vm_name,
+ "", extswitch.path_())
+ if ret_val != 0:
+ LOG.error(_('Failed creating a port on the external vswitch'))
+ raise Exception(_('Failed creating port for %s'),
+ vm_name)
+ LOG.debug(_("Created switch port %s on switch %s"),
+ vm_name, extswitch.path_())
+ #Connect the new nic to the new port.
+ new_nic_data.Connection = [new_port]
+ new_nic_data.ElementName = vm_name + ' nic'
+ new_nic_data.Address = ''.join(mac.split(':'))
+ new_nic_data.StaticMacAddress = 'TRUE'
+ #Add the new nic to the vm.
+ new_resources = self._add_virt_resource(new_nic_data, vm)
+ if new_resources is None:
+ raise Exception(_('Failed to add nic to VM %s'),
+ vm_name)
+ LOG.info(_("Created nic for %s "), vm_name)
+
+ def _add_virt_resource(self, res_setting_data, target_vm):
+ """Add a new resource (disk/nic) to the VM"""
+ vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
+ (job, new_resources, ret_val) = vs_man_svc.\
+ AddVirtualSystemResources([res_setting_data.GetText_(1)],
+ target_vm.path_())
+ success = True
+ if ret_val == WMI_JOB_STATUS_STARTED:
+ success = self._check_job_status(job)
+ else:
+ success = (ret_val == 0)
+ if success:
+ return new_resources
+ else:
+ return None
+
+ #TODO: use the reactor to poll instead of sleep
+ def _check_job_status(self, jobpath):
+ """Poll WMI job state for completion"""
+ #Jobs have a path of the form:
+ #\\WIN-P5IG7367DAG\root\virtualization:Msvm_ConcreteJob.InstanceID=
+ #"8A496B9C-AF4D-4E98-BD3C-1128CD85320D"
+ inst_id = jobpath.split('=')[1].strip('"')
+ jobs = self._conn.Msvm_ConcreteJob(InstanceID=inst_id)
+ if len(jobs) == 0:
+ return False
+ job = jobs[0]
+ while job.JobState == WMI_JOB_STATE_RUNNING:
+ time.sleep(0.1)
+ job = self._conn.Msvm_ConcreteJob(InstanceID=inst_id)[0]
+ if job.JobState != WMI_JOB_STATE_COMPLETED:
+ LOG.debug(_("WMI job failed: %s"), job.ErrorSummaryDescription)
+ return False
+ LOG.debug(_("WMI job succeeded: %s, Elapsed=%s "), job.Description,
+ job.ElapsedTime)
+ return True
+
+ def _find_external_network(self):
+ """Find the vswitch that is connected to the physical nic.
+ Assumes only one physical nic on the host
+ """
+ #If there are no physical nics connected to networks, return.
+ bound = self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')
+ if len(bound) == 0:
+ return None
+ return self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')[0]\
+ .associators(wmi_result_class='Msvm_SwitchLANEndpoint')[0]\
+ .associators(wmi_result_class='Msvm_SwitchPort')[0]\
+ .associators(wmi_result_class='Msvm_VirtualSwitch')[0]
+
+ def _clone_wmi_obj(self, wmi_class, wmi_obj):
+ """Clone a WMI object"""
+ cl = self._conn.__getattr__(wmi_class) # get the class
+ newinst = cl.new()
+ #Copy the properties from the original.
+ for prop in wmi_obj._properties:
+ newinst.Properties_.Item(prop).Value =\
+ wmi_obj.Properties_.Item(prop).Value
+ return newinst
+
+ def reboot(self, instance):
+ """Reboot the specified instance."""
+ vm = self._lookup(instance.name)
+ if vm is None:
+ raise exception.NotFound('instance not present %s' % instance.name)
+ self._set_vm_state(instance.name, 'Reboot')
+
+ def destroy(self, instance):
+ """Destroy the VM. Also destroy the associated VHD disk files"""
+ LOG.debug(_("Got request to destroy vm %s"), instance.name)
+ vm = self._lookup(instance.name)
+ if vm is None:
+ return
+ vm = self._conn.Msvm_ComputerSystem(ElementName=instance.name)[0]
+ vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
+ #Stop the VM first.
+ self._set_vm_state(instance.name, 'Disabled')
+ vmsettings = vm.associators(
+ wmi_result_class='Msvm_VirtualSystemSettingData')
+ rasds = vmsettings[0].associators(
+ wmi_result_class='MSVM_ResourceAllocationSettingData')
+ disks = [r for r in rasds \
+ if r.ResourceSubType == 'Microsoft Virtual Hard Disk']
+ diskfiles = []
+ #Collect disk file information before destroying the VM.
+ for disk in disks:
+ diskfiles.extend([c for c in disk.Connection])
+ #Nuke the VM. Does not destroy disks.
+ (job, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_())
+ if ret_val == WMI_JOB_STATUS_STARTED:
+ success = self._check_job_status(job)
+ elif ret_val == 0:
+ success = True
+ if not success:
+ raise Exception(_('Failed to destroy vm %s') % instance.name)
+ #Delete associated vhd disk files.
+ for disk in diskfiles:
+ vhdfile = self._cim_conn.CIM_DataFile(Name=disk)
+ for vf in vhdfile:
+ vf.Delete()
+ LOG.debug(_("Del: disk %s vm %s"), vhdfile, instance.name)
+
+ def get_info(self, instance_id):
+ """Get information about the VM"""
+ vm = self._lookup(instance_id)
+ if vm is None:
+ raise exception.NotFound('instance not present %s' % instance_id)
+ vm = self._conn.Msvm_ComputerSystem(ElementName=instance_id)[0]
+ vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
+ vmsettings = vm.associators(
+ wmi_result_class='Msvm_VirtualSystemSettingData')
+ settings_paths = [v.path_() for v in vmsettings]
+ #See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx
+ summary_info = vs_man_svc.GetSummaryInformation(
+ [4, 100, 103, 105], settings_paths)[1]
+ info = summary_info[0]
+ LOG.debug(_("Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, \
+ cpu_time=%s"), instance_id,
+ str(HYPERV_POWER_STATE[info.EnabledState]),
+ str(info.MemoryUsage),
+ str(info.NumberOfProcessors),
+ str(info.UpTime))
+
+ return {'state': HYPERV_POWER_STATE[info.EnabledState],
+ 'max_mem': info.MemoryUsage,
+ 'mem': info.MemoryUsage,
+ 'num_cpu': info.NumberOfProcessors,
+ 'cpu_time': info.UpTime}
+
+ def _lookup(self, i):
+ vms = self._conn.Msvm_ComputerSystem(ElementName=i)
+ n = len(vms)
+ if n == 0:
+ return None
+ elif n > 1:
+ raise Exception(_('duplicate name found: %s') % i)
+ else:
+ return vms[0].ElementName
+
+ def _set_vm_state(self, vm_name, req_state):
+ """Set the desired state of the VM"""
+ vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
+ if len(vms) == 0:
+ return False
+ (job, ret_val) = vms[0].RequestStateChange(REQ_POWER_STATE[req_state])
+ success = False
+ if ret_val == WMI_JOB_STATUS_STARTED:
+ success = self._check_job_status(job)
+ elif ret_val == 0:
+ success = True
+ elif ret_val == 32775:
+ #Invalid state for current operation. Typically means it is
+ #already in the state requested
+ success = True
+ if success:
+ LOG.info(_("Successfully changed vm state of %s to %s"), vm_name,
+ req_state)
+ else:
+ LOG.error(_("Failed to change vm state of %s to %s"), vm_name,
+ req_state)
+ raise Exception(_("Failed to change vm state of %s to %s"),
+ vm_name, req_state)
+
+ def attach_volume(self, instance_name, device_path, mountpoint):
+ vm = self._lookup(instance_name)
+ if vm is None:
+ raise exception.NotFound('Cannot attach volume to missing %s vm' %
+ instance_name)
+
+ def detach_volume(self, instance_name, mountpoint):
+ vm = self._lookup(instance_name)
+ if vm is None:
+ raise exception.NotFound('Cannot detach volume from missing %s ' %
+ instance_name)
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 1c9b2e093..ecf0e5efb 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -22,10 +22,14 @@ Handling of VM disk images.
"""
import os.path
+import shutil
+import sys
import time
+import urllib2
import urlparse
from nova import flags
+from nova import log as logging
from nova import utils
from nova.auth import manager
from nova.auth import signer
@@ -36,6 +40,8 @@ FLAGS = flags.FLAGS
flags.DEFINE_bool('use_s3', True,
'whether to get images from s3 or use local copy')
+LOG = logging.getLogger('nova.virt.images')
+
def fetch(image, path, user, project):
if FLAGS.use_s3:
@@ -45,6 +51,25 @@ def fetch(image, path, user, project):
return f(image, path, user, project)
+def _fetch_image_no_curl(url, path, headers):
+ request = urllib2.Request(url)
+ for (k, v) in headers.iteritems():
+ request.add_header(k, v)
+
+ def urlretrieve(urlfile, fpath):
+ chunk = 1 * 1024 * 1024
+ f = open(fpath, "wb")
+ while 1:
+ data = urlfile.read(chunk)
+ if not data:
+ break
+ f.write(data)
+
+ urlopened = urllib2.urlopen(request)
+ urlretrieve(urlopened, path)
+ LOG.debug(_("Finished retrieving %s -- placed in %s"), url, path)
+
+
def _fetch_s3_image(image, path, user, project):
url = image_url(image)
@@ -61,18 +86,24 @@ def _fetch_s3_image(image, path, user, project):
url_path)
headers['Authorization'] = 'AWS %s:%s' % (access, signature)
- cmd = ['/usr/bin/curl', '--fail', '--silent', url]
- for (k, v) in headers.iteritems():
- cmd += ['-H', '"%s: %s"' % (k, v)]
+ if sys.platform.startswith('win'):
+ return _fetch_image_no_curl(url, path, headers)
+ else:
+ cmd = ['/usr/bin/curl', '--fail', '--silent', url]
+ for (k, v) in headers.iteritems():
+ cmd += ['-H', '\'%s: %s\'' % (k, v)]
- cmd += ['-o', path]
- cmd_out = ' '.join(cmd)
- return utils.execute(cmd_out)
+ cmd += ['-o', path]
+ cmd_out = ' '.join(cmd)
+ return utils.execute(cmd_out)
def _fetch_local_image(image, path, user, project):
- source = _image_path('%s/image' % image)
- return utils.execute('cp %s %s' % (source, path))
+ source = _image_path(os.path.join(image, 'image'))
+ if sys.platform.startswith('win'):
+ return shutil.copy(source, path)
+ else:
+ return utils.execute('cp %s %s' % (source, path))
def _image_path(path):
diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template
index 3fb2243da..2eb7d9488 100644
--- a/nova/virt/libvirt.xml.template
+++ b/nova/virt/libvirt.xml.template
@@ -71,9 +71,22 @@
#end if
</filterref>
</interface>
+
+ <!-- The order is significant here. File must be defined first -->
<serial type="file">
<source path='${basepath}/console.log'/>
<target port='1'/>
</serial>
+
+ <console type='pty' tty='/dev/pts/2'>
+ <source path='/dev/pts/2'/>
+ <target port='0'/>
+ </console>
+
+ <serial type='pty'>
+ <source path='/dev/pts/2'/>
+ <target port='0'/>
+ </serial>
+
</devices>
</domain>
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 00edfbdc8..655c55fa1 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -36,9 +36,13 @@ Supports KVM, QEMU, UML, and XEN.
"""
-import logging
import os
import shutil
+import random
+import subprocess
+import uuid
+from xml.dom import minidom
+
from eventlet import greenthread
from eventlet import event
@@ -50,6 +54,7 @@ from nova import context
from nova import db
from nova import exception
from nova import flags
+from nova import log as logging
from nova import utils
#from nova.api import context
from nova.auth import manager
@@ -62,6 +67,7 @@ libvirt = None
libxml2 = None
Template = None
+LOG = logging.getLogger('nova.virt.libvirt_conn')
FLAGS = flags.FLAGS
# TODO(vish): These flags should probably go into a shared location
@@ -85,6 +91,12 @@ flags.DEFINE_string('libvirt_uri',
flags.DEFINE_bool('allow_project_net_traffic',
True,
'Whether to allow in project network traffic')
+flags.DEFINE_string('ajaxterm_portrange',
+ '10000-12000',
+ 'Range of ports that ajaxterm should randomly try to bind')
+flags.DEFINE_string('firewall_driver',
+ 'nova.virt.libvirt_conn.IptablesFirewallDriver',
+ 'Firewall driver (defaults to iptables)')
def get_connection(read_only):
@@ -124,16 +136,24 @@ class LibvirtConnection(object):
self._wrapped_conn = None
self.read_only = read_only
+ self.nwfilter = NWFilterFirewall(self._get_connection)
+
+ if not FLAGS.firewall_driver:
+ self.firewall_driver = self.nwfilter
+ self.nwfilter.handle_security_groups = True
+ else:
+ self.firewall_driver = utils.import_object(FLAGS.firewall_driver)
+
def init_host(self):
- NWFilterFirewall(self._conn).setup_base_nwfilters()
+ pass
- @property
- def _conn(self):
+ def _get_connection(self):
if not self._wrapped_conn or not self._test_connection():
- logging.debug(_('Connecting to libvirt: %s') % self.libvirt_uri)
+ LOG.debug(_('Connecting to libvirt: %s'), self.libvirt_uri)
self._wrapped_conn = self._connect(self.libvirt_uri,
self.read_only)
return self._wrapped_conn
+ _conn = property(_get_connection)
def _test_connection(self):
try:
@@ -142,7 +162,7 @@ class LibvirtConnection(object):
except libvirt.libvirtError as e:
if e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and \
e.get_error_domain() == libvirt.VIR_FROM_REMOTE:
- logging.debug(_('Connection to libvirt broke'))
+ LOG.debug(_('Connection to libvirt broke'))
return False
raise
@@ -214,8 +234,8 @@ class LibvirtConnection(object):
def _cleanup(self, instance):
target = os.path.join(FLAGS.instances_path, instance['name'])
- logging.info(_('instance %s: deleting instance files %s'),
- instance['name'], target)
+ LOG.info(_('instance %s: deleting instance files %s'),
+ instance['name'], target)
if os.path.exists(target):
shutil.rmtree(target)
@@ -279,10 +299,10 @@ class LibvirtConnection(object):
db.instance_set_state(context.get_admin_context(),
instance['id'], state)
if state == power_state.RUNNING:
- logging.debug(_('instance %s: rebooted'), instance['name'])
+ LOG.debug(_('instance %s: rebooted'), instance['name'])
timer.stop()
except Exception, exn:
- logging.error(_('_wait_for_reboot failed: %s'), exn)
+ LOG.exception(_('_wait_for_reboot failed: %s'), exn)
db.instance_set_state(context.get_admin_context(),
instance['id'],
power_state.SHUTDOWN)
@@ -325,10 +345,10 @@ class LibvirtConnection(object):
state = self.get_info(instance['name'])['state']
db.instance_set_state(None, instance['id'], state)
if state == power_state.RUNNING:
- logging.debug(_('instance %s: rescued'), instance['name'])
+ LOG.debug(_('instance %s: rescued'), instance['name'])
timer.stop()
except Exception, exn:
- logging.error(_('_wait_for_rescue failed: %s'), exn)
+ LOG.exception(_('_wait_for_rescue failed: %s'), exn)
db.instance_set_state(None,
instance['id'],
power_state.SHUTDOWN)
@@ -350,10 +370,13 @@ class LibvirtConnection(object):
instance['id'],
power_state.NOSTATE,
'launching')
- NWFilterFirewall(self._conn).setup_nwfilters_for_instance(instance)
+
+ self.nwfilter.setup_basic_filtering(instance)
+ self.firewall_driver.prepare_instance_filter(instance)
self._create_image(instance, xml)
self._conn.createXML(xml, 0)
- logging.debug(_("instance %s: is running"), instance['name'])
+ LOG.debug(_("instance %s: is running"), instance['name'])
+ self.firewall_driver.apply_instance_filter(instance)
timer = utils.LoopingCall(f=None)
@@ -363,11 +386,11 @@ class LibvirtConnection(object):
db.instance_set_state(context.get_admin_context(),
instance['id'], state)
if state == power_state.RUNNING:
- logging.debug(_('instance %s: booted'), instance['name'])
+ LOG.debug(_('instance %s: booted'), instance['name'])
timer.stop()
except:
- logging.exception(_('instance %s: failed to boot'),
- instance['name'])
+ LOG.exception(_('instance %s: failed to boot'),
+ instance['name'])
db.instance_set_state(context.get_admin_context(),
instance['id'],
power_state.SHUTDOWN)
@@ -377,11 +400,11 @@ class LibvirtConnection(object):
return timer.start(interval=0.5, now=True)
def _flush_xen_console(self, virsh_output):
- logging.info('virsh said: %r' % (virsh_output,))
+ LOG.info(_('virsh said: %r'), virsh_output)
virsh_output = virsh_output[0].strip()
if virsh_output.startswith('/dev/'):
- logging.info(_('cool, it\'s a device'))
+ LOG.info(_('cool, it\'s a device'))
out, err = utils.execute("sudo dd if=%s iflag=nonblock" %
virsh_output, check_exit_code=False)
return out
@@ -389,7 +412,7 @@ class LibvirtConnection(object):
return ''
def _append_to_file(self, data, fpath):
- logging.info(_('data: %r, fpath: %r') % (data, fpath))
+ LOG.info(_('data: %r, fpath: %r'), data, fpath)
fp = open(fpath, 'a+')
fp.write(data)
return fpath
@@ -397,7 +420,7 @@ class LibvirtConnection(object):
def _dump_file(self, fpath):
fp = open(fpath, 'r+')
contents = fp.read()
- logging.info('Contents: %r' % (contents,))
+ LOG.info(_('Contents of file %s: %r'), fpath, contents)
return contents
@exception.wrap_exception
@@ -418,6 +441,43 @@ class LibvirtConnection(object):
return self._dump_file(fpath)
+ @exception.wrap_exception
+ def get_ajax_console(self, instance):
+ def get_open_port():
+ start_port, end_port = FLAGS.ajaxterm_portrange.split("-")
+ for i in xrange(0, 100): # don't loop forever
+ port = random.randint(int(start_port), int(end_port))
+ # netcat will exit with 0 only if the port is in use,
+ # so a nonzero return value implies it is unused
+ cmd = 'netcat 0.0.0.0 %s -w 1 </dev/null || echo free' % (port)
+ stdout, stderr = utils.execute(cmd)
+ if stdout.strip() == 'free':
+ return port
+ raise Exception(_('Unable to find an open port'))
+
+ def get_pty_for_instance(instance_name):
+ virt_dom = self._conn.lookupByName(instance_name)
+ xml = virt_dom.XMLDesc(0)
+ dom = minidom.parseString(xml)
+
+ for serial in dom.getElementsByTagName('serial'):
+ if serial.getAttribute('type') == 'pty':
+ source = serial.getElementsByTagName('source')[0]
+ return source.getAttribute('path')
+
+ port = get_open_port()
+ token = str(uuid.uuid4())
+ host = instance['host']
+
+ ajaxterm_cmd = 'sudo socat - %s' \
+ % get_pty_for_instance(instance['name'])
+
+ cmd = '%s/tools/ajaxterm/ajaxterm.py --command "%s" -t %s -p %s' \
+ % (utils.novadir(), ajaxterm_cmd, token, port)
+
+ subprocess.Popen(cmd, shell=True)
+ return {'token': token, 'host': host, 'port': port}
+
def _create_image(self, inst, libvirt_xml, prefix='', disk_images=None):
# syntactic nicety
basepath = lambda fname = '', prefix = prefix: os.path.join(
@@ -431,7 +491,7 @@ class LibvirtConnection(object):
# TODO(termie): these are blocking calls, it would be great
# if they weren't.
- logging.info(_('instance %s: Creating image'), inst['name'])
+ LOG.info(_('instance %s: Creating image'), inst['name'])
f = open(basepath('libvirt.xml'), 'w')
f.write(libvirt_xml)
f.close()
@@ -487,10 +547,10 @@ class LibvirtConnection(object):
'dns': network_ref['dns']}
if key or net:
if key:
- logging.info(_('instance %s: injecting key into image %s'),
+ LOG.info(_('instance %s: injecting key into image %s'),
inst['name'], inst.image_id)
if net:
- logging.info(_('instance %s: injecting net into image %s'),
+ LOG.info(_('instance %s: injecting net into image %s'),
inst['name'], inst.image_id)
try:
disk.inject_data(basepath('disk-raw'), key, net,
@@ -498,9 +558,9 @@ class LibvirtConnection(object):
execute=execute)
except Exception as e:
# This could be a windows image, or a vmdk format disk
- logging.warn(_('instance %s: ignoring error injecting data'
- ' into image %s (%s)'),
- inst['name'], inst.image_id, e)
+ LOG.warn(_('instance %s: ignoring error injecting data'
+ ' into image %s (%s)'),
+ inst['name'], inst.image_id, e)
if inst['kernel_id']:
if os.path.exists(basepath('disk')):
@@ -526,8 +586,10 @@ class LibvirtConnection(object):
def to_xml(self, instance, rescue=False):
# TODO(termie): cache?
- logging.debug(_('instance %s: starting toXML method'),
- instance['name'])
+ LOG.debug(_('instance %s: starting toXML method'), instance['name'])
+ network = db.project_get_network(context.get_admin_context(),
+ instance['project_id'])
+ LOG.debug(_('instance %s: starting toXML method'), instance['name'])
network = db.network_get_by_instance(context.get_admin_context(),
instance['id'])
# FIXME(vish): stick this in db
@@ -569,7 +631,7 @@ class LibvirtConnection(object):
xml_info['disk'] = xml_info['basepath'] + "/disk"
xml = str(Template(self.libvirt_xml, searchList=[xml_info]))
- logging.debug(_('instance %s: finished toXML method'),
+ LOG.debug(_('instance %s: finished toXML method'),
instance['name'])
return xml
@@ -690,18 +752,63 @@ class LibvirtConnection(object):
domain = self._conn.lookupByName(instance_name)
return domain.interfaceStats(interface)
- def refresh_security_group(self, security_group_id):
- fw = NWFilterFirewall(self._conn)
- fw.ensure_security_group_filter(security_group_id)
+ def get_console_pool_info(self, console_type):
+ #TODO(mdragon): console proxy should be implemented for libvirt,
+ # in case someone wants to use it with kvm or
+ # such. For now return fake data.
+ return {'address': '127.0.0.1',
+ 'username': 'fakeuser',
+ 'password': 'fakepassword'}
+
+ def refresh_security_group_rules(self, security_group_id):
+ self.firewall_driver.refresh_security_group_rules(security_group_id)
+
+ def refresh_security_group_members(self, security_group_id):
+ self.firewall_driver.refresh_security_group_members(security_group_id)
+
+class FirewallDriver(object):
+ def prepare_instance_filter(self, instance):
+ """Prepare filters for the instance.
-class NWFilterFirewall(object):
+ At this point, the instance isn't running yet."""
+ raise NotImplementedError()
+
+ def apply_instance_filter(self, instance):
+ """Apply instance filter.
+
+ Once this method returns, the instance should be firewalled
+ appropriately. This method should as far as possible be a
+ no-op. It's vastly preferred to get everything set up in
+ prepare_instance_filter.
+ """
+ raise NotImplementedError()
+
+ def refresh_security_group_rules(self, security_group_id):
+ """Refresh security group rules from data store
+
+ Gets called when a rule has been added to or removed from
+ the security group."""
+ raise NotImplementedError()
+
+ def refresh_security_group_members(self, security_group_id):
+ """Refresh security group members from data store
+
+ Gets called when an instance gets added to or removed from
+ the security group."""
+ raise NotImplementedError()
+
+
+class NWFilterFirewall(FirewallDriver):
"""
This class implements a network filtering mechanism versatile
enough for EC2 style Security Group filtering by leveraging
libvirt's nwfilter.
First, all instances get a filter ("nova-base-filter") applied.
+ This filter provides some basic security such as protection against
+ MAC spoofing, IP spoofing, and ARP spoofing.
+
This filter drops all incoming ipv4 and ipv6 connections.
Outgoing connections are never blocked.
@@ -735,38 +842,79 @@ class NWFilterFirewall(object):
(*) This sentence brought to you by the redundancy department of
redundancy.
+
"""
def __init__(self, get_connection):
- self._conn = get_connection
-
- nova_base_filter = '''<filter name='nova-base' chain='root'>
- <uuid>26717364-50cf-42d1-8185-29bf893ab110</uuid>
- <filterref filter='no-mac-spoofing'/>
- <filterref filter='no-ip-spoofing'/>
- <filterref filter='no-arp-spoofing'/>
- <filterref filter='allow-dhcp-server'/>
- <filterref filter='nova-allow-dhcp-server'/>
- <filterref filter='nova-base-ipv4'/>
- <filterref filter='nova-base-ipv6'/>
- </filter>'''
-
- nova_dhcp_filter = '''<filter name='nova-allow-dhcp-server' chain='ipv4'>
- <uuid>891e4787-e5c0-d59b-cbd6-41bc3c6b36fc</uuid>
- <rule action='accept' direction='out'
- priority='100'>
- <udp srcipaddr='0.0.0.0'
- dstipaddr='255.255.255.255'
- srcportstart='68'
- dstportstart='67'/>
- </rule>
- <rule action='accept' direction='in'
- priority='100'>
- <udp srcipaddr='$DHCPSERVER'
- srcportstart='67'
- dstportstart='68'/>
- </rule>
- </filter>'''
+ self._libvirt_get_connection = get_connection
+ self.static_filters_configured = False
+ self.handle_security_groups = False
+
+ def _get_connection(self):
+ return self._libvirt_get_connection()
+ _conn = property(_get_connection)
+
+ def nova_dhcp_filter(self):
+ """The standard allow-dhcp-server filter is an <ip> one, so it uses
+ ebtables to allow traffic through. Without a corresponding rule in
+ iptables, it'll get blocked anyway."""
+
+ return '''<filter name='nova-allow-dhcp-server' chain='ipv4'>
+ <uuid>891e4787-e5c0-d59b-cbd6-41bc3c6b36fc</uuid>
+ <rule action='accept' direction='out'
+ priority='100'>
+ <udp srcipaddr='0.0.0.0'
+ dstipaddr='255.255.255.255'
+ srcportstart='68'
+ dstportstart='67'/>
+ </rule>
+ <rule action='accept' direction='in'
+ priority='100'>
+ <udp srcipaddr='$DHCPSERVER'
+ srcportstart='67'
+ dstportstart='68'/>
+ </rule>
+ </filter>'''
+
+ def setup_basic_filtering(self, instance):
+ """Set up basic filtering (MAC, IP, and ARP spoofing protection)"""
+ logging.info('called setup_basic_filtering in nwfilter')
+
+ if self.handle_security_groups:
+ # No point in setting up a filter set that we'll be overriding
+ # anyway.
+ return
+
+ logging.info('ensuring static filters')
+ self._ensure_static_filters()
+
+ instance_filter_name = self._instance_filter_name(instance)
+ self._define_filter(self._filter_container(instance_filter_name,
+ ['nova-base']))
+
+ def _ensure_static_filters(self):
+ if self.static_filters_configured:
+ return
+
+ self._define_filter(self._filter_container('nova-base',
+ ['no-mac-spoofing',
+ 'no-ip-spoofing',
+ 'no-arp-spoofing',
+ 'allow-dhcp-server']))
+ self._define_filter(self.nova_base_ipv4_filter)
+ self._define_filter(self.nova_base_ipv6_filter)
+ self._define_filter(self.nova_dhcp_filter)
+ self._define_filter(self.nova_vpn_filter)
+ if FLAGS.allow_project_net_traffic:
+ self._define_filter(self.nova_project_filter)
+
+ self.static_filters_configured = True
+
+ def _filter_container(self, name, filters):
+ xml = '''<filter name='%s' chain='root'>%s</filter>''' % (
+ name,
+ ''.join(["<filterref filter='%s'/>" % (f,) for f in filters]))
+ return xml
nova_vpn_filter = '''<filter name='nova-vpn' chain='root'>
<uuid>2086015e-cf03-11df-8c5d-080027c27973</uuid>
@@ -780,7 +928,7 @@ class NWFilterFirewall(object):
retval = "<filter name='nova-base-ipv4' chain='ipv4'>"
for protocol in ['tcp', 'udp', 'icmp']:
for direction, action, priority in [('out', 'accept', 399),
- ('inout', 'drop', 400)]:
+ ('in', 'drop', 400)]:
retval += """<rule action='%s' direction='%s' priority='%d'>
<%s />
</rule>""" % (action, direction,
@@ -792,7 +940,7 @@ class NWFilterFirewall(object):
retval = "<filter name='nova-base-ipv6' chain='ipv6'>"
for protocol in ['tcp', 'udp', 'icmp']:
for direction, action, priority in [('out', 'accept', 399),
- ('inout', 'drop', 400)]:
+ ('in', 'drop', 400)]:
retval += """<rule action='%s' direction='%s' priority='%d'>
<%s-ipv6 />
</rule>""" % (action, direction,
@@ -816,43 +964,49 @@ class NWFilterFirewall(object):
# execute in a native thread and block current greenthread until done
tpool.execute(self._conn.nwfilterDefineXML, xml)
- def setup_base_nwfilters(self):
- self._define_filter(self.nova_base_ipv4_filter)
- self._define_filter(self.nova_base_ipv6_filter)
- self._define_filter(self.nova_dhcp_filter)
- self._define_filter(self.nova_base_filter)
- self._define_filter(self.nova_vpn_filter)
- if FLAGS.allow_project_net_traffic:
- self._define_filter(self.nova_project_filter)
-
- def setup_nwfilters_for_instance(self, instance):
+ def prepare_instance_filter(self, instance):
"""
Creates an NWFilter for the given instance. In the process,
it makes sure the filters for the security groups as well as
the base filter are all in place.
"""
- nwfilter_xml = ("<filter name='nova-instance-%s' "
- "chain='root'>\n") % instance['name']
-
if instance['image_id'] == FLAGS.vpn_image_id:
- nwfilter_xml += " <filterref filter='nova-vpn' />\n"
+ base_filter = 'nova-vpn'
else:
- nwfilter_xml += " <filterref filter='nova-base' />\n"
+ base_filter = 'nova-base'
+
+ instance_filter_name = self._instance_filter_name(instance)
+ instance_secgroup_filter_name = '%s-secgroup' % (instance_filter_name,)
+ instance_filter_children = [base_filter, instance_secgroup_filter_name]
+ instance_secgroup_filter_children = ['nova-base-ipv4',
+ 'nova-base-ipv6',
+ 'nova-allow-dhcp-server']
+
+ ctxt = context.get_admin_context()
if FLAGS.allow_project_net_traffic:
- nwfilter_xml += " <filterref filter='nova-project' />\n"
+ instance_filter_children += ['nova-project']
- for security_group in instance.security_groups:
- self.ensure_security_group_filter(security_group['id'])
+ for security_group in db.security_group_get_by_instance(ctxt,
+ instance['id']):
- nwfilter_xml += (" <filterref filter='nova-secgroup-%d' "
- "/>\n") % security_group['id']
- nwfilter_xml += "</filter>"
+ self.refresh_security_group_rules(security_group['id'])
- self._define_filter(nwfilter_xml)
+ instance_secgroup_filter_children += [('nova-secgroup-%s' %
+ security_group['id'])]
- def ensure_security_group_filter(self, security_group_id):
+ self._define_filter(
+ self._filter_container(instance_secgroup_filter_name,
+ instance_secgroup_filter_children))
+
+ self._define_filter(
+ self._filter_container(instance_filter_name,
+ instance_filter_children))
+
+ return
+
+ def refresh_security_group_rules(self, security_group_id):
return self._define_filter(
self.security_group_to_nwfilter_xml(security_group_id))
@@ -870,9 +1024,9 @@ class NWFilterFirewall(object):
rule_xml += "dstportstart='%s' dstportend='%s' " % \
(rule.from_port, rule.to_port)
elif rule.protocol == 'icmp':
- logging.info('rule.protocol: %r, rule.from_port: %r, '
- 'rule.to_port: %r' %
- (rule.protocol, rule.from_port, rule.to_port))
+ LOG.info('rule.protocol: %r, rule.from_port: %r, '
+ 'rule.to_port: %r', rule.protocol,
+ rule.from_port, rule.to_port)
if rule.from_port != -1:
rule_xml += "type='%s' " % rule.from_port
if rule.to_port != -1:
@@ -883,3 +1037,162 @@ class NWFilterFirewall(object):
xml = "<filter name='nova-secgroup-%s' chain='ipv4'>%s</filter>" % \
(security_group_id, rule_xml,)
return xml
+
+ def _instance_filter_name(self, instance):
+ return 'nova-instance-%s' % instance['name']
+
+
+class IptablesFirewallDriver(FirewallDriver):
+ def __init__(self, execute=None):
+ self.execute = execute or utils.execute
+ self.instances = set()
+
+ def apply_instance_filter(self, instance):
+ """No-op. Everything is done in prepare_instance_filter"""
+ pass
+
+ def remove_instance(self, instance):
+ self.instances.remove(instance)
+
+ def add_instance(self, instance):
+ self.instances.add(instance)
+
+ def prepare_instance_filter(self, instance):
+ self.add_instance(instance)
+ self.apply_ruleset()
+
+ def apply_ruleset(self):
+ current_filter, _ = self.execute('sudo iptables-save -t filter')
+ current_lines = current_filter.split('\n')
+ new_filter = self.modify_rules(current_lines)
+ self.execute('sudo iptables-restore',
+ process_input='\n'.join(new_filter))
+
+ def modify_rules(self, current_lines):
+ ctxt = context.get_admin_context()
+ # Remove any trace of nova rules.
+ new_filter = filter(lambda l: 'nova-' not in l, current_lines)
+
+ seen_chains = False
+ for rules_index in range(len(new_filter)):
+ if not seen_chains:
+ if new_filter[rules_index].startswith(':'):
+ seen_chains = True
+ elif seen_chains == 1:
+ if not new_filter[rules_index].startswith(':'):
+ break
+
+ our_chains = [':nova-ipv4-fallback - [0:0]']
+ our_rules = ['-A nova-ipv4-fallback -j DROP']
+
+ our_chains += [':nova-local - [0:0]']
+ our_rules += ['-A FORWARD -j nova-local']
+
+ security_groups = set()
+ # Add our chains
+ # First, we add instance chains and rules
+ for instance in self.instances:
+ chain_name = self._instance_chain_name(instance)
+ ip_address = self._ip_for_instance(instance)
+
+ our_chains += [':%s - [0:0]' % chain_name]
+
+ # Jump to the per-instance chain
+ our_rules += ['-A nova-local -d %s -j %s' % (ip_address,
+ chain_name)]
+
+ # Always drop invalid packets
+ our_rules += ['-A %s -m state --state '
+ 'INVALID -j DROP' % (chain_name,)]
+
+ # Allow established connections
+ our_rules += ['-A %s -m state --state '
+ 'ESTABLISHED,RELATED -j ACCEPT' % (chain_name,)]
+
+ # Jump to each security group chain in turn
+ for security_group in \
+ db.security_group_get_by_instance(ctxt,
+ instance['id']):
+ security_groups.add(security_group)
+
+ sg_chain_name = self._security_group_chain_name(security_group)
+
+ our_rules += ['-A %s -j %s' % (chain_name, sg_chain_name)]
+
+ # Allow DHCP responses
+ dhcp_server = self._dhcp_server_for_instance(instance)
+ our_rules += ['-A %s -s %s -p udp --sport 67 --dport 68' %
+ (chain_name, dhcp_server)]
+
+ # If nothing matches, jump to the fallback chain
+ our_rules += ['-A %s -j nova-ipv4-fallback' % (chain_name,)]
+
+ # then, security group chains and rules
+ for security_group in security_groups:
+ chain_name = self._security_group_chain_name(security_group)
+ our_chains += [':%s - [0:0]' % chain_name]
+
+ rules = \
+ db.security_group_rule_get_by_security_group(ctxt,
+ security_group['id'])
+
+ for rule in rules:
+ logging.info('%r', rule)
+ args = ['-A', chain_name, '-p', rule.protocol]
+
+ if rule.cidr:
+ args += ['-s', rule.cidr]
+ else:
+ # Eventually, a mechanism to grant access for security
+ # groups will turn up here. It'll use ipsets.
+ continue
+
+ if rule.protocol in ['udp', 'tcp']:
+ if rule.from_port == rule.to_port:
+ args += ['--dport', '%s' % (rule.from_port,)]
+ else:
+ args += ['-m', 'multiport',
+ '--dports', '%s:%s' % (rule.from_port,
+ rule.to_port)]
+ elif rule.protocol == 'icmp':
+ icmp_type = rule.from_port
+ icmp_code = rule.to_port
+
+ if icmp_type == -1:
+ icmp_type_arg = None
+ else:
+ icmp_type_arg = '%s' % icmp_type
+ if not icmp_code == -1:
+ icmp_type_arg += '/%s' % icmp_code
+
+ if icmp_type_arg:
+ args += ['-m', 'icmp', '--icmp-type', icmp_type_arg]
+
+ args += ['-j ACCEPT']
+ our_rules += [' '.join(args)]
+
+ new_filter[rules_index:rules_index] = our_rules
+ new_filter[rules_index:rules_index] = our_chains
+ logging.info('new_filter: %s', '\n'.join(new_filter))
+ return new_filter
+
+ def refresh_security_group_members(self, security_group):
+ pass
+
+ def refresh_security_group_rules(self, security_group):
+ self.apply_ruleset()
+
+ def _security_group_chain_name(self, security_group):
+ return 'nova-sg-%s' % (security_group['id'],)
+
+ def _instance_chain_name(self, instance):
+ return 'nova-inst-%s' % (instance['id'],)
+
+ def _ip_for_instance(self, instance):
+ return db.instance_get_fixed_address(context.get_admin_context(),
+ instance['id'])
+
+ def _dhcp_server_for_instance(self, instance):
+ network = db.project_get_network(context.get_admin_context(),
+ instance['project_id'])
+ return network['gateway']
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index aa4026f97..96d8f5fc8 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -52,12 +52,12 @@ A fake XenAPI SDK.
import datetime
-import logging
import uuid
from pprint import pformat
from nova import exception
+from nova import log as logging
_CLASSES = ['host', 'network', 'session', 'SR', 'VBD',\
@@ -65,9 +65,11 @@ _CLASSES = ['host', 'network', 'session', 'SR', 'VBD',\
_db_content = {}
+LOG = logging.getLogger("nova.virt.xenapi.fake")
+
def log_db_contents(msg=None):
- logging.debug(_("%s: _db_content => %s"), msg or "", pformat(_db_content))
+ LOG.debug(_("%s: _db_content => %s"), msg or "", pformat(_db_content))
def reset():
@@ -242,9 +244,9 @@ class SessionBase(object):
full_params = (self._session,) + params
meth = getattr(self, methodname, None)
if meth is None:
- logging.warn('Raising NotImplemented')
+ LOG.debug(_('Raising NotImplemented'))
raise NotImplementedError(
- 'xenapi.fake does not have an implementation for %s' %
+ _('xenapi.fake does not have an implementation for %s') %
methodname)
return meth(*full_params)
@@ -278,12 +280,12 @@ class SessionBase(object):
if impl is not None:
def callit(*params):
- logging.warn('Calling %s %s', name, impl)
+ LOG.debug(_('Calling %s %s'), name, impl)
self._check_session(params)
return impl(*params)
return callit
if self._is_gettersetter(name, True):
- logging.warn('Calling getter %s', name)
+ LOG.debug(_('Calling getter %s'), name)
return lambda *params: self._getter(name, params)
elif self._is_create(name):
return lambda *params: self._create(name, params)
@@ -333,10 +335,10 @@ class SessionBase(object):
field in _db_content[cls][ref]):
return _db_content[cls][ref][field]
- logging.error('Raising NotImplemented')
+ LOG.debug(_('Raising NotImplemented'))
raise NotImplementedError(
- 'xenapi.fake does not have an implementation for %s or it has '
- 'been called with the wrong number of arguments' % name)
+ _('xenapi.fake does not have an implementation for %s or it has '
+ 'been called with the wrong number of arguments') % name)
def _setter(self, name, params):
self._check_session(params)
@@ -351,7 +353,7 @@ class SessionBase(object):
field in _db_content[cls][ref]):
_db_content[cls][ref][field] = val
- logging.warn('Raising NotImplemented')
+ LOG.debug(_('Raising NotImplemented'))
raise NotImplementedError(
'xenapi.fake does not have an implementation for %s or it has '
'been called with the wrong number of arguments or the database '
@@ -399,7 +401,7 @@ class SessionBase(object):
self._session not in _db_content['session']):
raise Failure(['HANDLE_INVALID', 'session', self._session])
if len(params) == 0 or params[0] != self._session:
- logging.warn('Raising NotImplemented')
+ LOG.debug(_('Raising NotImplemented'))
raise NotImplementedError('Call to XenAPI without using .xenapi')
def _check_arg_count(self, params, expected):
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 9d1b51848..a91c8ea27 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -19,7 +19,6 @@ Helper methods for operations related to the management of VM records and
their attributes like VDIs, VIFs, as well as their lookup functions.
"""
-import logging
import pickle
import urllib
from xml.dom import minidom
@@ -27,6 +26,7 @@ from xml.dom import minidom
from eventlet import event
from nova import exception
from nova import flags
+from nova import log as logging
from nova import utils
from nova.auth.manager import AuthManager
from nova.compute import instance_types
@@ -37,6 +37,7 @@ from nova.virt.xenapi.volume_utils import StorageError
FLAGS = flags.FLAGS
+LOG = logging.getLogger("nova.virt.xenapi.vm_utils")
XENAPI_POWER_STATE = {
'Halted': power_state.SHUTDOWN,
@@ -121,9 +122,9 @@ class VMHelper(HelperBase):
rec['HVM_boot_params'] = {'order': 'dc'}
rec['platform'] = {'acpi': 'true', 'apic': 'true',
'pae': 'true', 'viridian': 'true'}
- logging.debug('Created VM %s...', instance.name)
+ LOG.debug(_('Created VM %s...'), instance.name)
vm_ref = session.call_xenapi('VM.create', rec)
- logging.debug(_('Created VM %s as %s.'), instance.name, vm_ref)
+ LOG.debug(_('Created VM %s as %s.'), instance.name, vm_ref)
return vm_ref
@classmethod
@@ -143,10 +144,9 @@ class VMHelper(HelperBase):
vbd_rec['qos_algorithm_type'] = ''
vbd_rec['qos_algorithm_params'] = {}
vbd_rec['qos_supported_algorithms'] = []
- logging.debug(_('Creating VBD for VM %s, VDI %s ... '),
- vm_ref, vdi_ref)
+ LOG.debug(_('Creating VBD for VM %s, VDI %s ... '), vm_ref, vdi_ref)
vbd_ref = session.call_xenapi('VBD.create', vbd_rec)
- logging.debug(_('Created VBD %s for VM %s, VDI %s.'), vbd_ref, vm_ref,
+ LOG.debug(_('Created VBD %s for VM %s, VDI %s.'), vbd_ref, vm_ref,
vdi_ref)
return vbd_ref
@@ -161,7 +161,7 @@ class VMHelper(HelperBase):
if vbd_rec['userdevice'] == str(number):
return vbd
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise StorageError(_('VBD not found in instance %s') % vm_ref)
@classmethod
@@ -170,7 +170,7 @@ class VMHelper(HelperBase):
try:
vbd_ref = session.call_xenapi('VBD.unplug', vbd_ref)
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
if exc.details[0] != 'DEVICE_ALREADY_DETACHED':
raise StorageError(_('Unable to unplug VBD %s') % vbd_ref)
@@ -183,7 +183,7 @@ class VMHelper(HelperBase):
#with Josh Kearney
session.wait_for_task(0, task)
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise StorageError(_('Unable to destroy VBD %s') % vbd_ref)
@classmethod
@@ -199,11 +199,11 @@ class VMHelper(HelperBase):
vif_rec['other_config'] = {}
vif_rec['qos_algorithm_type'] = ''
vif_rec['qos_algorithm_params'] = {}
- logging.debug(_('Creating VIF for VM %s, network %s.'), vm_ref,
- network_ref)
+ LOG.debug(_('Creating VIF for VM %s, network %s.'), vm_ref,
+ network_ref)
vif_ref = session.call_xenapi('VIF.create', vif_rec)
- logging.debug(_('Created VIF %s for VM %s, network %s.'), vif_ref,
- vm_ref, network_ref)
+ LOG.debug(_('Created VIF %s for VM %s, network %s.'), vif_ref,
+ vm_ref, network_ref)
return vif_ref
@classmethod
@@ -213,8 +213,7 @@ class VMHelper(HelperBase):
"""
#TODO(sirp): Add quiesce and VSS locking support when Windows support
# is added
- logging.debug(_("Snapshotting VM %s with label '%s'..."),
- vm_ref, label)
+ LOG.debug(_("Snapshotting VM %s with label '%s'..."), vm_ref, label)
vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref)
vm_vdi_uuid = vm_vdi_rec["uuid"]
@@ -227,8 +226,8 @@ class VMHelper(HelperBase):
template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1]
template_vdi_uuid = template_vdi_rec["uuid"]
- logging.debug(_('Created snapshot %s from VM %s.'), template_vm_ref,
- vm_ref)
+ LOG.debug(_('Created snapshot %s from VM %s.'), template_vm_ref,
+ vm_ref)
parent_uuid = wait_for_vhd_coalesce(
session, instance_id, sr_ref, vm_vdi_ref, original_parent_uuid)
@@ -241,8 +240,7 @@ class VMHelper(HelperBase):
""" Requests that the Glance plugin bundle the specified VDIs and
push them into Glance using the specified human-friendly name.
"""
- logging.debug(_("Asking xapi to upload %s as '%s'"),
- vdi_uuids, image_name)
+ LOG.debug(_("Asking xapi to upload %s as '%s'"), vdi_uuids, image_name)
params = {'vdi_uuids': vdi_uuids,
'image_name': image_name,
@@ -260,7 +258,7 @@ class VMHelper(HelperBase):
"""
url = images.image_url(image)
access = AuthManager().get_access_key(user, project)
- logging.debug("Asking xapi to fetch %s as %s", url, access)
+ LOG.debug(_("Asking xapi to fetch %s as %s"), url, access)
fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel'
args = {}
args['src_url'] = url
@@ -278,7 +276,7 @@ class VMHelper(HelperBase):
@classmethod
def lookup_image(cls, session, vdi_ref):
- logging.debug("Looking up vdi %s for PV kernel", vdi_ref)
+ LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
fn = "is_vdi_pv"
args = {}
args['vdi-ref'] = vdi_ref
@@ -289,7 +287,7 @@ class VMHelper(HelperBase):
pv = True
elif pv_str.lower() == 'false':
pv = False
- logging.debug("PV Kernel in VDI:%d", pv)
+ LOG.debug(_("PV Kernel in VDI:%d"), pv)
return pv
@classmethod
@@ -317,10 +315,9 @@ class VMHelper(HelperBase):
vdi = session.get_xenapi().VBD.get_VDI(vbd)
# Test valid VDI
record = session.get_xenapi().VDI.get_record(vdi)
- logging.debug(_('VDI %s is still available'),
- record['uuid'])
+ LOG.debug(_('VDI %s is still available'), record['uuid'])
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
else:
vdis.append(vdi)
if len(vdis) > 0:
@@ -331,10 +328,10 @@ class VMHelper(HelperBase):
@classmethod
def compile_info(cls, record):
"""Fill record with VM status information"""
- logging.info(_("(VM_UTILS) xenserver vm state -> |%s|"),
- record['power_state'])
- logging.info(_("(VM_UTILS) xenapi power_state -> |%s|"),
- XENAPI_POWER_STATE[record['power_state']])
+ LOG.info(_("(VM_UTILS) xenserver vm state -> |%s|"),
+ record['power_state'])
+ LOG.info(_("(VM_UTILS) xenapi power_state -> |%s|"),
+ XENAPI_POWER_STATE[record['power_state']])
return {'state': XENAPI_POWER_STATE[record['power_state']],
'max_mem': long(record['memory_static_max']) >> 10,
'mem': long(record['memory_dynamic_max']) >> 10,
@@ -360,7 +357,9 @@ class VMHelper(HelperBase):
if i >= 3 and i <= 11:
ref = node.childNodes
# Name and Value
- diags[ref[0].firstChild.data] = ref[6].firstChild.data
+ if len(ref) > 6:
+ diags[ref[0].firstChild.data] = \
+ ref[6].firstChild.data
return diags
except cls.XenAPI.Failure as e:
return {"Unable to retrieve diagnostics": e}
@@ -388,11 +387,9 @@ def get_vhd_parent(session, vdi_rec):
"""
if 'vhd-parent' in vdi_rec['sm_config']:
parent_uuid = vdi_rec['sm_config']['vhd-parent']
- #NOTE(sirp): changed xenapi -> get_xenapi()
parent_ref = session.get_xenapi().VDI.get_by_uuid(parent_uuid)
parent_rec = session.get_xenapi().VDI.get_record(parent_ref)
- #NOTE(sirp): changed log -> logging
- logging.debug(_("VHD %s has parent %s"), vdi_rec['uuid'], parent_ref)
+ LOG.debug(_("VHD %s has parent %s"), vdi_rec['uuid'], parent_ref)
return parent_ref, parent_rec
else:
return None
@@ -409,7 +406,7 @@ def get_vhd_parent_uuid(session, vdi_ref):
def scan_sr(session, instance_id, sr_ref):
- logging.debug(_("Re-scanning SR %s"), sr_ref)
+ LOG.debug(_("Re-scanning SR %s"), sr_ref)
task = session.call_xenapi('Async.SR.scan', sr_ref)
session.wait_for_task(instance_id, task)
@@ -433,10 +430,9 @@ def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
scan_sr(session, instance_id, sr_ref)
parent_uuid = get_vhd_parent_uuid(session, vdi_ref)
if original_parent_uuid and (parent_uuid != original_parent_uuid):
- logging.debug(
- _("Parent %s doesn't match original parent %s, "
- "waiting for coalesce..."),
- parent_uuid, original_parent_uuid)
+ LOG.debug(_("Parent %s doesn't match original parent %s, "
+ "waiting for coalesce..."), parent_uuid,
+ original_parent_uuid)
else:
done.send(parent_uuid)
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 76f31635a..7e3585991 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -1,6 +1,7 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
+# Copyright 2010 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -18,10 +19,11 @@
Management class for VM-related functions (spawn, reboot, etc).
"""
-import logging
+import json
from nova import db
from nova import context
+from nova import log as logging
from nova import exception
from nova import utils
@@ -31,12 +33,14 @@ from nova.virt.xenapi.network_utils import NetworkHelper
from nova.virt.xenapi.vm_utils import VMHelper
from nova.virt.xenapi.vm_utils import ImageType
+XenAPI = None
+LOG = logging.getLogger("nova.virt.xenapi.vmops")
+
class VMOps(object):
"""
Management class for VM-related tasks
"""
-
def __init__(self, session):
self.XenAPI = session.get_imported_xenapi()
self._session = session
@@ -92,10 +96,9 @@ class VMOps(object):
if network_ref:
VMHelper.create_vif(self._session, vm_ref,
network_ref, instance.mac_address)
- logging.debug(_('Starting VM %s...'), vm_ref)
+ LOG.debug(_('Starting VM %s...'), vm_ref)
self._session.call_xenapi('VM.start', vm_ref, False, False)
- logging.info(_('Spawning VM %s created %s.'), instance.name,
- vm_ref)
+ LOG.info(_('Spawning VM %s created %s.'), instance.name, vm_ref)
# NOTE(armando): Do we really need to do this in virt?
timer = utils.LoopingCall(f=None)
@@ -106,12 +109,12 @@ class VMOps(object):
db.instance_set_state(context.get_admin_context(),
instance['id'], state)
if state == power_state.RUNNING:
- logging.debug(_('Instance %s: booted'), instance['name'])
+ LOG.debug(_('Instance %s: booted'), instance['name'])
timer.stop()
except Exception, exc:
- logging.warn(exc)
- logging.exception(_('instance %s: failed to boot'),
- instance['name'])
+ LOG.warn(exc)
+ LOG.exception(_('instance %s: failed to boot'),
+ instance['name'])
db.instance_set_state(context.get_admin_context(),
instance['id'],
power_state.SHUTDOWN)
@@ -120,6 +123,20 @@ class VMOps(object):
timer.f = _wait_for_boot
return timer.start(interval=0.5, now=True)
+ def _get_vm_opaque_ref(self, instance_or_vm):
+ """Refactored out the common code of many methods that receive either
+ a vm name or a vm instance, and want a vm instance in return.
+ """
+ try:
+ instance_name = instance_or_vm.name
+ vm = VMHelper.lookup(self._session, instance_name)
+ except AttributeError:
+ # A vm opaque ref was passed
+ vm = instance_or_vm
+ if vm is None:
+ raise Exception(_('Instance not present %s') % instance_name)
+ return vm
+
def snapshot(self, instance, name):
""" Create snapshot from a running VM instance
@@ -168,11 +185,7 @@ class VMOps(object):
def reboot(self, instance):
"""Reboot VM instance"""
- instance_name = instance.name
- vm = VMHelper.lookup(self._session, instance_name)
- if vm is None:
- raise exception.NotFound(_('instance not'
- ' found %s') % instance_name)
+ vm = self._get_vm_opaque_ref(instance)
task = self._session.call_xenapi('Async.VM.clean_reboot', vm)
self._session.wait_for_task(instance.id, task)
@@ -194,7 +207,7 @@ class VMOps(object):
task = self._session.call_xenapi('Async.VM.hard_shutdown', vm)
self._session.wait_for_task(instance.id, task)
except self.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
# Disk clean-up
if vdis:
@@ -203,39 +216,31 @@ class VMOps(object):
task = self._session.call_xenapi('Async.VDI.destroy', vdi)
self._session.wait_for_task(instance.id, task)
except self.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
# VM Destroy
try:
task = self._session.call_xenapi('Async.VM.destroy', vm)
self._session.wait_for_task(instance.id, task)
except self.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
def _wait_with_callback(self, instance_id, task, callback):
ret = None
try:
ret = self._session.wait_for_task(instance_id, task)
- except XenAPI.Failure, exc:
- logging.warn(exc)
+ except self.XenAPI.Failure, exc:
+ LOG.exception(exc)
callback(ret)
def pause(self, instance, callback):
"""Pause VM instance"""
- instance_name = instance.name
- vm = VMHelper.lookup(self._session, instance_name)
- if vm is None:
- raise exception.NotFound(_('Instance not'
- ' found %s') % instance_name)
+ vm = self._get_vm_opaque_ref(instance)
task = self._session.call_xenapi('Async.VM.pause', vm)
self._wait_with_callback(instance.id, task, callback)
def unpause(self, instance, callback):
"""Unpause VM instance"""
- instance_name = instance.name
- vm = VMHelper.lookup(self._session, instance_name)
- if vm is None:
- raise exception.NotFound(_('Instance not'
- ' found %s') % instance_name)
+ vm = self._get_vm_opaque_ref(instance)
task = self._session.call_xenapi('Async.VM.unpause', vm)
self._wait_with_callback(instance.id, task, callback)
@@ -270,10 +275,7 @@ class VMOps(object):
def get_diagnostics(self, instance):
"""Return data about VM diagnostics"""
- vm = VMHelper.lookup(self._session, instance.name)
- if vm is None:
- raise exception.NotFound(_("Instance not found %s") %
- instance.name)
+ vm = self._get_vm_opaque_ref(instance)
rec = self._session.get_xenapi().VM.get_record(vm)
return VMHelper.compile_diagnostics(self._session, rec)
@@ -281,3 +283,180 @@ class VMOps(object):
"""Return snapshot of console"""
# TODO: implement this to fix pylint!
return 'FAKE CONSOLE OUTPUT of instance'
+
+ def get_ajax_console(self, instance):
+ """Return link to instance's ajax console"""
+ # TODO: implement this!
+ return 'http://fakeajaxconsole/fake_url'
+
+ def list_from_xenstore(self, vm, path):
+ """Runs the xenstore-ls command to get a listing of all records
+ from 'path' downward. Returns a dict with the sub-paths as keys,
+ and the value stored in those paths as values. If nothing is
+ found at that path, returns None.
+ """
+ ret = self._make_xenstore_call('list_records', vm, path)
+ return json.loads(ret)
+
+ def read_from_xenstore(self, vm, path):
+ """Returns the value stored in the xenstore record for the given VM
+ at the specified location. A XenAPIPlugin.PluginError will be raised
+ if any error is encountered in the read process.
+ """
+ try:
+ ret = self._make_xenstore_call('read_record', vm, path,
+ {'ignore_missing_path': 'True'})
+ except self.XenAPI.Failure, e:
+ return None
+ ret = json.loads(ret)
+ if ret == "None":
+ # Can't marshall None over RPC calls.
+ return None
+ return ret
+
+ def write_to_xenstore(self, vm, path, value):
+ """Writes the passed value to the xenstore record for the given VM
+ at the specified location. A XenAPIPlugin.PluginError will be raised
+ if any error is encountered in the write process.
+ """
+ return self._make_xenstore_call('write_record', vm, path,
+ {'value': json.dumps(value)})
+
+ def clear_xenstore(self, vm, path):
+ """Deletes the VM's xenstore record for the specified path.
+ If there is no such record, the request is ignored.
+ """
+ self._make_xenstore_call('delete_record', vm, path)
+
+ def _make_xenstore_call(self, method, vm, path, addl_args={}):
+ """Handles calls to the xenstore xenapi plugin."""
+ return self._make_plugin_call('xenstore.py', method=method, vm=vm,
+ path=path, addl_args=addl_args)
+
+ def _make_plugin_call(self, plugin, method, vm, path, addl_args={}):
+ """Abstracts out the process of calling a method of a xenapi plugin.
+ Any errors raised by the plugin will in turn raise a RuntimeError here.
+ """
+ vm = self._get_vm_opaque_ref(vm)
+ rec = self._session.get_xenapi().VM.get_record(vm)
+ args = {'dom_id': rec['domid'], 'path': path}
+ args.update(addl_args)
+ # If the 'testing_mode' attribute is set, add that to the args.
+ if getattr(self, 'testing_mode', False):
+ args['testing_mode'] = 'true'
+ try:
+ task = self._session.async_call_plugin(plugin, method, args)
+ ret = self._session.wait_for_task(0, task)
+ except self.XenAPI.Failure, e:
+ raise RuntimeError("%s" % e.details[-1])
+ return ret
+
+ def add_to_xenstore(self, vm, path, key, value):
+ """Adds the passed key/value pair to the xenstore record for
+ the given VM at the specified location. A XenAPIPlugin.PluginError
+ will be raised if any error is encountered in the write process.
+ """
+ current = self.read_from_xenstore(vm, path)
+ if not current:
+ # Nothing at that location
+ current = {key: value}
+ else:
+ current[key] = value
+ self.write_to_xenstore(vm, path, current)
+
+ def remove_from_xenstore(self, vm, path, key_or_keys):
+ """Takes either a single key or a list of keys and removes
+ them from the xenstoreirecord data for the given VM.
+ If the key doesn't exist, the request is ignored.
+ """
+ current = self.list_from_xenstore(vm, path)
+ if not current:
+ return
+ if isinstance(key_or_keys, basestring):
+ keys = [key_or_keys]
+ else:
+ keys = key_or_keys
+ keys.sort(lambda x, y: cmp(y.count('/'), x.count('/')))
+ for key in keys:
+ if path:
+ keypath = "%s/%s" % (path, key)
+ else:
+ keypath = key
+ self._make_xenstore_call('delete_record', vm, keypath)
+
+ ########################################################################
+ ###### The following methods interact with the xenstore parameter
+ ###### record, not the live xenstore. They were created before I
+ ###### knew the difference, and are left in here in case they prove
+ ###### to be useful. They all have '_param' added to their method
+ ###### names to distinguish them. (dabo)
+ ########################################################################
+ def read_partial_from_param_xenstore(self, instance_or_vm, key_prefix):
+ """Returns a dict of all the keys in the xenstore parameter record
+ for the given instance that begin with the key_prefix.
+ """
+ data = self.read_from_param_xenstore(instance_or_vm)
+ badkeys = [k for k in data.keys()
+ if not k.startswith(key_prefix)]
+ for badkey in badkeys:
+ del data[badkey]
+ return data
+
+ def read_from_param_xenstore(self, instance_or_vm, keys=None):
+ """Returns the xenstore parameter record data for the specified VM
+ instance as a dict. Accepts an optional key or list of keys; if a
+ value for 'keys' is passed, the returned dict is filtered to only
+ return the values for those keys.
+ """
+ vm = self._get_vm_opaque_ref(instance_or_vm)
+ data = self._session.call_xenapi_request('VM.get_xenstore_data',
+ (vm, ))
+ ret = {}
+ if keys is None:
+ keys = data.keys()
+ elif isinstance(keys, basestring):
+ keys = [keys]
+ for key in keys:
+ raw = data.get(key)
+ if raw:
+ ret[key] = json.loads(raw)
+ else:
+ ret[key] = raw
+ return ret
+
+ def add_to_param_xenstore(self, instance_or_vm, key, val):
+ """Takes a key/value pair and adds it to the xenstore parameter
+ record for the given vm instance. If the key exists in xenstore,
+ it is overwritten"""
+ vm = self._get_vm_opaque_ref(instance_or_vm)
+ self.remove_from_param_xenstore(instance_or_vm, key)
+ jsonval = json.dumps(val)
+ self._session.call_xenapi_request('VM.add_to_xenstore_data',
+ (vm, key, jsonval))
+
+ def write_to_param_xenstore(self, instance_or_vm, mapping):
+ """Takes a dict and writes each key/value pair to the xenstore
+ parameter record for the given vm instance. Any existing data for
+ those keys is overwritten.
+ """
+ for k, v in mapping.iteritems():
+ self.add_to_param_xenstore(instance_or_vm, k, v)
+
+ def remove_from_param_xenstore(self, instance_or_vm, key_or_keys):
+ """Takes either a single key or a list of keys and removes
+ them from the xenstore parameter record data for the given VM.
+ If the key doesn't exist, the request is ignored.
+ """
+ vm = self._get_vm_opaque_ref(instance_or_vm)
+ if isinstance(key_or_keys, basestring):
+ keys = [key_or_keys]
+ else:
+ keys = key_or_keys
+ for key in keys:
+ self._session.call_xenapi_request('VM.remove_from_xenstore_data',
+ (vm, key))
+
+ def clear_param_xenstore(self, instance_or_vm):
+ """Removes all data from the xenstore parameter record for this VM."""
+ self.write_to_param_xenstore(instance_or_vm, {})
+ ########################################################################
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index 1ca813bcf..0cd15b950 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -21,16 +21,17 @@ and storage repositories
import re
import string
-import logging
from nova import db
from nova import context
from nova import exception
from nova import flags
+from nova import log as logging
from nova import utils
from nova.virt.xenapi import HelperBase
FLAGS = flags.FLAGS
+LOG = logging.getLogger("nova.virt.xenapi.volume_utils")
class StorageError(Exception):
@@ -53,7 +54,7 @@ class VolumeHelper(HelperBase):
"""
sr_ref = session.get_xenapi().SR.get_by_name_label(label)
if len(sr_ref) == 0:
- logging.debug('Introducing %s...', label)
+ LOG.debug(_('Introducing %s...'), label)
record = {}
if 'chapuser' in info and 'chappassword' in info:
record = {'target': info['targetHost'],
@@ -70,10 +71,10 @@ class VolumeHelper(HelperBase):
session.get_xenapi_host(),
record,
'0', label, description, 'iscsi', '', False, {})
- logging.debug('Introduced %s as %s.', label, sr_ref)
+ LOG.debug(_('Introduced %s as %s.'), label, sr_ref)
return sr_ref
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise StorageError(_('Unable to create Storage Repository'))
else:
return sr_ref[0]
@@ -85,32 +86,32 @@ class VolumeHelper(HelperBase):
vdi_ref = session.get_xenapi().VBD.get_VDI(vbd_ref)
sr_ref = session.get_xenapi().VDI.get_SR(vdi_ref)
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise StorageError(_('Unable to find SR from VBD %s') % vbd_ref)
return sr_ref
@classmethod
def destroy_iscsi_storage(cls, session, sr_ref):
"""Forget the SR whilst preserving the state of the disk"""
- logging.debug("Forgetting SR %s ... ", sr_ref)
+ LOG.debug(_("Forgetting SR %s ... "), sr_ref)
pbds = []
try:
pbds = session.get_xenapi().SR.get_PBDs(sr_ref)
except cls.XenAPI.Failure, exc:
- logging.warn('Ignoring exception %s when getting PBDs for %s',
- exc, sr_ref)
+ LOG.warn(_('Ignoring exception %s when getting PBDs for %s'),
+ exc, sr_ref)
for pbd in pbds:
try:
session.get_xenapi().PBD.unplug(pbd)
except cls.XenAPI.Failure, exc:
- logging.warn('Ignoring exception %s when unplugging PBD %s',
- exc, pbd)
+ LOG.warn(_('Ignoring exception %s when unplugging PBD %s'),
+ exc, pbd)
try:
session.get_xenapi().SR.forget(sr_ref)
- logging.debug("Forgetting SR %s done.", sr_ref)
+ LOG.debug(_("Forgetting SR %s done."), sr_ref)
except cls.XenAPI.Failure, exc:
- logging.warn('Ignoring exception %s when forgetting SR %s',
- exc, sr_ref)
+ LOG.warn(_('Ignoring exception %s when forgetting SR %s'), exc,
+ sr_ref)
@classmethod
def introduce_vdi(cls, session, sr_ref):
@@ -118,12 +119,12 @@ class VolumeHelper(HelperBase):
try:
vdis = session.get_xenapi().SR.get_VDIs(sr_ref)
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise StorageError(_('Unable to introduce VDI on SR %s') % sr_ref)
try:
vdi_rec = session.get_xenapi().VDI.get_record(vdis[0])
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise StorageError(_('Unable to get record'
' of VDI %s on') % vdis[0])
else:
@@ -141,7 +142,7 @@ class VolumeHelper(HelperBase):
vdi_rec['xenstore_data'],
vdi_rec['sm_config'])
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise StorageError(_('Unable to introduce VDI for SR %s')
% sr_ref)
@@ -165,11 +166,8 @@ class VolumeHelper(HelperBase):
target_host = _get_target_host(iscsi_portal)
target_port = _get_target_port(iscsi_portal)
target_iqn = _get_iqn(iscsi_name, volume_id)
- logging.debug('(vol_id,number,host,port,iqn): (%s,%s,%s,%s)',
- volume_id,
- target_host,
- target_port,
- target_iqn)
+ LOG.debug('(vol_id,number,host,port,iqn): (%s,%s,%s,%s)',
+ volume_id, target_host, target_port, target_iqn)
if (device_number < 0) or \
(volume_id is None) or \
(target_host is None) or \
@@ -196,19 +194,23 @@ class VolumeHelper(HelperBase):
elif re.match('^[0-9]+$', mountpoint):
return string.atoi(mountpoint, 10)
else:
- logging.warn('Mountpoint cannot be translated: %s', mountpoint)
+ LOG.warn(_('Mountpoint cannot be translated: %s'), mountpoint)
return -1
-def _get_volume_id(path):
+def _get_volume_id(path_or_id):
"""Retrieve the volume id from device_path"""
+ # If we have the ID and not a path, just return it.
+ if isinstance(path_or_id, int):
+ return path_or_id
# n must contain at least the volume_id
# /vol- is for remote volumes
# -vol- is for local volumes
# see compute/manager->setup_compute_volume
- volume_id = path[path.find('/vol-') + 1:]
- if volume_id == path:
- volume_id = path[path.find('-vol-') + 1:].replace('--', '-')
+ volume_id = path_or_id[path_or_id.find('/vol-') + 1:]
+ if volume_id == path_or_id:
+ volume_id = path_or_id[path_or_id.find('-vol-') + 1:]
+ volume_id = volume_id.replace('--', '-')
return volume_id
@@ -253,7 +255,7 @@ def _get_target(volume_id):
"sendtargets -p %s" %
volume_ref['host'])
except exception.ProcessExecutionError, exc:
- logging.warn(exc)
+ LOG.exception(exc)
else:
targets = r.splitlines()
if len(_e) == 0 and len(targets) == 1:
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index fdeb2506c..189f968c6 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -17,14 +17,17 @@
"""
Management class for Storage-related functions (attach, detach, etc).
"""
-import logging
from nova import exception
+from nova import log as logging
from nova.virt.xenapi.vm_utils import VMHelper
from nova.virt.xenapi.volume_utils import VolumeHelper
from nova.virt.xenapi.volume_utils import StorageError
+LOG = logging.getLogger("nova.virt.xenapi.volumeops")
+
+
class VolumeOps(object):
"""
Management class for Volume-related tasks
@@ -45,8 +48,8 @@ class VolumeOps(object):
raise exception.NotFound(_('Instance %s not found')
% instance_name)
# NOTE: No Resource Pool concept so far
- logging.debug(_("Attach_volume: %s, %s, %s"),
- instance_name, device_path, mountpoint)
+ LOG.debug(_("Attach_volume: %s, %s, %s"),
+ instance_name, device_path, mountpoint)
# Create the iSCSI SR, and the PDB through which hosts access SRs.
# But first, retrieve target info, like Host, IQN, LUN and SCSIID
vol_rec = VolumeHelper.parse_volume_info(device_path, mountpoint)
@@ -61,7 +64,7 @@ class VolumeOps(object):
try:
vdi_ref = VolumeHelper.introduce_vdi(self._session, sr_ref)
except StorageError, exc:
- logging.warn(exc)
+ LOG.exception(exc)
VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
raise Exception(_('Unable to create VDI on SR %s for instance %s')
% (sr_ref,
@@ -73,7 +76,7 @@ class VolumeOps(object):
vol_rec['deviceNumber'],
False)
except self.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
raise Exception(_('Unable to use SR %s for instance %s')
% (sr_ref,
@@ -84,13 +87,13 @@ class VolumeOps(object):
vbd_ref)
self._session.wait_for_task(vol_rec['deviceNumber'], task)
except self.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
VolumeHelper.destroy_iscsi_storage(self._session,
sr_ref)
raise Exception(_('Unable to attach volume to instance %s')
% instance_name)
- logging.info(_('Mountpoint %s attached to instance %s'),
- mountpoint, instance_name)
+ LOG.info(_('Mountpoint %s attached to instance %s'),
+ mountpoint, instance_name)
def detach_volume(self, instance_name, mountpoint):
"""Detach volume storage to VM instance"""
@@ -100,13 +103,13 @@ class VolumeOps(object):
raise exception.NotFound(_('Instance %s not found')
% instance_name)
# Detach VBD from VM
- logging.debug(_("Detach_volume: %s, %s"), instance_name, mountpoint)
+ LOG.debug(_("Detach_volume: %s, %s"), instance_name, mountpoint)
device_number = VolumeHelper.mountpoint_to_number(mountpoint)
try:
vbd_ref = VMHelper.find_vbd_by_number(self._session,
vm_ref, device_number)
except StorageError, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise Exception(_('Unable to locate volume %s') % mountpoint)
else:
try:
@@ -114,13 +117,13 @@ class VolumeOps(object):
vbd_ref)
VMHelper.unplug_vbd(self._session, vbd_ref)
except StorageError, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise Exception(_('Unable to detach volume %s') % mountpoint)
try:
VMHelper.destroy_vbd(self._session, vbd_ref)
except StorageError, exc:
- logging.warn(exc)
+ LOG.exception(exc)
# Forget SR
VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
- logging.info(_('Mountpoint %s detached from instance %s'),
- mountpoint, instance_name)
+ LOG.info(_('Mountpoint %s detached from instance %s'),
+ mountpoint, instance_name)
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index f17c8f39d..45d0738a5 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -1,6 +1,7 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
+# Copyright 2010 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -19,15 +20,15 @@ A connection to XenServer or Xen Cloud Platform.
The concurrency model for this class is as follows:
-All XenAPI calls are on a thread (using t.i.t.deferToThread, via the decorator
-deferredToThread). They are remote calls, and so may hang for the usual
-reasons. They should not be allowed to block the reactor thread.
+All XenAPI calls are on a green thread (using eventlet's "tpool"
+thread pool). They are remote calls, and so may hang for the usual
+reasons.
All long-running XenAPI calls (VM.start, VM.reboot, etc) are called async
-(using XenAPI.VM.async_start etc). These return a task, which can then be
-polled for completion. Polling is handled using reactor.callLater.
+(using XenAPI.VM.async_start etc). These return a task, which can then be
+polled for completion.
-This combination of techniques means that we don't block the reactor thread at
+This combination of techniques means that we don't block the main thread at
all, and at the same time we don't hold lots of threads waiting for
long-running operations.
@@ -50,8 +51,8 @@ reactor thread if the VM.get_by_name_label or VM.get_record calls block.
:iqn_prefix: IQN Prefix, e.g. 'iqn.2010-10.org.openstack'
"""
-import logging
import sys
+import urlparse
import xmlrpclib
from eventlet import event
@@ -61,9 +62,14 @@ from nova import context
from nova import db
from nova import utils
from nova import flags
+from nova import log as logging
from nova.virt.xenapi.vmops import VMOps
from nova.virt.xenapi.volumeops import VolumeOps
+
+LOG = logging.getLogger("nova.virt.xenapi")
+
+
FLAGS = flags.FLAGS
flags.DEFINE_string('xenapi_connection_url',
@@ -81,7 +87,7 @@ flags.DEFINE_string('xenapi_connection_password',
flags.DEFINE_float('xenapi_task_poll_interval',
0.5,
'The interval used for polling of remote tasks '
- '(Async.VM.start, etc). Used only if '
+ '(Async.VM.start, etc). Used only if '
'connection_type=xenapi.')
flags.DEFINE_float('xenapi_vhd_coalesce_poll_interval',
5.0,
@@ -175,6 +181,10 @@ class XenAPIConnection(object):
"""Return snapshot of console"""
return self._vmops.get_console_output(instance)
+ def get_ajax_console(self, instance):
+ """Return link to instance's ajax console"""
+ return self._vmops.get_ajax_console(instance)
+
def attach_volume(self, instance_name, device_path, mountpoint):
"""Attach volume storage to VM instance"""
return self._volumeops.attach_volume(instance_name,
@@ -185,6 +195,12 @@ class XenAPIConnection(object):
"""Detach volume storage to VM instance"""
return self._volumeops.detach_volume(instance_name, mountpoint)
+ def get_console_pool_info(self, console_type):
+ xs_url = urlparse.urlparse(FLAGS.xenapi_connection_url)
+ return {'address': xs_url.netloc,
+ 'username': FLAGS.xenapi_connection_username,
+ 'password': FLAGS.xenapi_connection_password}
+
class XenAPISession(object):
"""The session to invoke XenAPI SDK calls"""
@@ -193,6 +209,7 @@ class XenAPISession(object):
self.XenAPI = self.get_imported_xenapi()
self._session = self._create_session(url)
self._session.login_with_password(user, pw)
+ self.loop = None
def get_imported_xenapi(self):
"""Stubout point. This can be replaced with a mock xenapi module."""
@@ -213,6 +230,14 @@ class XenAPISession(object):
f = f.__getattr__(m)
return tpool.execute(f, *args)
+ def call_xenapi_request(self, method, *args):
+ """Some interactions with dom0, such as interacting with xenstore's
+ param record, require using the xenapi_request method of the session
+ object. This wraps that call on a background thread.
+ """
+ f = self._session.xenapi_request
+ return tpool.execute(f, method, *args)
+
def async_call_plugin(self, plugin, fn, args):
"""Call Async.host.call_plugin on a background thread."""
return tpool.execute(self._unwrap_plugin_exceptions,
@@ -221,21 +246,26 @@ class XenAPISession(object):
def wait_for_task(self, id, task):
"""Return the result of the given task. The task is polled
- until it completes."""
-
+ until it completes. Not re-entrant."""
done = event.Event()
- loop = utils.LoopingCall(self._poll_task, id, task, done)
- loop.start(FLAGS.xenapi_task_poll_interval, now=True)
+ self.loop = utils.LoopingCall(self._poll_task, id, task, done)
+ self.loop.start(FLAGS.xenapi_task_poll_interval, now=True)
rv = done.wait()
- loop.stop()
+ self.loop.stop()
return rv
+ def _stop_loop(self):
+ """Stop polling for task to finish."""
+ #NOTE(sandy-walsh) Had to break this call out to support unit tests.
+ if self.loop:
+ self.loop.stop()
+
def _create_session(self, url):
"""Stubout point. This can be replaced with a mock session."""
return self.XenAPI.Session(url)
def _poll_task(self, id, task, done):
- """Poll the given XenAPI task, and fire the given Deferred if we
+ """Poll the given XenAPI task, and fire the given action if we
get a result."""
try:
name = self._session.xenapi.task.get_name_label(task)
@@ -248,7 +278,7 @@ class XenAPISession(object):
return
elif status == "success":
result = self._session.xenapi.task.get_result(task)
- logging.info(_("Task [%s] %s status: success %s") % (
+ LOG.info(_("Task [%s] %s status: success %s") % (
name,
task,
result))
@@ -256,7 +286,7 @@ class XenAPISession(object):
else:
error_info = self._session.xenapi.task.get_error_info(task)
action["error"] = str(error_info)
- logging.warn(_("Task [%s] %s status: %s %s") % (
+ LOG.warn(_("Task [%s] %s status: %s %s") % (
name,
task,
status,
@@ -264,15 +294,16 @@ class XenAPISession(object):
done.send_exception(self.XenAPI.Failure(error_info))
db.instance_action_create(context.get_admin_context(), action)
except self.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.warn(exc)
done.send_exception(*sys.exc_info())
+ self._stop_loop()
def _unwrap_plugin_exceptions(self, func, *args, **kwargs):
"""Parse exception details"""
try:
return func(*args, **kwargs)
except self.XenAPI.Failure, exc:
- logging.debug(_("Got exception: %s"), exc)
+ LOG.debug(_("Got exception: %s"), exc)
if (len(exc.details) == 4 and
exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and
exc.details[2] == 'Failure'):
@@ -285,12 +316,12 @@ class XenAPISession(object):
else:
raise
except xmlrpclib.ProtocolError, exc:
- logging.debug(_("Got exception: %s"), exc)
+ LOG.debug(_("Got exception: %s"), exc)
raise
def _parse_xmlrpc_value(val):
- """Parse the given value as if it were an XML-RPC value. This is
+ """Parse the given value as if it were an XML-RPC value. This is
sometimes used as the format for the task.result field."""
if not val:
return val
diff --git a/nova/volume/__init__.py b/nova/volume/__init__.py
index d6e944fc0..56ef9332e 100644
--- a/nova/volume/__init__.py
+++ b/nova/volume/__init__.py
@@ -16,16 +16,4 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""
-:mod:`nova.volume` -- Nova Block Storage
-=====================================================
-
-.. automodule:: nova.volume
- :platform: Unix
-.. moduleauthor:: Jesse Andrews <jesse@ansolabs.com>
-.. moduleauthor:: Devin Carlen <devin.carlen@gmail.com>
-.. moduleauthor:: Vishvananda Ishaya <vishvananda@yahoo.com>
-.. moduleauthor:: Joshua McKenty <joshua@cognition.ca>
-.. moduleauthor:: Manish Singh <yosh@gimp.org>
-.. moduleauthor:: Andy Smith <andy@anarkystic.com>
-"""
+from nova.volume.api import API
diff --git a/nova/volume/api.py b/nova/volume/api.py
new file mode 100644
index 000000000..ce4831cc3
--- /dev/null
+++ b/nova/volume/api.py
@@ -0,0 +1,103 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Handles all requests relating to volumes.
+"""
+
+import datetime
+
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import quota
+from nova import rpc
+from nova.db import base
+
+FLAGS = flags.FLAGS
+flags.DECLARE('storage_availability_zone', 'nova.volume.manager')
+
+LOG = logging.getLogger('nova.volume')
+
+
+class API(base.Base):
+ """API for interacting with the volume manager."""
+
+ def create(self, context, size, name, description):
+ if quota.allowed_volumes(context, 1, size) < 1:
+ LOG.warn(_("Quota exceeeded for %s, tried to create %sG volume"),
+ context.project_id, size)
+ raise quota.QuotaError(_("Volume quota exceeded. You cannot "
+ "create a volume of size %s") % size)
+
+ options = {
+ 'size': size,
+ 'user_id': context.user.id,
+ 'project_id': context.project_id,
+ 'availability_zone': FLAGS.storage_availability_zone,
+ 'status': "creating",
+ 'attach_status': "detached",
+ 'display_name': name,
+ 'display_description': description}
+
+ volume = self.db.volume_create(context, options)
+ rpc.cast(context,
+ FLAGS.scheduler_topic,
+ {"method": "create_volume",
+ "args": {"topic": FLAGS.volume_topic,
+ "volume_id": volume['id']}})
+ return volume
+
+ def delete(self, context, volume_id):
+ volume = self.get(context, volume_id)
+ if volume['status'] != "available":
+ raise exception.ApiError(_("Volume status must be available"))
+ now = datetime.datetime.utcnow()
+ self.db.volume_update(context, volume_id, {'status': 'deleting',
+ 'terminated_at': now})
+ host = volume['host']
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.volume_topic, host),
+ {"method": "delete_volume",
+ "args": {"volume_id": volume_id}})
+
+ def update(self, context, volume_id, fields):
+ self.db.volume_update(context, volume_id, fields)
+
+ def get(self, context, volume_id):
+ return self.db.volume_get(context, volume_id)
+
+ def get_all(self, context):
+ if context.user.is_admin():
+ return self.db.volume_get_all(context)
+ return self.db.volume_get_all_by_project(context, context.project_id)
+
+ def check_attach(self, context, volume_id):
+ volume = self.get(context, volume_id)
+ # TODO(vish): abstract status checking?
+ if volume['status'] != "available":
+ raise exception.ApiError(_("Volume status must be available"))
+ if volume['attach_status'] == "attached":
+ raise exception.ApiError(_("Volume is already attached"))
+
+ def check_detach(self, context, volume_id):
+ volume = self.get(context, volume_id)
+ # TODO(vish): abstract status checking?
+ if volume['status'] == "available":
+ raise exception.ApiError(_("Volume is already detached"))
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
index 8353b9712..6bc925f3e 100644
--- a/nova/volume/driver.py
+++ b/nova/volume/driver.py
@@ -20,15 +20,15 @@ Drivers for volumes.
"""
-import logging
-import os
import time
from nova import exception
from nova import flags
+from nova import log as logging
from nova import utils
+LOG = logging.getLogger("nova.volume.driver")
FLAGS = flags.FLAGS
flags.DEFINE_string('volume_group', 'nova-volumes',
'Name for the VG that will contain exported volumes')
@@ -73,13 +73,15 @@ class VolumeDriver(object):
tries = tries + 1
if tries >= FLAGS.num_shell_tries:
raise
- logging.exception(_("Recovering from a failed execute."
- "Try number %s"), tries)
+ LOG.exception(_("Recovering from a failed execute. "
+ "Try number %s"), tries)
time.sleep(tries ** 2)
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met"""
- if not os.path.isdir("/dev/%s" % FLAGS.volume_group):
+ out, err = self._execute("sudo vgs --noheadings -o name")
+ volume_groups = out.split()
+ if not FLAGS.volume_group in volume_groups:
raise exception.Error(_("volume group %s doesn't exist")
% FLAGS.volume_group)
@@ -205,7 +207,7 @@ class FakeAOEDriver(AOEDriver):
@staticmethod
def fake_execute(cmd, *_args, **_kwargs):
"""Execute that simply logs the command."""
- logging.debug(_("FAKE AOE: %s"), cmd)
+ LOG.debug(_("FAKE AOE: %s"), cmd)
return (None, None)
@@ -310,5 +312,5 @@ class FakeISCSIDriver(ISCSIDriver):
@staticmethod
def fake_execute(cmd, *_args, **_kwargs):
"""Execute that simply logs the command."""
- logging.debug(_("FAKE ISCSI: %s"), cmd)
+ LOG.debug(_("FAKE ISCSI: %s"), cmd)
return (None, None)
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
index 966334c50..6348539c5 100644
--- a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -42,17 +42,18 @@ intact.
"""
-import logging
import datetime
from nova import context
from nova import exception
from nova import flags
+from nova import log as logging
from nova import manager
from nova import utils
+LOG = logging.getLogger('nova.volume.manager')
FLAGS = flags.FLAGS
flags.DEFINE_string('storage_availability_zone',
'nova',
@@ -81,7 +82,7 @@ class VolumeManager(manager.Manager):
self.driver.check_for_setup_error()
ctxt = context.get_admin_context()
volumes = self.db.volume_get_all_by_host(ctxt, self.host)
- logging.debug(_("Re-exporting %s volumes"), len(volumes))
+ LOG.debug(_("Re-exporting %s volumes"), len(volumes))
for volume in volumes:
self.driver.ensure_export(ctxt, volume)
@@ -89,7 +90,7 @@ class VolumeManager(manager.Manager):
"""Creates and exports the volume."""
context = context.elevated()
volume_ref = self.db.volume_get(context, volume_id)
- logging.info(_("volume %s: creating"), volume_ref['name'])
+ LOG.info(_("volume %s: creating"), volume_ref['name'])
self.db.volume_update(context,
volume_id,
@@ -98,18 +99,18 @@ class VolumeManager(manager.Manager):
# before passing it to the driver.
volume_ref['host'] = self.host
- logging.debug(_("volume %s: creating lv of size %sG"),
- volume_ref['name'], volume_ref['size'])
+ LOG.debug(_("volume %s: creating lv of size %sG"), volume_ref['name'],
+ volume_ref['size'])
self.driver.create_volume(volume_ref)
- logging.debug(_("volume %s: creating export"), volume_ref['name'])
+ LOG.debug(_("volume %s: creating export"), volume_ref['name'])
self.driver.create_export(context, volume_ref)
now = datetime.datetime.utcnow()
self.db.volume_update(context,
volume_ref['id'], {'status': 'available',
'launched_at': now})
- logging.debug(_("volume %s: created successfully"), volume_ref['name'])
+ LOG.debug(_("volume %s: created successfully"), volume_ref['name'])
return volume_id
def delete_volume(self, context, volume_id):
@@ -120,12 +121,12 @@ class VolumeManager(manager.Manager):
raise exception.Error(_("Volume is still attached"))
if volume_ref['host'] != self.host:
raise exception.Error(_("Volume is not local to this node"))
- logging.debug(_("volume %s: removing export"), volume_ref['name'])
+ LOG.debug(_("volume %s: removing export"), volume_ref['name'])
self.driver.remove_export(context, volume_ref)
- logging.debug(_("volume %s: deleting"), volume_ref['name'])
+ LOG.debug(_("volume %s: deleting"), volume_ref['name'])
self.driver.delete_volume(volume_ref)
self.db.volume_destroy(context, volume_id)
- logging.debug(_("volume %s: deleted successfully"), volume_ref['name'])
+ LOG.debug(_("volume %s: deleted successfully"), volume_ref['name'])
return True
def setup_compute_volume(self, context, volume_id):
diff --git a/nova/wsgi.py b/nova/wsgi.py
index aa8f315d6..ae936d4c0 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -22,7 +22,6 @@ Utility methods for working with WSGI servers
"""
import json
-import logging
import sys
from xml.dom import minidom
@@ -35,18 +34,30 @@ import webob
import webob.dec
import webob.exc
+from nova import log as logging
-logging.getLogger("routes.middleware").addHandler(logging.StreamHandler())
+
+class WritableLogger(object):
+ """A thin wrapper that responds to `write` and logs."""
+
+ def __init__(self, logger, level=logging.DEBUG):
+ self.logger = logger
+ self.level = level
+
+ def write(self, msg):
+ self.logger.log(self.level, msg)
class Server(object):
"""Server class to manage multiple WSGI sockets and applications."""
def __init__(self, threads=1000):
+ logging.basicConfig()
self.pool = eventlet.GreenPool(threads)
def start(self, application, port, host='0.0.0.0', backlog=128):
"""Run a WSGI server with the given application."""
+ logging.audit(_("Starting %s on %s:%s"), sys.argv[0], host, port)
socket = eventlet.listen((host, port), backlog=backlog)
self.pool.spawn_n(self._run, application, socket)
@@ -59,7 +70,9 @@ class Server(object):
def _run(self, application, socket):
"""Start a WSGI server in a new green thread."""
- eventlet.wsgi.server(socket, application, custom_pool=self.pool)
+ logger = logging.getLogger('eventlet.wsgi.server')
+ eventlet.wsgi.server(socket, application, custom_pool=self.pool,
+ log=WritableLogger(logger))
class Application(object):
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py b/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py
index 2d323a016..8e7a829d5 100755
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py
@@ -45,6 +45,7 @@ class PluginError(Exception):
def __init__(self, *args):
Exception.__init__(self, *args)
+
class ArgumentError(PluginError):
"""Raised when required arguments are missing, argument values are invalid,
or incompatible arguments are given.
@@ -67,6 +68,7 @@ def ignore_failure(func, *args, **kwargs):
ARGUMENT_PATTERN = re.compile(r'^[a-zA-Z0-9_:\.\-,]+$')
+
def validate_exists(args, key, default=None):
"""Validates that a string argument to a RPC method call is given, and
matches the shell-safe regex, with an optional default value in case it
@@ -76,20 +78,24 @@ def validate_exists(args, key, default=None):
"""
if key in args:
if len(args[key]) == 0:
- raise ArgumentError('Argument %r value %r is too short.' % (key, args[key]))
+ raise ArgumentError('Argument %r value %r is too short.' %
+ (key, args[key]))
if not ARGUMENT_PATTERN.match(args[key]):
- raise ArgumentError('Argument %r value %r contains invalid characters.' % (key, args[key]))
+ raise ArgumentError('Argument %r value %r contains invalid '
+ 'characters.' % (key, args[key]))
if args[key][0] == '-':
- raise ArgumentError('Argument %r value %r starts with a hyphen.' % (key, args[key]))
+ raise ArgumentError('Argument %r value %r starts with a hyphen.'
+ % (key, args[key]))
return args[key]
elif default is not None:
return default
else:
raise ArgumentError('Argument %s is required.' % key)
+
def validate_bool(args, key, default=None):
- """Validates that a string argument to a RPC method call is a boolean string,
- with an optional default value in case it does not exist.
+ """Validates that a string argument to a RPC method call is a boolean
+ string, with an optional default value in case it does not exist.
Returns the python boolean value.
"""
@@ -99,7 +105,9 @@ def validate_bool(args, key, default=None):
elif value.lower() == 'false':
return False
else:
- raise ArgumentError("Argument %s may not take value %r. Valid values are ['true', 'false']." % (key, value))
+ raise ArgumentError("Argument %s may not take value %r. "
+ "Valid values are ['true', 'false']." % (key, value))
+
def exists(args, key):
"""Validates that a freeform string argument to a RPC method call is given.
@@ -110,6 +118,7 @@ def exists(args, key):
else:
raise ArgumentError('Argument %s is required.' % key)
+
def optional(args, key):
"""If the given key is in args, return the corresponding value, otherwise
return None"""
@@ -122,13 +131,14 @@ def get_this_host(session):
def get_domain_0(session):
this_host_ref = get_this_host(session)
- expr = 'field "is_control_domain" = "true" and field "resident_on" = "%s"' % this_host_ref
+ expr = 'field "is_control_domain" = "true" and field "resident_on" = "%s"'
+ expr = expr % this_host_ref
return session.xenapi.VM.get_all_records_where(expr).keys()[0]
def create_vdi(session, sr_ref, name_label, virtual_size, read_only):
vdi_ref = session.xenapi.VDI.create(
- { 'name_label': name_label,
+ {'name_label': name_label,
'name_description': '',
'SR': sr_ref,
'virtual_size': str(virtual_size),
@@ -138,7 +148,7 @@ def create_vdi(session, sr_ref, name_label, virtual_size, read_only):
'xenstore_data': {},
'other_config': {},
'sm_config': {},
- 'tags': [] })
+ 'tags': []})
logging.debug('Created VDI %s (%s, %s, %s) on %s.', vdi_ref, name_label,
virtual_size, read_only, sr_ref)
return vdi_ref
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py
new file mode 100755
index 000000000..695bf3448
--- /dev/null
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py
@@ -0,0 +1,180 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2010 Citrix Systems, Inc.
+# Copyright 2010 OpenStack LLC.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+#
+# XenAPI plugin for reading/writing information to xenstore
+#
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+import subprocess
+
+import XenAPIPlugin
+
+import pluginlib_nova as pluginlib
+pluginlib.configure_logging("xenstore")
+
+
+def jsonify(fnc):
+ def wrapper(*args, **kwargs):
+ return json.dumps(fnc(*args, **kwargs))
+ return wrapper
+
+
+@jsonify
+def read_record(self, arg_dict):
+ """Returns the value stored at the given path for the given dom_id.
+ These must be encoded as key/value pairs in arg_dict. You can
+    optionally include a key 'ignore_missing_path'; if this is present
+ and boolean True, attempting to read a non-existent path will return
+ the string 'None' instead of raising an exception.
+ """
+ cmd = "xenstore-read /local/domain/%(dom_id)s/%(path)s" % arg_dict
+ try:
+ return _run_command(cmd).rstrip("\n")
+ except pluginlib.PluginError, e:
+ if arg_dict.get("ignore_missing_path", False):
+ cmd = "xenstore-exists /local/domain/%(dom_id)s/%(path)s; echo $?"
+ cmd = cmd % arg_dict
+ ret = _run_command(cmd).strip()
+ # If the path exists, the cmd should return "0"
+ if ret != "0":
+ # No such path, so ignore the error and return the
+ # string 'None', since None can't be marshalled
+ # over RPC.
+ return "None"
+ # Either we shouldn't ignore path errors, or another
+ # error was hit. Re-raise.
+ raise
+
+
+@jsonify
+def write_record(self, arg_dict):
+ """Writes to xenstore at the specified path. If there is information
+ already stored in that location, it is overwritten. As in read_record,
+ the dom_id and path must be specified in the arg_dict; additionally,
+ you must specify a 'value' key, whose value must be a string. Typically,
+ you can json-ify more complex values and store the json output.
+ """
+ cmd = "xenstore-write /local/domain/%(dom_id)s/%(path)s '%(value)s'"
+ cmd = cmd % arg_dict
+ _run_command(cmd)
+ return arg_dict["value"]
+
+
+@jsonify
+def list_records(self, arg_dict):
+ """Returns all the stored data at or below the given path for the
+ given dom_id. The data is returned as a json-ified dict, with the
+ path as the key and the stored value as the value. If the path
+ doesn't exist, an empty dict is returned.
+ """
+ cmd = "xenstore-ls /local/domain/%(dom_id)s/%(path)s" % arg_dict
+ cmd = cmd.rstrip("/")
+ try:
+ recs = _run_command(cmd)
+ except pluginlib.PluginError, e:
+ if "No such file or directory" in "%s" % e:
+ # Path doesn't exist.
+ return {}
+ return str(e)
+ raise
+ base_path = arg_dict["path"]
+ paths = _paths_from_ls(recs)
+ ret = {}
+ for path in paths:
+ if base_path:
+ arg_dict["path"] = "%s/%s" % (base_path, path)
+ else:
+ arg_dict["path"] = path
+ rec = read_record(self, arg_dict)
+ try:
+ val = json.loads(rec)
+ except ValueError:
+ val = rec
+ ret[path] = val
+ return ret
+
+
+@jsonify
+def delete_record(self, arg_dict):
+ """Just like it sounds: it removes the record for the specified
+ VM and the specified path from xenstore.
+ """
+ cmd = "xenstore-rm /local/domain/%(dom_id)s/%(path)s" % arg_dict
+ return _run_command(cmd)
+
+
+def _paths_from_ls(recs):
+ """The xenstore-ls command returns a listing that isn't terribly
+ useful. This method cleans that up into a dict with each path
+ as the key, and the associated string as the value.
+ """
+ ret = {}
+ last_nm = ""
+ level = 0
+ path = []
+ ret = []
+ for ln in recs.splitlines():
+ nm, val = ln.rstrip().split(" = ")
+ barename = nm.lstrip()
+ this_level = len(nm) - len(barename)
+ if this_level == 0:
+ ret.append(barename)
+ level = 0
+ path = []
+ elif this_level == level:
+ # child of same parent
+ ret.append("%s/%s" % ("/".join(path), barename))
+ elif this_level > level:
+ path.append(last_nm)
+ ret.append("%s/%s" % ("/".join(path), barename))
+ level = this_level
+ elif this_level < level:
+ path = path[:this_level]
+ ret.append("%s/%s" % ("/".join(path), barename))
+ level = this_level
+ last_nm = barename
+ return ret
+
+
+def _run_command(cmd):
+ """Abstracts out the basics of issuing system commands. If the command
+ returns anything in stderr, a PluginError is raised with that information.
+ Otherwise, the output from stdout is returned.
+ """
+ pipe = subprocess.PIPE
+ proc = subprocess.Popen([cmd], shell=True, stdin=pipe, stdout=pipe,
+ stderr=pipe, close_fds=True)
+ proc.wait()
+ err = proc.stderr.read()
+ if err:
+ raise pluginlib.PluginError(err)
+ return proc.stdout.read()
+
+
+if __name__ == "__main__":
+ XenAPIPlugin.dispatch(
+ {"read_record": read_record,
+ "write_record": write_record,
+ "list_records": list_records,
+ "delete_record": delete_record})
diff --git a/setup.cfg b/setup.cfg
index 14dcb5c8e..9c0a331e3 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -8,3 +8,17 @@ tag_build =
tag_date = 0
tag_svn_revision = 0
+[compile_catalog]
+directory = locale
+domain = nova
+
+[update_catalog]
+domain = nova
+output_dir = locale
+input_file = locale/nova.pot
+
+[extract_messages]
+keywords = _ l_ lazy_gettext
+mapping_file = babel.cfg
+output_file = locale/nova.pot
+
diff --git a/setup.py b/setup.py
index 1abf4d9fe..3608ff805 100644
--- a/setup.py
+++ b/setup.py
@@ -24,6 +24,15 @@ from setuptools.command.sdist import sdist
from sphinx.setup_command import BuildDoc
from nova.utils import parse_mailmap, str_dict_replace
+from nova import version
+
+if os.path.isdir('.bzr'):
+ with open("nova/vcsversion.py", 'w') as version_file:
+ vcs_cmd = subprocess.Popen(["bzr", "version-info", "--python"],
+ stdout=subprocess.PIPE)
+ vcsversion = vcs_cmd.communicate()[0]
+ version_file.write(vcsversion)
+
class local_BuildDoc(BuildDoc):
def run(self):
@@ -48,14 +57,25 @@ class local_sdist(sdist):
changelog_file.write(str_dict_replace(changelog, mailmap))
sdist.run(self)
+nova_cmdclass= { 'sdist': local_sdist,
+ 'build_sphinx' : local_BuildDoc }
+
+try:
+ from babel.messages import frontend as babel
+ nova_cmdclass['compile_catalog'] = babel.compile_catalog
+ nova_cmdclass['extract_messages'] = babel.extract_messages
+ nova_cmdclass['init_catalog'] = babel.init_catalog
+ nova_cmdclass['update_catalog'] = babel.update_catalog
+except:
+ pass
+
setup(name='nova',
- version='2011.1',
+ version=version.canonical_version_string(),
description='cloud computing fabric controller',
author='OpenStack',
author_email='nova@lists.launchpad.net',
url='http://www.openstack.org/',
- cmdclass={ 'sdist': local_sdist,
- 'build_sphinx' : local_BuildDoc },
+ cmdclass=nova_cmdclass,
packages=find_packages(exclude=['bin', 'smoketests']),
include_package_data=True,
test_suite='nose.collector',
@@ -64,9 +84,11 @@ setup(name='nova',
'bin/nova-dhcpbridge',
'bin/nova-import-canonical-imagestore',
'bin/nova-instancemonitor',
+ 'bin/nova-logspool',
'bin/nova-manage',
'bin/nova-network',
'bin/nova-objectstore',
'bin/nova-scheduler',
+ 'bin/nova-spoolsentry',
'bin/nova-volume',
'tools/nova-debug'])
diff --git a/smoketests/admin_smoketests.py b/smoketests/admin_smoketests.py
index 50bb3fa2e..1ef1c1425 100644
--- a/smoketests/admin_smoketests.py
+++ b/smoketests/admin_smoketests.py
@@ -19,10 +19,17 @@
import os
import random
import sys
-import time
import unittest
import zipfile
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+ os.pardir,
+ os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+ sys.path.insert(0, possible_topdir)
+
from nova import adminclient
from smoketests import flags
from smoketests import base
diff --git a/smoketests/user_smoketests.py b/smoketests/user_smoketests.py
index d29e3aea3..578c0722e 100644
--- a/smoketests/user_smoketests.py
+++ b/smoketests/user_smoketests.py
@@ -24,6 +24,14 @@ import sys
import time
import unittest
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+ os.pardir,
+ os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+ sys.path.insert(0, possible_topdir)
+
from smoketests import flags
from smoketests import base
@@ -40,6 +48,7 @@ flags.DEFINE_string('bundle_image', 'openwrt-x86-ext2.image',
TEST_PREFIX = 'test%s' % int (random.random()*1000000)
TEST_BUCKET = '%s_bucket' % TEST_PREFIX
TEST_KEY = '%s_key' % TEST_PREFIX
+TEST_GROUP = '%s_group' % TEST_PREFIX
TEST_DATA = {}
@@ -137,7 +146,7 @@ class InstanceTests(UserSmokeTestCase):
self.data['instance_id'] = reservation.instances[0].id
def test_003_instance_runs_within_60_seconds(self):
- reservations = self.conn.get_all_instances([data['instance_id']])
+ reservations = self.conn.get_all_instances([self.data['instance_id']])
instance = reservations[0].instances[0]
# allow 60 seconds to exit pending with IP
for x in xrange(60):
@@ -207,7 +216,7 @@ class InstanceTests(UserSmokeTestCase):
def test_999_tearDown(self):
self.delete_key_pair(self.conn, TEST_KEY)
if self.data.has_key('instance_id'):
- self.conn.terminate_instances([data['instance_id']])
+ self.conn.terminate_instances([self.data['instance_id']])
class VolumeTests(UserSmokeTestCase):
@@ -319,8 +328,80 @@ class VolumeTests(UserSmokeTestCase):
self.conn.delete_key_pair(TEST_KEY)
+class SecurityGroupTests(UserSmokeTestCase):
+
+ def __public_instance_is_accessible(self):
+ id_url = "latest/meta-data/instance-id"
+ options = "-s --max-time 1"
+ command = "curl %s %s/%s" % (options, self.data['public_ip'], id_url)
+ instance_id = commands.getoutput(command).strip()
+ if not instance_id:
+ return False
+ if instance_id != self.data['instance_id']:
+ raise Exception("Wrong instance id")
+ return True
+
+ def test_001_can_create_security_group(self):
+ self.conn.create_security_group(TEST_GROUP, description='test')
+
+ groups = self.conn.get_all_security_groups()
+ self.assertTrue(TEST_GROUP in [group.name for group in groups])
+
+ def test_002_can_launch_instance_in_security_group(self):
+ self.create_key_pair(self.conn, TEST_KEY)
+ reservation = self.conn.run_instances(FLAGS.test_image,
+ key_name=TEST_KEY,
+ security_groups=[TEST_GROUP],
+ instance_type='m1.tiny')
+
+ self.data['instance_id'] = reservation.instances[0].id
+
+ def test_003_can_authorize_security_group_ingress(self):
+ self.assertTrue(self.conn.authorize_security_group(TEST_GROUP,
+ ip_protocol='tcp',
+ from_port=80,
+ to_port=80))
+
+ def test_004_can_access_instance_over_public_ip(self):
+ result = self.conn.allocate_address()
+ self.assertTrue(hasattr(result, 'public_ip'))
+ self.data['public_ip'] = result.public_ip
+
+ result = self.conn.associate_address(self.data['instance_id'],
+ self.data['public_ip'])
+ start_time = time.time()
+ while not self.__public_instance_is_accessible():
+ # 1 minute to launch
+ if time.time() - start_time > 60:
+ raise Exception("Timeout")
+ time.sleep(1)
+
+ def test_005_can_revoke_security_group_ingress(self):
+ self.assertTrue(self.conn.revoke_security_group(TEST_GROUP,
+ ip_protocol='tcp',
+ from_port=80,
+ to_port=80))
+ start_time = time.time()
+ while self.__public_instance_is_accessible():
+ # 1 minute to teardown
+ if time.time() - start_time > 60:
+ raise Exception("Timeout")
+ time.sleep(1)
+
+
+ def test_999_tearDown(self):
+ self.conn.delete_key_pair(TEST_KEY)
+ self.conn.delete_security_group(TEST_GROUP)
+ groups = self.conn.get_all_security_groups()
+ self.assertFalse(TEST_GROUP in [group.name for group in groups])
+ self.conn.terminate_instances([self.data['instance_id']])
+ self.assertTrue(self.conn.release_address(self.data['public_ip']))
+
+
if __name__ == "__main__":
suites = {'image': unittest.makeSuite(ImageTests),
'instance': unittest.makeSuite(InstanceTests),
- 'volume': unittest.makeSuite(VolumeTests)}
+ 'security_group': unittest.makeSuite(SecurityGroupTests),
+ 'volume': unittest.makeSuite(VolumeTests)
+ }
sys.exit(base.run_tests(suites))
diff --git a/tools/ajaxterm/README.txt b/tools/ajaxterm/README.txt
new file mode 100644
index 000000000..4b0ae99af
--- /dev/null
+++ b/tools/ajaxterm/README.txt
@@ -0,0 +1,120 @@
+= [http://antony.lesuisse.org/qweb/trac/wiki/AjaxTerm Ajaxterm] =
+
+Ajaxterm is a web based terminal. It was totally inspired and works almost
+exactly like http://anyterm.org/ except it's much easier to install (see
+comparison with anyterm below).
+
+Ajaxterm written in python (and some AJAX javascript for client side) and depends only on python2.3 or better.[[BR]]
+Ajaxterm is '''very simple to install''' on Linux, MacOS X, FreeBSD, Solaris, cygwin and any Unix that runs python2.3.[[BR]]
+Ajaxterm was written by Antony Lesuisse (email: al AT udev.org), License Public Domain.
+
+Use the [/qweb/forum/viewforum.php?id=2 Forum], if you have any question or remark.
+
+== News ==
+
+ * 2006-10-29: v0.10 allow space in login, cgi launch fix, redhat init
+ * 2006-07-12: v0.9 change uid, daemon fix (Daniel Fischer)
+ * 2006-07-04: v0.8 add login support to ssh (Sven Geggus), change max width to 256
+ * 2006-05-31: v0.7 minor fixes, daemon option
+ * 2006-05-23: v0.6 Applied debian and gentoo patches, renamed to Ajaxterm, default port 8022
+
+== Download and Install ==
+
+ * Release: [/qweb/files/Ajaxterm-0.10.tar.gz Ajaxterm-0.10.tar.gz]
+ * Browse src: [/qweb/trac/browser/trunk/ajaxterm/ ajaxterm/]
+
+To install Ajaxterm issue the following commands:
+{{{
+wget http://antony.lesuisse.org/qweb/files/Ajaxterm-0.10.tar.gz
+tar zxvf Ajaxterm-0.10.tar.gz
+cd Ajaxterm-0.10
+./ajaxterm.py
+}}}
+Then point your browser to this URL : http://localhost:8022/
+
+== Screenshot ==
+
+{{{
+#!html
+<center><img src="/qweb/trac/attachment/wiki/AjaxTerm/scr.png?format=raw" alt="ajaxterm screenshot" style=""/></center>
+}}}
+
+== Documentation and Caveats ==
+
+ * Ajaxterm only supports latin1; if you use Ubuntu or any LANG==en_US.UTF-8 distribution don't forget to "unset LANG".
+
+ * If run as root ajaxterm will run /bin/login, otherwise it will run ssh
+   localhost. To use another command use the -c option.
+
+ * By default Ajaxterm only listens at 127.0.0.1:8022. For remote access, it is
+ strongly recommended to use '''https SSL/TLS''', and that is simple to
+ configure if you use the apache web server using mod_proxy.[[BR]][[BR]]
+ Using ssl will also speed up ajaxterm (probably because of keepalive).[[BR]][[BR]]
+   Here is a configuration example:
+
+{{{
+ Listen 443
+ NameVirtualHost *:443
+
+ <VirtualHost *:443>
+ ServerName localhost
+ SSLEngine On
+ SSLCertificateKeyFile ssl/apache.pem
+ SSLCertificateFile ssl/apache.pem
+
+ ProxyRequests Off
+ <Proxy *>
+ Order deny,allow
+ Allow from all
+ </Proxy>
+ ProxyPass /ajaxterm/ http://localhost:8022/
+ ProxyPassReverse /ajaxterm/ http://localhost:8022/
+ </VirtualHost>
+}}}
+
+ * Using GET HTTP request seems to speed up ajaxterm, just click on GET in the
+   interface, but be warned that your keystrokes might be logged (by apache or
+ any proxy). I usually enable it after the login.
+
+ * Ajaxterm commandline usage:
+
+{{{
+usage: ajaxterm.py [options]
+
+options:
+ -h, --help show this help message and exit
+ -pPORT, --port=PORT Set the TCP port (default: 8022)
+ -cCMD, --command=CMD set the command (default: /bin/login or ssh localhost)
+ -l, --log log requests to stderr (default: quiet mode)
+ -d, --daemon run as daemon in the background
+ -PPIDFILE, --pidfile=PIDFILE
+ set the pidfile (default: /var/run/ajaxterm.pid)
+ -iINDEX_FILE, --index=INDEX_FILE
+ default index file (default: ajaxterm.html)
+ -uUID, --uid=UID Set the daemon's user id
+}}}
+
+ * Ajaxterm was first written as a demo for qweb (my web framework), but
+ actually doesn't use many features of qweb.
+
+ * Compared to anyterm:
+ * There are no partial updates, ajaxterm updates either all the screen or
+     nothing. That makes the code simpler and I also think it's faster. HTTP
+ replies are always gzencoded. When used in 80x25 mode, almost all of
+ them are below the 1500 bytes (size of an ethernet frame) and we just
+ replace the screen with the reply (no javascript string handling).
+ * Ajaxterm polls the server for updates with an exponentially growing
+     timeout when the screen hasn't changed. The timeout is also reset as
+ soon as a key is pressed. Anyterm blocks on a pending request and use a
+     parallel connection for keypresses. The anyterm approach is better
+     when there aren't any keypresses.
+
+ * Ajaxterm files are released in the Public Domain, (except [http://sarissa.sourceforge.net/doc/ sarissa*] which are LGPL).
+
+== TODO ==
+
+ * insert mode ESC [ 4 h
+ * change size x,y from gui (sending signal)
+ * vt102 graphic codepage
+ * use innerHTML or prototype instead of sarissa
+
diff --git a/tools/ajaxterm/ajaxterm.1 b/tools/ajaxterm/ajaxterm.1
new file mode 100644
index 000000000..46f2acb33
--- /dev/null
+++ b/tools/ajaxterm/ajaxterm.1
@@ -0,0 +1,35 @@
+.TH ajaxterm "1" "May 2006" "ajaxterm 0.5" "User commands"
+.SH NAME
+ajaxterm \- Web based terminal written in python
+
+.SH DESCRIPTION
+\fBajaxterm\fR is a web based terminal written in python and some AJAX
+javascript for client side.
+It can use almost any web browser and even works through firewalls.
+
+.SH USAGE
+\fBajaxterm.py\fR [options]
+
+.SH OPTIONS
+A summary of the options supported by \fBajaxterm\fR is included below.
+ \fB-h, --help\fR show this help message and exit
+ \fB-pPORT, --port=PORT\fR Set the TCP port (default: 8022)
+ \fB-cCMD, --command=CMD\fR set the command (default: /bin/login or ssh localhost)
+ \fB-l, --log\fR log requests to stderr (default: quiet mode)
+
+.SH AUTHOR
+Antony Lesuisse <al@udev.org>
+
+This manual page was written for the Debian system by
+Julien Valroff <julien@kirya.net> (but may be used by others).
+
+.SH "REPORTING BUGS"
+Report any bugs to the author: Antony Lesuisse <al@udev.org>
+
+.SH COPYRIGHT
+Copyright Antony Lesuisse <al@udev.org>
+
+.SH SEE ALSO
+- \fBajaxterm\fR wiki page: http://antony.lesuisse.org/qweb/trac/wiki/AjaxTerm
+.br
+- \fBajaxterm\fR forum: http://antony.lesuisse.org/qweb/forum/viewforum.php?id=2
diff --git a/tools/ajaxterm/ajaxterm.css b/tools/ajaxterm/ajaxterm.css
new file mode 100644
index 000000000..b9a5f8771
--- /dev/null
+++ b/tools/ajaxterm/ajaxterm.css
@@ -0,0 +1,64 @@
+pre.stat {
+ margin: 0px;
+ padding: 4px;
+ display: block;
+ font-family: monospace;
+ white-space: pre;
+ background-color: black;
+ border-top: 1px solid black;
+ color: white;
+}
+pre.stat span {
+ padding: 0px;
+}
+pre.stat .on {
+ background-color: #080;
+ font-weight: bold;
+ color: white;
+ cursor: pointer;
+}
+pre.stat .off {
+ background-color: #888;
+ font-weight: bold;
+ color: white;
+ cursor: pointer;
+}
+pre.term {
+ margin: 0px;
+ padding: 4px;
+ display: block;
+ font-family: monospace;
+ white-space: pre;
+ background-color: black;
+ border-top: 1px solid white;
+ color: #eee;
+}
+pre.term span.f0 { color: #000; }
+pre.term span.f1 { color: #b00; }
+pre.term span.f2 { color: #0b0; }
+pre.term span.f3 { color: #bb0; }
+pre.term span.f4 { color: #00b; }
+pre.term span.f5 { color: #b0b; }
+pre.term span.f6 { color: #0bb; }
+pre.term span.f7 { color: #bbb; }
+pre.term span.f8 { color: #666; }
+pre.term span.f9 { color: #f00; }
+pre.term span.f10 { color: #0f0; }
+pre.term span.f11 { color: #ff0; }
+pre.term span.f12 { color: #00f; }
+pre.term span.f13 { color: #f0f; }
+pre.term span.f14 { color: #0ff; }
+pre.term span.f15 { color: #fff; }
+pre.term span.b0 { background-color: #000; }
+pre.term span.b1 { background-color: #b00; }
+pre.term span.b2 { background-color: #0b0; }
+pre.term span.b3 { background-color: #bb0; }
+pre.term span.b4 { background-color: #00b; }
+pre.term span.b5 { background-color: #b0b; }
+pre.term span.b6 { background-color: #0bb; }
+pre.term span.b7 { background-color: #bbb; }
+
+body { background-color: #888; }
+#term {
+ float: left;
+}
diff --git a/tools/ajaxterm/ajaxterm.html b/tools/ajaxterm/ajaxterm.html
new file mode 100644
index 000000000..7fdef5e94
--- /dev/null
+++ b/tools/ajaxterm/ajaxterm.html
@@ -0,0 +1,25 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html>
+<head>
+ <title>Ajaxterm</title>
+ <meta http-equiv="content-type" content="text/html; charset=UTF-8"/>
+ <link rel="stylesheet" type="text/css" href="ajaxterm.css"/>
+ <script type="text/javascript" src="sarissa.js"></script>
+ <script type="text/javascript" src="sarissa_dhtml.js"></script>
+ <script type="text/javascript" src="ajaxterm.js"></script>
+ <script type="text/javascript">
+ /*
+ ajaxterm.py creates a random session_id to demultiplex multiple connections,
+ and to add a layer of security - in its shipping form, ajaxterm accepted any session_id
+ and was susceptible to an easy exploit
+ */
+ SESSION_ID = '$session_id';
+ window.onload=function() {
+ t=ajaxterm.Terminal("term",80,25);
+ };
+ </script>
+</head>
+<body>
+<div id="term"></div>
+</body>
+</html>
diff --git a/tools/ajaxterm/ajaxterm.js b/tools/ajaxterm/ajaxterm.js
new file mode 100644
index 000000000..32b401930
--- /dev/null
+++ b/tools/ajaxterm/ajaxterm.js
@@ -0,0 +1,279 @@
+ajaxterm={};
+ajaxterm.Terminal_ctor=function(id,width,height) {
+ var ie=0;
+ if(window.ActiveXObject)
+ ie=1;
+ var sid=""+SESSION_ID;
+ var query0="s="+sid+"&w="+width+"&h="+height;
+ var query1=query0+"&c=1&k=";
+ var buf="";
+ var timeout;
+ var error_timeout;
+ var keybuf=[];
+ var sending=0;
+ var rmax=1;
+
+ var div=document.getElementById(id);
+ var dstat=document.createElement('pre');
+ var sled=document.createElement('span');
+ var opt_get=document.createElement('a');
+ var opt_color=document.createElement('a');
+ var opt_paste=document.createElement('a');
+ var sdebug=document.createElement('span');
+ var dterm=document.createElement('div');
+
+ function debug(s) {
+ sdebug.innerHTML=s;
+ }
+ function error() {
+ sled.className='off';
+ debug("Connection lost timeout ts:"+((new Date).getTime()));
+ }
+ function opt_add(opt,name) {
+ opt.className='off';
+ opt.innerHTML=' '+name+' ';
+ dstat.appendChild(opt);
+ dstat.appendChild(document.createTextNode(' '));
+ }
+ function do_get(event) {
+ opt_get.className=(opt_get.className=='off')?'on':'off';
+ debug('GET '+opt_get.className);
+ }
+ function do_color(event) {
+ var o=opt_color.className=(opt_color.className=='off')?'on':'off';
+ if(o=='on')
+ query1=query0+"&c=1&k=";
+ else
+ query1=query0+"&k=";
+ debug('Color '+opt_color.className);
+ }
+ function mozilla_clipboard() {
+ // mozilla sucks
+ try {
+ netscape.security.PrivilegeManager.enablePrivilege("UniversalXPConnect");
+ } catch (err) {
+ debug('Access denied, <a href="http://kb.mozillazine.org/Granting_JavaScript_access_to_the_clipboard" target="_blank">more info</a>');
+ return undefined;
+ }
+ var clip = Components.classes["@mozilla.org/widget/clipboard;1"].createInstance(Components.interfaces.nsIClipboard);
+ var trans = Components.classes["@mozilla.org/widget/transferable;1"].createInstance(Components.interfaces.nsITransferable);
+ if (!clip || !trans) {
+ return undefined;
+ }
+ trans.addDataFlavor("text/unicode");
+ clip.getData(trans,clip.kGlobalClipboard);
+ var str=new Object();
+ var strLength=new Object();
+ try {
+ trans.getTransferData("text/unicode",str,strLength);
+ } catch(err) {
+ return "";
+ }
+ if (str) {
+ str=str.value.QueryInterface(Components.interfaces.nsISupportsString);
+ }
+ if (str) {
+ return str.data.substring(0,strLength.value / 2);
+ } else {
+ return "";
+ }
+ }
+ function do_paste(event) {
+ var p=undefined;
+ if (window.clipboardData) {
+ p=window.clipboardData.getData("Text");
+ } else if(window.netscape) {
+ p=mozilla_clipboard();
+ }
+ if (p) {
+ debug('Pasted');
+ queue(encodeURIComponent(p));
+ } else {
+ }
+ }
+ function update() {
+// debug("ts: "+((new Date).getTime())+" rmax:"+rmax);
+ if(sending==0) {
+ sending=1;
+ sled.className='on';
+ var r=new XMLHttpRequest();
+ var send="";
+ while(keybuf.length>0) {
+ send+=keybuf.pop();
+ }
+ var query=query1+send;
+ if(opt_get.className=='on') {
+ r.open("GET","u?"+query,true);
+ if(ie) {
+ r.setRequestHeader("If-Modified-Since", "Sat, 1 Jan 2000 00:00:00 GMT");
+ }
+ } else {
+ r.open("POST","u",true);
+ }
+ r.setRequestHeader('Content-Type','application/x-www-form-urlencoded');
+ r.onreadystatechange = function () {
+// debug("xhr:"+((new Date).getTime())+" state:"+r.readyState+" status:"+r.status+" statusText:"+r.statusText);
+ if (r.readyState==4) {
+ if(r.status==200) {
+ window.clearTimeout(error_timeout);
+ de=r.responseXML.documentElement;
+ if(de.tagName=="pre") {
+ if(ie) {
+ Sarissa.updateContentFromNode(de, dterm);
+ } else {
+ Sarissa.updateContentFromNode(de, dterm);
+// old=div.firstChild;
+// div.replaceChild(de,old);
+ }
+ rmax=100;
+ } else {
+ rmax*=2;
+ if(rmax>2000)
+ rmax=2000;
+ }
+ sending=0;
+ sled.className='off';
+ timeout=window.setTimeout(update,rmax);
+ } else {
+ debug("Connection error status:"+r.status);
+ }
+ }
+ }
+ error_timeout=window.setTimeout(error,5000);
+ if(opt_get.className=='on') {
+ r.send(null);
+ } else {
+ r.send(query);
+ }
+ }
+ }
+ function queue(s) {
+ keybuf.unshift(s);
+ if(sending==0) {
+ window.clearTimeout(timeout);
+ timeout=window.setTimeout(update,1);
+ }
+ }
+ function keypress(ev) {
+ if (!ev) var ev=window.event;
+// s="kp keyCode="+ev.keyCode+" which="+ev.which+" shiftKey="+ev.shiftKey+" ctrlKey="+ev.ctrlKey+" altKey="+ev.altKey;
+// debug(s);
+// return false;
+// else { if (!ev.ctrlKey || ev.keyCode==17) { return; }
+ var kc;
+ var k="";
+ if (ev.keyCode)
+ kc=ev.keyCode;
+ if (ev.which)
+ kc=ev.which;
+ if (ev.altKey) {
+ if (kc>=65 && kc<=90)
+ kc+=32;
+ if (kc>=97 && kc<=122) {
+ k=String.fromCharCode(27)+String.fromCharCode(kc);
+ }
+ } else if (ev.ctrlKey) {
+ if (kc>=65 && kc<=90) k=String.fromCharCode(kc-64); // Ctrl-A..Z
+ else if (kc>=97 && kc<=122) k=String.fromCharCode(kc-96); // Ctrl-A..Z
+ else if (kc==54) k=String.fromCharCode(30); // Ctrl-^
+ else if (kc==109) k=String.fromCharCode(31); // Ctrl-_
+ else if (kc==219) k=String.fromCharCode(27); // Ctrl-[
+ else if (kc==220) k=String.fromCharCode(28); // Ctrl-\
+ else if (kc==221) k=String.fromCharCode(29); // Ctrl-]
+ else if (kc==219) k=String.fromCharCode(29); // Ctrl-]
+ else if (kc==219) k=String.fromCharCode(0); // Ctrl-@
+ } else if (ev.which==0) {
+ if (kc==9) k=String.fromCharCode(9); // Tab
+ else if (kc==8) k=String.fromCharCode(127); // Backspace
+ else if (kc==27) k=String.fromCharCode(27); // Escape
+ else {
+ if (kc==33) k="[5~"; // PgUp
+ else if (kc==34) k="[6~"; // PgDn
+ else if (kc==35) k="[4~"; // End
+ else if (kc==36) k="[1~"; // Home
+ else if (kc==37) k="[D"; // Left
+ else if (kc==38) k="[A"; // Up
+ else if (kc==39) k="[C"; // Right
+ else if (kc==40) k="[B"; // Down
+ else if (kc==45) k="[2~"; // Ins
+ else if (kc==46) k="[3~"; // Del
+ else if (kc==112) k="[[A"; // F1
+ else if (kc==113) k="[[B"; // F2
+ else if (kc==114) k="[[C"; // F3
+ else if (kc==115) k="[[D"; // F4
+ else if (kc==116) k="[[E"; // F5
+ else if (kc==117) k="[17~"; // F6
+ else if (kc==118) k="[18~"; // F7
+ else if (kc==119) k="[19~"; // F8
+ else if (kc==120) k="[20~"; // F9
+ else if (kc==121) k="[21~"; // F10
+ else if (kc==122) k="[23~"; // F11
+ else if (kc==123) k="[24~"; // F12
+ if (k.length) {
+ k=String.fromCharCode(27)+k;
+ }
+ }
+ } else {
+ if (kc==8)
+ k=String.fromCharCode(127); // Backspace
+ else
+ k=String.fromCharCode(kc);
+ }
+ if(k.length) {
+// queue(encodeURIComponent(k));
+ if(k=="+") {
+ queue("%2B");
+ } else {
+ queue(escape(k));
+ }
+ }
+ ev.cancelBubble=true;
+ if (ev.stopPropagation) ev.stopPropagation();
+ if (ev.preventDefault) ev.preventDefault();
+ return false;
+ }
+ function keydown(ev) {
+ if (!ev) var ev=window.event;
+ if (ie) {
+// s="kd keyCode="+ev.keyCode+" which="+ev.which+" shiftKey="+ev.shiftKey+" ctrlKey="+ev.ctrlKey+" altKey="+ev.altKey;
+// debug(s);
+ o={9:1,8:1,27:1,33:1,34:1,35:1,36:1,37:1,38:1,39:1,40:1,45:1,46:1,112:1,
+ 113:1,114:1,115:1,116:1,117:1,118:1,119:1,120:1,121:1,122:1,123:1};
+ if (o[ev.keyCode] || ev.ctrlKey || ev.altKey) {
+ ev.which=0;
+ return keypress(ev);
+ }
+ }
+ }
+ function init() {
+ sled.appendChild(document.createTextNode('\xb7'));
+ sled.className='off';
+ dstat.appendChild(sled);
+ dstat.appendChild(document.createTextNode(' '));
+ opt_add(opt_color,'Colors');
+ opt_color.className='on';
+ opt_add(opt_get,'GET');
+ opt_add(opt_paste,'Paste');
+ dstat.appendChild(sdebug);
+ dstat.className='stat';
+ div.appendChild(dstat);
+ div.appendChild(dterm);
+ if(opt_color.addEventListener) {
+ opt_get.addEventListener('click',do_get,true);
+ opt_color.addEventListener('click',do_color,true);
+ opt_paste.addEventListener('click',do_paste,true);
+ } else {
+ opt_get.attachEvent("onclick", do_get);
+ opt_color.attachEvent("onclick", do_color);
+ opt_paste.attachEvent("onclick", do_paste);
+ }
+ document.onkeypress=keypress;
+ document.onkeydown=keydown;
+ timeout=window.setTimeout(update,100);
+ }
+ init();
+}
+ajaxterm.Terminal=function(id,width,height) {
+ return new this.Terminal_ctor(id,width,height);
+}
+
diff --git a/tools/ajaxterm/ajaxterm.py b/tools/ajaxterm/ajaxterm.py
new file mode 100755
index 000000000..bf27b264a
--- /dev/null
+++ b/tools/ajaxterm/ajaxterm.py
@@ -0,0 +1,586 @@
+#!/usr/bin/env python
+
+""" Ajaxterm """
+
+import array,cgi,fcntl,glob,mimetypes,optparse,os,pty,random,re,signal,select,sys,threading,time,termios,struct,pwd
+
+os.chdir(os.path.normpath(os.path.dirname(__file__)))
+# Optional: Add QWeb in sys path
+sys.path[0:0]=glob.glob('../../python')
+
+import qweb
+import string, subprocess, uuid
+
+global g_server
+TIMEOUT=300
+
+class Terminal:
+ def __init__(self,width=80,height=24):
+ self.width=width
+ self.height=height
+ self.init()
+ self.reset()
+ def init(self):
+ self.esc_seq={
+ "\x00": None,
+ "\x05": self.esc_da,
+ "\x07": None,
+ "\x08": self.esc_0x08,
+ "\x09": self.esc_0x09,
+ "\x0a": self.esc_0x0a,
+ "\x0b": self.esc_0x0a,
+ "\x0c": self.esc_0x0a,
+ "\x0d": self.esc_0x0d,
+ "\x0e": None,
+ "\x0f": None,
+ "\x1b#8": None,
+ "\x1b=": None,
+ "\x1b>": None,
+ "\x1b(0": None,
+ "\x1b(A": None,
+ "\x1b(B": None,
+ "\x1b[c": self.esc_da,
+ "\x1b[0c": self.esc_da,
+ "\x1b]R": None,
+ "\x1b7": self.esc_save,
+ "\x1b8": self.esc_restore,
+ "\x1bD": None,
+ "\x1bE": None,
+ "\x1bH": None,
+ "\x1bM": self.esc_ri,
+ "\x1bN": None,
+ "\x1bO": None,
+ "\x1bZ": self.esc_da,
+ "\x1ba": None,
+ "\x1bc": self.reset,
+ "\x1bn": None,
+ "\x1bo": None,
+ }
+ for k,v in self.esc_seq.items():
+ if v==None:
+ self.esc_seq[k]=self.esc_ignore
+ # regex
+ d={
+ r'\[\??([0-9;]*)([@ABCDEFGHJKLMPXacdefghlmnqrstu`])' : self.csi_dispatch,
+ r'\]([^\x07]+)\x07' : self.esc_ignore,
+ }
+ self.esc_re=[]
+ for k,v in d.items():
+ self.esc_re.append((re.compile('\x1b'+k),v))
+ # define csi sequences
+ self.csi_seq={
+ '@': (self.csi_at,[1]),
+ '`': (self.csi_G,[1]),
+ 'J': (self.csi_J,[0]),
+ 'K': (self.csi_K,[0]),
+ }
+ for i in [i[4] for i in dir(self) if i.startswith('csi_') and len(i)==5]:
+ if not self.csi_seq.has_key(i):
+ self.csi_seq[i]=(getattr(self,'csi_'+i),[1])
+ # Init 0-256 to latin1 and html translation table
+ self.trl1=""
+ for i in range(256):
+ if i<32:
+ self.trl1+=" "
+ elif i<127 or i>160:
+ self.trl1+=chr(i)
+ else:
+ self.trl1+="?"
+ self.trhtml=""
+ for i in range(256):
+ if i==0x0a or (i>32 and i<127) or i>160:
+ self.trhtml+=chr(i)
+ elif i<=32:
+ self.trhtml+="\xa0"
+ else:
+ self.trhtml+="?"
+ def reset(self,s=""):
+ self.scr=array.array('i',[0x000700]*(self.width*self.height))
+ self.st=0
+ self.sb=self.height-1
+ self.cx_bak=self.cx=0
+ self.cy_bak=self.cy=0
+ self.cl=0
+ self.sgr=0x000700
+ self.buf=""
+ self.outbuf=""
+ self.last_html=""
+ def peek(self,y1,x1,y2,x2):
+ return self.scr[self.width*y1+x1:self.width*y2+x2]
+ def poke(self,y,x,s):
+ pos=self.width*y+x
+ self.scr[pos:pos+len(s)]=s
+ def zero(self,y1,x1,y2,x2):
+ w=self.width*(y2-y1)+x2-x1+1
+ z=array.array('i',[0x000700]*w)
+ self.scr[self.width*y1+x1:self.width*y2+x2+1]=z
+ def scroll_up(self,y1,y2):
+ self.poke(y1,0,self.peek(y1+1,0,y2,self.width))
+ self.zero(y2,0,y2,self.width-1)
+ def scroll_down(self,y1,y2):
+ self.poke(y1+1,0,self.peek(y1,0,y2-1,self.width))
+ self.zero(y1,0,y1,self.width-1)
+ def scroll_right(self,y,x):
+ self.poke(y,x+1,self.peek(y,x,y,self.width))
+ self.zero(y,x,y,x)
+ def cursor_down(self):
+ if self.cy>=self.st and self.cy<=self.sb:
+ self.cl=0
+ q,r=divmod(self.cy+1,self.sb+1)
+ if q:
+ self.scroll_up(self.st,self.sb)
+ self.cy=self.sb
+ else:
+ self.cy=r
+ def cursor_right(self):
+ q,r=divmod(self.cx+1,self.width)
+ if q:
+ self.cl=1
+ else:
+ self.cx=r
+ def echo(self,c):
+ if self.cl:
+ self.cursor_down()
+ self.cx=0
+ self.scr[(self.cy*self.width)+self.cx]=self.sgr|ord(c)
+ self.cursor_right()
+ def esc_0x08(self,s):
+ self.cx=max(0,self.cx-1)
+ def esc_0x09(self,s):
+ x=self.cx+8
+ q,r=divmod(x,8)
+ self.cx=(q*8)%self.width
+ def esc_0x0a(self,s):
+ self.cursor_down()
+ def esc_0x0d(self,s):
+ self.cl=0
+ self.cx=0
+ def esc_save(self,s):
+ self.cx_bak=self.cx
+ self.cy_bak=self.cy
+ def esc_restore(self,s):
+ self.cx=self.cx_bak
+ self.cy=self.cy_bak
+ self.cl=0
+ def esc_da(self,s):
+ self.outbuf="\x1b[?6c"
+ def esc_ri(self,s):
+ self.cy=max(self.st,self.cy-1)
+ if self.cy==self.st:
+ self.scroll_down(self.st,self.sb)
+ def esc_ignore(self,*s):
+ pass
+# print "term:ignore: %s"%repr(s)
+ def csi_dispatch(self,seq,mo):
+ # CSI sequences
+ s=mo.group(1)
+ c=mo.group(2)
+ f=self.csi_seq.get(c,None)
+ if f:
+ try:
+ l=[min(int(i),1024) for i in s.split(';') if len(i)<4]
+ except ValueError:
+ l=[]
+ if len(l)==0:
+ l=f[1]
+ f[0](l)
+# else:
+# print 'csi ignore',c,l
+ def csi_at(self,l):
+ for i in range(l[0]):
+ self.scroll_right(self.cy,self.cx)
+ def csi_A(self,l):
+ self.cy=max(self.st,self.cy-l[0])
+ def csi_B(self,l):
+ self.cy=min(self.sb,self.cy+l[0])
+ def csi_C(self,l):
+ self.cx=min(self.width-1,self.cx+l[0])
+ self.cl=0
+ def csi_D(self,l):
+ self.cx=max(0,self.cx-l[0])
+ self.cl=0
+ def csi_E(self,l):
+ self.csi_B(l)
+ self.cx=0
+ self.cl=0
+ def csi_F(self,l):
+ self.csi_A(l)
+ self.cx=0
+ self.cl=0
+ def csi_G(self,l):
+ self.cx=min(self.width,l[0])-1
+ def csi_H(self,l):
+ if len(l)<2: l=[1,1]
+ self.cx=min(self.width,l[1])-1
+ self.cy=min(self.height,l[0])-1
+ self.cl=0
+ def csi_J(self,l):
+ if l[0]==0:
+ self.zero(self.cy,self.cx,self.height-1,self.width-1)
+ elif l[0]==1:
+ self.zero(0,0,self.cy,self.cx)
+ elif l[0]==2:
+ self.zero(0,0,self.height-1,self.width-1)
+ def csi_K(self,l):
+ if l[0]==0:
+ self.zero(self.cy,self.cx,self.cy,self.width-1)
+ elif l[0]==1:
+ self.zero(self.cy,0,self.cy,self.cx)
+ elif l[0]==2:
+ self.zero(self.cy,0,self.cy,self.width-1)
+ def csi_L(self,l):
+ for i in range(l[0]):
+ if self.cy<self.sb:
+ self.scroll_down(self.cy,self.sb)
+ def csi_M(self,l):
+ if self.cy>=self.st and self.cy<=self.sb:
+ for i in range(l[0]):
+ self.scroll_up(self.cy,self.sb)
+ def csi_P(self,l):
+ w,cx,cy=self.width,self.cx,self.cy
+ end=self.peek(cy,cx,cy,w)
+ self.csi_K([0])
+ self.poke(cy,cx,end[l[0]:])
+ def csi_X(self,l):
+ self.zero(self.cy,self.cx,self.cy,self.cx+l[0])
+ def csi_a(self,l):
+ self.csi_C(l)
+ def csi_c(self,l):
+ #'\x1b[?0c' 0-8 cursor size
+ pass
+ def csi_d(self,l):
+ self.cy=min(self.height,l[0])-1
+ def csi_e(self,l):
+ self.csi_B(l)
+ def csi_f(self,l):
+ self.csi_H(l)
+ def csi_h(self,l):
+ if l[0]==4:
+ pass
+# print "insert on"
+ def csi_l(self,l):
+ if l[0]==4:
+ pass
+# print "insert off"
+ def csi_m(self,l):
+ for i in l:
+ if i==0 or i==39 or i==49 or i==27:
+ self.sgr=0x000700
+ elif i==1:
+ self.sgr=(self.sgr|0x000800)
+ elif i==7:
+ self.sgr=0x070000
+ elif i>=30 and i<=37:
+ c=i-30
+ self.sgr=(self.sgr&0xff08ff)|(c<<8)
+ elif i>=40 and i<=47:
+ c=i-40
+ self.sgr=(self.sgr&0x00ffff)|(c<<16)
+# else:
+# print "CSI sgr ignore",l,i
+# print 'sgr: %r %x'%(l,self.sgr)
+ def csi_r(self,l):
+ if len(l)<2: l=[0,self.height]
+ self.st=min(self.height-1,l[0]-1)
+ self.sb=min(self.height-1,l[1]-1)
+ self.sb=max(self.st,self.sb)
+ def csi_s(self,l):
+ self.esc_save(0)
+ def csi_u(self,l):
+ self.esc_restore(0)
+ def escape(self):
+ e=self.buf
+ if len(e)>32:
+# print "error %r"%e
+ self.buf=""
+ elif e in self.esc_seq:
+ self.esc_seq[e](e)
+ self.buf=""
+ else:
+ for r,f in self.esc_re:
+ mo=r.match(e)
+ if mo:
+ f(e,mo)
+ self.buf=""
+ break
+# if self.buf=='': print "ESC %r\n"%e
+ def write(self,s):
+ for i in s:
+ if len(self.buf) or (i in self.esc_seq):
+ self.buf+=i
+ self.escape()
+ elif i == '\x1b':
+ self.buf+=i
+ else:
+ self.echo(i)
+ def read(self):
+ b=self.outbuf
+ self.outbuf=""
+ return b
+ def dump(self):
+ r=''
+ for i in self.scr:
+ r+=chr(i&255)
+ return r
+ def dumplatin1(self):
+ return self.dump().translate(self.trl1)
+ def dumphtml(self,color=1):
+ h=self.height
+ w=self.width
+ r=""
+ span=""
+ span_bg,span_fg=-1,-1
+ for i in range(h*w):
+ q,c=divmod(self.scr[i],256)
+ if color:
+ bg,fg=divmod(q,256)
+ else:
+ bg,fg=0,7
+ if i==self.cy*w+self.cx:
+ bg,fg=1,7
+ if (bg!=span_bg or fg!=span_fg or i==h*w-1):
+ if len(span):
+ r+='<span class="f%d b%d">%s</span>'%(span_fg,span_bg,cgi.escape(span.translate(self.trhtml)))
+ span=""
+ span_bg,span_fg=bg,fg
+ span+=chr(c)
+ if i%w==w-1:
+ span+='\n'
+ r='<?xml version="1.0" encoding="ISO-8859-1"?><pre class="term">%s</pre>'%r
+ if self.last_html==r:
+ return '<?xml version="1.0"?><idem></idem>'
+ else:
+ self.last_html=r
+# print self
+ return r
+ def __repr__(self):
+ d=self.dumplatin1()
+ r=""
+ for i in range(self.height):
+ r+="|%s|\n"%d[self.width*i:self.width*(i+1)]
+ return r
+
+class SynchronizedMethod:
+ def __init__(self,lock,orig):
+ self.lock=lock
+ self.orig=orig
+ def __call__(self,*l):
+ self.lock.acquire()
+ r=self.orig(*l)
+ self.lock.release()
+ return r
+
+class Multiplex:
+ def __init__(self,cmd=None):
+ signal.signal(signal.SIGCHLD, signal.SIG_IGN)
+ self.cmd=cmd
+ self.proc={}
+ self.lock=threading.RLock()
+ self.thread=threading.Thread(target=self.loop)
+ self.alive=1
+ self.lastActivity=time.time()
+ # synchronize methods
+ for name in ['create','fds','proc_read','proc_write','dump','die','run']:
+ orig=getattr(self,name)
+ setattr(self,name,SynchronizedMethod(self.lock,orig))
+ self.thread.start()
+ def create(self,w=80,h=25):
+ pid,fd=pty.fork()
+ if pid==0:
+ try:
+ fdl=[int(i) for i in os.listdir('/proc/self/fd')]
+ except OSError:
+ fdl=range(256)
+ for i in [i for i in fdl if i>2]:
+ try:
+ os.close(i)
+ except OSError:
+ pass
+ if self.cmd:
+ cmd=['/bin/sh','-c',self.cmd]
+ elif os.getuid()==0:
+ cmd=['/bin/login']
+ else:
+ sys.stdout.write("Login: ")
+ login=sys.stdin.readline().strip()
+ if re.match('^[0-9A-Za-z-_. ]+$',login):
+ cmd=['ssh']
+ cmd+=['-oPreferredAuthentications=keyboard-interactive,password']
+ cmd+=['-oNoHostAuthenticationForLocalhost=yes']
+ cmd+=['-oLogLevel=FATAL']
+ cmd+=['-F/dev/null','-l',login,'localhost']
+ else:
+ os._exit(0)
+ env={}
+ env["COLUMNS"]=str(w)
+ env["LINES"]=str(h)
+ env["TERM"]="linux"
+ env["PATH"]=os.environ['PATH']
+ os.execvpe(cmd[0],cmd,env)
+ else:
+ fcntl.fcntl(fd, fcntl.F_SETFL, os.O_NONBLOCK)
+ # python bug http://python.org/sf/1112949 on amd64
+ fcntl.ioctl(fd, struct.unpack('i',struct.pack('I',termios.TIOCSWINSZ))[0], struct.pack("HHHH",h,w,0,0))
+ self.proc[fd]={'pid':pid,'term':Terminal(w,h),'buf':'','time':time.time()}
+ return fd
+ def die(self):
+ self.alive=0
+ def run(self):
+ return self.alive
+ def fds(self):
+ return self.proc.keys()
+ def proc_kill(self,fd):
+ if fd in self.proc:
+ self.proc[fd]['time']=0
+ t=time.time()
+ for i in self.proc.keys():
+ t0=self.proc[i]['time']
+ if (t-t0)>TIMEOUT:
+ try:
+ os.close(i)
+ os.kill(self.proc[i]['pid'],signal.SIGTERM)
+ except (IOError,OSError):
+ pass
+ del self.proc[i]
+ def proc_read(self,fd):
+ try:
+ t=self.proc[fd]['term']
+ t.write(os.read(fd,65536))
+ reply=t.read()
+ if reply:
+ os.write(fd,reply)
+ self.proc[fd]['time']=time.time()
+ except (KeyError,IOError,OSError):
+ self.proc_kill(fd)
+ def proc_write(self,fd,s):
+ try:
+ os.write(fd,s)
+ except (IOError,OSError):
+ self.proc_kill(fd)
+ def dump(self,fd,color=1):
+ try:
+ return self.proc[fd]['term'].dumphtml(color)
+ except KeyError:
+ return False
+ def loop(self):
+ while self.run():
+ fds=self.fds()
+ i,o,e=select.select(fds, [], [], 1.0)
+ if time.time() - self.lastActivity > TIMEOUT:
+ global g_server
+ g_server.shutdown()
+ for fd in i:
+ self.proc_read(fd)
+ if len(i):
+ time.sleep(0.002)
+ for i in self.proc.keys():
+ try:
+ os.close(i)
+ os.kill(self.proc[i]['pid'],signal.SIGTERM)
+ except (IOError,OSError):
+ pass
+
+class AjaxTerm:
+ def __init__(self,cmd=None,index_file='ajaxterm.html',token=None):
+ self.files={}
+ self.token=token
+ for i in ['css','html','js']:
+ for j in glob.glob('*.%s'%i):
+ self.files[j]=file(j).read()
+ self.files['index']=file(index_file).read()
+ self.mime = mimetypes.types_map.copy()
+ self.mime['.html']= 'text/html; charset=UTF-8'
+ self.multi = Multiplex(cmd)
+ self.session = {}
+ def __call__(self, environ, start_response):
+ req = qweb.QWebRequest(environ, start_response,session=None)
+ if req.PATH_INFO.endswith('/u'):
+ s=req.REQUEST["s"]
+ k=req.REQUEST["k"]
+ c=req.REQUEST["c"]
+ w=req.REQUEST.int("w")
+ h=req.REQUEST.int("h")
+ if s in self.session:
+ term=self.session[s]
+ else:
+ raise Exception('Not Authorized')
+ # The original code below was insecure, because it allowed unauthorized sessions to be created
+ # if not (w>2 and w<256 and h>2 and h<100):
+ # w,h=80,25
+ # term=self.session[s]=self.multi.create(w,h)
+ if k:
+ self.multi.proc_write(term,k)
+ time.sleep(0.002)
+ self.multi.lastActivity = time.time();
+ dump=self.multi.dump(term,c)
+ req.response_headers['Content-Type']='text/xml'
+ if isinstance(dump,str):
+ req.write(dump)
+ req.response_gzencode=1
+ else:
+ del self.session[s]
+ req.write('<?xml version="1.0"?><idem></idem>')
+# print "sessions %r"%self.session
+ else:
+ n=os.path.basename(req.PATH_INFO)
+ if n in self.files:
+ req.response_headers['Content-Type'] = self.mime.get(os.path.splitext(n)[1].lower(), 'application/octet-stream')
+ req.write(self.files[n])
+ elif req.REQUEST['token'] == self.token:
+ req.response_headers['Content-Type'] = 'text/html; charset=UTF-8'
+ session_id = str(uuid.uuid4())
+ req.write(string.Template(self.files['index']).substitute(session_id=session_id))
+ term=self.session[session_id]=self.multi.create(80,25)
+ else:
+ raise Exception("Not Authorized")
+ return req
+
+def main():
+ parser = optparse.OptionParser()
+ parser.add_option("-p", "--port", dest="port", default="8022", help="Set the TCP port (default: 8022)")
+ parser.add_option("-c", "--command", dest="cmd", default=None,help="set the command (default: /bin/login or ssh 0.0.0.0)")
+ parser.add_option("-l", "--log", action="store_true", dest="log",default=0,help="log requests to stderr (default: quiet mode)")
+ parser.add_option("-d", "--daemon", action="store_true", dest="daemon", default=0, help="run as daemon in the background")
+ parser.add_option("-P", "--pidfile",dest="pidfile",default="/var/run/ajaxterm.pid",help="set the pidfile (default: /var/run/ajaxterm.pid)")
+ parser.add_option("-i", "--index", dest="index_file", default="ajaxterm.html",help="default index file (default: ajaxterm.html)")
+ parser.add_option("-u", "--uid", dest="uid", help="Set the daemon's user id")
+ parser.add_option("-t", "--token", dest="token", help="Set authorization token")
+ (o, a) = parser.parse_args()
+ if o.daemon:
+ pid=os.fork()
+ if pid == 0:
+ #os.setsid() ?
+ os.setpgrp()
+ nullin = file('/dev/null', 'r')
+ nullout = file('/dev/null', 'w')
+ os.dup2(nullin.fileno(), sys.stdin.fileno())
+ os.dup2(nullout.fileno(), sys.stdout.fileno())
+ os.dup2(nullout.fileno(), sys.stderr.fileno())
+ if os.getuid()==0 and o.uid:
+ try:
+ os.setuid(int(o.uid))
+ except:
+ os.setuid(pwd.getpwnam(o.uid).pw_uid)
+ else:
+ try:
+ file(o.pidfile,'w+').write(str(pid)+'\n')
+ except:
+ pass
+ print 'AjaxTerm at http://0.0.0.0:%s/ pid: %d' % (o.port,pid)
+ sys.exit(0)
+ else:
+ print 'AjaxTerm at http://0.0.0.0:%s/' % o.port
+ at=AjaxTerm(o.cmd,o.index_file,o.token)
+# f=lambda:os.system('firefox http://localhost:%s/&'%o.port)
+# qweb.qweb_wsgi_autorun(at,ip='localhost',port=int(o.port),threaded=0,log=o.log,callback_ready=None)
+ try:
+ global g_server
+ g_server = qweb.QWebWSGIServer(at,ip='0.0.0.0',port=int(o.port),threaded=0,log=o.log)
+ g_server.serve_forever()
+ except KeyboardInterrupt,e:
+ sys.excepthook(*sys.exc_info())
+ at.multi.die()
+
+if __name__ == '__main__':
+ main()
+
diff --git a/tools/ajaxterm/configure b/tools/ajaxterm/configure
new file mode 100755
index 000000000..45391f484
--- /dev/null
+++ b/tools/ajaxterm/configure
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+
+import optparse,os
+
+parser = optparse.OptionParser()
+parser.add_option("", "--prefix", dest="prefix",default="/usr/local",help="installation prefix (default: /usr/local)")
+parser.add_option("", "--confdir", dest="confdir", default="/etc",help="configuration files directory prefix (default: /etc)")
+parser.add_option("", "--port", dest="port", default="8022", help="set the listening TCP port (default: 8022)")
+parser.add_option("", "--command", dest="cmd", default=None,help="set the command (default: /bin/login or ssh localhost)")
+(o, a) = parser.parse_args()
+
+print "Configuring prefix=",o.prefix," port=",o.port
+
+etc=o.confdir
+port=o.port
+cmd=o.cmd
+bin=os.path.join(o.prefix,"bin")
+lib=os.path.join(o.prefix,"share/ajaxterm")
+man=os.path.join(o.prefix,"share/man/man1")
+
+file("ajaxterm.bin","w").write(file("configure.ajaxterm.bin").read()%locals())
+file("Makefile","w").write(file("configure.makefile").read()%locals())
+
+if os.path.isfile("/etc/gentoo-release"):
+ file("ajaxterm.initd","w").write(file("configure.initd.gentoo").read()%locals())
+elif os.path.isfile("/etc/fedora-release") or os.path.isfile("/etc/redhat-release"):
+ file("ajaxterm.initd","w").write(file("configure.initd.redhat").read()%locals())
+else:
+ file("ajaxterm.initd","w").write(file("configure.initd.debian").read()%locals())
+
+os.system("chmod a+x ajaxterm.bin")
+os.system("chmod a+x ajaxterm.initd")
diff --git a/tools/ajaxterm/configure.ajaxterm.bin b/tools/ajaxterm/configure.ajaxterm.bin
new file mode 100644
index 000000000..4d1f5a98f
--- /dev/null
+++ b/tools/ajaxterm/configure.ajaxterm.bin
@@ -0,0 +1,2 @@
+#!/bin/sh
+PYTHONPATH=%(lib)s exec %(lib)s/ajaxterm.py $@
diff --git a/tools/ajaxterm/configure.initd.debian b/tools/ajaxterm/configure.initd.debian
new file mode 100644
index 000000000..901082707
--- /dev/null
+++ b/tools/ajaxterm/configure.initd.debian
@@ -0,0 +1,33 @@
+#!/bin/sh
+
+PATH=/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin
+DAEMON=%(bin)s/ajaxterm
+PORT=%(port)s
+PIDFILE=/var/run/ajaxterm.pid
+
+[ -x "$DAEMON" ] || exit 0
+
+#. /lib/lsb/init-functions
+
+case "$1" in
+ start)
+ echo "Starting ajaxterm on port $PORT"
+ start-stop-daemon --start --pidfile $PIDFILE --exec $DAEMON -- --daemon --port=$PORT --uid=nobody || return 2
+ ;;
+ stop)
+ echo "Stopping ajaxterm"
+ start-stop-daemon --stop --pidfile $PIDFILE
+ rm -f $PIDFILE
+ ;;
+ restart|force-reload)
+ $0 stop
+ sleep 1
+ $0 start
+ ;;
+ *)
+ echo "Usage: $SCRIPTNAME {start|stop|restart|force-reload}" >&2
+ exit 3
+ ;;
+esac
+
+:
diff --git a/tools/ajaxterm/configure.initd.gentoo b/tools/ajaxterm/configure.initd.gentoo
new file mode 100644
index 000000000..ac28ef0b6
--- /dev/null
+++ b/tools/ajaxterm/configure.initd.gentoo
@@ -0,0 +1,27 @@
+#!/sbin/runscript
+
+# AjaxTerm Gentoo script, 08 May 2006 Mark Gillespie
+
+DAEMON=%(bin)s/ajaxterm
+PORT=%(port)s
+PIDFILE=/var/run/ajaxterm.pid
+
+depend()
+{
+ need net
+}
+
+start()
+{
+ ebegin "Starting AjaxTerm on port $PORT"
+ start-stop-daemon --start --pidfile $PIDFILE --exec $DAEMON -- --daemon --port=$PORT --uid=nobody
+ eend $?
+}
+
+stop()
+{
+ ebegin "Stopping AjaxTerm"
+ start-stop-daemon --stop --pidfile $PIDFILE
+ rm -f $PIDFILE
+ eend $?
+}
diff --git a/tools/ajaxterm/configure.initd.redhat b/tools/ajaxterm/configure.initd.redhat
new file mode 100644
index 000000000..5c9788574
--- /dev/null
+++ b/tools/ajaxterm/configure.initd.redhat
@@ -0,0 +1,75 @@
+#
+# ajaxterm Startup script for ajaxterm
+#
+# chkconfig: - 99 99
+# description: Ajaxterm is a yadda yadda yadda
+# processname: ajaxterm
+# pidfile: /var/run/ajaxterm.pid
+# version: 1.0 Kevin Reichhart - ajaxterminit at lastname dot org
+
+# Source function library.
+. /etc/rc.d/init.d/functions
+
+if [ -f /etc/sysconfig/ajaxterm ]; then
+ . /etc/sysconfig/ajaxterm
+fi
+
+ajaxterm=/usr/local/bin/ajaxterm
+prog=ajaxterm
+pidfile=${PIDFILE-/var/run/ajaxterm.pid}
+lockfile=${LOCKFILE-/var/lock/subsys/ajaxterm}
+port=${PORT-8022}
+user=${xUSER-nobody}
+RETVAL=0
+
+
+start() {
+ echo -n $"Starting $prog: "
+ daemon $ajaxterm --daemon --port=$port --uid=$user $OPTIONS
+ RETVAL=$?
+ echo
+ [ $RETVAL = 0 ] && touch ${lockfile}
+ return $RETVAL
+}
+stop() {
+ echo -n $"Stopping $prog: "
+ killproc $ajaxterm
+ RETVAL=$?
+ echo
+ [ $RETVAL = 0 ] && rm -f ${lockfile} ${pidfile}
+}
+reload() {
+ echo -n $"Reloading $prog: "
+ killproc $ajaxterm -HUP
+ RETVAL=$?
+ echo
+}
+
+# See how we were called.
+case "$1" in
+ start)
+ start
+ ;;
+ stop)
+ stop
+ ;;
+ status)
+ status python ajaxterm
+ RETVAL=$?
+ ;;
+ restart)
+ stop
+ start
+ ;;
+ condrestart)
+ if [ -f ${pidfile} ] ; then
+ stop
+ start
+ fi
+ ;;
+ *)
+ echo $"Usage: $prog {start|stop|restart|condrestart}"
+ exit 1
+esac
+
+exit $RETVAL
diff --git a/tools/ajaxterm/configure.makefile b/tools/ajaxterm/configure.makefile
new file mode 100644
index 000000000..6bd80853d
--- /dev/null
+++ b/tools/ajaxterm/configure.makefile
@@ -0,0 +1,20 @@
+build:
+ true
+
+install:
+ install -d "%(bin)s"
+ install -d "%(lib)s"
+ install ajaxterm.bin "%(bin)s/ajaxterm"
+ install ajaxterm.initd "%(etc)s/init.d/ajaxterm"
+ install -m 644 ajaxterm.css ajaxterm.html ajaxterm.js qweb.py sarissa.js sarissa_dhtml.js "%(lib)s"
+ install -m 755 ajaxterm.py "%(lib)s"
+ gzip --best -c ajaxterm.1 > ajaxterm.1.gz
+ install -d "%(man)s"
+ install ajaxterm.1.gz "%(man)s"
+
+clean:
+ rm ajaxterm.bin
+ rm ajaxterm.initd
+ rm ajaxterm.1.gz
+ rm Makefile
+
diff --git a/tools/ajaxterm/qweb.py b/tools/ajaxterm/qweb.py
new file mode 100644
index 000000000..20c509230
--- /dev/null
+++ b/tools/ajaxterm/qweb.py
@@ -0,0 +1,1356 @@
+#!/usr/bin/python2.3
+#
+# vim:set et ts=4 fdc=0 fdn=2 fdl=0:
+#
+# There are no blank lines between blocks because I use folding from:
+# http://www.vim.org/scripts/script.php?script_id=515
+#
+
+"""= QWeb Framework =
+
+== What is QWeb ? ==
+
+QWeb is a python based [http://www.python.org/doc/peps/pep-0333/ WSGI]
+compatible web framework, it provides an infrastructure to quickly build web
+applications consisting of:
+
+ * A lightweight request handler (QWebRequest)
+ * An xml templating engine (QWebXml and QWebHtml)
+ * A simple name based controller (qweb_control)
+ * A standalone WSGI Server (QWebWSGIServer)
+ * A cgi and fastcgi WSGI wrapper (taken from flup)
+ * A startup function that starts cgi, fastcgi or standalone according to the
+   environment (qweb_autorun).
+
+QWeb applications are runnable in standalone mode (from commandline), via
+FastCGI, Regular CGI or by any python WSGI compliant server.
+
+QWeb doesn't provide any database access but it integrates nicely with ORMs
+such as SQLObject, SQLAlchemy or plain DB-API.
+
+Written by Antony Lesuisse (email al AT udev.org)
+
+Homepage: http://antony.lesuisse.org/qweb/trac/
+
+Forum: [http://antony.lesuisse.org/qweb/forum/viewforum.php?id=1 Forum]
+
+== Quick Start (for Linux, MacOS X and cygwin) ==
+
+Make sure you have at least python 2.3 installed and run the following commands:
+
+{{{
+$ wget http://antony.lesuisse.org/qweb/files/QWeb-0.7.tar.gz
+$ tar zxvf QWeb-0.7.tar.gz
+$ cd QWeb-0.7/examples/blog
+$ ./blog.py
+}}}
+
+And point your browser to http://localhost:8080/
+
+You may also try AjaxTerm which uses qweb request handler.
+
+== Download ==
+
+ * Version 0.7:
+ * Source [/qweb/files/QWeb-0.7.tar.gz QWeb-0.7.tar.gz]
+ * Python 2.3 Egg [/qweb/files/QWeb-0.7-py2.3.egg QWeb-0.7-py2.3.egg]
+ * Python 2.4 Egg [/qweb/files/QWeb-0.7-py2.4.egg QWeb-0.7-py2.4.egg]
+
+ * [/qweb/trac/browser Browse the source repository]
+
+== Documentation ==
+
+ * [/qweb/trac/browser/trunk/README.txt?format=raw Read the included documentation]
+ * QwebTemplating
+
+== Mailing-list ==
+
+ * Forum: [http://antony.lesuisse.org/qweb/forum/viewforum.php?id=1 Forum]
+ * No mailing-list exists yet, discussion should happen on: [http://mail.python.org/mailman/listinfo/web-sig web-sig] [http://mail.python.org/pipermail/web-sig/ archives]
+
+QWeb Components:
+----------------
+
+QWeb also features a simple components api, that enables developers to easily
+produce reusable components.
+
+Default qweb components:
+
+ - qweb_static:
+ A qweb component to serve static content from the filesystem or from
+ zipfiles.
+
+ - qweb_dbadmin:
+ scaffolding for sqlobject
+
+License
+-------
+qweb/fcgi.py which is BSD-like from saddi.com.
+Everything else is put in the public domain.
+
+
+TODO
+----
+ Announce QWeb to python-announce-list@python.org web-sig@python.org
+ qweb_core
+ rename request methods into
+ request_save_files
+ response_404
+ response_redirect
+ response_download
+ request callback_generator, callback_function ?
+ wsgi callback_server_local
+ xml tags explicitly call render_attributes(t_att)?
+ priority form-checkbox over t-value (for t-option)
+
+"""
+
+import BaseHTTPServer,SocketServer,Cookie
+import cgi,datetime,email,email.Message,errno,gzip,os,random,re,socket,sys,tempfile,time,types,urllib,urlparse,xml.dom
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+try:
+ import cStringIO as StringIO
+except ImportError:
+ import StringIO
+
+#----------------------------------------------------------
+# Qweb Xml t-raw t-esc t-if t-foreach t-set t-call t-trim
+#----------------------------------------------------------
+class QWebEval:
+ def __init__(self,data):
+ self.data=data
+ def __getitem__(self,expr):
+ if self.data.has_key(expr):
+ return self.data[expr]
+ r=None
+ try:
+ r=eval(expr,self.data)
+ except NameError,e:
+ pass
+ except AttributeError,e:
+ pass
+ except Exception,e:
+ print "qweb: expression error '%s' "%expr,e
+ if self.data.has_key("__builtins__"):
+ del self.data["__builtins__"]
+ return r
+ def eval_object(self,expr):
+ return self[expr]
+ def eval_str(self,expr):
+ if expr=="0":
+ return self.data[0]
+ if isinstance(self[expr],unicode):
+ return self[expr].encode("utf8")
+ return str(self[expr])
+ def eval_format(self,expr):
+ try:
+ return str(expr%self)
+ except:
+ return "qweb: format error '%s' "%expr
+# if isinstance(r,unicode):
+# return r.encode("utf8")
+ def eval_bool(self,expr):
+ if self.eval_object(expr):
+ return 1
+ else:
+ return 0
+class QWebXml:
+ """QWeb Xml templating engine
+
+ The templating engine use a very simple syntax, "magic" xml attributes, to
+    produce any kind of textual output (even non-xml).
+
+ QWebXml:
+ the template engine core implements the basic magic attributes:
+
+ t-att t-raw t-esc t-if t-foreach t-set t-call t-trim
+
+ """
+ def __init__(self,x=None,zipname=None):
+ self.node=xml.dom.Node
+ self._t={}
+ self._render_tag={}
+ prefix='render_tag_'
+ for i in [j for j in dir(self) if j.startswith(prefix)]:
+ name=i[len(prefix):].replace('_','-')
+ self._render_tag[name]=getattr(self.__class__,i)
+
+ self._render_att={}
+ prefix='render_att_'
+ for i in [j for j in dir(self) if j.startswith(prefix)]:
+ name=i[len(prefix):].replace('_','-')
+ self._render_att[name]=getattr(self.__class__,i)
+
+ if x!=None:
+ if zipname!=None:
+ import zipfile
+ zf=zipfile.ZipFile(zipname, 'r')
+ self.add_template(zf.read(x))
+ else:
+ self.add_template(x)
+ def register_tag(self,tag,func):
+ self._render_tag[tag]=func
+ def add_template(self,x):
+ if hasattr(x,'documentElement'):
+ dom=x
+ elif x.startswith("<?xml"):
+ import xml.dom.minidom
+ dom=xml.dom.minidom.parseString(x)
+ else:
+ import xml.dom.minidom
+ dom=xml.dom.minidom.parse(x)
+ for n in dom.documentElement.childNodes:
+ if n.nodeName=="t":
+ self._t[str(n.getAttribute("t-name"))]=n
+ def get_template(self,name):
+ return self._t[name]
+
+ def eval_object(self,expr,v):
+ return QWebEval(v).eval_object(expr)
+ def eval_str(self,expr,v):
+ return QWebEval(v).eval_str(expr)
+ def eval_format(self,expr,v):
+ return QWebEval(v).eval_format(expr)
+ def eval_bool(self,expr,v):
+ return QWebEval(v).eval_bool(expr)
+
+ def render(self,tname,v={},out=None):
+ if self._t.has_key(tname):
+ return self.render_node(self._t[tname],v)
+ else:
+ return 'qweb: template "%s" not found'%tname
+ def render_node(self,e,v):
+ r=""
+ if e.nodeType==self.node.TEXT_NODE or e.nodeType==self.node.CDATA_SECTION_NODE:
+ r=e.data.encode("utf8")
+ elif e.nodeType==self.node.ELEMENT_NODE:
+ pre=""
+ g_att=""
+ t_render=None
+ t_att={}
+ for (an,av) in e.attributes.items():
+ an=str(an)
+ if isinstance(av,types.UnicodeType):
+ av=av.encode("utf8")
+ else:
+ av=av.nodeValue.encode("utf8")
+ if an.startswith("t-"):
+ for i in self._render_att:
+ if an[2:].startswith(i):
+ g_att+=self._render_att[i](self,e,an,av,v)
+ break
+ else:
+ if self._render_tag.has_key(an[2:]):
+ t_render=an[2:]
+ t_att[an[2:]]=av
+ else:
+ g_att+=' %s="%s"'%(an,cgi.escape(av,1));
+ if t_render:
+ if self._render_tag.has_key(t_render):
+ r=self._render_tag[t_render](self,e,t_att,g_att,v)
+ else:
+ r=self.render_element(e,g_att,v,pre,t_att.get("trim",0))
+ return r
+ def render_element(self,e,g_att,v,pre="",trim=0):
+ g_inner=[]
+ for n in e.childNodes:
+ g_inner.append(self.render_node(n,v))
+ name=str(e.nodeName)
+ inner="".join(g_inner)
+ if trim==0:
+ pass
+ elif trim=='left':
+ inner=inner.lstrip()
+ elif trim=='right':
+ inner=inner.rstrip()
+ elif trim=='both':
+ inner=inner.strip()
+ if name=="t":
+ return inner
+ elif len(inner):
+ return "<%s%s>%s%s</%s>"%(name,g_att,pre,inner,name)
+ else:
+ return "<%s%s/>"%(name,g_att)
+
+ # Attributes
+ def render_att_att(self,e,an,av,v):
+ if an.startswith("t-attf-"):
+ att,val=an[7:],self.eval_format(av,v)
+ elif an.startswith("t-att-"):
+ att,val=(an[6:],self.eval_str(av,v))
+ else:
+ att,val=self.eval_object(av,v)
+ return ' %s="%s"'%(att,cgi.escape(val,1))
+
+ # Tags
+ def render_tag_raw(self,e,t_att,g_att,v):
+ return self.eval_str(t_att["raw"],v)
+ def render_tag_rawf(self,e,t_att,g_att,v):
+ return self.eval_format(t_att["rawf"],v)
+ def render_tag_esc(self,e,t_att,g_att,v):
+ return cgi.escape(self.eval_str(t_att["esc"],v))
+ def render_tag_escf(self,e,t_att,g_att,v):
+ return cgi.escape(self.eval_format(t_att["escf"],v))
+ def render_tag_foreach(self,e,t_att,g_att,v):
+ expr=t_att["foreach"]
+ enum=self.eval_object(expr,v)
+ if enum!=None:
+ var=t_att.get('as',expr).replace('.','_')
+ d=v.copy()
+ size=-1
+ if isinstance(enum,types.ListType):
+ size=len(enum)
+ elif isinstance(enum,types.TupleType):
+ size=len(enum)
+ elif hasattr(enum,'count'):
+ size=enum.count()
+ d["%s_size"%var]=size
+ d["%s_all"%var]=enum
+ index=0
+ ru=[]
+ for i in enum:
+ d["%s_value"%var]=i
+ d["%s_index"%var]=index
+ d["%s_first"%var]=index==0
+ d["%s_even"%var]=index%2
+ d["%s_odd"%var]=(index+1)%2
+ d["%s_last"%var]=index+1==size
+ if index%2:
+ d["%s_parity"%var]='odd'
+ else:
+ d["%s_parity"%var]='even'
+ if isinstance(i,types.DictType):
+ d.update(i)
+ else:
+ d[var]=i
+ ru.append(self.render_element(e,g_att,d))
+ index+=1
+ return "".join(ru)
+ else:
+ return "qweb: t-foreach %s not found."%expr
+ def render_tag_if(self,e,t_att,g_att,v):
+ if self.eval_bool(t_att["if"],v):
+ return self.render_element(e,g_att,v)
+ else:
+ return ""
+ def render_tag_call(self,e,t_att,g_att,v):
+ # TODO t-prefix
+ if t_att.has_key("import"):
+ d=v
+ else:
+ d=v.copy()
+ d[0]=self.render_element(e,g_att,d)
+ return self.render(t_att["call"],d)
+ def render_tag_set(self,e,t_att,g_att,v):
+ if t_att.has_key("eval"):
+ v[t_att["set"]]=self.eval_object(t_att["eval"],v)
+ else:
+ v[t_att["set"]]=self.render_element(e,g_att,v)
+ return ""
+
+#----------------------------------------------------------
+# QWeb HTML (+deprecated QWebFORM and QWebOLD)
+#----------------------------------------------------------
+class QWebURL:
+ """ URL helper
+ assert req.PATH_INFO== "/site/admin/page_edit"
+ u = QWebURL(root_path="/site/",req_path=req.PATH_INFO)
+ s=u.url2_href("user/login",{'a':'1'})
+ assert s=="../user/login?a=1"
+
+ """
+ def __init__(self, root_path="/", req_path="/",defpath="",defparam={}):
+ self.defpath=defpath
+ self.defparam=defparam
+ self.root_path=root_path
+ self.req_path=req_path
+ self.req_list=req_path.split("/")[:-1]
+ self.req_len=len(self.req_list)
+ def decode(self,s):
+ h={}
+ for k,v in cgi.parse_qsl(s,1):
+ h[k]=v
+ return h
+ def encode(self,h):
+ return urllib.urlencode(h.items())
+ def request(self,req):
+ return req.REQUEST
+ def copy(self,path=None,param=None):
+ npath=self.defpath
+ if path:
+ npath=path
+ nparam=self.defparam.copy()
+ if param:
+ nparam.update(param)
+ return QWebURL(self.root_path,self.req_path,npath,nparam)
+ def path(self,path=''):
+ if not path:
+ path=self.defpath
+ pl=(self.root_path+path).split('/')
+ i=0
+ for i in range(min(len(pl), self.req_len)):
+ if pl[i]!=self.req_list[i]:
+ break
+ else:
+ i+=1
+ dd=self.req_len-i
+ if dd<0:
+ dd=0
+ return '/'.join(['..']*dd+pl[i:])
+ def href(self,path='',arg={}):
+ p=self.path(path)
+ tmp=self.defparam.copy()
+ tmp.update(arg)
+ s=self.encode(tmp)
+ if len(s):
+ return p+"?"+s
+ else:
+ return p
+ def form(self,path='',arg={}):
+ p=self.path(path)
+ tmp=self.defparam.copy()
+ tmp.update(arg)
+ r=''.join(['<input type="hidden" name="%s" value="%s"/>'%(k,cgi.escape(str(v),1)) for k,v in tmp.items()])
+ return (p,r)
+class QWebField:
+ def __init__(self,name=None,default="",check=None):
+ self.name=name
+ self.default=default
+ self.check=check
+ # optional attributes
+ self.type=None
+ self.trim=1
+ self.required=1
+ self.cssvalid="form_valid"
+ self.cssinvalid="form_invalid"
+ # set by addfield
+ self.form=None
+ # set by processing
+ self.input=None
+ self.css=None
+ self.value=None
+ self.valid=None
+ self.invalid=None
+ self.validate(1)
+ def validate(self,val=1,update=1):
+ if val:
+ self.valid=1
+ self.invalid=0
+ self.css=self.cssvalid
+ else:
+ self.valid=0
+ self.invalid=1
+ self.css=self.cssinvalid
+ if update and self.form:
+ self.form.update()
+ def invalidate(self,update=1):
+ self.validate(0,update)
+class QWebForm:
+ class QWebFormF:
+ pass
+ def __init__(self,e=None,arg=None,default=None):
+ self.fields={}
+ # all fields have been submitted
+ self.submitted=False
+ self.missing=[]
+ # at least one field is invalid or missing
+ self.invalid=False
+ self.error=[]
+ # all fields have been submitted and are valid
+ self.valid=False
+ # fields under self.f for convenience
+ self.f=self.QWebFormF()
+ if e:
+ self.add_template(e)
+ # assume that the fields are done with the template
+ if default:
+ self.set_default(default,e==None)
+ if arg!=None:
+ self.process_input(arg)
+ def __getitem__(self,k):
+ return self.fields[k]
+ def set_default(self,default,add_missing=1):
+ for k,v in default.items():
+ if self.fields.has_key(k):
+ self.fields[k].default=str(v)
+ elif add_missing:
+ self.add_field(QWebField(k,v))
+ def add_field(self,f):
+ self.fields[f.name]=f
+ f.form=self
+ setattr(self.f,f.name,f)
+ def add_template(self,e):
+ att={}
+ for (an,av) in e.attributes.items():
+ an=str(an)
+ if an.startswith("t-"):
+ att[an[2:]]=av.encode("utf8")
+ for i in ["form-text", "form-password", "form-radio", "form-checkbox", "form-select","form-textarea"]:
+ if att.has_key(i):
+ name=att[i].split(".")[-1]
+ default=att.get("default","")
+ check=att.get("check",None)
+ f=QWebField(name,default,check)
+ if i=="form-textarea":
+ f.type="textarea"
+ f.trim=0
+ if i=="form-checkbox":
+ f.type="checkbox"
+ f.required=0
+ self.add_field(f)
+ for n in e.childNodes:
+ if n.nodeType==n.ELEMENT_NODE:
+ self.add_template(n)
+ def process_input(self,arg):
+ for f in self.fields.values():
+ if arg.has_key(f.name):
+ f.input=arg[f.name]
+ f.value=f.input
+ if f.trim:
+ f.input=f.input.strip()
+ f.validate(1,False)
+ if f.check==None:
+ continue
+ elif callable(f.check):
+ pass
+ elif isinstance(f.check,str):
+ v=f.check
+ if f.check=="email":
+ v=r"/^[^@#!& ]+@[A-Za-z0-9-][.A-Za-z0-9-]{0,64}\.[A-Za-z]{2,5}$/"
+ if f.check=="date":
+ v=r"/^(19|20)\d\d-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])$/"
+ if not re.match(v[1:-1],f.input):
+ f.validate(0,False)
+ else:
+ f.value=f.default
+ self.update()
+ def validate_all(self,val=1):
+ for f in self.fields.values():
+ f.validate(val,0)
+ self.update()
+ def invalidate_all(self):
+ self.validate_all(0)
+ def update(self):
+ self.submitted=True
+ self.valid=True
+ self.errors=[]
+ for f in self.fields.values():
+ if f.required and f.input==None:
+ self.submitted=False
+ self.valid=False
+ self.missing.append(f.name)
+ if f.invalid:
+ self.valid=False
+ self.error.append(f.name)
+ # invalid have been submitted and
+ self.invalid=self.submitted and self.valid==False
+ def collect(self):
+ d={}
+ for f in self.fields.values():
+ d[f.name]=f.value
+ return d
+class QWebURLEval(QWebEval):
+ def __init__(self,data):
+ QWebEval.__init__(self,data)
+ def __getitem__(self,expr):
+ r=QWebEval.__getitem__(self,expr)
+ if isinstance(r,str):
+ return urllib.quote_plus(r)
+ else:
+ return r
+class QWebHtml(QWebXml):
+ """QWebHtml
+ QWebURL:
+ QWebField:
+ QWebForm:
+ QWebHtml:
+ an extended template engine, with a few utility class to easily produce
+ HTML, handle URLs and process forms, it adds the following magic attributes:
+
+ t-href t-action t-form-text t-form-password t-form-textarea t-form-radio
+ t-form-checkbox t-form-select t-option t-selected t-checked t-pager
+
+    # explanation URL:
+ # v['tableurl']=QWebUrl({p=afdmin,saar=,orderby=,des=,mlink;meta_active=})
+ # t-href="tableurl?desc=1"
+ #
+    # explanation FORM: t-if="form.valid()"
+ # Foreach i
+ # email: <input type="text" t-esc-name="i" t-esc-value="form[i].value" t-esc-class="form[i].css"/>
+ # <input type="radio" name="spamtype" t-esc-value="i" t-selected="i==form.f.spamtype.value"/>
+ # <option t-esc-value="cc" t-selected="cc==form.f.country.value"><t t-esc="cname"></option>
+ # Simple forms:
+ # <input t-form-text="form.email" t-check="email"/>
+ # <input t-form-password="form.email" t-check="email"/>
+ # <input t-form-radio="form.email" />
+ # <input t-form-checkbox="form.email" />
+ # <textarea t-form-textarea="form.email" t-check="email"/>
+ # <select t-form-select="form.email"/>
+ # <option t-value="1">
+ # <input t-form-radio="form.spamtype" t-value="1"/> Cars
+ # <input t-form-radio="form.spamtype" t-value="2"/> Sprt
+ """
+ # QWebForm from a template
+ def form(self,tname,arg=None,default=None):
+ form=QWebForm(self._t[tname],arg,default)
+ return form
+
+ # HTML Att
+ def eval_url(self,av,v):
+ s=QWebURLEval(v).eval_format(av)
+ a=s.split('?',1)
+ arg={}
+ if len(a)>1:
+ for k,v in cgi.parse_qsl(a[1],1):
+ arg[k]=v
+ b=a[0].split('/',1)
+ path=''
+ if len(b)>1:
+ path=b[1]
+ u=b[0]
+ return u,path,arg
+ def render_att_url_(self,e,an,av,v):
+ u,path,arg=self.eval_url(av,v)
+ if not isinstance(v.get(u,0),QWebURL):
+ out='qweb: missing url %r %r %r'%(u,path,arg)
+ else:
+ out=v[u].href(path,arg)
+ return ' %s="%s"'%(an[6:],cgi.escape(out,1))
+ def render_att_href(self,e,an,av,v):
+ return self.render_att_url_(e,"t-url-href",av,v)
+ def render_att_checked(self,e,an,av,v):
+ if self.eval_bool(av,v):
+ return ' %s="%s"'%(an[2:],an[2:])
+ else:
+ return ''
+ def render_att_selected(self,e,an,av,v):
+ return self.render_att_checked(e,an,av,v)
+
+ # HTML Tags forms
+ def render_tag_rawurl(self,e,t_att,g_att,v):
+ u,path,arg=self.eval_url(t_att["rawurl"],v)
+ return v[u].href(path,arg)
+ def render_tag_escurl(self,e,t_att,g_att,v):
+ u,path,arg=self.eval_url(t_att["escurl"],v)
+ return cgi.escape(v[u].href(path,arg))
+ def render_tag_action(self,e,t_att,g_att,v):
+ u,path,arg=self.eval_url(t_att["action"],v)
+ if not isinstance(v.get(u,0),QWebURL):
+ action,input=('qweb: missing url %r %r %r'%(u,path,arg),'')
+ else:
+ action,input=v[u].form(path,arg)
+ g_att+=' action="%s"'%action
+ return self.render_element(e,g_att,v,input)
+ def render_tag_form_text(self,e,t_att,g_att,v):
+ f=self.eval_object(t_att["form-text"],v)
+ g_att+=' type="text" name="%s" value="%s" class="%s"'%(f.name,cgi.escape(f.value,1),f.css)
+ return self.render_element(e,g_att,v)
+ def render_tag_form_password(self,e,t_att,g_att,v):
+ f=self.eval_object(t_att["form-password"],v)
+ g_att+=' type="password" name="%s" value="%s" class="%s"'%(f.name,cgi.escape(f.value,1),f.css)
+ return self.render_element(e,g_att,v)
+ def render_tag_form_textarea(self,e,t_att,g_att,v):
+ type="textarea"
+ f=self.eval_object(t_att["form-textarea"],v)
+ g_att+=' name="%s" class="%s"'%(f.name,f.css)
+ r="<%s%s>%s</%s>"%(type,g_att,cgi.escape(f.value,1),type)
+ return r
+ def render_tag_form_radio(self,e,t_att,g_att,v):
+ f=self.eval_object(t_att["form-radio"],v)
+ val=t_att["value"]
+ g_att+=' type="radio" name="%s" value="%s"'%(f.name,val)
+ if f.value==val:
+ g_att+=' checked="checked"'
+ return self.render_element(e,g_att,v)
+ def render_tag_form_checkbox(self,e,t_att,g_att,v):
+ f=self.eval_object(t_att["form-checkbox"],v)
+ val=t_att["value"]
+ g_att+=' type="checkbox" name="%s" value="%s"'%(f.name,val)
+ if f.value==val:
+ g_att+=' checked="checked"'
+ return self.render_element(e,g_att,v)
+ def render_tag_form_select(self,e,t_att,g_att,v):
+ f=self.eval_object(t_att["form-select"],v)
+ g_att+=' name="%s" class="%s"'%(f.name,f.css)
+ return self.render_element(e,g_att,v)
+ def render_tag_option(self,e,t_att,g_att,v):
+ f=self.eval_object(e.parentNode.getAttribute("t-form-select"),v)
+ val=t_att["option"]
+ g_att+=' value="%s"'%(val)
+ if f.value==val:
+ g_att+=' selected="selected"'
+ return self.render_element(e,g_att,v)
+
+ # HTML Tags others
+ def render_tag_pager(self,e,t_att,g_att,v):
+ pre=t_att["pager"]
+ total=int(self.eval_str(t_att["total"],v))
+ start=int(self.eval_str(t_att["start"],v))
+ step=int(self.eval_str(t_att.get("step","100"),v))
+ scope=int(self.eval_str(t_att.get("scope","5"),v))
+ # Compute Pager
+ p=pre+"_"
+ d={}
+ d[p+"tot_size"]=total
+ d[p+"tot_page"]=tot_page=total/step
+ d[p+"win_start0"]=total and start
+ d[p+"win_start1"]=total and start+1
+ d[p+"win_end0"]=max(0,min(start+step-1,total-1))
+ d[p+"win_end1"]=min(start+step,total)
+ d[p+"win_page0"]=win_page=start/step
+ d[p+"win_page1"]=win_page+1
+ d[p+"prev"]=(win_page!=0)
+ d[p+"prev_start"]=(win_page-1)*step
+ d[p+"next"]=(tot_page>=win_page+1)
+ d[p+"next_start"]=(win_page+1)*step
+ l=[]
+ begin=win_page-scope
+ end=win_page+scope
+ if begin<0:
+ end-=begin
+ if end>tot_page:
+ begin-=(end-tot_page)
+ i=max(0,begin)
+ while i<=min(end,tot_page) and total!=step:
+ l.append( { p+"page0":i, p+"page1":i+1, p+"start":i*step, p+"sel":(win_page==i) })
+ i+=1
+ d[p+"active"]=len(l)>1
+ d[p+"list"]=l
+ # Update v
+ v.update(d)
+ return ""
+
+#----------------------------------------------------------
+# QWeb Simple Controller
+#----------------------------------------------------------
+def qweb_control(self,jump='main',p=[]):
+ """ qweb_control(self,jump='main',p=[]):
+    A simple function to handle the controller part of your application. It
+    dispatches the control to the jump argument, while ensuring that prefix
+    functions have been called.
+
+    qweb_control replaces '/' with '_' and strips '_' from the jump argument.
+
+ name1
+ name1_name2
+ name1_name2_name3
+
+ """
+ jump=jump.replace('/','_').strip('_')
+ if not hasattr(self,jump):
+ return 0
+ done={}
+ todo=[]
+ while 1:
+ if jump!=None:
+ tmp=""
+ todo=[]
+ for i in jump.split("_"):
+ tmp+=i+"_";
+ if not done.has_key(tmp[:-1]):
+ todo.append(tmp[:-1])
+ jump=None
+ elif len(todo):
+ i=todo.pop(0)
+ done[i]=1
+ if hasattr(self,i):
+ f=getattr(self,i)
+ r=f(*p)
+ if isinstance(r,types.StringType):
+ jump=r
+ else:
+ break
+ return 1
+
+#----------------------------------------------------------
+# QWeb WSGI Request handler
+#----------------------------------------------------------
+class QWebSession(dict):
+ def __init__(self,environ,**kw):
+ dict.__init__(self)
+ default={
+ "path" : tempfile.gettempdir(),
+ "cookie_name" : "QWEBSID",
+ "cookie_lifetime" : 0,
+ "cookie_path" : '/',
+ "cookie_domain" : '',
+ "limit_cache" : 1,
+ "probability" : 0.01,
+ "maxlifetime" : 3600,
+ "disable" : 0,
+ }
+ for k,v in default.items():
+ setattr(self,'session_%s'%k,kw.get(k,v))
+ # Try to find session
+ self.session_found_cookie=0
+ self.session_found_url=0
+ self.session_found=0
+ self.session_orig=""
+ # Try cookie
+ c=Cookie.SimpleCookie()
+ c.load(environ.get('HTTP_COOKIE', ''))
+ if c.has_key(self.session_cookie_name):
+ sid=c[self.session_cookie_name].value[:64]
+ if re.match('[a-f0-9]+$',sid) and self.session_load(sid):
+ self.session_id=sid
+ self.session_found_cookie=1
+ self.session_found=1
+ # Try URL
+ if not self.session_found_cookie:
+ mo=re.search('&%s=([a-f0-9]+)'%self.session_cookie_name,environ.get('QUERY_STRING',''))
+ if mo and self.session_load(mo.group(1)):
+ self.session_id=mo.group(1)
+ self.session_found_url=1
+ self.session_found=1
+ # New session
+ if not self.session_found:
+ self.session_id='%032x'%random.randint(1,2**128)
+ self.session_trans_sid="&amp;%s=%s"%(self.session_cookie_name,self.session_id)
+ # Clean old session
+ if random.random() < self.session_probability:
+ self.session_clean()
+ def session_get_headers(self):
+ h=[]
+ if (not self.session_disable) and (len(self) or len(self.session_orig)):
+ self.session_save()
+ if not self.session_found_cookie:
+ c=Cookie.SimpleCookie()
+ c[self.session_cookie_name] = self.session_id
+ c[self.session_cookie_name]['path'] = self.session_cookie_path
+ if self.session_cookie_domain:
+ c[self.session_cookie_name]['domain'] = self.session_cookie_domain
+# if self.session_cookie_lifetime:
+# c[self.session_cookie_name]['expires'] = TODO date localtime or not, datetime.datetime(1970, 1, 1)
+ h.append(("Set-Cookie", c[self.session_cookie_name].OutputString()))
+ if self.session_limit_cache:
+ h.append(('Cache-Control','no-store, no-cache, must-revalidate, post-check=0, pre-check=0'))
+ h.append(('Expires','Thu, 19 Nov 1981 08:52:00 GMT'))
+ h.append(('Pragma','no-cache'))
+ return h
+ def session_load(self,sid):
+ fname=os.path.join(self.session_path,'qweb_sess_%s'%sid)
+ try:
+ orig=file(fname).read()
+ d=pickle.loads(orig)
+ except:
+ return
+ self.session_orig=orig
+ self.update(d)
+ return 1
+ def session_save(self):
+ if not os.path.isdir(self.session_path):
+ os.makedirs(self.session_path)
+ fname=os.path.join(self.session_path,'qweb_sess_%s'%self.session_id)
+ try:
+ oldtime=os.path.getmtime(fname)
+ except OSError,IOError:
+ oldtime=0
+ dump=pickle.dumps(self.copy())
+ if (dump != self.session_orig) or (time.time() > oldtime+self.session_maxlifetime/4):
+ tmpname=os.path.join(self.session_path,'qweb_sess_%s_%x'%(self.session_id,random.randint(1,2**32)))
+ f=file(tmpname,'wb')
+ f.write(dump)
+ f.close()
+ if sys.platform=='win32' and os.path.isfile(fname):
+ os.remove(fname)
+ os.rename(tmpname,fname)
+ def session_clean(self):
+ t=time.time()
+ try:
+ for i in [os.path.join(self.session_path,i) for i in os.listdir(self.session_path) if i.startswith('qweb_sess_')]:
+ if (t > os.path.getmtime(i)+self.session_maxlifetime):
+ os.unlink(i)
+ except OSError,IOError:
+ pass
+class QWebSessionMem(QWebSession):
+ def session_load(self,sid):
+ global _qweb_sessions
+ if not "_qweb_sessions" in globals():
+ _qweb_sessions={}
+ if _qweb_sessions.has_key(sid):
+ self.session_orig=_qweb_sessions[sid]
+ self.update(self.session_orig)
+ return 1
+ def session_save(self):
+ global _qweb_sessions
+ if not "_qweb_sessions" in globals():
+ _qweb_sessions={}
+ _qweb_sessions[self.session_id]=self.copy()
+class QWebSessionService:
+ def __init__(self, wsgiapp, url_rewrite=0):
+ self.wsgiapp=wsgiapp
+ self.url_rewrite_tags="a=href,area=href,frame=src,form=,fieldset="
+ def __call__(self, environ, start_response):
+ # TODO
+ # use QWebSession to provide environ["qweb.session"]
+ return self.wsgiapp(environ,start_response)
+class QWebDict(dict):
+ def __init__(self,*p):
+ dict.__init__(self,*p)
+ def __getitem__(self,key):
+ return self.get(key,"")
+ def int(self,key):
+ try:
+ return int(self.get(key,"0"))
+ except ValueError:
+ return 0
+class QWebListDict(dict):
+ def __init__(self,*p):
+ dict.__init__(self,*p)
+ def __getitem__(self,key):
+ return self.get(key,[])
+ def appendlist(self,key,val):
+ if self.has_key(key):
+ self[key].append(val)
+ else:
+ self[key]=[val]
+ def get_qwebdict(self):
+ d=QWebDict()
+ for k,v in self.items():
+ d[k]=v[-1]
+ return d
+class QWebRequest:
+ """QWebRequest a WSGI request handler.
+
+ QWebRequest is a WSGI request handler that feature GET, POST and POST
+ multipart methods, handles cookies and headers and provide a dict-like
+ SESSION Object (either on the filesystem or in memory).
+
+ It is constructed with the environ and start_response WSGI arguments:
+
+ req=qweb.QWebRequest(environ, start_response)
+
+    req has the following attributes:
+
+ req.environ standard WSGI dict (CGI and wsgi ones)
+
+ Some CGI vars as attributes from environ for convenience:
+
+ req.SCRIPT_NAME
+ req.PATH_INFO
+ req.REQUEST_URI
+
+ Some computed value (also for convenience)
+
+    req.FULL_URL full URL reconstructed (http://host/query)
+ req.FULL_PATH (URL path before ?querystring)
+
+ Dict constructed from querystring and POST datas, PHP-like.
+
+ req.GET contains GET vars
+ req.POST contains POST vars
+ req.REQUEST contains merge of GET and POST
+ req.FILES contains uploaded files
+ req.GET_LIST req.POST_LIST req.REQUEST_LIST req.FILES_LIST multiple arguments versions
+ req.debug() returns an HTML dump of those vars
+
+ A dict-like session object.
+
+ req.SESSION the session start when the dict is not empty.
+
+ Attribute for handling the response
+
+ req.response_headers dict-like to set headers
+ req.response_cookies a SimpleCookie to set cookies
+ req.response_status a string to set the status like '200 OK'
+
+ req.write() to write to the buffer
+
+    req itself is an iterable object with the buffer, it will also call
+ start_response automatically before returning anything via the iterator.
+
+ To make it short, it means that you may use
+
+ return req
+
+    at the end of your request handling to return the response to any WSGI
+ application server.
+ """
+ #
+ # This class contains part ripped from colubrid (with the permission of
+ # mitsuhiko) see http://wsgiarea.pocoo.org/colubrid/
+ #
+ # - the class HttpHeaders
+ # - the method load_post_data (tuned version)
+ #
+ class HttpHeaders(object):
+ def __init__(self):
+ self.data = [('Content-Type', 'text/html')]
+ def __setitem__(self, key, value):
+ self.set(key, value)
+ def __delitem__(self, key):
+ self.remove(key)
+ def __contains__(self, key):
+ key = key.lower()
+ for k, v in self.data:
+ if k.lower() == key:
+ return True
+ return False
+ def add(self, key, value):
+ self.data.append((key, value))
+ def remove(self, key, count=-1):
+ removed = 0
+ data = []
+ for _key, _value in self.data:
+ if _key.lower() != key.lower():
+ if count > -1:
+ if removed >= count:
+ break
+ else:
+ removed += 1
+ data.append((_key, _value))
+ self.data = data
+ def clear(self):
+ self.data = []
+ def set(self, key, value):
+ self.remove(key)
+ self.add(key, value)
+ def get(self, key=False, httpformat=False):
+ if not key:
+ result = self.data
+ else:
+ result = []
+ for _key, _value in self.data:
+ if _key.lower() == key.lower():
+ result.append((_key, _value))
+ if httpformat:
+ return '\n'.join(['%s: %s' % item for item in result])
+ return result
+ def load_post_data(self,environ,POST,FILES):
+ length = int(environ['CONTENT_LENGTH'])
+ DATA = environ['wsgi.input'].read(length)
+ if environ.get('CONTENT_TYPE', '').startswith('multipart'):
+ lines = ['Content-Type: %s' % environ.get('CONTENT_TYPE', '')]
+ for key, value in environ.items():
+ if key.startswith('HTTP_'):
+ lines.append('%s: %s' % (key, value))
+ raw = '\r\n'.join(lines) + '\r\n\r\n' + DATA
+ msg = email.message_from_string(raw)
+ for sub in msg.get_payload():
+ if not isinstance(sub, email.Message.Message):
+ continue
+ name_dict = cgi.parse_header(sub['Content-Disposition'])[1]
+ if 'filename' in name_dict:
+ # Nested MIME Messages are not supported'
+ if type([]) == type(sub.get_payload()):
+ continue
+ if not name_dict['filename'].strip():
+ continue
+ filename = name_dict['filename']
+ # why not keep all the filename? because IE always send 'C:\documents and settings\blub\blub.png'
+ filename = filename[filename.rfind('\\') + 1:]
+ if 'Content-Type' in sub:
+ content_type = sub['Content-Type']
+ else:
+ content_type = None
+ s = { "name":filename, "type":content_type, "data":sub.get_payload() }
+ FILES.appendlist(name_dict['name'], s)
+ else:
+ POST.appendlist(name_dict['name'], sub.get_payload())
+ else:
+ POST.update(cgi.parse_qs(DATA,keep_blank_values=1))
+ return DATA
+
+ def __init__(self,environ,start_response,session=QWebSession):
+ self.environ=environ
+ self.start_response=start_response
+ self.buffer=[]
+
+ self.SCRIPT_NAME = environ.get('SCRIPT_NAME', '')
+ self.PATH_INFO = environ.get('PATH_INFO', '')
+ # extensions:
+ self.FULL_URL = environ['FULL_URL'] = self.get_full_url(environ)
+ # REQUEST_URI is optional, fake it if absent
+ if not environ.has_key("REQUEST_URI"):
+ environ["REQUEST_URI"]=urllib.quote(self.SCRIPT_NAME+self.PATH_INFO)
+ if environ.get('QUERY_STRING'):
+ environ["REQUEST_URI"]+='?'+environ['QUERY_STRING']
+ self.REQUEST_URI = environ["REQUEST_URI"]
+ # full quote url path before the ?
+ self.FULL_PATH = environ['FULL_PATH'] = self.REQUEST_URI.split('?')[0]
+
+ self.request_cookies=Cookie.SimpleCookie()
+ self.request_cookies.load(environ.get('HTTP_COOKIE', ''))
+
+ self.response_started=False
+ self.response_gzencode=False
+ self.response_cookies=Cookie.SimpleCookie()
+ # to delete a cookie use: c[key]['expires'] = datetime.datetime(1970, 1, 1)
+ self.response_headers=self.HttpHeaders()
+ self.response_status="200 OK"
+
+ self.php=None
+ if self.environ.has_key("php"):
+ self.php=environ["php"]
+ self.SESSION=self.php._SESSION
+ self.GET=self.php._GET
+ self.POST=self.php._POST
+ self.REQUEST=self.php._ARG
+ self.FILES=self.php._FILES
+ else:
+ if isinstance(session,QWebSession):
+ self.SESSION=session
+ elif session:
+ self.SESSION=session(environ)
+ else:
+ self.SESSION=None
+ self.GET_LIST=QWebListDict(cgi.parse_qs(environ.get('QUERY_STRING', ''),keep_blank_values=1))
+ self.POST_LIST=QWebListDict()
+ self.FILES_LIST=QWebListDict()
+ self.REQUEST_LIST=QWebListDict(self.GET_LIST)
+ if environ['REQUEST_METHOD'] == 'POST':
+ self.DATA=self.load_post_data(environ,self.POST_LIST,self.FILES_LIST)
+ self.REQUEST_LIST.update(self.POST_LIST)
+ self.GET=self.GET_LIST.get_qwebdict()
+ self.POST=self.POST_LIST.get_qwebdict()
+ self.FILES=self.FILES_LIST.get_qwebdict()
+ self.REQUEST=self.REQUEST_LIST.get_qwebdict()
+ def get_full_url(environ):
+ # taken from PEP 333
+ if 'FULL_URL' in environ:
+ return environ['FULL_URL']
+ url = environ['wsgi.url_scheme']+'://'
+ if environ.get('HTTP_HOST'):
+ url += environ['HTTP_HOST']
+ else:
+ url += environ['SERVER_NAME']
+ if environ['wsgi.url_scheme'] == 'https':
+ if environ['SERVER_PORT'] != '443':
+ url += ':' + environ['SERVER_PORT']
+ else:
+ if environ['SERVER_PORT'] != '80':
+ url += ':' + environ['SERVER_PORT']
+ if environ.has_key('REQUEST_URI'):
+ url += environ['REQUEST_URI']
+ else:
+ url += urllib.quote(environ.get('SCRIPT_NAME', ''))
+ url += urllib.quote(environ.get('PATH_INFO', ''))
+ if environ.get('QUERY_STRING'):
+ url += '?' + environ['QUERY_STRING']
+ return url
+ get_full_url=staticmethod(get_full_url)
+ def save_files(self):
+ for k,v in self.FILES.items():
+ if not v.has_key("tmp_file"):
+ f=tempfile.NamedTemporaryFile()
+ f.write(v["data"])
+ f.flush()
+ v["tmp_file"]=f
+ v["tmp_name"]=f.name
+ def debug(self):
+ body=''
+ for name,d in [
+ ("GET",self.GET), ("POST",self.POST), ("REQUEST",self.REQUEST), ("FILES",self.FILES),
+ ("GET_LIST",self.GET_LIST), ("POST_LIST",self.POST_LIST), ("REQUEST_LIST",self.REQUEST_LIST), ("FILES_LIST",self.FILES_LIST),
+ ("SESSION",self.SESSION), ("environ",self.environ),
+ ]:
+ body+='<table border="1" width="100%" align="center">\n'
+ body+='<tr><th colspan="2" align="center">%s</th></tr>\n'%name
+ keys=d.keys()
+ keys.sort()
+ body+=''.join(['<tr><td>%s</td><td>%s</td></tr>\n'%(k,cgi.escape(repr(d[k]))) for k in keys])
+ body+='</table><br><br>\n\n'
+ return body
+ def write(self,s):
+ self.buffer.append(s)
+ def echo(self,*s):
+ self.buffer.extend([str(i) for i in s])
+ def response(self):
+ if not self.response_started:
+ if not self.php:
+ for k,v in self.FILES.items():
+ if v.has_key("tmp_file"):
+ try:
+ v["tmp_file"].close()
+ except OSError:
+ pass
+ if self.response_gzencode and self.environ.get('HTTP_ACCEPT_ENCODING','').find('gzip')!=-1:
+ zbuf=StringIO.StringIO()
+ zfile=gzip.GzipFile(mode='wb', fileobj=zbuf)
+ zfile.write(''.join(self.buffer))
+ zfile.close()
+ zbuf=zbuf.getvalue()
+ self.buffer=[zbuf]
+ self.response_headers['Content-Encoding']="gzip"
+ self.response_headers['Content-Length']=str(len(zbuf))
+ headers = self.response_headers.get()
+ if isinstance(self.SESSION, QWebSession):
+ headers.extend(self.SESSION.session_get_headers())
+ headers.extend([('Set-Cookie', self.response_cookies[i].OutputString()) for i in self.response_cookies])
+ self.start_response(self.response_status, headers)
+ self.response_started=True
+ return self.buffer
+ def __iter__(self):
+ return self.response().__iter__()
+ def http_redirect(self,url,permanent=1):
+ if permanent:
+ self.response_status="301 Moved Permanently"
+ else:
+ self.response_status="302 Found"
+ self.response_headers["Location"]=url
+ def http_404(self,msg="<h1>404 Not Found</h1>"):
+ self.response_status="404 Not Found"
+ if msg:
+ self.write(msg)
+ def http_download(self,fname,fstr,partial=0):
+# allow fstr to be a file-like object
+# if parital:
+# say accept ranages
+# parse range headers...
+# if range:
+# header("HTTP/1.1 206 Partial Content");
+# header("Content-Range: bytes $offset-".($fsize-1)."/".$fsize);
+# header("Content-Length: ".($fsize-$offset));
+# fseek($fd,$offset);
+# else:
+ self.response_headers["Content-Type"]="application/octet-stream"
+ self.response_headers["Content-Disposition"]="attachment; filename=\"%s\""%fname
+ self.response_headers["Content-Transfer-Encoding"]="binary"
+ self.response_headers["Content-Length"]="%d"%len(fstr)
+ self.write(fstr)
+
+#----------------------------------------------------------
+# QWeb WSGI HTTP Server to run any WSGI app
+# autorun, run an app as FCGI or CGI otherwise launch the server
+#----------------------------------------------------------
+class QWebWSGIHandler(BaseHTTPServer.BaseHTTPRequestHandler):
+ def log_message(self,*p):
+ if self.server.log:
+ return BaseHTTPServer.BaseHTTPRequestHandler.log_message(self,*p)
+ def address_string(self):
+ return self.client_address[0]
+ def start_response(self,status,headers):
+ l=status.split(' ',1)
+ self.send_response(int(l[0]),l[1])
+ ctype_sent=0
+ for i in headers:
+ if i[0].lower()=="content-type":
+ ctype_sent=1
+ self.send_header(*i)
+ if not ctype_sent:
+ self.send_header("Content-type", "text/html")
+ self.end_headers()
+ return self.write
+ def write(self,data):
+ try:
+ self.wfile.write(data)
+ except (socket.error, socket.timeout),e:
+ print e
+ def bufferon(self):
+ if not getattr(self,'wfile_buf',0):
+ self.wfile_buf=1
+ self.wfile_bak=self.wfile
+ self.wfile=StringIO.StringIO()
+ def bufferoff(self):
+ if self.wfile_buf:
+ buf=self.wfile
+ self.wfile=self.wfile_bak
+ self.write(buf.getvalue())
+ self.wfile_buf=0
+ def serve(self,type):
+ path_info, parameters, query = urlparse.urlparse(self.path)[2:5]
+ environ = {
+ 'wsgi.version': (1,0),
+ 'wsgi.url_scheme': 'http',
+ 'wsgi.input': self.rfile,
+ 'wsgi.errors': sys.stderr,
+ 'wsgi.multithread': 0,
+ 'wsgi.multiprocess': 0,
+ 'wsgi.run_once': 0,
+ 'REQUEST_METHOD': self.command,
+ 'SCRIPT_NAME': '',
+ 'QUERY_STRING': query,
+ 'CONTENT_TYPE': self.headers.get('Content-Type', ''),
+ 'CONTENT_LENGTH': self.headers.get('Content-Length', ''),
+ 'REMOTE_ADDR': self.client_address[0],
+ 'REMOTE_PORT': str(self.client_address[1]),
+ 'SERVER_NAME': self.server.server_address[0],
+ 'SERVER_PORT': str(self.server.server_address[1]),
+ 'SERVER_PROTOCOL': self.request_version,
+ # extention
+ 'FULL_PATH': self.path,
+ 'qweb.mode': 'standalone',
+ }
+ if path_info:
+ environ['PATH_INFO'] = urllib.unquote(path_info)
+ for key, value in self.headers.items():
+ environ['HTTP_' + key.upper().replace('-', '_')] = value
+ # Hack to avoid may TCP packets
+ self.bufferon()
+ appiter=self.server.wsgiapp(environ, self.start_response)
+ for data in appiter:
+ self.write(data)
+ self.bufferoff()
+ self.bufferoff()
+ def do_GET(self):
+ self.serve('GET')
+ def do_POST(self):
+ self.serve('GET')
+class QWebWSGIServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
+ """ QWebWSGIServer
+ qweb_wsgi_autorun(wsgiapp,ip='127.0.0.1',port=8080,threaded=1)
+ A WSGI HTTP server threaded or not and a function to automatically run your
+ app according to the environement (either standalone, CGI or FastCGI).
+
+ This feature is called QWeb autorun. If you want to To use it on your
+ application use the following lines at the end of the main application
+ python file:
+
+ if __name__ == '__main__':
+ qweb.qweb_wsgi_autorun(your_wsgi_app)
+
+ this function will select the approriate running mode according to the
+ calling environement (http-server, FastCGI or CGI).
+ """
+ def __init__(self, wsgiapp, ip, port, threaded=1, log=1):
+ BaseHTTPServer.HTTPServer.__init__(self, (ip, port), QWebWSGIHandler)
+ self.wsgiapp = wsgiapp
+ self.threaded = threaded
+ self.log = log
+ def process_request(self,*p):
+ if self.threaded:
+ return SocketServer.ThreadingMixIn.process_request(self,*p)
+ else:
+ return BaseHTTPServer.HTTPServer.process_request(self,*p)
+def qweb_wsgi_autorun(wsgiapp,ip='127.0.0.1',port=8080,threaded=1,log=1,callback_ready=None):
+ if sys.platform=='win32':
+ fcgi=0
+ else:
+ fcgi=1
+ sock = socket.fromfd(0, socket.AF_INET, socket.SOCK_STREAM)
+ try:
+ sock.getpeername()
+ except socket.error, e:
+ if e[0] == errno.ENOTSOCK:
+ fcgi=0
+ if fcgi or os.environ.has_key('REQUEST_METHOD'):
+ import fcgi
+ fcgi.WSGIServer(wsgiapp,multithreaded=False).run()
+ else:
+ if log:
+ print 'Serving on %s:%d'%(ip,port)
+ s=QWebWSGIServer(wsgiapp,ip=ip,port=port,threaded=threaded,log=log)
+ if callback_ready:
+ callback_ready()
+ try:
+ s.serve_forever()
+ except KeyboardInterrupt,e:
+ sys.excepthook(*sys.exc_info())
+
+#----------------------------------------------------------
+# Qweb Documentation
+#----------------------------------------------------------
+def qweb_doc():
+ body=__doc__
+ for i in [QWebXml ,QWebHtml ,QWebForm ,QWebURL ,qweb_control ,QWebRequest ,QWebSession ,QWebWSGIServer ,qweb_wsgi_autorun]:
+ n=i.__name__
+ d=i.__doc__
+ body+='\n\n%s\n%s\n\n%s'%(n,'-'*len(n),d)
+ return body
+
+ print qweb_doc()
+
+#
diff --git a/tools/ajaxterm/sarissa.js b/tools/ajaxterm/sarissa.js
new file mode 100644
index 000000000..6d13aa2e2
--- /dev/null
+++ b/tools/ajaxterm/sarissa.js
@@ -0,0 +1,647 @@
+/**
+ * ====================================================================
+ * About
+ * ====================================================================
+ * Sarissa is an ECMAScript library acting as a cross-browser wrapper for native XML APIs.
+ * The library supports Gecko based browsers like Mozilla and Firefox,
+ * Internet Explorer (5.5+ with MSXML3.0+), Konqueror, Safari and a little of Opera
+ * @version 0.9.6.1
+ * @author: Manos Batsis, mailto: mbatsis at users full stop sourceforge full stop net
+ * ====================================================================
+ * Licence
+ * ====================================================================
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 or
+ * the GNU Lesser General Public License version 2.1 as published by
+ * the Free Software Foundation (your choice between the two).
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License or GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * or GNU Lesser General Public License along with this program; if not,
+ * write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * or visit http://www.gnu.org
+ *
+ */
+/**
+ * <p>Sarissa is a utility class. Provides "static" methods for DOMDocument and
+ * XMLHTTP objects, DOM Node serializatrion to XML strings and other goodies.</p>
+ * @constructor
+ */
+function Sarissa(){};
+/** @private */
+Sarissa.PARSED_OK = "Document contains no parsing errors";
+/**
+ * Tells you whether transformNode and transformNodeToObject are available. This functionality
+ * is contained in sarissa_ieemu_xslt.js and is deprecated. If you want to control XSLT transformations
+ * use the XSLTProcessor
+ * @deprecated
+ * @type boolean
+ */
+Sarissa.IS_ENABLED_TRANSFORM_NODE = false;
+/**
+ * tells you whether XMLHttpRequest (or equivalent) is available
+ * @type boolean
+ */
+Sarissa.IS_ENABLED_XMLHTTP = false;
+/**
+ * tells you whether selectNodes/selectSingleNode is available
+ * @type boolean
+ */
+Sarissa.IS_ENABLED_SELECT_NODES = false;
+var _sarissa_iNsCounter = 0;
+var _SARISSA_IEPREFIX4XSLPARAM = "";
+var _SARISSA_HAS_DOM_IMPLEMENTATION = document.implementation && true;
+var _SARISSA_HAS_DOM_CREATE_DOCUMENT = _SARISSA_HAS_DOM_IMPLEMENTATION && document.implementation.createDocument;
+var _SARISSA_HAS_DOM_FEATURE = _SARISSA_HAS_DOM_IMPLEMENTATION && document.implementation.hasFeature;
+var _SARISSA_IS_MOZ = _SARISSA_HAS_DOM_CREATE_DOCUMENT && _SARISSA_HAS_DOM_FEATURE;
+var _SARISSA_IS_SAFARI = (navigator.userAgent && navigator.vendor && (navigator.userAgent.toLowerCase().indexOf("applewebkit") != -1 || navigator.vendor.indexOf("Apple") != -1));
+var _SARISSA_IS_IE = document.all && window.ActiveXObject && navigator.userAgent.toLowerCase().indexOf("msie") > -1 && navigator.userAgent.toLowerCase().indexOf("opera") == -1;
+if(!window.Node || !window.Node.ELEMENT_NODE){
+ var Node = {ELEMENT_NODE: 1, ATTRIBUTE_NODE: 2, TEXT_NODE: 3, CDATA_SECTION_NODE: 4, ENTITY_REFERENCE_NODE: 5, ENTITY_NODE: 6, PROCESSING_INSTRUCTION_NODE: 7, COMMENT_NODE: 8, DOCUMENT_NODE: 9, DOCUMENT_TYPE_NODE: 10, DOCUMENT_FRAGMENT_NODE: 11, NOTATION_NODE: 12};
+};
+
+// IE initialization
+if(_SARISSA_IS_IE){
+ // for XSLT parameter names, prefix needed by IE
+ _SARISSA_IEPREFIX4XSLPARAM = "xsl:";
+ // used to store the most recent ProgID available out of the above
+ var _SARISSA_DOM_PROGID = "";
+ var _SARISSA_XMLHTTP_PROGID = "";
+ /**
+ * Called when the Sarissa_xx.js file is parsed, to pick most recent
+ * ProgIDs for IE, then gets destroyed.
+ * @param idList an array of MSXML PROGIDs from which the most recent will be picked for a given object
+ * @param enabledList an array of arrays where each array has two items; the index of the PROGID for which a certain feature is enabled
+ */
+ pickRecentProgID = function (idList, enabledList){
+ // found progID flag
+ var bFound = false;
+ for(var i=0; i < idList.length && !bFound; i++){
+ try{
+ var oDoc = new ActiveXObject(idList[i]);
+ o2Store = idList[i];
+ bFound = true;
+ for(var j=0;j<enabledList.length;j++)
+ if(i <= enabledList[j][1])
+ Sarissa["IS_ENABLED_"+enabledList[j][0]] = true;
+ }catch (objException){
+ // trap; try next progID
+ };
+ };
+ if (!bFound)
+ throw "Could not retreive a valid progID of Class: " + idList[idList.length-1]+". (original exception: "+e+")";
+ idList = null;
+ return o2Store;
+ };
+ // pick best available MSXML progIDs
+ _SARISSA_DOM_PROGID = pickRecentProgID(["Msxml2.DOMDocument.5.0", "Msxml2.DOMDocument.4.0", "Msxml2.DOMDocument.3.0", "MSXML2.DOMDocument", "MSXML.DOMDocument", "Microsoft.XMLDOM"], [["SELECT_NODES", 2],["TRANSFORM_NODE", 2]]);
+ _SARISSA_XMLHTTP_PROGID = pickRecentProgID(["Msxml2.XMLHTTP.5.0", "Msxml2.XMLHTTP.4.0", "MSXML2.XMLHTTP.3.0", "MSXML2.XMLHTTP", "Microsoft.XMLHTTP"], [["XMLHTTP", 4]]);
+ _SARISSA_THREADEDDOM_PROGID = pickRecentProgID(["Msxml2.FreeThreadedDOMDocument.5.0", "MSXML2.FreeThreadedDOMDocument.4.0", "MSXML2.FreeThreadedDOMDocument.3.0"]);
+ _SARISSA_XSLTEMPLATE_PROGID = pickRecentProgID(["Msxml2.XSLTemplate.5.0", "Msxml2.XSLTemplate.4.0", "MSXML2.XSLTemplate.3.0"], [["XSLTPROC", 2]]);
+ // we dont need this anymore
+ pickRecentProgID = null;
+ //============================================
+ // Factory methods (IE)
+ //============================================
+ // see non-IE version
+ Sarissa.getDomDocument = function(sUri, sName){
+ var oDoc = new ActiveXObject(_SARISSA_DOM_PROGID);
+ // if a root tag name was provided, we need to load it in the DOM
+ // object
+ if (sName){
+ // if needed, create an artifical namespace prefix the way Moz
+ // does
+ if (sUri){
+ oDoc.loadXML("<a" + _sarissa_iNsCounter + ":" + sName + " xmlns:a" + _sarissa_iNsCounter + "=\"" + sUri + "\" />");
+ // don't use the same prefix again
+ ++_sarissa_iNsCounter;
+ }
+ else
+ oDoc.loadXML("<" + sName + "/>");
+ };
+ return oDoc;
+ };
+ // see non-IE version
+ Sarissa.getParseErrorText = function (oDoc) {
+ var parseErrorText = Sarissa.PARSED_OK;
+ if(oDoc.parseError != 0){
+ parseErrorText = "XML Parsing Error: " + oDoc.parseError.reason +
+ "\nLocation: " + oDoc.parseError.url +
+ "\nLine Number " + oDoc.parseError.line + ", Column " +
+ oDoc.parseError.linepos +
+ ":\n" + oDoc.parseError.srcText +
+ "\n";
+ for(var i = 0; i < oDoc.parseError.linepos;i++){
+ parseErrorText += "-";
+ };
+ parseErrorText += "^\n";
+ };
+ return parseErrorText;
+ };
+ // see non-IE version
+ Sarissa.setXpathNamespaces = function(oDoc, sNsSet) {
+ oDoc.setProperty("SelectionLanguage", "XPath");
+ oDoc.setProperty("SelectionNamespaces", sNsSet);
+ };
+ /**
+ * Basic implementation of Mozilla's XSLTProcessor for IE.
+ * Reuses the same XSLT stylesheet for multiple transforms
+ * @constructor
+ */
+ XSLTProcessor = function(){
+ this.template = new ActiveXObject(_SARISSA_XSLTEMPLATE_PROGID);
+ this.processor = null;
+ };
+ /**
+ * Impoprts the given XSLT DOM and compiles it to a reusable transform
+ * @argument xslDoc The XSLT DOMDocument to import
+ */
+ XSLTProcessor.prototype.importStylesheet = function(xslDoc){
+ // convert stylesheet to free threaded
+ var converted = new ActiveXObject(_SARISSA_THREADEDDOM_PROGID);
+ converted.loadXML(xslDoc.xml);
+ this.template.stylesheet = converted;
+ this.processor = this.template.createProcessor();
+ // (re)set default param values
+ this.paramsSet = new Array();
+ };
+ /**
+ * Transform the given XML DOM
+ * @argument sourceDoc The XML DOMDocument to transform
+ * @return The transformation result as a DOM Document
+ */
+ XSLTProcessor.prototype.transformToDocument = function(sourceDoc){
+ this.processor.input = sourceDoc;
+ var outDoc = new ActiveXObject(_SARISSA_DOM_PROGID);
+ this.processor.output = outDoc;
+ this.processor.transform();
+ return outDoc;
+ };
+ /**
+ * Set global XSLT parameter of the imported stylesheet
+ * @argument nsURI The parameter namespace URI
+ * @argument name The parameter base name
+ * @argument value The new parameter value
+ */
+ XSLTProcessor.prototype.setParameter = function(nsURI, name, value){
+ /* nsURI is optional but cannot be null */
+ if(nsURI){
+ this.processor.addParameter(name, value, nsURI);
+ }else{
+ this.processor.addParameter(name, value);
+ };
+ /* update updated params for getParameter */
+ if(!this.paramsSet[""+nsURI]){
+ this.paramsSet[""+nsURI] = new Array();
+ };
+ this.paramsSet[""+nsURI][name] = value;
+ };
+ /**
+ * Gets a parameter if previously set by setParameter. Returns null
+ * otherwise
+ * @argument name The parameter base name
+ * @argument value The new parameter value
+ * @return The parameter value if reviously set by setParameter, null otherwise
+ */
+ XSLTProcessor.prototype.getParameter = function(nsURI, name){
+ nsURI = nsURI || "";
+ if(nsURI in this.paramsSet && name in this.paramsSet[nsURI]){
+ return this.paramsSet[nsURI][name];
+ }else{
+ return null;
+ };
+ };
+}
+else{ /* end IE initialization, try to deal with real browsers now ;-) */
+ if(_SARISSA_HAS_DOM_CREATE_DOCUMENT){
+ /**
+ * <p>Ensures the document was loaded correctly, otherwise sets the
+ * parseError to -1 to indicate something went wrong. Internal use</p>
+ * @private
+ */
+ Sarissa.__handleLoad__ = function(oDoc){
+ if (!oDoc.documentElement || oDoc.documentElement.tagName == "parsererror")
+ oDoc.parseError = -1;
+ Sarissa.__setReadyState__(oDoc, 4);
+ };
+ /**
+ * <p>Attached by an event handler to the load event. Internal use.</p>
+ * @private
+ */
+ _sarissa_XMLDocument_onload = function(){
+ Sarissa.__handleLoad__(this);
+ };
+ /**
+ * <p>Sets the readyState property of the given DOM Document object.
+ * Internal use.</p>
+ * @private
+ * @argument oDoc the DOM Document object to fire the
+ * readystatechange event
+ * @argument iReadyState the number to change the readystate property to
+ */
+ Sarissa.__setReadyState__ = function(oDoc, iReadyState){
+ oDoc.readyState = iReadyState;
+ if (oDoc.onreadystatechange != null && typeof oDoc.onreadystatechange == "function")
+ oDoc.onreadystatechange();
+ };
+ Sarissa.getDomDocument = function(sUri, sName){
+ var oDoc = document.implementation.createDocument(sUri?sUri:"", sName?sName:"", null);
+ oDoc.addEventListener("load", _sarissa_XMLDocument_onload, false);
+ return oDoc;
+ };
+ if(false && window.XMLDocument){
+ /**
+ * <p>Emulate IE's onreadystatechange attribute</p>
+ */
+ XMLDocument.prototype.onreadystatechange = null;
+ /**
+ * <p>Emulates IE's readyState property, which always gives an integer from 0 to 4:</p>
+ * <ul><li>1 == LOADING,</li>
+ * <li>2 == LOADED,</li>
+ * <li>3 == INTERACTIVE,</li>
+ * <li>4 == COMPLETED</li></ul>
+ */
+ XMLDocument.prototype.readyState = 0;
+ /**
+ * <p>Emulate IE's parseError attribute</p>
+ */
+ XMLDocument.prototype.parseError = 0;
+
+ // NOTE: setting async to false will only work with documents
+ // called over HTTP (meaning a server), not the local file system,
+ // unless you are using Moz 1.4+.
+ // BTW the try>catch block is for 1.4; I haven't found a way to check if
+ // the property is implemented without
+ // causing an error and I dont want to use user agent stuff for that...
+ var _SARISSA_SYNC_NON_IMPLEMENTED = false;// ("async" in XMLDocument.prototype) ? false: true;
+ /**
+ * <p>Keeps a handle to the original load() method. Internal use and only
+ * if Mozilla version is lower than 1.4</p>
+ * @private
+ */
+ XMLDocument.prototype._sarissa_load = XMLDocument.prototype.load;
+
+ /**
+ * <p>Overrides the original load method to provide synchronous loading for
+ * Mozilla versions prior to 1.4, using an XMLHttpRequest object (if
+ * async is set to false)</p>
+ * @returns the DOM Object as it was before the load() call (may be empty)
+ */
+ XMLDocument.prototype.load = function(sURI) {
+ var oDoc = document.implementation.createDocument("", "", null);
+ Sarissa.copyChildNodes(this, oDoc);
+ this.parseError = 0;
+ Sarissa.__setReadyState__(this, 1);
+ try {
+ if(this.async == false && _SARISSA_SYNC_NON_IMPLEMENTED) {
+ var tmp = new XMLHttpRequest();
+ tmp.open("GET", sURI, false);
+ tmp.send(null);
+ Sarissa.__setReadyState__(this, 2);
+ Sarissa.copyChildNodes(tmp.responseXML, this);
+ Sarissa.__setReadyState__(this, 3);
+ }
+ else {
+ this._sarissa_load(sURI);
+ };
+ }
+ catch (objException) {
+ this.parseError = -1;
+ }
+ finally {
+ if(this.async == false){
+ Sarissa.__handleLoad__(this);
+ };
+ };
+ return oDoc;
+ };
+
+
+ }//if(window.XMLDocument)
+ else if(document.implementation && document.implementation.hasFeature && document.implementation.hasFeature('LS', '3.0')){
+ Document.prototype.async = true;
+ Document.prototype.onreadystatechange = null;
+ Document.prototype.parseError = 0;
+ Document.prototype.load = function(sURI) {
+ var parser = document.implementation.createLSParser(this.async ? document.implementation.MODE_ASYNCHRONOUS : document.implementation.MODE_SYNCHRONOUS, null);
+ if(this.async){
+ var self = this;
+ parser.addEventListener("load",
+ function(e) {
+ self.readyState = 4;
+ Sarissa.copyChildNodes(e.newDocument, self.documentElement, false);
+ self.onreadystatechange.call();
+ },
+ false);
+ };
+ try {
+ var oDoc = parser.parseURI(sURI);
+ }
+ catch(e){
+ this.parseError = -1;
+ };
+ if(!this.async)
+ Sarissa.copyChildNodes(oDoc, this.documentElement, false);
+ return oDoc;
+ };
+ /**
+ * <p>Factory method to obtain a new DOM Document object</p>
+ * @argument sUri the namespace of the root node (if any)
+ * @argument sUri the local name of the root node (if any)
+ * @returns a new DOM Document
+ */
+ Sarissa.getDomDocument = function(sUri, sName){
+ return document.implementation.createDocument(sUri?sUri:"", sName?sName:"", null);
+ };
+ };
+ };//if(_SARISSA_HAS_DOM_CREATE_DOCUMENT)
+};
+//==========================================
+// Common stuff
+//==========================================
+if(!window.DOMParser){
+ /*
+ * DOMParser is a utility class, used to construct DOMDocuments from XML strings
+ * @constructor
+ */
+ DOMParser = function() {
+ };
+ if(_SARISSA_IS_SAFARI){
+ /**
+ * Construct a new DOM Document from the given XMLstring
+ * @param sXml the given XML string
+ * @param contentType the content type of the document the given string represents (one of text/xml, application/xml, application/xhtml+xml).
+ * @return a new DOM Document from the given XML string
+ */
+ DOMParser.prototype.parseFromString = function(sXml, contentType){
+ if(contentType.toLowerCase() != "application/xml"){
+ throw "Cannot handle content type: \"" + contentType + "\"";
+ };
+ var xmlhttp = new XMLHttpRequest();
+ xmlhttp.open("GET", "data:text/xml;charset=utf-8," + encodeURIComponent(str), false);
+ xmlhttp.send(null);
+ return xmlhttp.responseXML;
+ };
+ }else if(Sarissa.getDomDocument && Sarissa.getDomDocument() && "loadXML" in Sarissa.getDomDocument()){
+ DOMParser.prototype.parseFromString = function(sXml, contentType){
+ var doc = Sarissa.getDomDocument();
+ doc.loadXML(sXml);
+ return doc;
+ };
+ };
+};
+
+if(window.XMLHttpRequest){
+ Sarissa.IS_ENABLED_XMLHTTP = true;
+}
+else if(_SARISSA_IS_IE){
+ /**
+ * Emulate XMLHttpRequest
+ * @constructor
+ */
+ XMLHttpRequest = function() {
+ return new ActiveXObject(_SARISSA_XMLHTTP_PROGID);
+ };
+ Sarissa.IS_ENABLED_XMLHTTP = true;
+};
+
+if(!window.document.importNode && _SARISSA_IS_IE){
+ try{
+ /**
+ * Implements importNode for the current window document in IE using innerHTML.
+ * Testing showed that DOM was multiple times slower than innerHTML for this,
+ * sorry folks. If you encounter trouble (who knows what IE does behind innerHTML)
+ * please gimme a call.
+ * @param oNode the Node to import
+ * @param bChildren whether to include the children of oNode
+ * @returns the imported node for further use
+ */
+ window.document.importNode = function(oNode, bChildren){
+ var importNode = document.createElement("div");
+ if(bChildren)
+ importNode.innerHTML = Sarissa.serialize(oNode);
+ else
+ importNode.innerHTML = Sarissa.serialize(oNode.cloneNode(false));
+ return importNode.firstChild;
+ };
+ }catch(e){};
+};
+if(!Sarissa.getParseErrorText){
+ /**
+ * <p>Returns a human readable description of the parsing error. Usefull
+ * for debugging. Tip: append the returned error string in a &lt;pre&gt;
+ * element if you want to render it.</p>
+ * <p>Many thanks to Christian Stocker for the initial patch.</p>
+ * @argument oDoc The target DOM document
+ * @returns The parsing error description of the target Document in
+ * human readable form (preformated text)
+ */
+ Sarissa.getParseErrorText = function (oDoc){
+ var parseErrorText = Sarissa.PARSED_OK;
+ if(oDoc && oDoc.parseError && oDoc.parseError != 0){
+ /*moz*/
+ if(oDoc.documentElement.tagName == "parsererror"){
+ parseErrorText = oDoc.documentElement.firstChild.data;
+ parseErrorText += "\n" + oDoc.documentElement.firstChild.nextSibling.firstChild.data;
+ }/*konq*/
+ else{
+ parseErrorText = Sarissa.getText(oDoc.documentElement);/*.getElementsByTagName("h1")[0], false) + "\n";
+ parseErrorText += Sarissa.getText(oDoc.documentElement.getElementsByTagName("body")[0], false) + "\n";
+ parseErrorText += Sarissa.getText(oDoc.documentElement.getElementsByTagName("pre")[0], false);*/
+ };
+ };
+ return parseErrorText;
+ };
+};
+Sarissa.getText = function(oNode, deep){
+ var s = "";
+ var nodes = oNode.childNodes;
+ for(var i=0; i < nodes.length; i++){
+ var node = nodes[i];
+ var nodeType = node.nodeType;
+ if(nodeType == Node.TEXT_NODE || nodeType == Node.CDATA_SECTION_NODE){
+ s += node.data;
+ }else if(deep == true
+ && (nodeType == Node.ELEMENT_NODE
+ || nodeType == Node.DOCUMENT_NODE
+ || nodeType == Node.DOCUMENT_FRAGMENT_NODE)){
+ s += Sarissa.getText(node, true);
+ };
+ };
+ return s;
+};
+if(window.XMLSerializer){
+ /**
+ * <p>Factory method to obtain the serialization of a DOM Node</p>
+ * @returns the serialized Node as an XML string
+ */
+ Sarissa.serialize = function(oDoc){
+ var s = null;
+ if(oDoc){
+ s = oDoc.innerHTML?oDoc.innerHTML:(new XMLSerializer()).serializeToString(oDoc);
+ };
+ return s;
+ };
+}else{
+ if(Sarissa.getDomDocument && (Sarissa.getDomDocument("","foo", null)).xml){
+ // see non-IE version
+ Sarissa.serialize = function(oDoc) {
+ var s = null;
+ if(oDoc){
+ s = oDoc.innerHTML?oDoc.innerHTML:oDoc.xml;
+ };
+ return s;
+ };
+ /**
+ * Utility class to serialize DOM Node objects to XML strings
+ * @constructor
+ */
+ XMLSerializer = function(){};
+ /**
+ * Serialize the given DOM Node to an XML string
+ * @param oNode the DOM Node to serialize
+ */
+ XMLSerializer.prototype.serializeToString = function(oNode) {
+ return oNode.xml;
+ };
+ };
+};
+
+/**
+ * strips tags from a markup string
+ */
+Sarissa.stripTags = function (s) {
+ return s.replace(/<[^>]+>/g,"");
+};
+/**
+ * <p>Deletes all child nodes of the given node</p>
+ * @argument oNode the Node to empty
+ */
+Sarissa.clearChildNodes = function(oNode) {
+ // need to check for firstChild due to opera 8 bug with hasChildNodes
+ while(oNode.firstChild){
+ oNode.removeChild(oNode.firstChild);
+ };
+};
+/**
+ * <p> Copies the childNodes of nodeFrom to nodeTo</p>
+ * <p> <b>Note:</b> The second object's original content is deleted before
+ * the copy operation, unless you supply a true third parameter</p>
+ * @argument nodeFrom the Node to copy the childNodes from
+ * @argument nodeTo the Node to copy the childNodes to
+ * @argument bPreserveExisting whether to preserve the original content of nodeTo, default is false
+ */
+Sarissa.copyChildNodes = function(nodeFrom, nodeTo, bPreserveExisting) {
+ if((!nodeFrom) || (!nodeTo)){
+ throw "Both source and destination nodes must be provided";
+ };
+ if(!bPreserveExisting){
+ Sarissa.clearChildNodes(nodeTo);
+ };
+ var ownerDoc = nodeTo.nodeType == Node.DOCUMENT_NODE ? nodeTo : nodeTo.ownerDocument;
+ var nodes = nodeFrom.childNodes;
+ if(ownerDoc.importNode && (!_SARISSA_IS_IE)) {
+ for(var i=0;i < nodes.length;i++) {
+ nodeTo.appendChild(ownerDoc.importNode(nodes[i], true));
+ };
+ }
+ else{
+ for(var i=0;i < nodes.length;i++) {
+ nodeTo.appendChild(nodes[i].cloneNode(true));
+ };
+ };
+};
+
+/**
+ * <p> Moves the childNodes of nodeFrom to nodeTo</p>
+ * <p> <b>Note:</b> The second object's original content is deleted before
+ * the move operation, unless you supply a true third parameter</p>
+ * @argument nodeFrom the Node to copy the childNodes from
+ * @argument nodeTo the Node to copy the childNodes to
+ * @argument bPreserveExisting whether to preserve the original content of nodeTo, default is
+ */
+Sarissa.moveChildNodes = function(nodeFrom, nodeTo, bPreserveExisting) {
+ if((!nodeFrom) || (!nodeTo)){
+ throw "Both source and destination nodes must be provided";
+ };
+ if(!bPreserveExisting){
+ Sarissa.clearChildNodes(nodeTo);
+ };
+ var nodes = nodeFrom.childNodes;
+ // if within the same doc, just move, else copy and delete
+ if(nodeFrom.ownerDocument == nodeTo.ownerDocument){
+ while(nodeFrom.firstChild){
+ nodeTo.appendChild(nodeFrom.firstChild);
+ };
+ }else{
+ var ownerDoc = nodeTo.nodeType == Node.DOCUMENT_NODE ? nodeTo : nodeTo.ownerDocument;
+ if(ownerDoc.importNode && (!_SARISSA_IS_IE)) {
+ for(var i=0;i < nodes.length;i++) {
+ nodeTo.appendChild(ownerDoc.importNode(nodes[i], true));
+ };
+ }else{
+ for(var i=0;i < nodes.length;i++) {
+ nodeTo.appendChild(nodes[i].cloneNode(true));
+ };
+ };
+ Sarissa.clearChildNodes(nodeFrom);
+ };
+};
+
+/**
+ * <p>Serialize any object to an XML string. All properties are serialized using the property name
+ * as the XML element name. Array elements are rendered as <code>array-item</code> elements,
+ * using their index/key as the value of the <code>key</code> attribute.</p>
+ * @argument anyObject the object to serialize
+ * @argument objectName a name for that object
+ * @return the XML serializationj of the given object as a string
+ */
+Sarissa.xmlize = function(anyObject, objectName, indentSpace){
+ indentSpace = indentSpace?indentSpace:'';
+ var s = indentSpace + '<' + objectName + '>';
+ var isLeaf = false;
+ if(!(anyObject instanceof Object) || anyObject instanceof Number || anyObject instanceof String
+ || anyObject instanceof Boolean || anyObject instanceof Date){
+ s += Sarissa.escape(""+anyObject);
+ isLeaf = true;
+ }else{
+ s += "\n";
+ var itemKey = '';
+ var isArrayItem = anyObject instanceof Array;
+ for(var name in anyObject){
+ s += Sarissa.xmlize(anyObject[name], (isArrayItem?"array-item key=\""+name+"\"":name), indentSpace + " ");
+ };
+ s += indentSpace;
+ };
+ return s += (objectName.indexOf(' ')!=-1?"</array-item>\n":"</" + objectName + ">\n");
+};
+
+/**
+ * Escape the given string chacters that correspond to the five predefined XML entities
+ * @param sXml the string to escape
+ */
+Sarissa.escape = function(sXml){
+ return sXml.replace(/&/g, "&amp;")
+ .replace(/</g, "&lt;")
+ .replace(/>/g, "&gt;")
+ .replace(/"/g, "&quot;")
+ .replace(/'/g, "&apos;");
+};
+
+/**
+ * Unescape the given string. This turns the occurences of the predefined XML
+ * entities to become the characters they represent correspond to the five predefined XML entities
+ * @param sXml the string to unescape
+ */
+Sarissa.unescape = function(sXml){
+ return sXml.replace(/&apos;/g,"'")
+ .replace(/&quot;/g,"\"")
+ .replace(/&gt;/g,">")
+ .replace(/&lt;/g,"<")
+ .replace(/&amp;/g,"&");
+};
+// EOF
diff --git a/tools/ajaxterm/sarissa_dhtml.js b/tools/ajaxterm/sarissa_dhtml.js
new file mode 100644
index 000000000..2d85c817e
--- /dev/null
+++ b/tools/ajaxterm/sarissa_dhtml.js
@@ -0,0 +1,105 @@
+/**
+ * ====================================================================
+ * About
+ * ====================================================================
+ * Sarissa cross browser XML library - AJAX module
+ * @version 0.9.6.1
+ * @author: Copyright Manos Batsis, mailto: mbatsis at users full stop sourceforge full stop net
+ *
+ * This module contains some convenient AJAX tricks based on Sarissa
+ *
+ * ====================================================================
+ * Licence
+ * ====================================================================
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 or
+ * the GNU Lesser General Public License version 2.1 as published by
+ * the Free Software Foundation (your choice between the two).
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License or GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * or GNU Lesser General Public License along with this program; if not,
+ * write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * or visit http://www.gnu.org
+ *
+ */
+/**
+ * Update an element with response of a GET request on the given URL.
+ * @addon
+ * @param sFromUrl the URL to make the request to
+ * @param oTargetElement the element to update
+ * @param xsltproc (optional) the transformer to use on the returned
+ * content before updating the target element with it
+ */
+Sarissa.updateContentFromURI = function(sFromUrl, oTargetElement, xsltproc) {
+ try{
+ oTargetElement.style.cursor = "wait";
+ var xmlhttp = new XMLHttpRequest();
+ xmlhttp.open("GET", sFromUrl);
+ function sarissa_dhtml_loadHandler() {
+ if (xmlhttp.readyState == 4) {
+ oTargetElement.style.cursor = "auto";
+ Sarissa.updateContentFromNode(xmlhttp.responseXML, oTargetElement, xsltproc);
+ };
+ };
+ xmlhttp.onreadystatechange = sarissa_dhtml_loadHandler;
+ xmlhttp.send(null);
+ oTargetElement.style.cursor = "auto";
+ }
+ catch(e){
+ oTargetElement.style.cursor = "auto";
+ throw e;
+ };
+};
+
+/**
+ * Update an element's content with the given DOM node.
+ * @addon
+ * @param oNode the DOM node whose content is rendered into the target element
+ * @param oTargetElement the element to update
+ * @param xsltproc (optional) the transformer to use on the given
+ * DOM node before updating the target element with it
+ */
+Sarissa.updateContentFromNode = function(oNode, oTargetElement, xsltproc) {
+ try {
+ oTargetElement.style.cursor = "wait";
+ Sarissa.clearChildNodes(oTargetElement);
+ // check for parsing errors
+ var ownerDoc = oNode.nodeType == Node.DOCUMENT_NODE?oNode:oNode.ownerDocument;
+ if(ownerDoc.parseError && ownerDoc.parseError != 0) {
+ var pre = document.createElement("pre");
+ pre.appendChild(document.createTextNode(Sarissa.getParseErrorText(ownerDoc)));
+ oTargetElement.appendChild(pre);
+ }
+ else {
+ // transform if appropriate
+ if(xsltproc) {
+ oNode = xsltproc.transformToDocument(oNode);
+ };
+ // be smart, maybe the user wants to display the source instead
+ if(oTargetElement.tagName.toLowerCase == "textarea" || oTargetElement.tagName.toLowerCase == "input") {
+ oTargetElement.value = Sarissa.serialize(oNode);
+ }
+ else {
+ // ok that was not smart; it was paranoid. Keep up the good work by trying to use DOM instead of innerHTML
+ if(oNode.nodeType == Node.DOCUMENT_NODE || oNode.ownerDocument.documentElement == oNode) {
+ oTargetElement.innerHTML = Sarissa.serialize(oNode);
+ }
+ else{
+ oTargetElement.appendChild(oTargetElement.ownerDocument.importNode(oNode, true));
+ };
+ };
+ };
+ }
+ catch(e) {
+ throw e;
+ }
+ finally{
+ oTargetElement.style.cursor = "auto";
+ };
+};
+
diff --git a/tools/euca-get-ajax-console b/tools/euca-get-ajax-console
new file mode 100755
index 000000000..37060e74f
--- /dev/null
+++ b/tools/euca-get-ajax-console
@@ -0,0 +1,164 @@
+#!/usr/bin/env python
+# pylint: disable-msg=C0103
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Euca add-on to use ajax console"""
+
+import getopt
+import os
+import sys
+
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+# Resolve the grandparent directory of this script (the checkout root).
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+                                                os.pardir,
+                                                os.pardir))
+# Prefer the in-tree nova package over any system-installed copy.
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+    sys.path.insert(0, possible_topdir)
+
+import boto
+import nova
+from boto.ec2.connection import EC2Connection
+from euca2ools import Euca2ool, InstanceValidationError, Util, ConnectionFailed
+
+usage_string = """
+Retrieves a url to an ajax console terminal
+
+euca-get-ajax-console [-h, --help] [--version] [--debug] instance_id
+
+REQUIRED PARAMETERS
+
+instance_id: unique identifier for the instance to show the console output for.
+
+OPTIONAL PARAMETERS
+
+"""
+
+
+# This class extends boto to add AjaxConsole functionality
+class NovaEC2Connection(EC2Connection):
+
+ def get_ajax_console(self, instance_id):
+ """
+ Retrieves a console connection for the specified instance.
+
+ :type instance_id: string
+ :param instance_id: The instance ID of a running instance on the cloud.
+
+ :rtype: :class:`AjaxConsole`
+ """
+
+ class AjaxConsole:
+ def __init__(self, parent=None):
+ self.parent = parent
+ self.instance_id = None
+ self.url = None
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'instanceId':
+ self.instance_id = value
+ elif name == 'url':
+ self.url = value
+ else:
+ setattr(self, name, value)
+
+ params = {}
+ self.build_list_params(params, [instance_id], 'InstanceId')
+ return self.get_object('GetAjaxConsole', params, AjaxConsole)
+ pass
+
+
+def override_connect_ec2(aws_access_key_id=None,
+                         aws_secret_access_key=None, **kwargs):
+    """Drop-in replacement for boto.connect_ec2 that returns the extended
+    NovaEC2Connection instead of a plain EC2Connection."""
+    return NovaEC2Connection(aws_access_key_id,
+                             aws_secret_access_key, **kwargs)
+
+# override boto's connect_ec2 method, so that we can use NovaEC2Connection
+boto.connect_ec2 = override_connect_ec2
+
+
+def usage(status=1):
+    """Print the usage message and euca2ools common options, then exit
+    with the given status (non-zero by default)."""
+    print usage_string
+    Util().usage()
+    sys.exit(status)
+
+
+def version():
+    """Print the euca2ools version string and exit successfully."""
+    print Util().version()
+    sys.exit()
+
+
+def display_console_output(console_output):
+    """Print a classic GetConsoleOutput result (id, timestamp, output).
+
+    NOTE(review): not called anywhere in this script; presumably kept for
+    parity with euca-get-console-output -- confirm before removing.
+    """
+    print console_output.instance_id
+    print console_output.timestamp
+    print console_output.output
+
+
+def display_ajax_console_output(console_output):
+    """Print the URL of the retrieved ajax console."""
+    print console_output.url
+
+
+def main():
+    """Parse euca2ools options, validate the instance_id argument, and
+    print the ajax console URL returned by the GetAjaxConsole API call."""
+    try:
+        euca = Euca2ool()
+    except Exception, e:
+        print e
+        usage()
+
+    instance_id = None
+
+    for name, value in euca.opts:
+        if name in ('-h', '--help'):
+            usage(0)
+        elif name == '--version':
+            version()
+        elif name == '--debug':
+            # NOTE(review): debug is set but never read in this script.
+            debug = True
+
+    # Only the first positional argument is used as the instance id.
+    for arg in euca.args:
+        instance_id = arg
+        break
+
+    if instance_id:
+        try:
+            euca.validate_instance_id(instance_id)
+        except InstanceValidationError:
+            print 'Invalid instance id'
+            sys.exit(1)
+
+        try:
+            euca_conn = euca.make_connection()
+        except ConnectionFailed, e:
+            print e.message
+            sys.exit(1)
+        try:
+            # get_ajax_console is provided by NovaEC2Connection via the
+            # boto.connect_ec2 override installed above.
+            console_output = euca_conn.get_ajax_console(instance_id)
+        except Exception, ex:
+            euca.display_error_and_exit('%s' % ex)
+
+        display_ajax_console_output(console_output)
+    else:
+        print 'instance_id must be specified'
+        usage()
+
+# Script entry point.
+if __name__ == "__main__":
+    main()
diff --git a/tools/install_venv.py b/tools/install_venv.py
index 32c372352..4e3941210 100644
--- a/tools/install_venv.py
+++ b/tools/install_venv.py
@@ -66,7 +66,8 @@ def check_dependencies():
# Try installing it via easy_install...
if HAS_EASY_INSTALL:
print 'Installing virtualenv via easy_install...',
- if not run_command(['which', 'easy_install']):
+ if not (run_command(['which', 'easy_install']) and
+ run_command(['easy_install', 'virtualenv'])):
die('ERROR: virtualenv not found.\n\nNova development requires virtualenv,'
' please install it using your favorite package management tool')
print 'done.'