summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.bzrignore1
-rw-r--r--Authors4
-rw-r--r--babel.cfg2
-rwxr-xr-xbin/nova-ajax-console-proxy137
-rwxr-xr-xbin/nova-api-paste2
-rwxr-xr-xbin/nova-console44
-rwxr-xr-xbin/nova-dhcpbridge17
-rwxr-xr-xbin/nova-instancemonitor7
-rw-r--r--bin/nova-logspool156
-rwxr-xr-xbin/nova-manage22
-rw-r--r--bin/nova-spoolsentry97
-rwxr-xr-xcontrib/nova.sh2
-rw-r--r--doc/source/conf.py8
-rw-r--r--krm_mapping.json.sample3
-rw-r--r--locale/nova.pot2130
-rw-r--r--nova/api/__init__.py1
-rw-r--r--nova/api/ec2/__init__.py94
-rw-r--r--nova/api/ec2/admin.py38
-rw-r--r--nova/api/ec2/apirequest.py12
-rw-r--r--nova/api/ec2/cloud.py83
-rw-r--r--nova/api/ec2/metadatarequesthandler.py7
-rw-r--r--nova/api/openstack/__init__.py30
-rw-r--r--nova/api/openstack/backup_schedules.py6
-rw-r--r--nova/api/openstack/common.py24
-rw-r--r--nova/api/openstack/consoles.py96
-rw-r--r--nova/api/openstack/images.py19
-rw-r--r--nova/api/openstack/servers.py54
-rw-r--r--nova/api/openstack/shared_ip_groups.py (renamed from nova/api/openstack/sharedipgroups.py)10
-rw-r--r--nova/auth/dbdriver.py1
-rw-r--r--nova/auth/ldapdriver.py99
-rw-r--r--nova/auth/manager.py75
-rw-r--r--nova/auth/novarc.template4
-rw-r--r--nova/auth/signer.py15
-rw-r--r--nova/cloudpipe/pipelib.py4
-rw-r--r--nova/compute/api.py116
-rw-r--r--nova/compute/disk.py11
-rw-r--r--nova/compute/manager.py154
-rw-r--r--nova/compute/monitor.py39
-rw-r--r--nova/console/__init__.py13
-rw-r--r--nova/console/api.py75
-rw-r--r--nova/console/fake.py58
-rw-r--r--nova/console/manager.py127
-rw-r--r--nova/console/xvp.conf.template16
-rw-r--r--nova/console/xvp.py194
-rw-r--r--nova/crypto.py5
-rw-r--r--nova/db/api.py66
-rw-r--r--nova/db/sqlalchemy/__init__.py8
-rw-r--r--nova/db/sqlalchemy/api.py162
-rw-r--r--nova/db/sqlalchemy/models.py27
-rw-r--r--nova/exception.py7
-rw-r--r--nova/fakerabbit.py20
-rw-r--r--nova/flags.py43
-rw-r--r--nova/image/glance.py22
-rw-r--r--nova/log.py254
-rw-r--r--nova/network/api.py6
-rw-r--r--nova/network/linux_net.py23
-rw-r--r--nova/network/manager.py20
-rw-r--r--nova/objectstore/handler.py68
-rw-r--r--nova/rpc.py30
-rw-r--r--nova/scheduler/manager.py5
-rw-r--r--nova/service.py28
-rw-r--r--nova/tests/api/openstack/fakes.py2
-rw-r--r--nova/tests/api/openstack/test_images.py3
-rw-r--r--nova/tests/api/openstack/test_servers.py10
-rw-r--r--nova/tests/api/openstack/test_shared_ip_groups.py (renamed from nova/tests/api/openstack/test_sharedipgroups.py)2
-rw-r--r--nova/tests/objectstore_unittest.py2
-rw-r--r--nova/tests/test_access.py1
-rw-r--r--nova/tests/test_auth.py9
-rw-r--r--nova/tests/test_cloud.py49
-rw-r--r--nova/tests/test_compute.py18
-rw-r--r--nova/tests/test_console.py129
-rw-r--r--nova/tests/test_log.py110
-rw-r--r--nova/tests/test_network.py9
-rw-r--r--nova/tests/test_quota.py4
-rw-r--r--nova/tests/test_rpc.py11
-rw-r--r--nova/tests/test_virt.py142
-rw-r--r--nova/tests/test_volume.py6
-rw-r--r--nova/tests/xenapi/stubs.py24
-rw-r--r--nova/twistd.py25
-rw-r--r--nova/utils.py33
-rw-r--r--nova/version.py46
-rw-r--r--nova/virt/connection.py5
-rw-r--r--nova/virt/fake.py8
-rw-r--r--nova/virt/hyperv.py67
-rw-r--r--nova/virt/images.py8
-rw-r--r--nova/virt/libvirt.xml.template13
-rw-r--r--nova/virt/libvirt_conn.py495
-rw-r--r--nova/virt/xenapi/fake.py24
-rw-r--r--nova/virt/xenapi/vm_utils.py72
-rw-r--r--nova/virt/xenapi/vmops.py31
-rw-r--r--nova/virt/xenapi/volume_utils.py44
-rw-r--r--nova/virt/xenapi/volumeops.py31
-rw-r--r--nova/virt/xenapi_conn.py43
-rw-r--r--nova/volume/api.py10
-rw-r--r--nova/volume/driver.py16
-rw-r--r--nova/volume/manager.py21
-rw-r--r--nova/wsgi.py19
-rw-r--r--setup.cfg14
-rw-r--r--setup.py28
-rw-r--r--smoketests/admin_smoketests.py9
-rw-r--r--smoketests/user_smoketests.py87
-rw-r--r--tools/ajaxterm/README.txt120
-rw-r--r--tools/ajaxterm/ajaxterm.135
-rw-r--r--tools/ajaxterm/ajaxterm.css64
-rw-r--r--tools/ajaxterm/ajaxterm.html25
-rw-r--r--tools/ajaxterm/ajaxterm.js279
-rwxr-xr-xtools/ajaxterm/ajaxterm.py586
-rwxr-xr-xtools/ajaxterm/configure32
-rw-r--r--tools/ajaxterm/configure.ajaxterm.bin2
-rw-r--r--tools/ajaxterm/configure.initd.debian33
-rw-r--r--tools/ajaxterm/configure.initd.gentoo27
-rw-r--r--tools/ajaxterm/configure.initd.redhat75
-rw-r--r--tools/ajaxterm/configure.makefile20
-rw-r--r--tools/ajaxterm/qweb.py1356
-rw-r--r--tools/ajaxterm/sarissa.js647
-rw-r--r--tools/ajaxterm/sarissa_dhtml.js105
-rwxr-xr-xtools/euca-get-ajax-console164
-rw-r--r--tools/install_venv.py3
118 files changed, 9399 insertions, 755 deletions
diff --git a/.bzrignore b/.bzrignore
index d81a7d829..b271561a3 100644
--- a/.bzrignore
+++ b/.bzrignore
@@ -12,3 +12,4 @@ CA/openssl.cnf
CA/serial*
CA/newcerts/*.pem
CA/private/cakey.pem
+nova/vcsversion.py
diff --git a/Authors b/Authors
index 8dfaf9557..6e6ecfe1b 100644
--- a/Authors
+++ b/Authors
@@ -23,8 +23,11 @@ Jonathan Bryce <jbryce@jbryce.com>
Josh Kearney <josh.kearney@rackspace.com>
Joshua McKenty <jmckenty@gmail.com>
Justin Santa Barbara <justin@fathomdb.com>
+Ken Pepple <ken.pepple@gmail.com>
+Lorin Hochstein <lorin@isi.edu>
Matt Dietz <matt.dietz@rackspace.com>
Michael Gundlach <michael.gundlach@rackspace.com>
+Monsyne Dragon <mdragon@rackspace.com>
Monty Taylor <mordred@inaugust.com>
Paul Voccio <paul@openstack.org>
Rick Clark <rick@openstack.org>
@@ -40,4 +43,3 @@ Trey Morris <trey.morris@rackspace.com>
Vishvananda Ishaya <vishvananda@gmail.com>
Youcef Laribi <Youcef.Laribi@eu.citrix.com>
Zhixue Wu <Zhixue.Wu@citrix.com>
-
diff --git a/babel.cfg b/babel.cfg
new file mode 100644
index 000000000..15cd6cb76
--- /dev/null
+++ b/babel.cfg
@@ -0,0 +1,2 @@
+[python: **.py]
+
diff --git a/bin/nova-ajax-console-proxy b/bin/nova-ajax-console-proxy
new file mode 100755
index 000000000..2bc407658
--- /dev/null
+++ b/bin/nova-ajax-console-proxy
@@ -0,0 +1,137 @@
+#!/usr/bin/env python
+# pylint: disable-msg=C0103
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Ajax Console Proxy Server"""
+
+from eventlet import greenthread
+from eventlet.green import urllib2
+
+import exceptions
+import gettext
+import logging
+import os
+import sys
+import time
+import urlparse
+
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+ os.pardir,
+ os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+ sys.path.insert(0, possible_topdir)
+
+gettext.install('nova', unicode=1)
+
+from nova import flags
+from nova import log as logging
+from nova import rpc
+from nova import utils
+from nova import wsgi
+
+FLAGS = flags.FLAGS
+
+flags.DEFINE_integer('ajax_console_idle_timeout', 300,
+ 'Seconds before idle connection destroyed')
+
+LOG = logging.getLogger('nova.ajax_console_proxy')
+LOG.setLevel(logging.DEBUG)
+LOG.addHandler(logging.StreamHandler())
+
+
+class AjaxConsoleProxy(object):
+ tokens = {}
+
+ def __call__(self, env, start_response):
+ try:
+ req_url = '%s://%s%s?%s' % (env['wsgi.url_scheme'],
+ env['HTTP_HOST'],
+ env['PATH_INFO'],
+ env['QUERY_STRING'])
+ if 'HTTP_REFERER' in env:
+ auth_url = env['HTTP_REFERER']
+ else:
+ auth_url = req_url
+
+ auth_params = urlparse.parse_qs(urlparse.urlparse(auth_url).query)
+ parsed_url = urlparse.urlparse(req_url)
+
+ auth_info = AjaxConsoleProxy.tokens[auth_params['token'][0]]
+ args = auth_info['args']
+ auth_info['last_activity'] = time.time()
+
+ remote_url = ("http://%s:%s%s?token=%s" % (
+ str(args['host']),
+ str(args['port']),
+ parsed_url.path,
+ str(args['token'])))
+
+ opener = urllib2.urlopen(remote_url, env['wsgi.input'].read())
+ body = opener.read()
+ info = opener.info()
+
+ start_response("200 OK", info.dict.items())
+ return body
+ except (exceptions.KeyError):
+ if env['PATH_INFO'] != '/favicon.ico':
+ LOG.audit("Unauthorized request %s, %s"
+ % (req_url, str(env)))
+ start_response("401 NOT AUTHORIZED", [])
+ return "Not Authorized"
+ except Exception:
+ start_response("500 ERROR", [])
+ return "Server Error"
+
+ def register_listeners(self):
+ class Callback:
+ def __call__(self, data, message):
+ if data['method'] == 'authorize_ajax_console':
+ AjaxConsoleProxy.tokens[data['args']['token']] = \
+ {'args': data['args'], 'last_activity': time.time()}
+
+ conn = rpc.Connection.instance(new=True)
+ consumer = rpc.TopicConsumer(
+ connection=conn,
+ topic=FLAGS.ajax_console_proxy_topic)
+ consumer.register_callback(Callback())
+
+ def delete_expired_tokens():
+ now = time.time()
+ to_delete = []
+ for k, v in AjaxConsoleProxy.tokens.items():
+ if now - v['last_activity'] > FLAGS.ajax_console_idle_timeout:
+ to_delete.append(k)
+
+ for k in to_delete:
+ del AjaxConsoleProxy.tokens[k]
+
+ utils.LoopingCall(consumer.fetch, auto_ack=True,
+ enable_callbacks=True).start(0.1)
+ utils.LoopingCall(delete_expired_tokens).start(1)
+
+if __name__ == '__main__':
+ utils.default_flagfile()
+ FLAGS(sys.argv)
+ server = wsgi.Server()
+ acp = AjaxConsoleProxy()
+ acp.register_listeners()
+ server.start(acp, FLAGS.ajax_console_proxy_port, host='0.0.0.0')
+ server.wait()
diff --git a/bin/nova-api-paste b/bin/nova-api-paste
index 6ee833a18..419f0bbdc 100755
--- a/bin/nova-api-paste
+++ b/bin/nova-api-paste
@@ -21,7 +21,6 @@
"""Starter script for Nova API."""
import gettext
-import logging
import os
import sys
@@ -38,6 +37,7 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)
from nova import flags
+from nova import log as logging
from nova import wsgi
LOG = logging.getLogger('nova.api')
diff --git a/bin/nova-console b/bin/nova-console
new file mode 100755
index 000000000..802cc80b6
--- /dev/null
+++ b/bin/nova-console
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Starter script for Nova Console Proxy."""
+
+import eventlet
+eventlet.monkey_patch()
+
+import gettext
+import os
+import sys
+
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+ os.pardir,
+ os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+ sys.path.insert(0, possible_topdir)
+
+gettext.install('nova', unicode=1)
+
+from nova import service
+from nova import utils
+
+if __name__ == '__main__':
+ utils.default_flagfile()
+ service.serve()
+ service.wait()
diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index 828aba3d1..1a994d956 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -22,7 +22,6 @@ Handle lease database updates from DHCP servers.
"""
import gettext
-import logging
import os
import sys
@@ -39,6 +38,7 @@ gettext.install('nova', unicode=1)
from nova import context
from nova import db
from nova import flags
+from nova import log as logging
from nova import rpc
from nova import utils
from nova.network import linux_net
@@ -49,11 +49,13 @@ flags.DECLARE('network_size', 'nova.network.manager')
flags.DECLARE('num_networks', 'nova.network.manager')
flags.DECLARE('update_dhcp_on_disassociate', 'nova.network.manager')
+LOG = logging.getLogger('nova.dhcpbridge')
+
def add_lease(mac, ip_address, _hostname, _interface):
"""Set the IP that was assigned by the DHCP server."""
if FLAGS.fake_rabbit:
- logging.debug("leasing ip")
+ LOG.debug(_("leasing ip"))
network_manager = utils.import_object(FLAGS.network_manager)
network_manager.lease_fixed_ip(context.get_admin_context(),
mac,
@@ -68,14 +70,14 @@ def add_lease(mac, ip_address, _hostname, _interface):
def old_lease(mac, ip_address, hostname, interface):
"""Update just as add lease."""
- logging.debug("Adopted old lease or got a change of mac/hostname")
+ LOG.debug(_("Adopted old lease or got a change of mac/hostname"))
add_lease(mac, ip_address, hostname, interface)
def del_lease(mac, ip_address, _hostname, _interface):
"""Called when a lease expires."""
if FLAGS.fake_rabbit:
- logging.debug("releasing ip")
+ LOG.debug(_("releasing ip"))
network_manager = utils.import_object(FLAGS.network_manager)
network_manager.release_fixed_ip(context.get_admin_context(),
mac,
@@ -100,6 +102,7 @@ def main():
flagfile = os.environ.get('FLAGFILE', FLAGS.dhcpbridge_flagfile)
utils.default_flagfile(flagfile)
argv = FLAGS(sys.argv)
+ logging.basicConfig()
interface = os.environ.get('DNSMASQ_INTERFACE', 'br0')
if int(os.environ.get('TESTING', '0')):
FLAGS.fake_rabbit = True
@@ -117,9 +120,9 @@ def main():
mac = argv[2]
ip = argv[3]
hostname = argv[4]
- logging.debug("Called %s for mac %s with ip %s and "
- "hostname %s on interface %s",
- action, mac, ip, hostname, interface)
+ LOG.debug(_("Called %s for mac %s with ip %s and "
+ "hostname %s on interface %s"),
+ action, mac, ip, hostname, interface)
globals()[action + '_lease'](mac, ip, hostname, interface)
else:
print init_leases(interface)
diff --git a/bin/nova-instancemonitor b/bin/nova-instancemonitor
index 5dac3ffe6..7dca02014 100755
--- a/bin/nova-instancemonitor
+++ b/bin/nova-instancemonitor
@@ -23,7 +23,6 @@
import gettext
import os
-import logging
import sys
from twisted.application import service
@@ -37,19 +36,23 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)
+from nova import log as logging
from nova import utils
from nova import twistd
from nova.compute import monitor
+# TODO(todd): shouldn't this be done with flags? And what about verbose?
logging.getLogger('boto').setLevel(logging.WARN)
+LOG = logging.getLogger('nova.instancemonitor')
+
if __name__ == '__main__':
utils.default_flagfile()
twistd.serve(__file__)
if __name__ == '__builtin__':
- logging.warn('Starting instance monitor')
+ LOG.warn(_('Starting instance monitor'))
# pylint: disable-msg=C0103
monitor = monitor.InstanceMonitor()
diff --git a/bin/nova-logspool b/bin/nova-logspool
new file mode 100644
index 000000000..097459b12
--- /dev/null
+++ b/bin/nova-logspool
@@ -0,0 +1,156 @@
+#!/usr/bin/env python
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Tools for working with logs generated by nova components
+"""
+
+
+import json
+import os
+import re
+import sys
+
+
+class Request(object):
+ def __init__(self):
+ self.time = ""
+ self.host = ""
+ self.logger = ""
+ self.message = ""
+ self.trace = ""
+ self.env = ""
+ self.request_id = ""
+
+ def add_error_line(self, error_line):
+ self.time = " ".join(error_line.split(" ")[:3])
+ self.host = error_line.split(" ")[3]
+ self.logger = error_line.split("(")[1].split(" ")[0]
+ self.request_id = error_line.split("[")[1].split(" ")[0]
+ error_lines = error_line.split("#012")
+ self.message = self.clean_log_line(error_lines.pop(0))
+ self.trace = "\n".join([self.clean_trace(l) for l in error_lines])
+
+ def add_environment_line(self, env_line):
+ self.env = self.clean_env_line(env_line)
+
+ def clean_log_line(self, line):
+ """Remove log format for time, level, etc: split after context"""
+ return line.split('] ')[-1]
+
+ def clean_env_line(self, line):
+ """Also has an 'Environment: ' string in the message"""
+ return re.sub(r'^Environment: ', '', self.clean_log_line(line))
+
+ def clean_trace(self, line):
+ """trace has a different format, so split on TRACE:"""
+ return line.split('TRACE: ')[-1]
+
+ def to_dict(self):
+ return {'traceback': self.trace, 'message': self.message,
+ 'host': self.host, 'env': self.env, 'logger': self.logger,
+ 'request_id': self.request_id}
+
+
+class LogReader(object):
+ def __init__(self, filename):
+ self.filename = filename
+ self._errors = {}
+
+ def process(self, spooldir):
+ with open(self.filename) as f:
+ line = f.readline()
+ while len(line) > 0:
+ parts = line.split(" ")
+ level = (len(parts) < 6) or parts[5]
+ if level == 'ERROR':
+ self.handle_logged_error(line)
+ elif level == '[-]' and self.last_error:
+ # twisted stack trace line
+ clean_line = " ".join(line.split(" ")[6:])
+ self.last_error.trace = self.last_error.trace + clean_line
+ else:
+ self.last_error = None
+ line = f.readline()
+ self.update_spool(spooldir)
+
+ def handle_logged_error(self, line):
+ request_id = re.search(r' \[([A-Z0-9\-/]+)', line)
+ if not request_id:
+ raise Exception("Unable to parse request id from %s" % line)
+ request_id = request_id.group(1)
+ data = self._errors.get(request_id, Request())
+ if self.is_env_line(line):
+ data.add_environment_line(line)
+ elif self.is_error_line(line):
+ data.add_error_line(line)
+ else:
+ # possibly error from twsited
+ data.add_error_line(line)
+ self.last_error = data
+ self._errors[request_id] = data
+
+ def is_env_line(self, line):
+ return re.search('Environment: ', line)
+
+ def is_error_line(self, line):
+ return re.search('raised', line)
+
+ def update_spool(self, directory):
+ processed_dir = "%s/processed" % directory
+ self._ensure_dir_exists(processed_dir)
+ for rid, value in self._errors.iteritems():
+ if not self.has_been_processed(processed_dir, rid):
+ with open("%s/%s" % (directory, rid), "w") as spool:
+ spool.write(json.dumps(value.to_dict()))
+ self.flush_old_processed_spool(processed_dir)
+
+ def _ensure_dir_exists(self, d):
+ mkdir = False
+ try:
+ os.stat(d)
+ except:
+ mkdir = True
+ if mkdir:
+ os.mkdir(d)
+
+ def has_been_processed(self, processed_dir, rid):
+ rv = False
+ try:
+ os.stat("%s/%s" % (processed_dir, rid))
+ rv = True
+ except:
+ pass
+ return rv
+
+ def flush_old_processed_spool(self, processed_dir):
+ keys = self._errors.keys()
+ procs = os.listdir(processed_dir)
+ for p in procs:
+ if p not in keys:
+ # log has rotated and the old error won't be seen again
+ os.unlink("%s/%s" % (processed_dir, p))
+
+if __name__ == '__main__':
+ filename = '/var/log/nova.log'
+ spooldir = '/var/spool/nova'
+ if len(sys.argv) > 1:
+ filename = sys.argv[1]
+ if len(sys.argv) > 2:
+ spooldir = sys.argv[2]
+ LogReader(filename).process(spooldir)
diff --git a/bin/nova-manage b/bin/nova-manage
index 3416c1a52..3f5957190 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -55,8 +55,8 @@
import datetime
import gettext
-import logging
import os
+import re
import sys
import time
@@ -333,6 +333,11 @@ class ProjectCommands(object):
arguments: name project_manager [description]"""
self.manager.create_project(name, project_manager, description)
+ def modify(self, name, project_manager, description=None):
+ """Modifies a project
+ arguments: name project_manager [description]"""
+ self.manager.modify_project(name, project_manager, description)
+
def delete(self, name):
"""Deletes an existing project
arguments: name"""
@@ -499,6 +504,15 @@ class ServiceCommands(object):
db.service_update(ctxt, svc['id'], {'disabled': True})
+class LogCommands(object):
+ def request(self, request_id, logfile='/var/log/nova.log'):
+ """Show all fields in the log for the given request. Assumes you
+ haven't changed the log format too much.
+ ARGS: request_id [logfile]"""
+ lines = utils.execute("cat %s | grep '\[%s '" % (logfile, request_id))
+ print re.sub('#012', "\n", "\n".join(lines))
+
+
CATEGORIES = [
('user', UserCommands),
('project', ProjectCommands),
@@ -507,7 +521,8 @@ CATEGORIES = [
('vpn', VpnCommands),
('floating', FloatingIpCommands),
('network', NetworkCommands),
- ('service', ServiceCommands)]
+ ('service', ServiceCommands),
+ ('log', LogCommands)]
def lazy_match(name, key_value_tuples):
@@ -546,9 +561,6 @@ def main():
utils.default_flagfile()
argv = FLAGS(sys.argv)
- if FLAGS.verbose:
- logging.getLogger().setLevel(logging.DEBUG)
-
script_name = argv.pop(0)
if len(argv) < 1:
print script_name + " category action [<args>]"
diff --git a/bin/nova-spoolsentry b/bin/nova-spoolsentry
new file mode 100644
index 000000000..ab20268a9
--- /dev/null
+++ b/bin/nova-spoolsentry
@@ -0,0 +1,97 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import base64
+import json
+import logging
+import os
+import shutil
+import sys
+import urllib
+import urllib2
+try:
+ import cPickle as pickle
+except:
+ import pickle
+
+
+class SpoolSentry(object):
+ def __init__(self, spool_dir, sentry_url, key=None):
+ self.spool_dir = spool_dir
+ self.sentry_url = sentry_url
+ self.key = key
+
+ def process(self):
+ for fname in os.listdir(self.spool_dir):
+ if fname == "processed":
+ continue
+ try:
+ sourcefile = "%s/%s" % (self.spool_dir, fname)
+ with open(sourcefile) as f:
+ fdata = f.read()
+ data_from_json = json.loads(fdata)
+ data = self.build_data(data_from_json)
+ self.send_data(data)
+ destfile = "%s/processed/%s" % (self.spool_dir, fname)
+ shutil.move(sourcefile, destfile)
+ except:
+ logging.exception("Unable to upload record %s", fname)
+ raise
+
+ def build_data(self, filejson):
+ env = {'SERVER_NAME': 'unknown', 'SERVER_PORT': '0000',
+ 'SCRIPT_NAME': '/unknown/', 'PATH_INFO': 'unknown'}
+ if filejson['env']:
+ env = json.loads(filejson['env'])
+ url = "http://%s:%s%s%s" % (env['SERVER_NAME'], env['SERVER_PORT'],
+ env['SCRIPT_NAME'], env['PATH_INFO'])
+ rv = {'logger': filejson['logger'], 'level': logging.ERROR,
+ 'server_name': filejson['host'], 'url': url,
+ 'message': filejson['message'],
+ 'traceback': filejson['traceback']}
+ rv['data'] = {}
+ if filejson['env']:
+ rv['data']['META'] = env
+ if filejson['request_id']:
+ rv['data']['request_id'] = filejson['request_id']
+ return rv
+
+ def send_data(self, data):
+ data = {
+ 'data': base64.b64encode(pickle.dumps(data).encode('zlib')),
+ 'key': self.key
+ }
+ req = urllib2.Request(self.sentry_url)
+ res = urllib2.urlopen(req, urllib.urlencode(data))
+ if res.getcode() != 200:
+ raise Exception("Bad HTTP code: %s" % res.getcode())
+ txt = res.read()
+
+if __name__ == '__main__':
+ sentryurl = 'http://127.0.0.1/sentry/store/'
+ key = ''
+ spooldir = '/var/spool/nova'
+ if len(sys.argv) > 1:
+ sentryurl = sys.argv[1]
+ if len(sys.argv) > 2:
+ key = sys.argv[2]
+ if len(sys.argv) > 3:
+ spooldir = sys.argv[3]
+ SpoolSentry(spooldir, sentryurl, key).process()
diff --git a/contrib/nova.sh b/contrib/nova.sh
index da1ba030c..e06706295 100755
--- a/contrib/nova.sh
+++ b/contrib/nova.sh
@@ -78,6 +78,7 @@ if [ "$CMD" == "install" ]; then
sudo apt-get install -y user-mode-linux kvm libvirt-bin
sudo apt-get install -y screen euca2ools vlan curl rabbitmq-server
sudo apt-get install -y lvm2 iscsitarget open-iscsi
+ sudo apt-get install -y socat
echo "ISCSITARGET_ENABLE=true" | sudo tee /etc/default/iscsitarget
sudo /etc/init.d/iscsitarget restart
sudo modprobe kvm
@@ -155,6 +156,7 @@ if [ "$CMD" == "run" ]; then
screen_it network "$NOVA_DIR/bin/nova-network"
screen_it scheduler "$NOVA_DIR/bin/nova-scheduler"
screen_it volume "$NOVA_DIR/bin/nova-volume"
+ screen_it ajax_console_proxy "$NOVA_DIR/bin/nova-ajax-console-proxy"
screen_it test ". $NOVA_DIR/novarc"
screen -S nova -x
fi
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 8f1b370cc..996dfb0a7 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -60,10 +60,12 @@ copyright = u'2010, United States Government as represented by the Administrator
# |version| and |release|, also used in various other places throughout the
# built documents.
#
-# The short X.Y version.
-version = '2011.1'
+from nova import version as nova_version
+#import nova.version
# The full version, including alpha/beta/rc tags.
-release = '2011.1-prerelease'
+release = nova_version.version_string()
+# The short X.Y version.
+version = nova_version.canonical_version_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
diff --git a/krm_mapping.json.sample b/krm_mapping.json.sample
new file mode 100644
index 000000000..1ecfba635
--- /dev/null
+++ b/krm_mapping.json.sample
@@ -0,0 +1,3 @@
+{
+ "machine" : ["kernel", "ramdisk"]
+}
diff --git a/locale/nova.pot b/locale/nova.pot
new file mode 100644
index 000000000..a96411e33
--- /dev/null
+++ b/locale/nova.pot
@@ -0,0 +1,2130 @@
+# Translations template for nova.
+# Copyright (C) 2011 ORGANIZATION
+# This file is distributed under the same license as the nova project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2011.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: nova 2011.1\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2011-01-10 11:25-0800\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
+"Language-Team: LANGUAGE <LL@li.org>\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 0.9.4\n"
+
+#: nova/crypto.py:46
+msgid "Filename of root CA"
+msgstr ""
+
+#: nova/crypto.py:49
+msgid "Filename of private key"
+msgstr ""
+
+#: nova/crypto.py:51
+msgid "Filename of root Certificate Revokation List"
+msgstr ""
+
+#: nova/crypto.py:53
+msgid "Where we keep our keys"
+msgstr ""
+
+#: nova/crypto.py:55
+msgid "Where we keep our root CA"
+msgstr ""
+
+#: nova/crypto.py:57
+msgid "Should we use a CA for each project?"
+msgstr ""
+
+#: nova/crypto.py:61
+#, python-format
+msgid "Subject for certificate for users, %s for project, user, timestamp"
+msgstr ""
+
+#: nova/crypto.py:66
+#, python-format
+msgid "Subject for certificate for projects, %s for project, timestamp"
+msgstr ""
+
+#: nova/crypto.py:71
+#, python-format
+msgid "Subject for certificate for vpns, %s for project, timestamp"
+msgstr ""
+
+#: nova/crypto.py:258
+#, python-format
+msgid "Flags path: %s"
+msgstr ""
+
+#: nova/exception.py:33
+msgid "Unexpected error while running command."
+msgstr ""
+
+#: nova/exception.py:36
+#, python-format
+msgid ""
+"%s\n"
+"Command: %s\n"
+"Exit code: %s\n"
+"Stdout: %r\n"
+"Stderr: %r"
+msgstr ""
+
+#: nova/exception.py:86
+msgid "Uncaught exception"
+msgstr ""
+
+#: nova/fakerabbit.py:48
+#, python-format
+msgid "(%s) publish (key: %s) %s"
+msgstr ""
+
+#: nova/fakerabbit.py:53
+#, python-format
+msgid "Publishing to route %s"
+msgstr ""
+
+#: nova/fakerabbit.py:83
+#, python-format
+msgid "Declaring queue %s"
+msgstr ""
+
+#: nova/fakerabbit.py:89
+#, python-format
+msgid "Declaring exchange %s"
+msgstr ""
+
+#: nova/fakerabbit.py:95
+#, python-format
+msgid "Binding %s to %s with key %s"
+msgstr ""
+
+#: nova/fakerabbit.py:120
+#, python-format
+msgid "Getting from %s: %s"
+msgstr ""
+
+#: nova/rpc.py:92
+#, python-format
+msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds."
+msgstr ""
+
+#: nova/rpc.py:99
+#, python-format
+msgid "Unable to connect to AMQP server after %d tries. Shutting down."
+msgstr ""
+
+#: nova/rpc.py:118
+msgid "Reconnected to queue"
+msgstr ""
+
+#: nova/rpc.py:125
+msgid "Failed to fetch message from queue"
+msgstr ""
+
+#: nova/rpc.py:155
+#, python-format
+msgid "Initing the Adapter Consumer for %s"
+msgstr ""
+
+#: nova/rpc.py:170
+#, python-format
+msgid "received %s"
+msgstr ""
+
+#: nova/rpc.py:183
+#, python-format
+msgid "no method for message: %s"
+msgstr ""
+
+#: nova/rpc.py:184
+#, python-format
+msgid "No method for message: %s"
+msgstr ""
+
+#: nova/rpc.py:245
+#, python-format
+msgid "Returning exception %s to caller"
+msgstr ""
+
+#: nova/rpc.py:286
+#, python-format
+msgid "unpacked context: %s"
+msgstr ""
+
+#: nova/rpc.py:305
+msgid "Making asynchronous call..."
+msgstr ""
+
+#: nova/rpc.py:308
+#, python-format
+msgid "MSG_ID is %s"
+msgstr ""
+
+#: nova/rpc.py:356
+#, python-format
+msgid "response %s"
+msgstr ""
+
+#: nova/rpc.py:365
+#, python-format
+msgid "topic is %s"
+msgstr ""
+
+#: nova/rpc.py:366
+#, python-format
+msgid "message %s"
+msgstr ""
+
+#: nova/service.py:157
+#, python-format
+msgid "Starting %s node"
+msgstr ""
+
+#: nova/service.py:169
+msgid "Service killed that has no database entry"
+msgstr ""
+
+#: nova/service.py:190
+msgid "The service database object disappeared, Recreating it."
+msgstr ""
+
+#: nova/service.py:202
+msgid "Recovered model server connection!"
+msgstr ""
+
+#: nova/service.py:208
+msgid "model server went away"
+msgstr ""
+
+#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43
+#, python-format
+msgid "Data store %s is unreachable. Trying again in %d seconds."
+msgstr ""
+
+#: nova/service.py:232 nova/twistd.py:232
+#, python-format
+msgid "Serving %s"
+msgstr ""
+
+#: nova/service.py:234 nova/twistd.py:264
+msgid "Full set of FLAGS:"
+msgstr ""
+
+#: nova/twistd.py:211
+#, python-format
+msgid "pidfile %s does not exist. Daemon not running?\n"
+msgstr ""
+
+#: nova/twistd.py:268
+#, python-format
+msgid "Starting %s"
+msgstr ""
+
+#: nova/utils.py:53
+#, python-format
+msgid "Inner Exception: %s"
+msgstr ""
+
+#: nova/utils.py:54
+#, python-format
+msgid "Class %s cannot be found"
+msgstr ""
+
+#: nova/utils.py:113
+#, python-format
+msgid "Fetching %s"
+msgstr ""
+
+#: nova/utils.py:125
+#, python-format
+msgid "Running cmd (subprocess): %s"
+msgstr ""
+
+#: nova/utils.py:138
+#, python-format
+msgid "Result was %s"
+msgstr ""
+
+#: nova/utils.py:171
+#, python-format
+msgid "debug in callback: %s"
+msgstr ""
+
+#: nova/utils.py:176
+#, python-format
+msgid "Running %s"
+msgstr ""
+
+#: nova/utils.py:207
+#, python-format
+msgid "Couldn't get IP, using 127.0.0.1 %s"
+msgstr ""
+
+#: nova/utils.py:289
+#, python-format
+msgid "Invalid backend: %s"
+msgstr ""
+
+#: nova/utils.py:300
+#, python-format
+msgid "backend %s"
+msgstr ""
+
+#: nova/api/ec2/__init__.py:133
+msgid "Too many failed authentications."
+msgstr ""
+
+#: nova/api/ec2/__init__.py:142
+#, python-format
+msgid ""
+"Access key %s has had %d failed authentications and will be locked out "
+"for %d minutes."
+msgstr ""
+
+#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140
+#, python-format
+msgid "Authentication Failure: %s"
+msgstr ""
+
+#: nova/api/ec2/__init__.py:190
+#, python-format
+msgid "Authenticated Request For %s:%s"
+msgstr ""
+
+#: nova/api/ec2/__init__.py:227
+#, python-format
+msgid "action: %s"
+msgstr ""
+
+#: nova/api/ec2/__init__.py:229
+#, python-format
+msgid "arg: %s\t\tval: %s"
+msgstr ""
+
+#: nova/api/ec2/__init__.py:301
+#, python-format
+msgid "Unauthorized request for controller=%s and action=%s"
+msgstr ""
+
+#: nova/api/ec2/__init__.py:339
+#, python-format
+msgid "NotFound raised: %s"
+msgstr ""
+
+#: nova/api/ec2/__init__.py:342
+#, python-format
+msgid "ApiError raised: %s"
+msgstr ""
+
+#: nova/api/ec2/__init__.py:349
+#, python-format
+msgid "Unexpected error raised: %s"
+msgstr ""
+
+#: nova/api/ec2/__init__.py:354
+msgid "An unknown error has occurred. Please try your request again."
+msgstr ""
+
+#: nova/api/ec2/admin.py:84
+#, python-format
+msgid "Creating new user: %s"
+msgstr ""
+
+#: nova/api/ec2/admin.py:92
+#, python-format
+msgid "Deleting user: %s"
+msgstr ""
+
+#: nova/api/ec2/admin.py:114
+#, python-format
+msgid "Adding role %s to user %s for project %s"
+msgstr ""
+
+#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415
+#, python-format
+msgid "Adding sitewide role %s to user %s"
+msgstr ""
+
+#: nova/api/ec2/admin.py:122
+#, python-format
+msgid "Removing role %s from user %s for project %s"
+msgstr ""
+
+#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441
+#, python-format
+msgid "Removing sitewide role %s from user %s"
+msgstr ""
+
+#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192
+msgid "operation must be add or remove"
+msgstr ""
+
+#: nova/api/ec2/admin.py:142
+#, python-format
+msgid "Getting x509 for user: %s on project: %s"
+msgstr ""
+
+#: nova/api/ec2/admin.py:159
+#, python-format
+msgid "Create project %s managed by %s"
+msgstr ""
+
+#: nova/api/ec2/admin.py:170
+#, python-format
+msgid "Delete project: %s"
+msgstr ""
+
+#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533
+#, python-format
+msgid "Adding user %s to project %s"
+msgstr ""
+
+#: nova/api/ec2/admin.py:188
+#, python-format
+msgid "Removing user %s from project %s"
+msgstr ""
+
+#: nova/api/ec2/apirequest.py:95
+#, python-format
+msgid "Unsupported API request: controller = %s,action = %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:117
+#, python-format
+msgid "Generating root CA: %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:277
+#, python-format
+msgid "Create key pair %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:285
+#, python-format
+msgid "Delete key pair %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:357
+#, python-format
+msgid "%s is not a valid ipProtocol"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:361
+msgid "Invalid port range"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:392
+#, python-format
+msgid "Revoke security group ingress %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414
+msgid "No rule for the specified parameters."
+msgstr ""
+
+#: nova/api/ec2/cloud.py:421
+#, python-format
+msgid "Authorize security group ingress %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:432
+#, python-format
+msgid "This rule already exists in group %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:460
+#, python-format
+msgid "Create Security Group %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:463
+#, python-format
+msgid "group %s already exists"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:475
+#, python-format
+msgid "Delete security group %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452
+#, python-format
+msgid "Get console output for instance %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:543
+#, python-format
+msgid "Create volume of %s GB"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:567
+#, python-format
+msgid "Attach volume %s to instance %s at %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:579
+#, python-format
+msgid "Detach volume %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:686
+msgid "Allocate address"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:691
+#, python-format
+msgid "Release address %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:696
+#, python-format
+msgid "Associate address %s to instance %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:703
+#, python-format
+msgid "Disassociate address %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:730
+msgid "Going to start terminating instances"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:738
+#, python-format
+msgid "Reboot instance %r"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:775
+#, python-format
+msgid "De-registering image %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:783
+#, python-format
+msgid "Registered image %s with id %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804
+#, python-format
+msgid "attribute not supported: %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:794
+#, python-format
+msgid "invalid id: %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:807
+msgid "user or group not specified"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:809
+msgid "only group \"all\" is supported"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:811
+msgid "operation_type must be add or remove"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:812
+#, python-format
+msgid "Updating image %s publicity"
+msgstr ""
+
+#: nova/api/ec2/metadatarequesthandler.py:75
+#, python-format
+msgid "Failed to get metadata for ip: %s"
+msgstr ""
+
+#: nova/api/openstack/__init__.py:70
+#, python-format
+msgid "Caught error: %s"
+msgstr ""
+
+#: nova/api/openstack/__init__.py:86
+msgid "Including admin operations in API."
+msgstr ""
+
+#: nova/api/openstack/servers.py:184
+#, python-format
+msgid "Compute.api::lock %s"
+msgstr ""
+
+#: nova/api/openstack/servers.py:199
+#, python-format
+msgid "Compute.api::unlock %s"
+msgstr ""
+
+#: nova/api/openstack/servers.py:213
+#, python-format
+msgid "Compute.api::get_lock %s"
+msgstr ""
+
+#: nova/api/openstack/servers.py:224
+#, python-format
+msgid "Compute.api::pause %s"
+msgstr ""
+
+#: nova/api/openstack/servers.py:235
+#, python-format
+msgid "Compute.api::unpause %s"
+msgstr ""
+
+#: nova/api/openstack/servers.py:246
+#, python-format
+msgid "compute.api::suspend %s"
+msgstr ""
+
+#: nova/api/openstack/servers.py:257
+#, python-format
+msgid "compute.api::resume %s"
+msgstr ""
+
+#: nova/auth/dbdriver.py:84
+#, python-format
+msgid "User %s already exists"
+msgstr ""
+
+#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207
+#, python-format
+msgid "Project can't be created because manager %s doesn't exist"
+msgstr ""
+
+#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204
+#, python-format
+msgid "Project can't be created because project %s already exists"
+msgstr ""
+
+#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241
+#, python-format
+msgid "Project can't be modified because manager %s doesn't exist"
+msgstr ""
+
+#: nova/auth/dbdriver.py:245
+#, python-format
+msgid "User \"%s\" not found"
+msgstr ""
+
+#: nova/auth/dbdriver.py:248
+#, python-format
+msgid "Project \"%s\" not found"
+msgstr ""
+
+#: nova/auth/fakeldap.py:33
+msgid "Attempted to instantiate singleton"
+msgstr ""
+
+#: nova/auth/ldapdriver.py:181
+#, python-format
+msgid "LDAP object for %s doesn't exist"
+msgstr ""
+
+#: nova/auth/ldapdriver.py:218
+#, python-format
+msgid "Project can't be created because user %s doesn't exist"
+msgstr ""
+
+#: nova/auth/ldapdriver.py:478
+#, python-format
+msgid "User %s is already a member of the group %s"
+msgstr ""
+
+#: nova/auth/ldapdriver.py:507
+#, python-format
+msgid ""
+"Attempted to remove the last member of a group. Deleting the group at %s "
+"instead."
+msgstr ""
+
+#: nova/auth/ldapdriver.py:528
+#, python-format
+msgid "Group at dn %s doesn't exist"
+msgstr ""
+
+#: nova/auth/manager.py:259
+#, python-format
+msgid "Looking up user: %r"
+msgstr ""
+
+#: nova/auth/manager.py:263
+#, python-format
+msgid "Failed authorization for access key %s"
+msgstr ""
+
+#: nova/auth/manager.py:264
+#, python-format
+msgid "No user found for access key %s"
+msgstr ""
+
+#: nova/auth/manager.py:270
+#, python-format
+msgid "Using project name = user name (%s)"
+msgstr ""
+
+#: nova/auth/manager.py:275
+#, python-format
+msgid "failed authorization: no project named %s (user=%s)"
+msgstr ""
+
+#: nova/auth/manager.py:277
+#, python-format
+msgid "No project called %s could be found"
+msgstr ""
+
+#: nova/auth/manager.py:281
+#, python-format
+msgid "Failed authorization: user %s not admin and not member of project %s"
+msgstr ""
+
+#: nova/auth/manager.py:283
+#, python-format
+msgid "User %s is not a member of project %s"
+msgstr ""
+
+#: nova/auth/manager.py:292 nova/auth/manager.py:303
+#, python-format
+msgid "Invalid signature for user %s"
+msgstr ""
+
+#: nova/auth/manager.py:293 nova/auth/manager.py:304
+msgid "Signature does not match"
+msgstr ""
+
+#: nova/auth/manager.py:374
+msgid "Must specify project"
+msgstr ""
+
+#: nova/auth/manager.py:408
+#, python-format
+msgid "The %s role can not be found"
+msgstr ""
+
+#: nova/auth/manager.py:410
+#, python-format
+msgid "The %s role is global only"
+msgstr ""
+
+#: nova/auth/manager.py:412
+#, python-format
+msgid "Adding role %s to user %s in project %s"
+msgstr ""
+
+#: nova/auth/manager.py:438
+#, python-format
+msgid "Removing role %s from user %s on project %s"
+msgstr ""
+
+#: nova/auth/manager.py:505
+#, python-format
+msgid "Created project %s with manager %s"
+msgstr ""
+
+#: nova/auth/manager.py:523
+#, python-format
+msgid "modifying project %s"
+msgstr ""
+
+#: nova/auth/manager.py:553
+#, python-format
+msgid "Remove user %s from project %s"
+msgstr ""
+
+#: nova/auth/manager.py:581
+#, python-format
+msgid "Deleting project %s"
+msgstr ""
+
+#: nova/auth/manager.py:637
+#, python-format
+msgid "Created user %s (admin: %r)"
+msgstr ""
+
+#: nova/auth/manager.py:645
+#, python-format
+msgid "Deleting user %s"
+msgstr ""
+
+#: nova/auth/manager.py:655
+#, python-format
+msgid "Access Key change for user %s"
+msgstr ""
+
+#: nova/auth/manager.py:657
+#, python-format
+msgid "Secret Key change for user %s"
+msgstr ""
+
+#: nova/auth/manager.py:659
+#, python-format
+msgid "Admin status set to %r for user %s"
+msgstr ""
+
+#: nova/auth/manager.py:708
+#, python-format
+msgid "No vpn data for project %s"
+msgstr ""
+
+#: nova/cloudpipe/pipelib.py:45
+msgid "Template for script to run on cloudpipe instance boot"
+msgstr ""
+
+#: nova/cloudpipe/pipelib.py:48
+msgid "Network to push into openvpn config"
+msgstr ""
+
+#: nova/cloudpipe/pipelib.py:51
+msgid "Netmask to push into openvpn config"
+msgstr ""
+
+#: nova/cloudpipe/pipelib.py:97
+#, python-format
+msgid "Launching VPN for %s"
+msgstr ""
+
+#: nova/compute/api.py:67
+#, python-format
+msgid "Instance %d was not found in get_network_topic"
+msgstr ""
+
+#: nova/compute/api.py:73
+#, python-format
+msgid "Instance %d has no host"
+msgstr ""
+
+#: nova/compute/api.py:92
+#, python-format
+msgid "Quota exceeded for %s, tried to run %s instances"
+msgstr ""
+
+#: nova/compute/api.py:94
+#, python-format
+msgid "Instance quota exceeded. You can only run %s more instances of this type."
+msgstr ""
+
+#: nova/compute/api.py:109
+msgid "Creating a raw instance"
+msgstr ""
+
+#: nova/compute/api.py:156
+#, python-format
+msgid "Going to run %s instances..."
+msgstr ""
+
+#: nova/compute/api.py:180
+#, python-format
+msgid "Casting to scheduler for %s/%s's instance %s"
+msgstr ""
+
+#: nova/compute/api.py:279
+#, python-format
+msgid "Going to try and terminate %s"
+msgstr ""
+
+#: nova/compute/api.py:283
+#, python-format
+msgid "Instance %d was not found during terminate"
+msgstr ""
+
+#: nova/compute/api.py:288
+#, python-format
+msgid "Instance %d is already being terminated"
+msgstr ""
+
+#: nova/compute/api.py:450
+#, python-format
+msgid "Invalid device specified: %s. Example device: /dev/vdb"
+msgstr ""
+
+#: nova/compute/api.py:465
+msgid "Volume isn't attached to anything!"
+msgstr ""
+
+#: nova/compute/disk.py:71
+#, python-format
+msgid "Input partition size not evenly divisible by sector size: %d / %d"
+msgstr ""
+
+#: nova/compute/disk.py:75
+#, python-format
+msgid "Bytes for local storage not evenly divisible by sector size: %d / %d"
+msgstr ""
+
+#: nova/compute/disk.py:128
+#, python-format
+msgid "Could not attach image to loopback: %s"
+msgstr ""
+
+#: nova/compute/disk.py:136
+#, python-format
+msgid "Failed to load partition: %s"
+msgstr ""
+
+#: nova/compute/disk.py:158
+#, python-format
+msgid "Failed to mount filesystem: %s"
+msgstr ""
+
+#: nova/compute/instance_types.py:41
+#, python-format
+msgid "Unknown instance type: %s"
+msgstr ""
+
+#: nova/compute/manager.py:69
+#, python-format
+msgid "check_instance_lock: decorating: |%s|"
+msgstr ""
+
+#: nova/compute/manager.py:71
+#, python-format
+msgid "check_instance_lock: arguments: |%s| |%s| |%s|"
+msgstr ""
+
+#: nova/compute/manager.py:75
+#, python-format
+msgid "check_instance_lock: locked: |%s|"
+msgstr ""
+
+#: nova/compute/manager.py:77
+#, python-format
+msgid "check_instance_lock: admin: |%s|"
+msgstr ""
+
+#: nova/compute/manager.py:82
+#, python-format
+msgid "check_instance_lock: executing: |%s|"
+msgstr ""
+
+#: nova/compute/manager.py:86
+#, python-format
+msgid "check_instance_lock: not executing |%s|"
+msgstr ""
+
+#: nova/compute/manager.py:157
+msgid "Instance has already been created"
+msgstr ""
+
+#: nova/compute/manager.py:158
+#, python-format
+msgid "instance %s: starting..."
+msgstr ""
+
+#: nova/compute/manager.py:197
+#, python-format
+msgid "instance %s: Failed to spawn"
+msgstr ""
+
+#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228
+#, python-format
+msgid "Terminating instance %s"
+msgstr ""
+
+#: nova/compute/manager.py:217
+#, python-format
+msgid "Disassociating address %s"
+msgstr ""
+
+#: nova/compute/manager.py:230
+#, python-format
+msgid "Deallocating address %s"
+msgstr ""
+
+#: nova/compute/manager.py:243
+#, python-format
+msgid "trying to destroy already destroyed instance: %s"
+msgstr ""
+
+#: nova/compute/manager.py:257
+#, python-format
+msgid "Rebooting instance %s"
+msgstr ""
+
+#: nova/compute/manager.py:260
+#, python-format
+msgid "trying to reboot a non-running instance: %s (state: %s expected: %s)"
+msgstr ""
+
+#: nova/compute/manager.py:286
+#, python-format
+msgid "instance %s: snapshotting"
+msgstr ""
+
+#: nova/compute/manager.py:289
+#, python-format
+msgid "trying to snapshot a non-running instance: %s (state: %s expected: %s)"
+msgstr ""
+
+#: nova/compute/manager.py:301
+#, python-format
+msgid "instance %s: rescuing"
+msgstr ""
+
+#: nova/compute/manager.py:316
+#, python-format
+msgid "instance %s: unrescuing"
+msgstr ""
+
+#: nova/compute/manager.py:335
+#, python-format
+msgid "instance %s: pausing"
+msgstr ""
+
+#: nova/compute/manager.py:352
+#, python-format
+msgid "instance %s: unpausing"
+msgstr ""
+
+#: nova/compute/manager.py:369
+#, python-format
+msgid "instance %s: retrieving diagnostics"
+msgstr ""
+
+#: nova/compute/manager.py:382
+#, python-format
+msgid "instance %s: suspending"
+msgstr ""
+
+#: nova/compute/manager.py:401
+#, python-format
+msgid "instance %s: resuming"
+msgstr ""
+
+#: nova/compute/manager.py:420
+#, python-format
+msgid "instance %s: locking"
+msgstr ""
+
+#: nova/compute/manager.py:432
+#, python-format
+msgid "instance %s: unlocking"
+msgstr ""
+
+#: nova/compute/manager.py:442
+#, python-format
+msgid "instance %s: getting locked state"
+msgstr ""
+
+#: nova/compute/manager.py:462
+#, python-format
+msgid "instance %s: attaching volume %s to %s"
+msgstr ""
+
+#: nova/compute/manager.py:478
+#, python-format
+msgid "instance %s: attach failed %s, removing"
+msgstr ""
+
+#: nova/compute/manager.py:493
+#, python-format
+msgid "Detach volume %s from mountpoint %s on instance %s"
+msgstr ""
+
+#: nova/compute/manager.py:497
+#, python-format
+msgid "Detaching volume from unknown instance %s"
+msgstr ""
+
+#: nova/compute/monitor.py:259
+#, python-format
+msgid "updating %s..."
+msgstr ""
+
+#: nova/compute/monitor.py:289
+msgid "unexpected error during update"
+msgstr ""
+
+#: nova/compute/monitor.py:355
+#, python-format
+msgid "Cannot get blockstats for \"%s\" on \"%s\""
+msgstr ""
+
+#: nova/compute/monitor.py:377
+#, python-format
+msgid "Cannot get ifstats for \"%s\" on \"%s\""
+msgstr ""
+
+#: nova/compute/monitor.py:412
+msgid "unexpected exception getting connection"
+msgstr ""
+
+#: nova/compute/monitor.py:427
+#, python-format
+msgid "Found instance: %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:43
+msgid "Use of empty request context is deprecated"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:132
+#, python-format
+msgid "No service for id %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:229
+#, python-format
+msgid "No service for %s, %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:574
+#, python-format
+msgid "No floating ip for address %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:668
+#, python-format
+msgid "No instance for id %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598
+#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103
+#, python-format
+msgid "Instance %s not found"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:891
+#, python-format
+msgid "no keypair for user %s, name %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064
+#, python-format
+msgid "No network for id %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:1036
+#, python-format
+msgid "No network for bridge %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:1050
+#, python-format
+msgid "No network for instance %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:1180
+#, python-format
+msgid "Token %s does not exist"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:1205
+#, python-format
+msgid "No quota for project_id %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:1356
+#, python-format
+msgid "No volume for id %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:1401
+#, python-format
+msgid "Volume %s not found"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:1413
+#, python-format
+msgid "No export device found for volume %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:1426
+#, python-format
+msgid "No target id found for volume %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:1471
+#, python-format
+msgid "No security group with id %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:1488
+#, python-format
+msgid "No security group named %s for project: %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:1576
+#, python-format
+msgid "No security group rule with id %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:1650
+#, python-format
+msgid "No user for id %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:1666
+#, python-format
+msgid "No user for access key %s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:1728
+#, python-format
+msgid "No project with id %s"
+msgstr ""
+
+#: nova/image/glance.py:78
+#, python-format
+msgid "Parallax returned HTTP error %d from request for /images"
+msgstr ""
+
+#: nova/image/glance.py:97
+#, python-format
+msgid "Parallax returned HTTP error %d from request for /images/detail"
+msgstr ""
+
+#: nova/image/s3.py:82
+#, python-format
+msgid "Image %s could not be found"
+msgstr ""
+
+#: nova/network/api.py:39
+#, python-format
+msgid "Quota exceeded for %s, tried to allocate address"
+msgstr ""
+
+#: nova/network/api.py:42
+msgid "Address quota exceeded. You cannot allocate any more addresses"
+msgstr ""
+
+#: nova/network/linux_net.py:176
+#, python-format
+msgid "Starting VLAN interface %s"
+msgstr ""
+
+#: nova/network/linux_net.py:186
+#, python-format
+msgid "Starting Bridge interface for %s"
+msgstr ""
+
+#: nova/network/linux_net.py:254
+#, python-format
+msgid "Hupping dnsmasq threw %s"
+msgstr ""
+
+#: nova/network/linux_net.py:256
+#, python-format
+msgid "Pid %d is stale, relaunching dnsmasq"
+msgstr ""
+
+#: nova/network/linux_net.py:334
+#, python-format
+msgid "Killing dnsmasq threw %s"
+msgstr ""
+
+#: nova/network/manager.py:135
+msgid "setting network host"
+msgstr ""
+
+#: nova/network/manager.py:190
+#, python-format
+msgid "Leasing IP %s"
+msgstr ""
+
+#: nova/network/manager.py:194
+#, python-format
+msgid "IP %s leased that isn't associated"
+msgstr ""
+
+#: nova/network/manager.py:197
+#, python-format
+msgid "IP %s leased to bad mac %s vs %s"
+msgstr ""
+
+#: nova/network/manager.py:205
+#, python-format
+msgid "IP %s leased that was already deallocated"
+msgstr ""
+
+#: nova/network/manager.py:214
+#, python-format
+msgid "IP %s released that isn't associated"
+msgstr ""
+
+#: nova/network/manager.py:217
+#, python-format
+msgid "IP %s released from bad mac %s vs %s"
+msgstr ""
+
+#: nova/network/manager.py:220
+#, python-format
+msgid "IP %s released that was not leased"
+msgstr ""
+
+#: nova/network/manager.py:442
+#, python-format
+msgid "Disassociated %s stale fixed ip(s)"
+msgstr ""
+
+#: nova/objectstore/handler.py:106
+#, python-format
+msgid "Unknown S3 value type %r"
+msgstr ""
+
+#: nova/objectstore/handler.py:137
+msgid "Authenticated request"
+msgstr ""
+
+#: nova/objectstore/handler.py:182
+msgid "List of buckets requested"
+msgstr ""
+
+#: nova/objectstore/handler.py:209
+#, python-format
+msgid "List keys for bucket %s"
+msgstr ""
+
+#: nova/objectstore/handler.py:217
+#, python-format
+msgid "Unauthorized attempt to access bucket %s"
+msgstr ""
+
+#: nova/objectstore/handler.py:235
+#, python-format
+msgid "Creating bucket %s"
+msgstr ""
+
+#: nova/objectstore/handler.py:245
+#, python-format
+msgid "Deleting bucket %s"
+msgstr ""
+
+#: nova/objectstore/handler.py:249
+#, python-format
+msgid "Unauthorized attempt to delete bucket %s"
+msgstr ""
+
+#: nova/objectstore/handler.py:271
+#, python-format
+msgid "Getting object: %s / %s"
+msgstr ""
+
+#: nova/objectstore/handler.py:274
+#, python-format
+msgid "Unauthorized attempt to get object %s from bucket %s"
+msgstr ""
+
+#: nova/objectstore/handler.py:292
+#, python-format
+msgid "Putting object: %s / %s"
+msgstr ""
+
+#: nova/objectstore/handler.py:295
+#, python-format
+msgid "Unauthorized attempt to upload object %s to bucket %s"
+msgstr ""
+
+#: nova/objectstore/handler.py:314
+#, python-format
+msgid "Deleting object: %s / %s"
+msgstr ""
+
+#: nova/objectstore/handler.py:393
+#, python-format
+msgid "Not authorized to upload image: invalid directory %s"
+msgstr ""
+
+#: nova/objectstore/handler.py:401
+#, python-format
+msgid "Not authorized to upload image: unauthorized bucket %s"
+msgstr ""
+
+#: nova/objectstore/handler.py:406
+#, python-format
+msgid "Starting image upload: %s"
+msgstr ""
+
+#: nova/objectstore/handler.py:420
+#, python-format
+msgid "Not authorized to update attributes of image %s"
+msgstr ""
+
+#: nova/objectstore/handler.py:428
+#, python-format
+msgid "Toggling publicity flag of image %s %r"
+msgstr ""
+
+#: nova/objectstore/handler.py:433
+#, python-format
+msgid "Updating user fields on image %s"
+msgstr ""
+
+#: nova/objectstore/handler.py:447
+#, python-format
+msgid "Unauthorized attempt to delete image %s"
+msgstr ""
+
+#: nova/objectstore/handler.py:452
+#, python-format
+msgid "Deleted image: %s"
+msgstr ""
+
+#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73
+#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118
+msgid "No hosts found"
+msgstr ""
+
+#: nova/scheduler/driver.py:66
+msgid "Must implement a fallback schedule"
+msgstr ""
+
+#: nova/scheduler/manager.py:69
+#, python-format
+msgid "Casting to %s %s for %s"
+msgstr ""
+
+#: nova/scheduler/simple.py:63
+msgid "All hosts have too many cores"
+msgstr ""
+
+#: nova/scheduler/simple.py:95
+msgid "All hosts have too many gigabytes"
+msgstr ""
+
+#: nova/scheduler/simple.py:115
+msgid "All hosts have too many networks"
+msgstr ""
+
+#: nova/tests/test_cloud.py:198
+msgid "Can't test instances without a real virtual env."
+msgstr ""
+
+#: nova/tests/test_cloud.py:210
+#, python-format
+msgid "Need to watch instance %s until it's running..."
+msgstr ""
+
+#: nova/tests/test_compute.py:104
+#, python-format
+msgid "Running instances: %s"
+msgstr ""
+
+#: nova/tests/test_compute.py:110
+#, python-format
+msgid "After terminating instances: %s"
+msgstr ""
+
+#: nova/tests/test_rpc.py:89
+#, python-format
+msgid "Nested received %s, %s"
+msgstr ""
+
+#: nova/tests/test_rpc.py:94
+#, python-format
+msgid "Nested return %s"
+msgstr ""
+
+#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125
+#, python-format
+msgid "Received %s"
+msgstr ""
+
+#: nova/tests/test_volume.py:162
+#, python-format
+msgid "Target %s allocated"
+msgstr ""
+
+#: nova/virt/connection.py:73
+msgid "Failed to open connection to the hypervisor"
+msgstr ""
+
+#: nova/virt/fake.py:210
+#, python-format
+msgid "Instance %s Not Found"
+msgstr ""
+
+#: nova/virt/hyperv.py:118
+msgid "In init host"
+msgstr ""
+
+#: nova/virt/hyperv.py:131
+#, python-format
+msgid "Attempt to create duplicate vm %s"
+msgstr ""
+
+#: nova/virt/hyperv.py:148
+#, python-format
+msgid "Starting VM %s "
+msgstr ""
+
+#: nova/virt/hyperv.py:150
+#, python-format
+msgid "Started VM %s "
+msgstr ""
+
+#: nova/virt/hyperv.py:152
+#, python-format
+msgid "spawn vm failed: %s"
+msgstr ""
+
+#: nova/virt/hyperv.py:169
+#, python-format
+msgid "Failed to create VM %s"
+msgstr ""
+
+#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125
+#, python-format
+msgid "Created VM %s..."
+msgstr ""
+
+#: nova/virt/hyperv.py:188
+#, python-format
+msgid "Set memory for vm %s..."
+msgstr ""
+
+#: nova/virt/hyperv.py:198
+#, python-format
+msgid "Set vcpus for vm %s..."
+msgstr ""
+
+#: nova/virt/hyperv.py:202
+#, python-format
+msgid "Creating disk for %s by attaching disk file %s"
+msgstr ""
+
+#: nova/virt/hyperv.py:227
+#, python-format
+msgid "Failed to add diskdrive to VM %s"
+msgstr ""
+
+#: nova/virt/hyperv.py:230
+#, python-format
+msgid "New disk drive path is %s"
+msgstr ""
+
+#: nova/virt/hyperv.py:247
+#, python-format
+msgid "Failed to add vhd file to VM %s"
+msgstr ""
+
+#: nova/virt/hyperv.py:249
+#, python-format
+msgid "Created disk for %s"
+msgstr ""
+
+#: nova/virt/hyperv.py:253
+#, python-format
+msgid "Creating nic for %s "
+msgstr ""
+
+#: nova/virt/hyperv.py:272
+msgid "Failed creating a port on the external vswitch"
+msgstr ""
+
+#: nova/virt/hyperv.py:273
+#, python-format
+msgid "Failed creating port for %s"
+msgstr ""
+
+#: nova/virt/hyperv.py:275
+#, python-format
+msgid "Created switch port %s on switch %s"
+msgstr ""
+
+#: nova/virt/hyperv.py:285
+#, python-format
+msgid "Failed to add nic to VM %s"
+msgstr ""
+
+#: nova/virt/hyperv.py:287
+#, python-format
+msgid "Created nic for %s "
+msgstr ""
+
+#: nova/virt/hyperv.py:320
+#, python-format
+msgid "WMI job failed: %s"
+msgstr ""
+
+#: nova/virt/hyperv.py:322
+#, python-format
+msgid "WMI job succeeded: %s, Elapsed=%s "
+msgstr ""
+
+#: nova/virt/hyperv.py:358
+#, python-format
+msgid "Got request to destroy vm %s"
+msgstr ""
+
+#: nova/virt/hyperv.py:383
+#, python-format
+msgid "Failed to destroy vm %s"
+msgstr ""
+
+#: nova/virt/hyperv.py:389
+#, python-format
+msgid "Del: disk %s vm %s"
+msgstr ""
+
+#: nova/virt/hyperv.py:405
+#, python-format
+msgid ""
+"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, "
+"cpu_time=%s"
+msgstr ""
+
+#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301
+#, python-format
+msgid "duplicate name found: %s"
+msgstr ""
+
+#: nova/virt/hyperv.py:444
+#, python-format
+msgid "Successfully changed vm state of %s to %s"
+msgstr ""
+
+#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449
+#, python-format
+msgid "Failed to change vm state of %s to %s"
+msgstr ""
+
+#: nova/virt/images.py:70
+#, python-format
+msgid "Finished retrieving %s -- placed in %s"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:144
+#, python-format
+msgid "Connecting to libvirt: %s"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:157
+msgid "Connection to libvirt broke"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:229
+#, python-format
+msgid "instance %s: deleting instance files %s"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:271
+#, python-format
+msgid "No disk at %s"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:278
+msgid "Instance snapshotting is not supported for libvirt at this time"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:294
+#, python-format
+msgid "instance %s: rebooted"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:297
+#, python-format
+msgid "_wait_for_reboot failed: %s"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:340
+#, python-format
+msgid "instance %s: rescued"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:343
+#, python-format
+msgid "_wait_for_rescue failed: %s"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:370
+#, python-format
+msgid "instance %s: is running"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:381
+#, python-format
+msgid "instance %s: booted"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116
+#, python-format
+msgid "instance %s: failed to boot"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:395
+#, python-format
+msgid "virsh said: %r"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:399
+msgid "cool, it's a device"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:407
+#, python-format
+msgid "data: %r, fpath: %r"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:415
+#, python-format
+msgid "Contents of file %s: %r"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:449
+#, python-format
+msgid "instance %s: Creating image"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:505
+#, python-format
+msgid "instance %s: injecting key into image %s"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:508
+#, python-format
+msgid "instance %s: injecting net into image %s"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:516
+#, python-format
+msgid "instance %s: ignoring error injecting data into image %s (%s)"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547
+#, python-format
+msgid "instance %s: starting toXML method"
+msgstr ""
+
+#: nova/virt/libvirt_conn.py:589
+#, python-format
+msgid "instance %s: finished toXML method"
+msgstr ""
+
+#: nova/virt/xenapi_conn.py:113
+msgid ""
+"Must specify xenapi_connection_url, xenapi_connection_username "
+"(optionally), and xenapi_connection_password to use "
+"connection_type=xenapi"
+msgstr ""
+
+#: nova/virt/xenapi_conn.py:263
+#, python-format
+msgid "Task [%s] %s status: success %s"
+msgstr ""
+
+#: nova/virt/xenapi_conn.py:271
+#, python-format
+msgid "Task [%s] %s status: %s %s"
+msgstr ""
+
+#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300
+#, python-format
+msgid "Got exception: %s"
+msgstr ""
+
+#: nova/virt/xenapi/fake.py:72
+#, python-format
+msgid "%s: _db_content => %s"
+msgstr ""
+
+#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338
+#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404
+msgid "Raising NotImplemented"
+msgstr ""
+
+#: nova/virt/xenapi/fake.py:249
+#, python-format
+msgid "xenapi.fake does not have an implementation for %s"
+msgstr ""
+
+#: nova/virt/xenapi/fake.py:283
+#, python-format
+msgid "Calling %s %s"
+msgstr ""
+
+#: nova/virt/xenapi/fake.py:288
+#, python-format
+msgid "Calling getter %s"
+msgstr ""
+
+#: nova/virt/xenapi/fake.py:340
+#, python-format
+msgid ""
+"xenapi.fake does not have an implementation for %s or it has been called "
+"with the wrong number of arguments"
+msgstr ""
+
+#: nova/virt/xenapi/network_utils.py:40
+#, python-format
+msgid "Found non-unique network for bridge %s"
+msgstr ""
+
+#: nova/virt/xenapi/network_utils.py:43
+#, python-format
+msgid "Found no network for bridge %s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:127
+#, python-format
+msgid "Created VM %s as %s."
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:147
+#, python-format
+msgid "Creating VBD for VM %s, VDI %s ... "
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:149
+#, python-format
+msgid "Created VBD %s for VM %s, VDI %s."
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:165
+#, python-format
+msgid "VBD not found in instance %s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:175
+#, python-format
+msgid "Unable to unplug VBD %s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:187
+#, python-format
+msgid "Unable to destroy VBD %s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:202
+#, python-format
+msgid "Creating VIF for VM %s, network %s."
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:205
+#, python-format
+msgid "Created VIF %s for VM %s, network %s."
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:216
+#, python-format
+msgid "Snapshotting VM %s with label '%s'..."
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:229
+#, python-format
+msgid "Created snapshot %s from VM %s."
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:243
+#, python-format
+msgid "Asking xapi to upload %s as '%s'"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:261
+#, python-format
+msgid "Asking xapi to fetch %s as %s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:279
+#, python-format
+msgid "Looking up vdi %s for PV kernel"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:290
+#, python-format
+msgid "PV Kernel in VDI:%d"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:318
+#, python-format
+msgid "VDI %s is still available"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:331
+#, python-format
+msgid "(VM_UTILS) xenserver vm state -> |%s|"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:333
+#, python-format
+msgid "(VM_UTILS) xenapi power_state -> |%s|"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:390
+#, python-format
+msgid "VHD %s has parent %s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:407
+#, python-format
+msgid "Re-scanning SR %s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:431
+#, python-format
+msgid "Parent %s doesn't match original parent %s, waiting for coalesce..."
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:448
+#, python-format
+msgid "No VDIs found for VM %s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:452
+#, python-format
+msgid "Unexpected number of VDIs (%s) found for VM %s"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:62
+#, python-format
+msgid "Attempted to create non-unique name %s"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:99
+#, python-format
+msgid "Starting VM %s..."
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:101
+#, python-format
+msgid "Spawning VM %s created %s."
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:112
+#, python-format
+msgid "Instance %s: booted"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:137
+#, python-format
+msgid "Instance not present %s"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:166
+#, python-format
+msgid "Starting snapshot for VM %s"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:174
+#, python-format
+msgid "Unable to Snapshot %s: %s"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:184
+#, python-format
+msgid "Finished snapshot and upload for VM %s"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:252
+#, python-format
+msgid "suspend: instance not present %s"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:262
+#, python-format
+msgid "resume: instance not present %s"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:271
+#, python-format
+msgid "Instance not found %s"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:57
+#, python-format
+msgid "Introducing %s..."
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:74
+#, python-format
+msgid "Introduced %s as %s."
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:78
+msgid "Unable to create Storage Repository"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:90
+#, python-format
+msgid "Unable to find SR from VBD %s"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:96
+#, python-format
+msgid "Forgetting SR %s ... "
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:101
+#, python-format
+msgid "Ignoring exception %s when getting PBDs for %s"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:107
+#, python-format
+msgid "Ignoring exception %s when unplugging PBD %s"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:111
+#, python-format
+msgid "Forgetting SR %s done."
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:113
+#, python-format
+msgid "Ignoring exception %s when forgetting SR %s"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:123
+#, python-format
+msgid "Unable to introduce VDI on SR %s"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:128
+#, python-format
+msgid "Unable to get record of VDI %s on"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:146
+#, python-format
+msgid "Unable to introduce VDI for SR %s"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:175
+#, python-format
+msgid "Unable to obtain target information %s, %s"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:197
+#, python-format
+msgid "Mountpoint cannot be translated: %s"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:51
+#, python-format
+msgid "Attach_volume: %s, %s, %s"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:69
+#, python-format
+msgid "Unable to create VDI on SR %s for instance %s"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:81
+#, python-format
+msgid "Unable to use SR %s for instance %s"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:93
+#, python-format
+msgid "Unable to attach volume to instance %s"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:95
+#, python-format
+msgid "Mountpoint %s attached to instance %s"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:106
+#, python-format
+msgid "Detach_volume: %s, %s"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:113
+#, python-format
+msgid "Unable to locate volume %s"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:121
+#, python-format
+msgid "Unable to detach volume %s"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:128
+#, python-format
+msgid "Mountpoint %s detached from instance %s"
+msgstr ""
+
+#: nova/volume/api.py:44
+#, python-format
+msgid "Quota exceeeded for %s, tried to create %sG volume"
+msgstr ""
+
+#: nova/volume/api.py:46
+#, python-format
+msgid "Volume quota exceeded. You cannot create a volume of size %s"
+msgstr ""
+
+#: nova/volume/api.py:70 nova/volume/api.py:95
+msgid "Volume status must be available"
+msgstr ""
+
+#: nova/volume/api.py:97
+msgid "Volume is already attached"
+msgstr ""
+
+#: nova/volume/api.py:103
+msgid "Volume is already detached"
+msgstr ""
+
+#: nova/volume/driver.py:76
+#, python-format
+msgid "Recovering from a failed execute. Try number %s"
+msgstr ""
+
+#: nova/volume/driver.py:85
+#, python-format
+msgid "volume group %s doesn't exist"
+msgstr ""
+
+#: nova/volume/driver.py:210
+#, python-format
+msgid "FAKE AOE: %s"
+msgstr ""
+
+#: nova/volume/driver.py:315
+#, python-format
+msgid "FAKE ISCSI: %s"
+msgstr ""
+
+#: nova/volume/manager.py:85
+#, python-format
+msgid "Re-exporting %s volumes"
+msgstr ""
+
+#: nova/volume/manager.py:93
+#, python-format
+msgid "volume %s: creating"
+msgstr ""
+
+#: nova/volume/manager.py:102
+#, python-format
+msgid "volume %s: creating lv of size %sG"
+msgstr ""
+
+#: nova/volume/manager.py:106
+#, python-format
+msgid "volume %s: creating export"
+msgstr ""
+
+#: nova/volume/manager.py:113
+#, python-format
+msgid "volume %s: created successfully"
+msgstr ""
+
+#: nova/volume/manager.py:121
+msgid "Volume is still attached"
+msgstr ""
+
+#: nova/volume/manager.py:123
+msgid "Volume is not local to this node"
+msgstr ""
+
+#: nova/volume/manager.py:124
+#, python-format
+msgid "volume %s: removing export"
+msgstr ""
+
+#: nova/volume/manager.py:126
+#, python-format
+msgid "volume %s: deleting"
+msgstr ""
+
+#: nova/volume/manager.py:129
+#, python-format
+msgid "volume %s: deleted successfully"
+msgstr ""
+
diff --git a/nova/api/__init__.py b/nova/api/__init__.py
index 26fed847b..803470570 100644
--- a/nova/api/__init__.py
+++ b/nova/api/__init__.py
@@ -24,7 +24,6 @@ Root WSGI middleware for all API controllers.
:ec2api_subdomain: subdomain running the EC2 API (default: ec2)
"""
-import logging
import routes
import webob.dec
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index aa3bfaeb4..2fa1f636c 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -20,7 +20,7 @@ Starting point for routing EC2 requests.
"""
-import logging
+import datetime
import routes
import webob
import webob.dec
@@ -29,6 +29,7 @@ import webob.exc
from nova import context
from nova import exception
from nova import flags
+from nova import log as logging
from nova import wsgi
from nova.api.ec2 import apirequest
from nova.api.ec2 import admin
@@ -37,6 +38,7 @@ from nova.auth import manager
FLAGS = flags.FLAGS
+LOG = logging.getLogger("nova.api")
flags.DEFINE_boolean('use_forwarded_for', False,
'Treat X-Forwarded-For as the canonical remote address. '
'Only enable this if you have a sanitizing proxy.')
@@ -52,10 +54,6 @@ flags.DEFINE_list('lockout_memcached_servers', None,
'Memcached servers or None for in process cache.')
-_log = logging.getLogger("api")
-_log.setLevel(logging.DEBUG)
-
-
class API(wsgi.Middleware):
"""Routing for all EC2 API requests."""
@@ -64,6 +62,40 @@ class API(wsgi.Middleware):
if FLAGS.use_lockout:
self.application = Lockout(self.application)
+ @webob.dec.wsgify
+ def __call__(self, req):
+ rv = req.get_response(self.application)
+ self.log_request_completion(rv, req)
+ return rv
+
+ def log_request_completion(self, response, request):
+ controller = request.environ.get('ec2.controller', None)
+ if controller:
+ controller = controller.__class__.__name__
+ action = request.environ.get('ec2.action', None)
+ ctxt = request.environ.get('ec2.context', None)
+ seconds = 'X'
+ microseconds = 'X'
+ if ctxt:
+ delta = datetime.datetime.utcnow() - \
+ ctxt.timestamp
+ seconds = delta.seconds
+ microseconds = delta.microseconds
+ LOG.info(
+ "%s.%ss %s %s %s %s:%s %s [%s] %s %s",
+ seconds,
+ microseconds,
+ request.remote_addr,
+ request.method,
+ request.path_info,
+ controller,
+ action,
+ response.status_int,
+ request.user_agent,
+ request.content_type,
+ response.content_type,
+ context=ctxt)
+
class Lockout(wsgi.Middleware):
"""Lockout for x minutes on y failed auths in a z minute period.
@@ -98,7 +130,7 @@ class Lockout(wsgi.Middleware):
failures_key = "authfailures-%s" % access_key
failures = int(self.mc.get(failures_key) or 0)
if failures >= FLAGS.lockout_attempts:
- detail = "Too many failed authentications."
+ detail = _("Too many failed authentications.")
raise webob.exc.HTTPForbidden(detail=detail)
res = req.get_response(self.application)
if res.status_int == 403:
@@ -107,9 +139,9 @@ class Lockout(wsgi.Middleware):
# NOTE(vish): To use incr, failures has to be a string.
self.mc.set(failures_key, '1', time=FLAGS.lockout_window * 60)
elif failures >= FLAGS.lockout_attempts:
- _log.warn('Access key %s has had %d failed authentications'
- ' and will be locked out for %d minutes.' %
- (access_key, failures, FLAGS.lockout_minutes))
+ LOG.warn(_('Access key %s has had %d failed authentications'
+ ' and will be locked out for %d minutes.'),
+ access_key, failures, FLAGS.lockout_minutes)
self.mc.set(failures_key, str(failures),
time=FLAGS.lockout_minutes * 60)
return res
@@ -142,8 +174,9 @@ class Authenticate(wsgi.Middleware):
req.method,
req.host,
req.path)
- except exception.Error, ex:
- logging.debug(_("Authentication Failure: %s") % ex)
+ # Be explicit for what exceptions are 403, the rest bubble as 500
+ except (exception.NotFound, exception.NotAuthorized) as ex:
+ LOG.audit(_("Authentication Failure: %s"), str(ex))
raise webob.exc.HTTPForbidden()
# Authenticated!
@@ -154,6 +187,8 @@ class Authenticate(wsgi.Middleware):
project=project,
remote_address=remote_address)
req.environ['ec2.context'] = ctxt
+        LOG.audit(_('Authenticated Request For %s:%s'), user.name,
+ project.name, context=req.environ['ec2.context'])
return self.application
@@ -189,9 +224,9 @@ class Router(wsgi.Middleware):
except:
raise webob.exc.HTTPBadRequest()
- _log.debug(_('action: %s') % action)
+ LOG.debug(_('action: %s'), action)
for key, value in args.items():
- _log.debug(_('arg: %s\t\tval: %s') % (key, value))
+ LOG.debug(_('arg: %s\t\tval: %s'), key, value)
# Success!
req.environ['ec2.controller'] = controller
@@ -263,6 +298,9 @@ class Authorizer(wsgi.Middleware):
if self._matches_any_role(context, allowed_roles):
return self.application
else:
+ LOG.audit(_("Unauthorized request for controller=%s "
+ "and action=%s"), controller_name, action,
+ context=context)
raise webob.exc.HTTPUnauthorized()
def _matches_any_role(self, context, roles):
@@ -297,15 +335,24 @@ class Executor(wsgi.Application):
result = None
try:
result = api_request.send(context, **args)
+ except exception.NotFound as ex:
+ LOG.info(_('NotFound raised: %s'), str(ex), context=context)
+ return self._error(req, context, type(ex).__name__, str(ex))
except exception.ApiError as ex:
-
+ LOG.exception(_('ApiError raised: %s'), str(ex), context=context)
if ex.code:
- return self._error(req, ex.code, ex.message)
+ return self._error(req, context, ex.code, str(ex))
else:
- return self._error(req, type(ex).__name__, ex.message)
- # TODO(vish): do something more useful with unknown exceptions
+ return self._error(req, context, type(ex).__name__, str(ex))
except Exception as ex:
- return self._error(req, type(ex).__name__, str(ex))
+ extra = {'environment': req.environ}
+ LOG.exception(_('Unexpected error raised: %s'), str(ex),
+ extra=extra, context=context)
+ return self._error(req,
+ context,
+ 'UnknownError',
+ _('An unknown error has occurred. '
+ 'Please try your request again.'))
else:
resp = webob.Response()
resp.status = 200
@@ -313,15 +360,16 @@ class Executor(wsgi.Application):
resp.body = str(result)
return resp
- def _error(self, req, code, message):
- logging.error("%s: %s", code, message)
+ def _error(self, req, context, code, message):
+ LOG.error("%s: %s", code, message, context=context)
resp = webob.Response()
resp.status = 400
resp.headers['Content-Type'] = 'text/xml'
resp.body = str('<?xml version="1.0"?>\n'
- '<Response><Errors><Error><Code>%s</Code>'
- '<Message>%s</Message></Error></Errors>'
- '<RequestID>?</RequestID></Response>' % (code, message))
+ '<Response><Errors><Error><Code>%s</Code>'
+ '<Message>%s</Message></Error></Errors>'
+ '<RequestID>%s</RequestID></Response>' %
+ (code, message, context.request_id))
return resp
diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py
index fac01369e..758b612e8 100644
--- a/nova/api/ec2/admin.py
+++ b/nova/api/ec2/admin.py
@@ -24,9 +24,13 @@ import base64
from nova import db
from nova import exception
+from nova import log as logging
from nova.auth import manager
+LOG = logging.getLogger('nova.api.ec2.admin')
+
+
def user_dict(user, base64_file=None):
"""Convert the user object to a result dict"""
if user:
@@ -75,17 +79,18 @@ class AdminController(object):
return {'userSet':
[user_dict(u) for u in manager.AuthManager().get_users()]}
- def register_user(self, _context, name, **_kwargs):
+ def register_user(self, context, name, **_kwargs):
"""Creates a new user, and returns generated credentials."""
+ LOG.audit(_("Creating new user: %s"), name, context=context)
return user_dict(manager.AuthManager().create_user(name))
- def deregister_user(self, _context, name, **_kwargs):
+ def deregister_user(self, context, name, **_kwargs):
"""Deletes a single user (NOT undoable.)
Should throw an exception if the user has instances,
volumes, or buckets remaining.
"""
+ LOG.audit(_("Deleting user: %s"), name, context=context)
manager.AuthManager().delete_user(name)
-
return True
def describe_roles(self, context, project_roles=True, **kwargs):
@@ -105,15 +110,27 @@ class AdminController(object):
operation='add', **kwargs):
"""Add or remove a role for a user and project."""
if operation == 'add':
+ if project:
+ LOG.audit(_("Adding role %s to user %s for project %s"), role,
+ user, project, context=context)
+ else:
+ LOG.audit(_("Adding sitewide role %s to user %s"), role, user,
+ context=context)
manager.AuthManager().add_role(user, role, project)
elif operation == 'remove':
+ if project:
+ LOG.audit(_("Removing role %s from user %s for project %s"),
+ role, user, project, context=context)
+ else:
+ LOG.audit(_("Removing sitewide role %s from user %s"), role,
+ user, context=context)
manager.AuthManager().remove_role(user, role, project)
else:
- raise exception.ApiError('operation must be add or remove')
+ raise exception.ApiError(_('operation must be add or remove'))
return True
- def generate_x509_for_user(self, _context, name, project=None, **kwargs):
+ def generate_x509_for_user(self, context, name, project=None, **kwargs):
"""Generates and returns an x509 certificate for a single user.
Is usually called from a client that will wrap this with
access and secret key info, and return a zip file.
@@ -122,6 +139,8 @@ class AdminController(object):
project = name
project = manager.AuthManager().get_project(project)
user = manager.AuthManager().get_user(name)
+ LOG.audit(_("Getting x509 for user: %s on project: %s"), name,
+ project, context=context)
return user_dict(user, base64.b64encode(project.get_credentials(user)))
def describe_project(self, context, name, **kwargs):
@@ -137,6 +156,8 @@ class AdminController(object):
def register_project(self, context, name, manager_user, description=None,
member_users=None, **kwargs):
"""Creates a new project"""
+ LOG.audit(_("Create project %s managed by %s"), name, manager_user,
+ context=context)
return project_dict(
manager.AuthManager().create_project(
name,
@@ -146,6 +167,7 @@ class AdminController(object):
def deregister_project(self, context, name):
"""Permanently deletes a project."""
+ LOG.audit(_("Delete project: %s"), name, context=context)
manager.AuthManager().delete_project(name)
return True
@@ -159,11 +181,15 @@ class AdminController(object):
**kwargs):
"""Add or remove a user from a project."""
if operation == 'add':
+ LOG.audit(_("Adding user %s to project %s"), user, project,
+ context=context)
manager.AuthManager().add_to_project(user, project)
elif operation == 'remove':
+ LOG.audit(_("Removing user %s from project %s"), user, project,
+ context=context)
manager.AuthManager().remove_from_project(user, project)
else:
- raise exception.ApiError('operation must be add or remove')
+ raise exception.ApiError(_('operation must be add or remove'))
return True
# FIXME(vish): these host commands don't work yet, perhaps some of the
diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py
index a90fbeb0c..d0b417db1 100644
--- a/nova/api/ec2/apirequest.py
+++ b/nova/api/ec2/apirequest.py
@@ -20,13 +20,13 @@
APIRequest class
"""
-import logging
import re
# TODO(termie): replace minidom with etree
from xml.dom import minidom
-_log = logging.getLogger("api")
-_log.setLevel(logging.DEBUG)
+from nova import log as logging
+
+LOG = logging.getLogger("nova.api.request")
_c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
@@ -94,7 +94,7 @@ class APIRequest(object):
except AttributeError:
_error = _('Unsupported API request: controller = %s,'
'action = %s') % (self.controller, self.action)
- _log.warning(_error)
+ LOG.exception(_error)
# TODO: Raise custom exception, trap in apiserver,
# and reraise as 400 error.
raise Exception(_error)
@@ -142,7 +142,7 @@ class APIRequest(object):
response = xml.toxml()
xml.unlink()
- _log.debug(response)
+ LOG.debug(response)
return response
def _render_dict(self, xml, el, data):
@@ -151,7 +151,7 @@ class APIRequest(object):
val = data[key]
el.appendChild(self._render_data(xml, key, val))
except:
- _log.debug(data)
+ LOG.debug(data)
raise
def _render_data(self, xml, el_name, data):
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 6619b5452..39174d554 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -24,18 +24,16 @@ datastore.
import base64
import datetime
-import logging
-import re
-import os
-
-from nova import context
import IPy
+import os
from nova import compute
+from nova import context
from nova import crypto
from nova import db
from nova import exception
from nova import flags
+from nova import log as logging
from nova import network
from nova import rpc
from nova import utils
@@ -44,6 +42,9 @@ from nova.compute import instance_types
FLAGS = flags.FLAGS
+flags.DECLARE('service_down_time', 'nova.scheduler.driver')
+
+LOG = logging.getLogger("nova.api.cloud")
InvalidInputException = exception.InvalidInputException
@@ -131,15 +132,6 @@ class CloudController(object):
result[key] = [line]
return result
- def _trigger_refresh_security_group(self, context, security_group):
- nodes = set([instance['host'] for instance in security_group.instances
- if instance['host'] is not None])
- for node in nodes:
- rpc.cast(context,
- '%s.%s' % (FLAGS.compute_topic, node),
- {"method": "refresh_security_group",
- "args": {"security_group_id": security_group.id}})
-
def get_metadata(self, address):
ctxt = context.get_admin_context()
instance_ref = self.compute_api.get_all(ctxt, fixed_ip=address)
@@ -207,7 +199,7 @@ class CloudController(object):
'zoneState': 'available'}]}
services = db.service_get_all(context)
- now = db.get_time()
+ now = datetime.datetime.utcnow()
hosts = []
for host in [service['host'] for service in services]:
if not host in hosts:
@@ -247,6 +239,7 @@ class CloudController(object):
FLAGS.cc_host,
FLAGS.cc_port,
FLAGS.ec2_suffix)}]
+ return {'regionInfo': regions}
def describe_snapshots(self,
context,
@@ -282,6 +275,7 @@ class CloudController(object):
return {'keypairsSet': result}
def create_key_pair(self, context, key_name, **kwargs):
+ LOG.audit(_("Create key pair %s"), key_name, context=context)
data = _gen_key(context, context.user.id, key_name)
return {'keyName': key_name,
'keyFingerprint': data['fingerprint'],
@@ -289,6 +283,7 @@ class CloudController(object):
# TODO(vish): when context is no longer an object, pass it here
def delete_key_pair(self, context, key_name, **kwargs):
+ LOG.audit(_("Delete key pair %s"), key_name, context=context)
try:
db.key_pair_destroy(context, context.user.id, key_name)
except exception.NotFound:
@@ -395,6 +390,8 @@ class CloudController(object):
return False
def revoke_security_group_ingress(self, context, group_name, **kwargs):
+ LOG.audit(_("Revoke security group ingress %s"), group_name,
+ context=context)
self.compute_api.ensure_default_security_group(context)
security_group = db.security_group_get_by_name(context,
context.project_id,
@@ -402,8 +399,8 @@ class CloudController(object):
criteria = self._revoke_rule_args_to_dict(context, **kwargs)
if criteria == None:
- raise exception.ApiError(_("No rule for the specified "
- "parameters."))
+ raise exception.ApiError(_("Not enough parameters to build a "
+ "valid rule."))
for rule in security_group.rules:
match = True
@@ -412,7 +409,8 @@ class CloudController(object):
match = False
if match:
db.security_group_rule_destroy(context, rule['id'])
- self._trigger_refresh_security_group(context, security_group)
+ self.compute_api.trigger_security_group_rules_refresh(context,
+ security_group['id'])
return True
raise exception.ApiError(_("No rule for the specified parameters."))
@@ -421,12 +419,17 @@ class CloudController(object):
# for these operations, so support for newer API versions
# is sketchy.
def authorize_security_group_ingress(self, context, group_name, **kwargs):
+ LOG.audit(_("Authorize security group ingress %s"), group_name,
+ context=context)
self.compute_api.ensure_default_security_group(context)
security_group = db.security_group_get_by_name(context,
context.project_id,
group_name)
values = self._revoke_rule_args_to_dict(context, **kwargs)
+ if values is None:
+ raise exception.ApiError(_("Not enough parameters to build a "
+ "valid rule."))
values['parent_group_id'] = security_group.id
if self._security_group_rule_exists(security_group, values):
@@ -435,7 +438,8 @@ class CloudController(object):
security_group_rule = db.security_group_rule_create(context, values)
- self._trigger_refresh_security_group(context, security_group)
+ self.compute_api.trigger_security_group_rules_refresh(context,
+ security_group['id'])
return True
@@ -457,6 +461,7 @@ class CloudController(object):
return source_project_id
def create_security_group(self, context, group_name, group_description):
+ LOG.audit(_("Create Security Group %s"), group_name, context=context)
self.compute_api.ensure_default_security_group(context)
if db.security_group_exists(context, context.project_id, group_name):
raise exception.ApiError(_('group %s already exists') % group_name)
@@ -471,6 +476,7 @@ class CloudController(object):
group_ref)]}
def delete_security_group(self, context, group_name, **kwargs):
+ LOG.audit(_("Delete security group %s"), group_name, context=context)
security_group = db.security_group_get_by_name(context,
context.project_id,
group_name)
@@ -478,6 +484,8 @@ class CloudController(object):
return True
def get_console_output(self, context, instance_id, **kwargs):
+ LOG.audit(_("Get console output for instance %s"), instance_id,
+ context=context)
# instance_id is passed in as a list of instances
ec2_id = instance_id[0]
instance_id = ec2_id_to_id(ec2_id)
@@ -493,6 +501,11 @@ class CloudController(object):
"Timestamp": now,
"output": base64.b64encode(output)}
+ def get_ajax_console(self, context, instance_id, **kwargs):
+ ec2_id = instance_id[0]
+ internal_id = ec2_id_to_id(ec2_id)
+ return self.compute_api.get_ajax_console(context, internal_id)
+
def describe_volumes(self, context, volume_id=None, **kwargs):
volumes = self.volume_api.get_all(context)
# NOTE(vish): volume_id is an optional list of volume ids to filter by.
@@ -536,6 +549,7 @@ class CloudController(object):
return v
def create_volume(self, context, size, **kwargs):
+ LOG.audit(_("Create volume of %s GB"), size, context=context)
volume = self.volume_api.create(context, size,
kwargs.get('display_name'),
kwargs.get('display_description'))
@@ -559,6 +573,8 @@ class CloudController(object):
return True
def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
+        LOG.audit(_("Attach volume %s to instance %s at %s"), volume_id,
+ instance_id, device, context=context)
self.compute_api.attach_volume(context, instance_id, volume_id, device)
volume = self.volume_api.get(context, volume_id)
return {'attachTime': volume['attach_time'],
@@ -569,6 +585,7 @@ class CloudController(object):
'volumeId': volume_id}
def detach_volume(self, context, volume_id, **kwargs):
+ LOG.audit(_("Detach volume %s"), volume_id, context=context)
volume = self.volume_api.get(context, volume_id)
instance = self.compute_api.detach_volume(context, volume_id)
return {'attachTime': volume['attach_time'],
@@ -586,19 +603,24 @@ class CloudController(object):
return [{label: x} for x in lst]
def describe_instances(self, context, **kwargs):
- return self._format_describe_instances(context)
+ return self._format_describe_instances(context, **kwargs)
- def _format_describe_instances(self, context):
- return {'reservationSet': self._format_instances(context)}
+ def _format_describe_instances(self, context, **kwargs):
+ return {'reservationSet': self._format_instances(context, **kwargs)}
def _format_run_instances(self, context, reservation_id):
i = self._format_instances(context, reservation_id=reservation_id)
assert len(i) == 1
return i[0]
- def _format_instances(self, context, **kwargs):
+ def _format_instances(self, context, instance_id=None, **kwargs):
reservations = {}
- instances = self.compute_api.get_all(context, **kwargs)
+ # NOTE(vish): instance_id is an optional list of ids to filter by
+ if instance_id:
+ instance_id = [ec2_id_to_id(x) for x in instance_id]
+ instances = [self.compute_api.get(context, x) for x in instance_id]
+ else:
+ instances = self.compute_api.get_all(context, **kwargs)
for instance in instances:
if not context.user.is_admin():
if instance['image_id'] == FLAGS.vpn_image_id:
@@ -670,19 +692,24 @@ class CloudController(object):
return {'addressesSet': addresses}
def allocate_address(self, context, **kwargs):
+ LOG.audit(_("Allocate address"), context=context)
public_ip = self.network_api.allocate_floating_ip(context)
return {'addressSet': [{'publicIp': public_ip}]}
def release_address(self, context, public_ip, **kwargs):
+ LOG.audit(_("Release address %s"), public_ip, context=context)
self.network_api.release_floating_ip(context, public_ip)
return {'releaseResponse': ["Address released."]}
def associate_address(self, context, instance_id, public_ip, **kwargs):
+ LOG.audit(_("Associate address %s to instance %s"), public_ip,
+ instance_id, context=context)
instance_id = ec2_id_to_id(instance_id)
self.compute_api.associate_floating_ip(context, instance_id, public_ip)
return {'associateResponse': ["Address associated."]}
def disassociate_address(self, context, public_ip, **kwargs):
+ LOG.audit(_("Disassociate address %s"), public_ip, context=context)
self.network_api.disassociate_floating_ip(context, public_ip)
return {'disassociateResponse': ["Address disassociated."]}
@@ -709,7 +736,7 @@ class CloudController(object):
def terminate_instances(self, context, instance_id, **kwargs):
"""Terminate each instance in instance_id, which is a list of ec2 ids.
instance_id is a kwarg so its name cannot be modified."""
- logging.debug("Going to start terminating instances")
+ LOG.debug(_("Going to start terminating instances"))
for ec2_id in instance_id:
instance_id = ec2_id_to_id(ec2_id)
self.compute_api.delete(context, instance_id)
@@ -717,6 +744,7 @@ class CloudController(object):
def reboot_instances(self, context, instance_id, **kwargs):
"""instance_id is a list of instance ids"""
+ LOG.audit(_("Reboot instance %r"), instance_id, context=context)
for ec2_id in instance_id:
instance_id = ec2_id_to_id(ec2_id)
self.compute_api.reboot(context, instance_id)
@@ -753,6 +781,7 @@ class CloudController(object):
return {'imagesSet': images}
def deregister_image(self, context, image_id, **kwargs):
+ LOG.audit(_("De-registering image %s"), image_id, context=context)
self.image_service.deregister(context, image_id)
return {'imageId': image_id}
@@ -760,7 +789,8 @@ class CloudController(object):
if image_location is None and 'name' in kwargs:
image_location = kwargs['name']
image_id = self.image_service.register(context, image_location)
- logging.debug("Registered %s as %s" % (image_location, image_id))
+ LOG.audit(_("Registered image %s with id %s"), image_location,
+ image_id, context=context)
return {'imageId': image_id}
def describe_image_attribute(self, context, image_id, attribute, **kwargs):
@@ -788,6 +818,7 @@ class CloudController(object):
raise exception.ApiError(_('only group "all" is supported'))
if not operation_type in ['add', 'remove']:
raise exception.ApiError(_('operation_type must be add or remove'))
+ LOG.audit(_("Updating image %s publicity"), image_id, context=context)
return self.image_service.modify(context, image_id, operation_type)
def update_image(self, context, image_id, **kwargs):
diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py
index a57a6698a..848f0b034 100644
--- a/nova/api/ec2/metadatarequesthandler.py
+++ b/nova/api/ec2/metadatarequesthandler.py
@@ -18,15 +18,15 @@
"""Metadata request handler."""
-import logging
-
import webob.dec
import webob.exc
+from nova import log as logging
from nova import flags
from nova.api.ec2 import cloud
+LOG = logging.getLogger('nova.api.ec2.metadata')
FLAGS = flags.FLAGS
@@ -72,8 +72,7 @@ class MetadataRequestHandler(object):
remote_address = req.headers.get('X-Forwarded-For', remote_address)
meta_data = cc.get_metadata(remote_address)
if meta_data is None:
- logging.error(_('Failed to get metadata for ip: %s') %
- remote_address)
+ LOG.error(_('Failed to get metadata for ip: %s'), remote_address)
raise webob.exc.HTTPNotFound()
data = self.lookup(req.path_info, meta_data)
if data is None:
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index a1430caed..f96e2af91 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -20,28 +20,25 @@
WSGI middleware for OpenStack API controllers.
"""
-import time
-
-import logging
import routes
-import traceback
import webob.dec
import webob.exc
import webob
-from nova import context
from nova import flags
+from nova import log as logging
from nova import utils
from nova import wsgi
from nova.api.openstack import faults
from nova.api.openstack import backup_schedules
+from nova.api.openstack import consoles
from nova.api.openstack import flavors
from nova.api.openstack import images
-from nova.api.openstack import ratelimiting
from nova.api.openstack import servers
-from nova.api.openstack import sharedipgroups
+from nova.api.openstack import shared_ip_groups
+LOG = logging.getLogger('nova.api.openstack')
FLAGS = flags.FLAGS
flags.DEFINE_string('os_api_auth',
'nova.api.openstack.auth.AuthMiddleware',
@@ -51,6 +48,10 @@ flags.DEFINE_string('os_api_ratelimiting',
'nova.api.openstack.ratelimiting.RateLimitingMiddleware',
'Default ratelimiting implementation for the Openstack API')
+flags.DEFINE_string('os_krm_mapping_file',
+ 'krm_mapping.json',
+ 'Location of OpenStack Flavor/OS:EC2 Kernel/Ramdisk/Machine JSON file.')
+
flags.DEFINE_bool('allow_admin_api',
False,
'When True, this API service will accept admin operations.')
@@ -71,8 +72,7 @@ class API(wsgi.Middleware):
try:
return req.get_response(self.application)
except Exception as ex:
- logging.warn(_("Caught error: %s") % str(ex))
- logging.error(traceback.format_exc())
+ LOG.exception(_("Caught error: %s"), str(ex))
exc = webob.exc.HTTPInternalServerError(explanation=str(ex))
return faults.Fault(exc)
@@ -88,7 +88,7 @@ class APIRouter(wsgi.Router):
server_members = {'action': 'POST'}
if FLAGS.allow_admin_api:
- logging.debug("Including admin operations in API.")
+ LOG.debug(_("Including admin operations in API."))
server_members['pause'] = 'POST'
server_members['unpause'] = 'POST'
server_members["diagnostics"] = "GET"
@@ -105,12 +105,18 @@ class APIRouter(wsgi.Router):
parent_resource=dict(member_name='server',
collection_name='servers'))
+ mapper.resource("console", "consoles",
+ controller=consoles.Controller(),
+ parent_resource=dict(member_name='server',
+ collection_name='servers'))
+
mapper.resource("image", "images", controller=images.Controller(),
collection={'detail': 'GET'})
mapper.resource("flavor", "flavors", controller=flavors.Controller(),
collection={'detail': 'GET'})
- mapper.resource("sharedipgroup", "sharedipgroups",
- controller=sharedipgroups.Controller())
+ mapper.resource("shared_ip_group", "shared_ip_groups",
+ collection={'detail': 'GET'},
+ controller=shared_ip_groups.Controller())
super(APIRouter, self).__init__(mapper)
diff --git a/nova/api/openstack/backup_schedules.py b/nova/api/openstack/backup_schedules.py
index fcc07bdd3..197125d86 100644
--- a/nova/api/openstack/backup_schedules.py
+++ b/nova/api/openstack/backup_schedules.py
@@ -15,7 +15,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+import logging
import time
+
from webob import exc
from nova import wsgi
@@ -46,8 +48,8 @@ class Controller(wsgi.Controller):
def create(self, req, server_id):
""" No actual update method required, since the existing API allows
both create and update through a POST """
- return faults.Fault(exc.HTTPNotFound())
+ return faults.Fault(exc.HTTPNotImplemented())
def delete(self, req, server_id, id):
""" Deletes an existing backup schedule """
- return faults.Fault(exc.HTTPNotFound())
+ return faults.Fault(exc.HTTPNotImplemented())
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index ac0572c96..037ed47a0 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -15,6 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+from nova import exception
+
def limited(items, req):
"""Return a slice of items according to requested offset and limit.
@@ -34,3 +36,25 @@ def limited(items, req):
limit = min(1000, limit)
range_end = offset + limit
return items[offset:range_end]
+
+
+def get_image_id_from_image_hash(image_service, context, image_hash):
+ """Given an Image ID Hash, return an objectstore Image ID.
+
+ image_service - reference to objectstore compatible image service.
+ context - security context for image service requests.
+ image_hash - hash of the image ID.
+ """
+
+    # FIXME(sandy): This is terribly inefficient. It pulls all images
+ # from objectstore in order to find the match. ObjectStore
+ # should have a numeric counterpart to the string ID.
+ try:
+ items = image_service.detail(context)
+ except NotImplementedError:
+ items = image_service.index(context)
+ for image in items:
+ image_id = image['imageId']
+ if abs(hash(image_id)) == int(image_hash):
+ return image_id
+ raise exception.NotFound(image_hash)
diff --git a/nova/api/openstack/consoles.py b/nova/api/openstack/consoles.py
new file mode 100644
index 000000000..9ebdbe710
--- /dev/null
+++ b/nova/api/openstack/consoles.py
@@ -0,0 +1,96 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from webob import exc
+
+from nova import console
+from nova import exception
+from nova import wsgi
+from nova.api.openstack import faults
+
+
+def _translate_keys(cons):
+    """Coerces a console instance into proper dictionary format."""
+ pool = cons['pool']
+ info = {'id': cons['id'],
+ 'console_type': pool['console_type']}
+ return dict(console=info)
+
+
+def _translate_detail_keys(cons):
+    """Coerces a console instance into proper dictionary format with
+       correctly mapped attributes."""
+ pool = cons['pool']
+ info = {'id': cons['id'],
+ 'console_type': pool['console_type'],
+ 'password': cons['password'],
+ 'port': cons['port'],
+ 'host': pool['public_hostname']}
+ return dict(console=info)
+
+
+class Controller(wsgi.Controller):
+ """The Consoles Controller for the Openstack API"""
+
+ _serialization_metadata = {
+ 'application/xml': {
+ 'attributes': {
+ 'console': []}}}
+
+ def __init__(self):
+ self.console_api = console.API()
+ super(Controller, self).__init__()
+
+ def index(self, req, server_id):
+ """Returns a list of consoles for this instance"""
+ consoles = self.console_api.get_consoles(
+ req.environ['nova.context'],
+ int(server_id))
+ return dict(consoles=[_translate_keys(console)
+ for console in consoles])
+
+ def create(self, req, server_id):
+ """Creates a new console"""
+ #info = self._deserialize(req.body, req)
+ self.console_api.create_console(
+ req.environ['nova.context'],
+ int(server_id))
+
+ def show(self, req, server_id, id):
+ """Shows in-depth information on a specific console"""
+ try:
+ console = self.console_api.get_console(
+ req.environ['nova.context'],
+ int(server_id),
+ int(id))
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+ return _translate_detail_keys(console)
+
+ def update(self, req, server_id, id):
+ """You can't update a console"""
+ raise faults.Fault(exc.HTTPNotImplemented())
+
+ def delete(self, req, server_id, id):
+ """Deletes a console"""
+ try:
+ self.console_api.delete_console(req.environ['nova.context'],
+ int(server_id),
+ int(id))
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+ return exc.HTTPAccepted()
diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py
index 0b239aab8..a5f55a489 100644
--- a/nova/api/openstack/images.py
+++ b/nova/api/openstack/images.py
@@ -15,6 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import logging
+
from webob import exc
from nova import compute
@@ -26,6 +28,7 @@ from nova.api.openstack import common
from nova.api.openstack import faults
import nova.image.service
+
FLAGS = flags.FLAGS
@@ -88,6 +91,12 @@ def _filter_keys(item, keys):
return dict((k, v) for k, v in item.iteritems() if k in keys)
+def _convert_image_id_to_hash(image):
+ image_id = abs(hash(image['imageId']))
+ image['imageId'] = image_id
+ image['id'] = image_id
+
+
class Controller(wsgi.Controller):
_serialization_metadata = {
@@ -112,6 +121,9 @@ class Controller(wsgi.Controller):
items = self._service.detail(req.environ['nova.context'])
except NotImplementedError:
items = self._service.index(req.environ['nova.context'])
+ for image in items:
+ _convert_image_id_to_hash(image)
+
items = common.limited(items, req)
items = [_translate_keys(item) for item in items]
items = [_translate_status(item) for item in items]
@@ -119,7 +131,12 @@ class Controller(wsgi.Controller):
def show(self, req, id):
"""Return data about the given image id"""
- return dict(image=self._service.show(req.environ['nova.context'], id))
+ image_id = common.get_image_id_from_image_hash(self._service,
+ req.environ['nova.context'], id)
+
+ image = self._service.show(req.environ['nova.context'], image_id)
+ _convert_image_id_to_hash(image)
+ return dict(image=image)
def delete(self, req, id):
# Only public images are supported for now.
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 10679ccb6..29af82533 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -15,14 +15,17 @@
# License for the specific language governing permissions and limitations
# under the License.
-import logging
+import json
import traceback
from webob import exc
from nova import compute
from nova import exception
+from nova import flags
+from nova import log as logging
from nova import wsgi
+from nova import utils
from nova.api.openstack import common
from nova.api.openstack import faults
from nova.auth import manager as auth_manager
@@ -35,6 +38,9 @@ LOG = logging.getLogger('server')
LOG.setLevel(logging.DEBUG)
+FLAGS = flags.FLAGS
+
+
def _translate_detail_keys(inst):
""" Coerces into dictionary format, mapping everything to Rackspace-like
attributes for return"""
@@ -44,7 +50,7 @@ def _translate_detail_keys(inst):
power_state.RUNNING: 'active',
power_state.BLOCKED: 'active',
power_state.SUSPENDED: 'suspended',
- power_state.PAUSED: 'error',
+ power_state.PAUSED: 'paused',
power_state.SHUTDOWN: 'active',
power_state.SHUTOFF: 'active',
power_state.CRASHED: 'error'}
@@ -81,6 +87,7 @@ class Controller(wsgi.Controller):
def __init__(self):
self.compute_api = compute.API()
+ self._image_service = utils.import_object(FLAGS.image_service)
super(Controller, self).__init__()
def index(self, req):
@@ -117,6 +124,18 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPNotFound())
return exc.HTTPAccepted()
+ def _get_kernel_ramdisk_from_image(self, image_id):
+ mapping_filename = FLAGS.os_krm_mapping_file
+
+ with open(mapping_filename) as f:
+ mapping = json.load(f)
+ if image_id in mapping:
+ return mapping[image_id]
+
+ raise exception.NotFound(
+ _("No entry for image '%s' in mapping file '%s'") %
+ (image_id, mapping_filename))
+
def create(self, req):
""" Creates a new server for a given user """
env = self._deserialize(req.body, req)
@@ -125,10 +144,15 @@ class Controller(wsgi.Controller):
key_pair = auth_manager.AuthManager.get_key_pairs(
req.environ['nova.context'])[0]
+ image_id = common.get_image_id_from_image_hash(self._image_service,
+ req.environ['nova.context'], env['server']['imageId'])
+ kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(image_id)
instances = self.compute_api.create(
req.environ['nova.context'],
instance_types.get_by_flavor_id(env['server']['flavorId']),
- env['server']['imageId'],
+ image_id,
+ kernel_id=kernel_id,
+ ramdisk_id=ramdisk_id,
display_name=env['server']['name'],
display_description=env['server']['name'],
key_name=key_pair['name'],
@@ -158,6 +182,7 @@ class Controller(wsgi.Controller):
""" Multi-purpose method used to reboot, rebuild, and
resize a server """
input_dict = self._deserialize(req.body, req)
+ #TODO(sandy): rebuild/resize not supported.
try:
reboot_type = input_dict['reboot']['type']
except Exception:
@@ -181,7 +206,7 @@ class Controller(wsgi.Controller):
self.compute_api.lock(context, id)
except:
readable = traceback.format_exc()
- logging.error(_("Compute.api::lock %s"), readable)
+ LOG.exception(_("Compute.api::lock %s"), readable)
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
@@ -196,7 +221,7 @@ class Controller(wsgi.Controller):
self.compute_api.unlock(context, id)
except:
readable = traceback.format_exc()
- logging.error(_("Compute.api::unlock %s"), readable)
+ LOG.exception(_("Compute.api::unlock %s"), readable)
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
@@ -210,7 +235,7 @@ class Controller(wsgi.Controller):
self.compute_api.get_lock(context, id)
except:
readable = traceback.format_exc()
- logging.error(_("Compute.api::get_lock %s"), readable)
+ LOG.exception(_("Compute.api::get_lock %s"), readable)
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
@@ -221,7 +246,7 @@ class Controller(wsgi.Controller):
self.compute_api.pause(ctxt, id)
except:
readable = traceback.format_exc()
- logging.error(_("Compute.api::pause %s"), readable)
+ LOG.exception(_("Compute.api::pause %s"), readable)
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
@@ -232,7 +257,7 @@ class Controller(wsgi.Controller):
self.compute_api.unpause(ctxt, id)
except:
readable = traceback.format_exc()
- logging.error(_("Compute.api::unpause %s"), readable)
+ LOG.exception(_("Compute.api::unpause %s"), readable)
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
@@ -243,7 +268,7 @@ class Controller(wsgi.Controller):
self.compute_api.suspend(context, id)
except:
readable = traceback.format_exc()
- logging.error(_("compute.api::suspend %s"), readable)
+ LOG.exception(_("compute.api::suspend %s"), readable)
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
@@ -254,10 +279,19 @@ class Controller(wsgi.Controller):
self.compute_api.resume(context, id)
except:
readable = traceback.format_exc()
- logging.error(_("compute.api::resume %s"), readable)
+ LOG.exception(_("compute.api::resume %s"), readable)
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
+ def get_ajax_console(self, req, id):
+ """ Returns a url to an instance's ajaxterm console. """
+ try:
+ self.compute_api.get_ajax_console(req.environ['nova.context'],
+ int(id))
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+ return exc.HTTPAccepted()
+
def diagnostics(self, req, id):
"""Permit Admins to retrieve server diagnostics."""
ctxt = req.environ["nova.context"]
diff --git a/nova/api/openstack/sharedipgroups.py b/nova/api/openstack/shared_ip_groups.py
index 845f5bead..bd3cc23a8 100644
--- a/nova/api/openstack/sharedipgroups.py
+++ b/nova/api/openstack/shared_ip_groups.py
@@ -15,6 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import logging
+
from webob import exc
from nova import wsgi
@@ -29,7 +31,7 @@ def _translate_keys(inst):
def _translate_detail_keys(inst):
""" Coerces a shared IP group instance into proper dictionary format with
correctly mapped attributes """
- return dict(sharedIpGroup=inst)
+ return dict(sharedIpGroups=inst)
class Controller(wsgi.Controller):
@@ -54,12 +56,12 @@ class Controller(wsgi.Controller):
def delete(self, req, id):
""" Deletes a Shared IP Group """
- raise faults.Fault(exc.HTTPNotFound())
+ raise faults.Fault(exc.HTTPNotImplemented())
- def detail(self, req, id):
+ def detail(self, req):
""" Returns a complete list of Shared IP Groups """
return _translate_detail_keys({})
def create(self, req):
""" Creates a new Shared IP group """
- raise faults.Fault(exc.HTTPNotFound())
+ raise faults.Fault(exc.HTTPNotImplemented())
diff --git a/nova/auth/dbdriver.py b/nova/auth/dbdriver.py
index 47e435cb6..0eb6fe588 100644
--- a/nova/auth/dbdriver.py
+++ b/nova/auth/dbdriver.py
@@ -20,7 +20,6 @@
Auth driver using the DB as its backend.
"""
-import logging
import sys
from nova import context
diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py
index 7616ff112..bc53e0ec6 100644
--- a/nova/auth/ldapdriver.py
+++ b/nova/auth/ldapdriver.py
@@ -24,11 +24,11 @@ other backends by creating another class that exposes the same
public methods.
"""
-import logging
import sys
from nova import exception
from nova import flags
+from nova import log as logging
FLAGS = flags.FLAGS
@@ -65,6 +65,8 @@ flags.DEFINE_string('ldap_netadmin',
flags.DEFINE_string('ldap_developer',
'cn=developers,ou=Groups,dc=example,dc=com', 'cn for Developers')
+LOG = logging.getLogger("nova.ldapdriver")
+
# TODO(vish): make an abstract base class with the same public methods
# to define a set interface for AuthDrivers. I'm delaying
@@ -117,8 +119,7 @@ class LdapDriver(object):
def get_project(self, pid):
"""Retrieve project by id"""
- dn = 'cn=%s,%s' % (pid,
- FLAGS.ldap_project_subtree)
+ dn = self.__project_to_dn(pid)
attr = self.__find_object(dn, LdapDriver.project_pattern)
return self.__to_project(attr)
@@ -226,7 +227,8 @@ class LdapDriver(object):
('description', [description]),
(LdapDriver.project_attribute, [manager_dn]),
('member', members)]
- self.conn.add_s('cn=%s,%s' % (name, FLAGS.ldap_project_subtree), attr)
+ dn = self.__project_to_dn(name, search=False)
+ self.conn.add_s(dn, attr)
return self.__to_project(dict(attr))
def modify_project(self, project_id, manager_uid=None, description=None):
@@ -244,23 +246,22 @@ class LdapDriver(object):
manager_dn))
if description:
attr.append((self.ldap.MOD_REPLACE, 'description', description))
- self.conn.modify_s('cn=%s,%s' % (project_id,
- FLAGS.ldap_project_subtree),
- attr)
+ dn = self.__project_to_dn(project_id)
+ self.conn.modify_s(dn, attr)
def add_to_project(self, uid, project_id):
"""Add user to project"""
- dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
+ dn = self.__project_to_dn(project_id)
return self.__add_to_group(uid, dn)
def remove_from_project(self, uid, project_id):
"""Remove user from project"""
- dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
+ dn = self.__project_to_dn(project_id)
return self.__remove_from_group(uid, dn)
def is_in_project(self, uid, project_id):
"""Check if user is in project"""
- dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
+ dn = self.__project_to_dn(project_id)
return self.__is_in_group(uid, dn)
def has_role(self, uid, role, project_id=None):
@@ -300,7 +301,7 @@ class LdapDriver(object):
roles.append(role)
return roles
else:
- project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
+ project_dn = self.__project_to_dn(project_id)
query = ('(&(&(objectclass=groupOfNames)(!%s))(member=%s))' %
(LdapDriver.project_pattern, self.__uid_to_dn(uid)))
roles = self.__find_objects(project_dn, query)
@@ -333,7 +334,7 @@ class LdapDriver(object):
def delete_project(self, project_id):
"""Delete a project"""
- project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
+ project_dn = self.__project_to_dn(project_id)
self.__delete_roles(project_dn)
self.__delete_group(project_dn)
@@ -365,9 +366,10 @@ class LdapDriver(object):
def __get_ldap_user(self, uid):
"""Retrieve LDAP user entry by id"""
- attr = self.__find_object(self.__uid_to_dn(uid),
- '(objectclass=novaUser)')
- return attr
+ dn = FLAGS.ldap_user_subtree
+ query = ('(&(%s=%s)(objectclass=novaUser))' %
+ (FLAGS.ldap_user_id_attribute, uid))
+ return self.__find_object(dn, query)
def __find_object(self, dn, query=None, scope=None):
"""Find an object by dn and query"""
@@ -418,15 +420,13 @@ class LdapDriver(object):
query = '(objectclass=groupOfNames)'
return self.__find_object(dn, query) is not None
- @staticmethod
- def __role_to_dn(role, project_id=None):
+ def __role_to_dn(self, role, project_id=None):
"""Convert role to corresponding dn"""
if project_id is None:
return FLAGS.__getitem__("ldap_%s" % role).value
else:
- return 'cn=%s,cn=%s,%s' % (role,
- project_id,
- FLAGS.ldap_project_subtree)
+ project_dn = self.__project_to_dn(project_id)
+ return 'cn=%s,%s' % (role, project_dn)
def __create_group(self, group_dn, name, uid,
description, member_uids=None):
@@ -502,8 +502,8 @@ class LdapDriver(object):
try:
self.conn.modify_s(group_dn, attr)
except self.ldap.OBJECT_CLASS_VIOLATION:
- logging.debug(_("Attempted to remove the last member of a group. "
- "Deleting the group at %s instead."), group_dn)
+ LOG.debug(_("Attempted to remove the last member of a group. "
+ "Deleting the group at %s instead."), group_dn)
self.__delete_group(group_dn)
def __remove_from_all(self, uid):
@@ -532,6 +532,42 @@ class LdapDriver(object):
for role_dn in self.__find_role_dns(project_dn):
self.__delete_group(role_dn)
+ def __to_project(self, attr):
+ """Convert ldap attributes to Project object"""
+ if attr is None:
+ return None
+ member_dns = attr.get('member', [])
+ return {
+ 'id': attr['cn'][0],
+ 'name': attr['cn'][0],
+ 'project_manager_id':
+ self.__dn_to_uid(attr[LdapDriver.project_attribute][0]),
+ 'description': attr.get('description', [None])[0],
+ 'member_ids': [self.__dn_to_uid(x) for x in member_dns]}
+
+ def __uid_to_dn(self, uid, search=True):
+ """Convert uid to dn"""
+ # By default return a generated DN
+ userdn = (FLAGS.ldap_user_id_attribute + '=%s,%s'
+ % (uid, FLAGS.ldap_user_subtree))
+ if search:
+ query = ('%s=%s' % (FLAGS.ldap_user_id_attribute, uid))
+ user = self.__find_dns(FLAGS.ldap_user_subtree, query)
+ if len(user) > 0:
+ userdn = user[0]
+ return userdn
+
+ def __project_to_dn(self, pid, search=True):
+ """Convert pid to dn"""
+ # By default return a generated DN
+ projectdn = ('cn=%s,%s' % (pid, FLAGS.ldap_project_subtree))
+ if search:
+ query = ('(&(cn=%s)%s)' % (pid, LdapDriver.project_pattern))
+ project = self.__find_dns(FLAGS.ldap_project_subtree, query)
+ if len(project) > 0:
+ projectdn = project[0]
+ return projectdn
+
@staticmethod
def __to_user(attr):
"""Convert ldap attributes to User object"""
@@ -548,30 +584,11 @@ class LdapDriver(object):
else:
return None
- def __to_project(self, attr):
- """Convert ldap attributes to Project object"""
- if attr is None:
- return None
- member_dns = attr.get('member', [])
- return {
- 'id': attr['cn'][0],
- 'name': attr['cn'][0],
- 'project_manager_id':
- self.__dn_to_uid(attr[LdapDriver.project_attribute][0]),
- 'description': attr.get('description', [None])[0],
- 'member_ids': [self.__dn_to_uid(x) for x in member_dns]}
-
@staticmethod
def __dn_to_uid(dn):
"""Convert user dn to uid"""
return dn.split(',')[0].split('=')[1]
- @staticmethod
- def __uid_to_dn(uid):
- """Convert uid to dn"""
- return (FLAGS.ldap_user_id_attribute + '=%s,%s'
- % (uid, FLAGS.ldap_user_subtree))
-
class FakeLdapDriver(LdapDriver):
"""Fake Ldap Auth driver"""
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index d3e266952..89f02998d 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -20,7 +20,6 @@
Nova authentication management
"""
-import logging
import os
import shutil
import string # pylint: disable-msg=W0402
@@ -33,6 +32,7 @@ from nova import crypto
from nova import db
from nova import exception
from nova import flags
+from nova import log as logging
from nova import utils
from nova.auth import signer
@@ -70,6 +70,8 @@ flags.DEFINE_string('credential_rc_file', '%src',
flags.DEFINE_string('auth_driver', 'nova.auth.dbdriver.DbDriver',
'Driver that auth manager uses')
+LOG = logging.getLogger('nova.auth.manager')
+
class AuthBase(object):
"""Base class for objects relating to auth
@@ -254,43 +256,51 @@ class AuthManager(object):
# TODO(vish): check for valid timestamp
(access_key, _sep, project_id) = access.partition(':')
- logging.info(_('Looking up user: %r'), access_key)
+ LOG.debug(_('Looking up user: %r'), access_key)
user = self.get_user_from_access_key(access_key)
- logging.info('user: %r', user)
+ LOG.debug('user: %r', user)
if user == None:
+ LOG.audit(_("Failed authorization for access key %s"), access_key)
raise exception.NotFound(_('No user found for access key %s')
% access_key)
# NOTE(vish): if we stop using project name as id we need better
# logic to find a default project for user
if project_id == '':
+ LOG.debug(_("Using project name = user name (%s)"), user.name)
project_id = user.name
project = self.get_project(project_id)
if project == None:
+ LOG.audit(_("failed authorization: no project named %s (user=%s)"),
+ project_id, user.name)
raise exception.NotFound(_('No project called %s could be found')
% project_id)
if not self.is_admin(user) and not self.is_project_member(user,
project):
+ LOG.audit(_("Failed authorization: user %s not admin and not "
+ "member of project %s"), user.name, project.name)
raise exception.NotFound(_('User %s is not a member of project %s')
% (user.id, project.id))
if check_type == 's3':
sign = signer.Signer(user.secret.encode())
expected_signature = sign.s3_authorization(headers, verb, path)
- logging.debug('user.secret: %s', user.secret)
- logging.debug('expected_signature: %s', expected_signature)
- logging.debug('signature: %s', signature)
+ LOG.debug('user.secret: %s', user.secret)
+ LOG.debug('expected_signature: %s', expected_signature)
+ LOG.debug('signature: %s', signature)
if signature != expected_signature:
+ LOG.audit(_("Invalid signature for user %s"), user.name)
raise exception.NotAuthorized(_('Signature does not match'))
elif check_type == 'ec2':
# NOTE(vish): hmac can't handle unicode, so encode ensures that
# secret isn't unicode
expected_signature = signer.Signer(user.secret.encode()).generate(
params, verb, server_string, path)
- logging.debug('user.secret: %s', user.secret)
- logging.debug('expected_signature: %s', expected_signature)
- logging.debug('signature: %s', signature)
+ LOG.debug('user.secret: %s', user.secret)
+ LOG.debug('expected_signature: %s', expected_signature)
+ LOG.debug('signature: %s', signature)
if signature != expected_signature:
+ LOG.audit(_("Invalid signature for user %s"), user.name)
raise exception.NotAuthorized(_('Signature does not match'))
return (user, project)
@@ -398,6 +408,12 @@ class AuthManager(object):
raise exception.NotFound(_("The %s role can not be found") % role)
if project is not None and role in FLAGS.global_roles:
raise exception.NotFound(_("The %s role is global only") % role)
+ if project:
+ LOG.audit(_("Adding role %s to user %s in project %s"), role,
+ User.safe_id(user), Project.safe_id(project))
+ else:
+ LOG.audit(_("Adding sitewide role %s to user %s"), role,
+ User.safe_id(user))
with self.driver() as drv:
drv.add_role(User.safe_id(user), role, Project.safe_id(project))
@@ -418,6 +434,12 @@ class AuthManager(object):
@type project: Project or project_id
@param project: Project in which to remove local role.
"""
+ if project:
+ LOG.audit(_("Removing role %s from user %s on project %s"),
+ role, User.safe_id(user), Project.safe_id(project))
+ else:
+ LOG.audit(_("Removing sitewide role %s from user %s"), role,
+ User.safe_id(user))
with self.driver() as drv:
drv.remove_role(User.safe_id(user), role, Project.safe_id(project))
@@ -480,6 +502,8 @@ class AuthManager(object):
description,
member_users)
if project_dict:
+ LOG.audit(_("Created project %s with manager %s"), name,
+ manager_user)
project = Project(**project_dict)
return project
@@ -496,6 +520,7 @@ class AuthManager(object):
@param project: This will be the new description of the project.
"""
+ LOG.audit(_("modifying project %s"), Project.safe_id(project))
if manager_user:
manager_user = User.safe_id(manager_user)
with self.driver() as drv:
@@ -505,6 +530,8 @@ class AuthManager(object):
def add_to_project(self, user, project):
"""Add user to project"""
+ LOG.audit(_("Adding user %s to project %s"), User.safe_id(user),
+ Project.safe_id(project))
with self.driver() as drv:
return drv.add_to_project(User.safe_id(user),
Project.safe_id(project))
@@ -523,6 +550,8 @@ class AuthManager(object):
def remove_from_project(self, user, project):
"""Removes a user from a project"""
+ LOG.audit(_("Remove user %s from project %s"), User.safe_id(user),
+ Project.safe_id(project))
with self.driver() as drv:
return drv.remove_from_project(User.safe_id(user),
Project.safe_id(project))
@@ -549,6 +578,7 @@ class AuthManager(object):
def delete_project(self, project):
"""Deletes a project"""
+ LOG.audit(_("Deleting project %s"), Project.safe_id(project))
with self.driver() as drv:
drv.delete_project(Project.safe_id(project))
@@ -603,13 +633,16 @@ class AuthManager(object):
with self.driver() as drv:
user_dict = drv.create_user(name, access, secret, admin)
if user_dict:
- return User(**user_dict)
+ rv = User(**user_dict)
+ LOG.audit(_("Created user %s (admin: %r)"), rv.name, rv.admin)
+ return rv
def delete_user(self, user):
"""Deletes a user
Additionally deletes all users key_pairs"""
uid = User.safe_id(user)
+ LOG.audit(_("Deleting user %s"), uid)
db.key_pair_destroy_all_by_user(context.get_admin_context(),
uid)
with self.driver() as drv:
@@ -618,6 +651,12 @@ class AuthManager(object):
def modify_user(self, user, access_key=None, secret_key=None, admin=None):
"""Modify credentials for a user"""
uid = User.safe_id(user)
+ if access_key:
+ LOG.audit(_("Access Key change for user %s"), uid)
+ if secret_key:
+ LOG.audit(_("Secret Key change for user %s"), uid)
+ if admin is not None:
+ LOG.audit(_("Admin status set to %r for user %s"), admin, uid)
with self.driver() as drv:
drv.modify_user(uid, access_key, secret_key, admin)
@@ -645,8 +684,7 @@ class AuthManager(object):
else:
regions = {'nova': FLAGS.cc_host}
for region, host in regions.iteritems():
- rc = self.__generate_rc(user.access,
- user.secret,
+ rc = self.__generate_rc(user,
pid,
use_dmz,
host)
@@ -666,7 +704,7 @@ class AuthManager(object):
port=vpn_port)
zippy.writestr(FLAGS.credential_vpn_file, config)
else:
- logging.warn(_("No vpn data for project %s"), pid)
+ LOG.warn(_("No vpn data for project %s"), pid)
zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(pid))
zippy.close()
@@ -686,7 +724,7 @@ class AuthManager(object):
return self.__generate_rc(user.access, user.secret, pid, use_dmz)
@staticmethod
- def __generate_rc(access, secret, pid, use_dmz=True, host=None):
+ def __generate_rc(user, pid, use_dmz=True, host=None):
"""Generate rc file for user"""
if use_dmz:
cc_host = FLAGS.cc_dmz
@@ -699,14 +737,19 @@ class AuthManager(object):
s3_host = host
cc_host = host
rc = open(FLAGS.credentials_template).read()
- rc = rc % {'access': access,
+ rc = rc % {'access': user.access,
'project': pid,
- 'secret': secret,
+ 'secret': user.secret,
'ec2': '%s://%s:%s%s' % (FLAGS.ec2_prefix,
cc_host,
FLAGS.cc_port,
FLAGS.ec2_suffix),
's3': 'http://%s:%s' % (s3_host, FLAGS.s3_port),
+ 'os': '%s://%s:%s%s' % (FLAGS.os_prefix,
+ cc_host,
+ FLAGS.cc_port,
+ FLAGS.os_suffix),
+ 'user': user.name,
'nova': FLAGS.ca_file,
'cert': FLAGS.credential_cert_file,
'key': FLAGS.credential_key_file}
diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template
index 1b8ecb173..c53a4acdc 100644
--- a/nova/auth/novarc.template
+++ b/nova/auth/novarc.template
@@ -10,3 +10,7 @@ export NOVA_CERT=${NOVA_KEY_DIR}/%(nova)s
export EUCALYPTUS_CERT=${NOVA_CERT} # euca-bundle-image seems to require this set
alias ec2-bundle-image="ec2-bundle-image --cert ${EC2_CERT} --privatekey ${EC2_PRIVATE_KEY} --user 42 --ec2cert ${NOVA_CERT}"
alias ec2-upload-bundle="ec2-upload-bundle -a ${EC2_ACCESS_KEY} -s ${EC2_SECRET_KEY} --url ${S3_URL} --ec2cert ${NOVA_CERT}"
+export CLOUD_SERVERS_API_KEY="%(access)s"
+export CLOUD_SERVERS_USERNAME="%(user)s"
+export CLOUD_SERVERS_URL="%(os)s"
+
diff --git a/nova/auth/signer.py b/nova/auth/signer.py
index f7d29f534..744e315d4 100644
--- a/nova/auth/signer.py
+++ b/nova/auth/signer.py
@@ -46,7 +46,6 @@ Utility class for parsing signed AMI manifests.
import base64
import hashlib
import hmac
-import logging
import urllib
# NOTE(vish): for new boto
@@ -54,9 +53,13 @@ import boto
# NOTE(vish): for old boto
import boto.utils
+from nova import log as logging
from nova.exception import Error
+LOG = logging.getLogger('nova.signer')
+
+
class Signer(object):
"""Hacked up code from boto/connection.py"""
@@ -120,7 +123,7 @@ class Signer(object):
def _calc_signature_2(self, params, verb, server_string, path):
"""Generate AWS signature version 2 string."""
- logging.debug('using _calc_signature_2')
+ LOG.debug('using _calc_signature_2')
string_to_sign = '%s\n%s\n%s\n' % (verb, server_string, path)
if self.hmac_256:
current_hmac = self.hmac_256
@@ -136,13 +139,13 @@ class Signer(object):
val = urllib.quote(val, safe='-_~')
pairs.append(urllib.quote(key, safe='') + '=' + val)
qs = '&'.join(pairs)
- logging.debug('query string: %s', qs)
+ LOG.debug('query string: %s', qs)
string_to_sign += qs
- logging.debug('string_to_sign: %s', string_to_sign)
+ LOG.debug('string_to_sign: %s', string_to_sign)
current_hmac.update(string_to_sign)
b64 = base64.b64encode(current_hmac.digest())
- logging.debug('len(b64)=%d', len(b64))
- logging.debug('base64 encoded digest: %s', b64)
+ LOG.debug('len(b64)=%d', len(b64))
+ LOG.debug('base64 encoded digest: %s', b64)
return b64
diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py
index 09361828d..8aefd341f 100644
--- a/nova/cloudpipe/pipelib.py
+++ b/nova/cloudpipe/pipelib.py
@@ -22,7 +22,6 @@ an instance with it.
"""
-import logging
import os
import string
import tempfile
@@ -33,6 +32,7 @@ from nova import crypto
from nova import db
from nova import exception
from nova import flags
+from nova import log as logging
from nova import utils
from nova.auth import manager
# TODO(eday): Eventually changes these to something not ec2-specific
@@ -51,7 +51,7 @@ flags.DEFINE_string('dmz_mask',
_('Netmask to push into openvpn config'))
-LOG = logging.getLogger('nova-cloudpipe')
+LOG = logging.getLogger('nova.cloudpipe')
class CloudPipe(object):
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 015934ee7..56402c11b 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -21,12 +21,12 @@ Handles all requests relating to instances (guest vms).
"""
import datetime
-import logging
import time
from nova import db
from nova import exception
from nova import flags
+from nova import log as logging
from nova import network
from nova import quota
from nova import rpc
@@ -36,6 +36,7 @@ from nova.compute import instance_types
from nova.db import base
FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.compute.api')
def generate_default_hostname(instance_id):
@@ -63,13 +64,13 @@ class API(base.Base):
try:
instance = self.get(context, instance_id)
except exception.NotFound as e:
- logging.warning("Instance %d was not found in get_network_topic",
- instance_id)
+ LOG.warning(_("Instance %d was not found in get_network_topic"),
+ instance_id)
raise e
host = instance['host']
if not host:
- raise exception.Error("Instance %d has no host" % instance_id)
+ raise exception.Error(_("Instance %d has no host") % instance_id)
topic = self.db.queue_get_for(context, FLAGS.compute_topic, host)
return rpc.call(context,
topic,
@@ -88,10 +89,10 @@ class API(base.Base):
type_data = instance_types.INSTANCE_TYPES[instance_type]
num_instances = quota.allowed_instances(context, max_count, type_data)
if num_instances < min_count:
- logging.warn("Quota exceeeded for %s, tried to run %s instances",
- context.project_id, min_count)
- raise quota.QuotaError("Instance quota exceeded. You can only "
- "run %s more instances of this type." %
+ LOG.warn(_("Quota exceeded for %s, tried to run %s instances"),
+ context.project_id, min_count)
+ raise quota.QuotaError(_("Instance quota exceeded. You can only "
+ "run %s more instances of this type.") %
num_instances, "InstanceLimitExceeded")
is_vpn = image_id == FLAGS.vpn_image_id
@@ -105,8 +106,10 @@ class API(base.Base):
if kernel_id == str(FLAGS.null_kernel):
kernel_id = None
ramdisk_id = None
- logging.debug("Creating a raw instance")
+ LOG.debug(_("Creating a raw instance"))
# Make sure we have access to kernel and ramdisk (if not raw)
+ LOG.debug(_("Using Kernel=%s, Ramdisk=%s"),
+ kernel_id, ramdisk_id)
if kernel_id:
self.image_service.show(context, kernel_id)
if ramdisk_id:
@@ -152,7 +155,7 @@ class API(base.Base):
elevated = context.elevated()
instances = []
- logging.debug(_("Going to run %s instances..."), num_instances)
+ LOG.debug(_("Going to run %s instances..."), num_instances)
for num in range(num_instances):
instance = dict(mac_address=utils.generate_mac(),
launch_index=num,
@@ -170,13 +173,14 @@ class API(base.Base):
# Set sane defaults if not specified
updates = dict(hostname=generate_hostname(instance_id))
- if 'display_name' not in instance:
+ if (not hasattr(instance, 'display_name')) or \
+ instance.display_name is None:
updates['display_name'] = "Server %s" % instance_id
instance = self.update(context, instance_id, **updates)
instances.append(instance)
- logging.debug(_("Casting to scheduler for %s/%s's instance %s"),
+ LOG.debug(_("Casting to scheduler for %s/%s's instance %s"),
context.project_id, context.user_id, instance_id)
rpc.cast(context,
FLAGS.scheduler_topic,
@@ -184,6 +188,9 @@ class API(base.Base):
"args": {"topic": FLAGS.compute_topic,
"instance_id": instance_id}})
+ for group_id in security_groups:
+ self.trigger_security_group_members_refresh(elevated, group_id)
+
return instances
def ensure_default_security_group(self, context):
@@ -203,6 +210,60 @@ class API(base.Base):
'project_id': context.project_id}
db.security_group_create(context, values)
+ def trigger_security_group_rules_refresh(self, context, security_group_id):
+ """Called when a rule is added to or removed from a security_group"""
+
+ security_group = self.db.security_group_get(context, security_group_id)
+
+ hosts = set()
+ for instance in security_group['instances']:
+ if instance['host'] is not None:
+ hosts.add(instance['host'])
+
+ for host in hosts:
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "refresh_security_group_rules",
+ "args": {"security_group_id": security_group.id}})
+
+ def trigger_security_group_members_refresh(self, context, group_id):
+ """Called when a security group gains a new or loses a member
+
+ Sends an update request to each compute node for whom this is
+ relevant."""
+
+ # First, we get the security group rules that reference this group as
+ # the grantee..
+ security_group_rules = \
+ self.db.security_group_rule_get_by_security_group_grantee(
+ context,
+ group_id)
+
+ # ..then we distill the security groups to which they belong..
+ security_groups = set()
+ for rule in security_group_rules:
+ security_groups.add(rule['parent_group_id'])
+
+ # ..then we find the instances that are members of these groups..
+ instances = set()
+ for security_group in security_groups:
+ for instance in security_group['instances']:
+ instances.add(instance['id'])
+
+ # ...then we find the hosts where they live...
+ hosts = set()
+ for instance in instances:
+ if instance['host']:
+ hosts.add(instance['host'])
+
+ # ...and finally we tell these nodes to refresh their view of this
+ # particular security group.
+ for host in hosts:
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "refresh_security_group_members",
+ "args": {"security_group_id": group_id}})
+
def update(self, context, instance_id, **kwargs):
"""Updates the instance in the datastore.
@@ -218,17 +279,17 @@ class API(base.Base):
return self.db.instance_update(context, instance_id, kwargs)
def delete(self, context, instance_id):
- logging.debug("Going to try and terminate %s" % instance_id)
+ LOG.debug(_("Going to try and terminate %s"), instance_id)
try:
instance = self.get(context, instance_id)
except exception.NotFound as e:
- logging.warning(_("Instance %s was not found during terminate"),
- instance_id)
+ LOG.warning(_("Instance %d was not found during terminate"),
+ instance_id)
raise e
if (instance['state_description'] == 'terminating'):
- logging.warning(_("Instance %s is already being terminated"),
- instance_id)
+ LOG.warning(_("Instance %d is already being terminated"),
+ instance_id)
return
self.update(context,
@@ -353,7 +414,26 @@ class API(base.Base):
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "unrescue_instance",
- "args": {"instance_id": instance_id}})
+ "args": {"instance_id": instance['id']}})
+
+ def get_ajax_console(self, context, instance_id):
+ """Get a url to an AJAX Console"""
+
+ instance = self.get(context, instance_id)
+
+ output = rpc.call(context,
+ '%s.%s' % (FLAGS.compute_topic,
+ instance['host']),
+ {'method': 'get_ajax_console',
+ 'args': {'instance_id': instance['id']}})
+
+ rpc.cast(context, '%s' % FLAGS.ajax_console_proxy_topic,
+ {'method': 'authorize_ajax_console',
+ 'args': {'token': output['token'], 'host': output['host'],
+ 'port': output['port']}})
+
+ return {'url': '%s?token=%s' % (FLAGS.ajax_console_proxy_url,
+ output['token'])}
def lock(self, context, instance_id):
"""
diff --git a/nova/compute/disk.py b/nova/compute/disk.py
index 814a258cd..741499294 100644
--- a/nova/compute/disk.py
+++ b/nova/compute/disk.py
@@ -22,14 +22,15 @@ Includes injection of SSH PGP keys into authorized_keys file.
"""
-import logging
import os
import tempfile
from nova import exception
from nova import flags
+from nova import log as logging
+LOG = logging.getLogger('nova.compute.disk')
FLAGS = flags.FLAGS
flags.DEFINE_integer('minimum_root_size', 1024 * 1024 * 1024 * 10,
'minimum size in bytes of root partition')
@@ -67,12 +68,12 @@ def partition(infile, outfile, local_bytes=0, resize=True,
execute('resize2fs %s' % infile)
file_size = FLAGS.minimum_root_size
elif file_size % sector_size != 0:
- logging.warn(_("Input partition size not evenly divisible by"
- " sector size: %d / %d"), file_size, sector_size)
+ LOG.warn(_("Input partition size not evenly divisible by"
+ " sector size: %d / %d"), file_size, sector_size)
primary_sectors = file_size / sector_size
if local_bytes % sector_size != 0:
- logging.warn(_("Bytes for local storage not evenly divisible"
- " by sector size: %d / %d"), local_bytes, sector_size)
+ LOG.warn(_("Bytes for local storage not evenly divisible"
+ " by sector size: %d / %d"), local_bytes, sector_size)
local_sectors = local_bytes / sector_size
mbr_last = 62 # a
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index d5136eb26..6b2fc4adb 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -36,10 +36,12 @@ terminating it.
import datetime
import logging
+import socket
import functools
from nova import exception
from nova import flags
+from nova import log as logging
from nova import manager
from nova import rpc
from nova import utils
@@ -52,6 +54,11 @@ flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection',
'Driver to use for controlling virtualization')
flags.DEFINE_string('stub_network', False,
'Stub network related code')
+flags.DEFINE_string('console_host', socket.gethostname(),
+ 'Console proxy host to use to connect to instances on'
+ 'this host.')
+
+LOG = logging.getLogger('nova.compute.manager')
def checks_instance_lock(function):
@@ -64,23 +71,25 @@ def checks_instance_lock(function):
@functools.wraps(function)
def decorated_function(self, context, instance_id, *args, **kwargs):
- logging.info(_("check_instance_lock: decorating: |%s|"), function)
- logging.info(_("check_instance_lock: arguments: |%s| |%s| |%s|"),
- self,
- context,
- instance_id)
+ LOG.info(_("check_instance_lock: decorating: |%s|"), function,
+ context=context)
+ LOG.info(_("check_instance_lock: arguments: |%s| |%s| |%s|"),
+ self, context, instance_id, context=context)
locked = self.get_lock(context, instance_id)
admin = context.is_admin
- logging.info(_("check_instance_lock: locked: |%s|"), locked)
- logging.info(_("check_instance_lock: admin: |%s|"), admin)
+ LOG.info(_("check_instance_lock: locked: |%s|"), locked,
+ context=context)
+ LOG.info(_("check_instance_lock: admin: |%s|"), admin,
+ context=context)
# if admin or unlocked call function otherwise log error
if admin or not locked:
- logging.info(_("check_instance_lock: executing: |%s|"), function)
+ LOG.info(_("check_instance_lock: executing: |%s|"), function,
+ context=context)
function(self, context, instance_id, *args, **kwargs)
else:
- logging.error(_("check_instance_lock: not executing |%s|"),
- function)
+ LOG.error(_("check_instance_lock: not executing |%s|"),
+ function, context=context)
return False
return decorated_function
@@ -118,6 +127,15 @@ class ComputeManager(manager.Manager):
state = power_state.NOSTATE
self.db.instance_set_state(context, instance_id, state)
+ def get_console_topic(self, context, **_kwargs):
+ """Retrieves the console host for a project on this host
+ Currently this is just set in the flags for each compute
+ host."""
+ #TODO(mdragon): perhaps make this variable by console_type?
+ return self.db.queue_get_for(context,
+ FLAGS.console_topic,
+ FLAGS.console_host)
+
def get_network_topic(self, context, **_kwargs):
"""Retrieves the network host for a project on this host"""
# TODO(vish): This method should be memoized. This will make
@@ -132,10 +150,20 @@ class ComputeManager(manager.Manager):
FLAGS.network_topic,
host)
+ def get_console_pool_info(self, context, console_type):
+ return self.driver.get_console_pool_info(console_type)
+
+ @exception.wrap_exception
+ def refresh_security_group_rules(self, context,
+ security_group_id, **_kwargs):
+ """This call passes straight through to the virtualization driver."""
+ return self.driver.refresh_security_group_rules(security_group_id)
+
@exception.wrap_exception
- def refresh_security_group(self, context, security_group_id, **_kwargs):
- """This call passes stright through to the virtualization driver."""
- self.driver.refresh_security_group(security_group_id)
+ def refresh_security_group_members(self, context,
+ security_group_id, **_kwargs):
+ """This call passes straight through to the virtualization driver."""
+ return self.driver.refresh_security_group_members(security_group_id)
@exception.wrap_exception
def run_instance(self, context, instance_id, **_kwargs):
@@ -144,7 +172,8 @@ class ComputeManager(manager.Manager):
instance_ref = self.db.instance_get(context, instance_id)
if instance_ref['name'] in self.driver.list_instances():
raise exception.Error(_("Instance has already been created"))
- logging.debug(_("instance %s: starting..."), instance_id)
+ LOG.audit(_("instance %s: starting..."), instance_id,
+ context=context)
self.db.instance_update(context,
instance_id,
{'host': self.host})
@@ -182,8 +211,8 @@ class ComputeManager(manager.Manager):
instance_id,
{'launched_at': now})
except Exception: # pylint: disable-msg=W0702
- logging.exception(_("instance %s: Failed to spawn"),
- instance_ref['name'])
+ LOG.exception(_("instance %s: Failed to spawn"), instance_id,
+ context=context)
self.db.instance_set_state(context,
instance_id,
power_state.SHUTDOWN)
@@ -195,14 +224,15 @@ class ComputeManager(manager.Manager):
def terminate_instance(self, context, instance_id):
"""Terminate an instance on this machine."""
context = context.elevated()
-
instance_ref = self.db.instance_get(context, instance_id)
+ LOG.audit(_("Terminating instance %s"), instance_id, context=context)
if not FLAGS.stub_network:
address = self.db.instance_get_floating_address(context,
instance_ref['id'])
if address:
- logging.debug(_("Disassociating address %s") % address)
+ LOG.debug(_("Disassociating address %s"), address,
+ context=context)
# NOTE(vish): Right now we don't really care if the ip is
# disassociated. We may need to worry about
# checking this later.
@@ -214,15 +244,14 @@ class ComputeManager(manager.Manager):
address = self.db.instance_get_fixed_address(context,
instance_ref['id'])
if address:
- logging.debug(_("Deallocating address %s") % address)
+ LOG.debug(_("Deallocating address %s"), address,
+ context=context)
# NOTE(vish): Currently, nothing needs to be done on the
# network node until release. If this changes,
# we will need to cast here.
self.network_manager.deallocate_fixed_ip(context.elevated(),
address)
- logging.debug(_("instance %s: terminating"), instance_id)
-
volumes = instance_ref.get('volumes', []) or []
for volume in volumes:
self.detach_volume(context, instance_id, volume['id'])
@@ -242,15 +271,16 @@ class ComputeManager(manager.Manager):
context = context.elevated()
self._update_state(context, instance_id)
instance_ref = self.db.instance_get(context, instance_id)
+ LOG.audit(_("Rebooting instance %s"), instance_id, context=context)
if instance_ref['state'] != power_state.RUNNING:
- logging.warn(_('trying to reboot a non-running '
- 'instance: %s (state: %s excepted: %s)'),
- instance_id,
- instance_ref['state'],
- power_state.RUNNING)
+ LOG.warn(_('trying to reboot a non-running '
+ 'instance: %s (state: %s expected: %s)'),
+ instance_id,
+ instance_ref['state'],
+ power_state.RUNNING,
+ context=context)
- logging.debug(_('instance %s: rebooting'), instance_ref['name'])
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
@@ -270,13 +300,12 @@ class ComputeManager(manager.Manager):
# potentially?
self._update_state(context, instance_id)
- logging.debug(_('instance %s: snapshotting'), instance_ref['name'])
+ LOG.audit(_('instance %s: snapshotting'), instance_id,
+ context=context)
if instance_ref['state'] != power_state.RUNNING:
- logging.warn(_('trying to snapshot a non-running '
- 'instance: %s (state: %s excepted: %s)'),
- instance_id,
- instance_ref['state'],
- power_state.RUNNING)
+ LOG.warn(_('trying to snapshot a non-running '
+ 'instance: %s (state: %s expected: %s)'),
+ instance_id, instance_ref['state'], power_state.RUNNING)
self.driver.snapshot(instance_ref, name)
@@ -286,8 +315,7 @@ class ComputeManager(manager.Manager):
"""Rescue an instance on this server."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
-
- logging.debug(_('instance %s: rescuing'), instance_id)
+ LOG.audit(_('instance %s: rescuing'), instance_id, context=context)
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
@@ -302,8 +330,7 @@ class ComputeManager(manager.Manager):
"""Rescue an instance on this server."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
-
- logging.debug(_('instance %s: unrescuing'), instance_id)
+ LOG.audit(_('instance %s: unrescuing'), instance_id, context=context)
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
@@ -322,8 +349,7 @@ class ComputeManager(manager.Manager):
"""Pause an instance on this server."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
-
- logging.debug('instance %s: pausing', instance_id)
+ LOG.audit(_('instance %s: pausing'), instance_id, context=context)
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
@@ -340,8 +366,7 @@ class ComputeManager(manager.Manager):
"""Unpause a paused instance on this server."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
-
- logging.debug('instance %s: unpausing', instance_id)
+ LOG.audit(_('instance %s: unpausing'), instance_id, context=context)
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
@@ -358,8 +383,8 @@ class ComputeManager(manager.Manager):
instance_ref = self.db.instance_get(context, instance_id)
if instance_ref["state"] == power_state.RUNNING:
- logging.debug(_("instance %s: retrieving diagnostics"),
- instance_id)
+ LOG.audit(_("instance %s: retrieving diagnostics"), instance_id,
+ context=context)
return self.driver.get_diagnostics(instance_ref)
@exception.wrap_exception
@@ -371,8 +396,7 @@ class ComputeManager(manager.Manager):
"""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
-
- logging.debug(_('instance %s: suspending'), instance_id)
+ LOG.audit(_('instance %s: suspending'), instance_id, context=context)
self.db.instance_set_state(context, instance_id,
power_state.NOSTATE,
'suspending')
@@ -391,8 +415,7 @@ class ComputeManager(manager.Manager):
"""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
-
- logging.debug(_('instance %s: resuming'), instance_id)
+ LOG.audit(_('instance %s: resuming'), instance_id, context=context)
self.db.instance_set_state(context, instance_id,
power_state.NOSTATE,
'resuming')
@@ -411,7 +434,7 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
- logging.debug(_('instance %s: locking'), instance_id)
+ LOG.debug(_('instance %s: locking'), instance_id, context=context)
self.db.instance_update(context, instance_id, {'locked': True})
@exception.wrap_exception
@@ -423,7 +446,7 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
- logging.debug(_('instance %s: unlocking'), instance_id)
+ LOG.debug(_('instance %s: unlocking'), instance_id, context=context)
self.db.instance_update(context, instance_id, {'locked': False})
@exception.wrap_exception
@@ -433,7 +456,8 @@ class ComputeManager(manager.Manager):
"""
context = context.elevated()
- logging.debug(_('instance %s: getting locked state'), instance_id)
+ LOG.debug(_('instance %s: getting locked state'), instance_id,
+ context=context)
instance_ref = self.db.instance_get(context, instance_id)
return instance_ref['locked']
@@ -441,19 +465,27 @@ class ComputeManager(manager.Manager):
def get_console_output(self, context, instance_id):
"""Send the console output for an instance."""
context = context.elevated()
- logging.debug(_("instance %s: getting console output"), instance_id)
instance_ref = self.db.instance_get(context, instance_id)
-
+ LOG.audit(_("Get console output for instance %s"), instance_id,
+ context=context)
return self.driver.get_console_output(instance_ref)
@exception.wrap_exception
+ def get_ajax_console(self, context, instance_id):
+ """Return connection information for an ajax console"""
+ context = context.elevated()
+ LOG.debug(_("instance %s: getting ajax console"), instance_id)
+ instance_ref = self.db.instance_get(context, instance_id)
+
+ return self.driver.get_ajax_console(instance_ref)
+
@checks_instance_lock
def attach_volume(self, context, instance_id, volume_id, mountpoint):
"""Attach a volume to an instance."""
context = context.elevated()
- logging.debug(_("instance %s: attaching volume %s to %s"), instance_id,
- volume_id, mountpoint)
instance_ref = self.db.instance_get(context, instance_id)
+ LOG.audit(_("instance %s: attaching volume %s to %s"), instance_id,
+ volume_id, mountpoint, context=context)
dev_path = self.volume_manager.setup_compute_volume(context,
volume_id)
try:
@@ -468,8 +500,8 @@ class ComputeManager(manager.Manager):
# NOTE(vish): The inline callback eats the exception info so we
# log the traceback here and reraise the same
# ecxception below.
- logging.exception(_("instance %s: attach failed %s, removing"),
- instance_id, mountpoint)
+ LOG.exception(_("instance %s: attach failed %s, removing"),
+ instance_id, mountpoint, context=context)
self.volume_manager.remove_compute_volume(context,
volume_id)
raise exc
@@ -481,14 +513,14 @@ class ComputeManager(manager.Manager):
def detach_volume(self, context, instance_id, volume_id):
"""Detach a volume from an instance."""
context = context.elevated()
- logging.debug(_("instance %s: detaching volume %s"),
- instance_id,
- volume_id)
instance_ref = self.db.instance_get(context, instance_id)
volume_ref = self.db.volume_get(context, volume_id)
+ LOG.audit(_("Detach volume %s from mountpoint %s on instance %s"),
+ volume_id, volume_ref['mountpoint'], instance_id,
+ context=context)
if instance_ref['name'] not in self.driver.list_instances():
- logging.warn(_("Detaching volume from unknown instance %s"),
- instance_ref['name'])
+ LOG.warn(_("Detaching volume from unknown instance %s"),
+ instance_id, context=context)
else:
self.driver.detach_volume(instance_ref['name'],
volume_ref['mountpoint'])
diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py
index 60c347a5e..14d0e8ca1 100644
--- a/nova/compute/monitor.py
+++ b/nova/compute/monitor.py
@@ -25,19 +25,17 @@ Instance Monitoring:
"""
import datetime
-import logging
import os
-import sys
import time
import boto
import boto.s3
import rrdtool
-from twisted.internet import defer
from twisted.internet import task
from twisted.application import service
from nova import flags
+from nova import log as logging
from nova.virt import connection as virt_connection
@@ -91,6 +89,9 @@ RRD_VALUES = {
utcnow = datetime.datetime.utcnow
+LOG = logging.getLogger('nova.compute.monitor')
+
+
def update_rrd(instance, name, data):
"""
Updates the specified RRD file.
@@ -255,20 +256,20 @@ class Instance(object):
Updates the instances statistics and stores the resulting graphs
in the internal object store on the cloud controller.
"""
- logging.debug(_('updating %s...'), self.instance_id)
+ LOG.debug(_('updating %s...'), self.instance_id)
try:
data = self.fetch_cpu_stats()
if data != None:
- logging.debug('CPU: %s', data)
+ LOG.debug('CPU: %s', data)
update_rrd(self, 'cpu', data)
data = self.fetch_net_stats()
- logging.debug('NET: %s', data)
+ LOG.debug('NET: %s', data)
update_rrd(self, 'net', data)
data = self.fetch_disk_stats()
- logging.debug('DISK: %s', data)
+ LOG.debug('DISK: %s', data)
update_rrd(self, 'disk', data)
# TODO(devcamcar): Turn these into pool.ProcessPool.execute() calls
@@ -285,7 +286,7 @@ class Instance(object):
graph_disk(self, '1w')
graph_disk(self, '1m')
except Exception:
- logging.exception(_('unexpected error during update'))
+ LOG.exception(_('unexpected error during update'))
self.last_updated = utcnow()
@@ -309,7 +310,7 @@ class Instance(object):
self.cputime = float(info['cpu_time'])
self.cputime_last_updated = utcnow()
- logging.debug('CPU: %d', self.cputime)
+ LOG.debug('CPU: %d', self.cputime)
# Skip calculation on first pass. Need delta to get a meaningful value.
if cputime_last_updated == None:
@@ -319,17 +320,17 @@ class Instance(object):
d = self.cputime_last_updated - cputime_last_updated
t = d.days * 86400 + d.seconds
- logging.debug('t = %d', t)
+ LOG.debug('t = %d', t)
# Calculate change over time in number of nanoseconds of CPU time used.
cputime_delta = self.cputime - cputime_last
- logging.debug('cputime_delta = %s', cputime_delta)
+ LOG.debug('cputime_delta = %s', cputime_delta)
# Get the number of virtual cpus in this domain.
vcpus = int(info['num_cpu'])
- logging.debug('vcpus = %d', vcpus)
+ LOG.debug('vcpus = %d', vcpus)
# Calculate CPU % used and cap at 100.
return min(cputime_delta / (t * vcpus * 1.0e9) * 100, 100)
@@ -351,8 +352,8 @@ class Instance(object):
rd += rd_bytes
wr += wr_bytes
except TypeError:
- logging.error(_('Cannot get blockstats for "%s" on "%s"'),
- disk, self.instance_id)
+ LOG.error(_('Cannot get blockstats for "%s" on "%s"'),
+ disk, self.instance_id)
raise
return '%d:%d' % (rd, wr)
@@ -373,8 +374,8 @@ class Instance(object):
rx += stats[0]
tx += stats[4]
except TypeError:
- logging.error(_('Cannot get ifstats for "%s" on "%s"'),
- interface, self.instance_id)
+ LOG.error(_('Cannot get ifstats for "%s" on "%s"'),
+ interface, self.instance_id)
raise
return '%d:%d' % (rx, tx)
@@ -408,7 +409,7 @@ class InstanceMonitor(object, service.Service):
try:
conn = virt_connection.get_connection(read_only=True)
except Exception, exn:
- logging.exception(_('unexpected exception getting connection'))
+ LOG.exception(_('unexpected exception getting connection'))
time.sleep(FLAGS.monitoring_instances_delay)
return
@@ -416,14 +417,14 @@ class InstanceMonitor(object, service.Service):
try:
self.updateInstances_(conn, domain_ids)
except Exception, exn:
- logging.exception('updateInstances_')
+ LOG.exception('updateInstances_')
def updateInstances_(self, conn, domain_ids):
for domain_id in domain_ids:
if not domain_id in self._instances:
instance = Instance(conn, domain_id)
self._instances[domain_id] = instance
- logging.debug(_('Found instance: %s'), domain_id)
+ LOG.debug(_('Found instance: %s'), domain_id)
for key in self._instances.keys():
instance = self._instances[key]
diff --git a/nova/console/__init__.py b/nova/console/__init__.py
new file mode 100644
index 000000000..dfc72cd61
--- /dev/null
+++ b/nova/console/__init__.py
@@ -0,0 +1,13 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+"""
+:mod:`nova.console` -- Console Proxy to set up VM console access (e.g. with xvp)
+=====================================================
+
+.. automodule:: nova.console
+ :platform: Unix
+ :synopsis: Wrapper around console proxies such as xvp to set up
+ multitenant VM console access
+.. moduleauthor:: Monsyne Dragon <mdragon@rackspace.com>
+"""
+from nova.console.api import API
diff --git a/nova/console/api.py b/nova/console/api.py
new file mode 100644
index 000000000..3850d2c44
--- /dev/null
+++ b/nova/console/api.py
@@ -0,0 +1,75 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Handles ConsoleProxy API requests
+"""
+
+from nova import exception
+from nova.db import base
+
+
+from nova import flags
+from nova import rpc
+
+
+FLAGS = flags.FLAGS
+
+
+class API(base.Base):
+ """API for spinning up or down console proxy connections"""
+
+ def __init__(self, **kwargs):
+ super(API, self).__init__(**kwargs)
+
+ def get_consoles(self, context, instance_id):
+ return self.db.console_get_all_by_instance(context, instance_id)
+
+ def get_console(self, context, instance_id, console_id):
+ return self.db.console_get(context, console_id, instance_id)
+
+ def delete_console(self, context, instance_id, console_id):
+ console = self.db.console_get(context,
+ console_id,
+ instance_id)
+ pool = console['pool']
+ rpc.cast(context,
+ self.db.queue_get_for(context,
+ FLAGS.console_topic,
+ pool['host']),
+ {"method": "remove_console",
+ "args": {"console_id": console['id']}})
+
+ def create_console(self, context, instance_id):
+ instance = self.db.instance_get(context, instance_id)
+ #NOTE(mdragon): If we wanted to return the console info
+ # here, we would need to do a (blocking) call.
+ # Instead, callers can do an index later to
+ # fetch the console info. I am not sure which
+ # is better here.
+ rpc.cast(context,
+ self._get_console_topic(context, instance['host']),
+ {"method": "add_console",
+ "args": {"instance_id": instance_id}})
+
+ def _get_console_topic(self, context, instance_host):
+ topic = self.db.queue_get_for(context,
+ FLAGS.compute_topic,
+ instance_host)
+ return rpc.call(context,
+ topic,
+ {"method": "get_console_topic", "args": {'fake': 1}})
diff --git a/nova/console/fake.py b/nova/console/fake.py
new file mode 100644
index 000000000..7a90d5221
--- /dev/null
+++ b/nova/console/fake.py
@@ -0,0 +1,58 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Fake ConsoleProxy driver for tests.
+"""
+
+from nova import exception
+
+
+class FakeConsoleProxy(object):
+ """Fake ConsoleProxy driver."""
+
+ @property
+ def console_type(self):
+ return "fake"
+
+ def setup_console(self, context, console):
+ """Sets up actual proxies"""
+ pass
+
+ def teardown_console(self, context, console):
+ """Tears down actual proxies"""
+ pass
+
+ def init_host(self):
+ """Start up any config'ed consoles on start"""
+ pass
+
+ def generate_password(self, length=8):
+ """Returns random console password"""
+ return "fakepass"
+
+ def get_port(self, context):
+ """get available port for consoles that need one"""
+ return 5999
+
+ def fix_pool_password(self, password):
+ """Trim password to length, and any other massaging"""
+ return password
+
+ def fix_console_password(self, password):
+ """Trim password to length, and any other massaging"""
+ return password
diff --git a/nova/console/manager.py b/nova/console/manager.py
new file mode 100644
index 000000000..c55ca8e8f
--- /dev/null
+++ b/nova/console/manager.py
@@ -0,0 +1,127 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Console Proxy Service
+"""
+
+import functools
+import logging
+import socket
+
+from nova import exception
+from nova import flags
+from nova import manager
+from nova import rpc
+from nova import utils
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('console_driver',
+ 'nova.console.xvp.XVPConsoleProxy',
+ 'Driver to use for the console proxy')
+flags.DEFINE_boolean('stub_compute', False,
+ 'Stub calls to compute worker for tests')
+flags.DEFINE_string('console_public_hostname',
+ socket.gethostname(),
+ 'Publicly visible name for this console host')
+
+
+class ConsoleProxyManager(manager.Manager):
+
+ """ Sets up and tears down any proxy connections needed for accessing
+ instance consoles securely"""
+
+ def __init__(self, console_driver=None, *args, **kwargs):
+ if not console_driver:
+ console_driver = FLAGS.console_driver
+ self.driver = utils.import_object(console_driver)
+ super(ConsoleProxyManager, self).__init__(*args, **kwargs)
+ self.driver.host = self.host
+
+ def init_host(self):
+ self.driver.init_host()
+
+ @exception.wrap_exception
+ def add_console(self, context, instance_id, password=None,
+ port=None, **kwargs):
+ instance = self.db.instance_get(context, instance_id)
+ host = instance['host']
+ name = instance['name']
+ pool = self.get_pool_for_instance_host(context, host)
+ try:
+ console = self.db.console_get_by_pool_instance(context,
+ pool['id'],
+ instance_id)
+ except exception.NotFound:
+ logging.debug("Adding console")
+ if not password:
+ password = self.driver.generate_password()
+ if not port:
+ port = self.driver.get_port(context)
+ console_data = {'instance_name': name,
+ 'instance_id': instance_id,
+ 'password': password,
+ 'pool_id': pool['id']}
+ if port:
+ console_data['port'] = port
+ console = self.db.console_create(context, console_data)
+ self.driver.setup_console(context, console)
+ return console['id']
+
+ @exception.wrap_exception
+ def remove_console(self, context, console_id, **_kwargs):
+ try:
+ console = self.db.console_get(context, console_id)
+ except exception.NotFound:
+ logging.debug(_('Tried to remove non-existent console '
+ '%(console_id)s.') %
+ {'console_id': console_id})
+ return
+ self.db.console_delete(context, console_id)
+ self.driver.teardown_console(context, console)
+
+ def get_pool_for_instance_host(self, context, instance_host):
+ context = context.elevated()
+ console_type = self.driver.console_type
+ try:
+ pool = self.db.console_pool_get_by_host_type(context,
+ instance_host,
+ self.host,
+ console_type)
+ except exception.NotFound:
+ #NOTE(mdragon): Right now, the only place this info exists is the
+ # compute worker's flagfile, at least for
+ # xenserver. Thus we need to ask.
+ if FLAGS.stub_compute:
+ pool_info = {'address': '127.0.0.1',
+ 'username': 'test',
+ 'password': '1234pass'}
+ else:
+ pool_info = rpc.call(context,
+ self.db.queue_get_for(context,
+ FLAGS.compute_topic,
+ instance_host),
+ {"method": "get_console_pool_info",
+ "args": {"console_type": console_type}})
+ pool_info['password'] = self.driver.fix_pool_password(
+ pool_info['password'])
+ pool_info['host'] = self.host
+ pool_info['public_hostname'] = FLAGS.console_public_hostname
+ pool_info['console_type'] = self.driver.console_type
+ pool_info['compute_host'] = instance_host
+ pool = self.db.console_pool_create(context, pool_info)
+ return pool
diff --git a/nova/console/xvp.conf.template b/nova/console/xvp.conf.template
new file mode 100644
index 000000000..695ddbe96
--- /dev/null
+++ b/nova/console/xvp.conf.template
@@ -0,0 +1,16 @@
+# One time password use with time window
+OTP ALLOW IPCHECK HTTP 60
+#if $multiplex_port
+MULTIPLEX $multiplex_port
+#end if
+
+#for $pool in $pools
+POOL $pool.address
+ DOMAIN $pool.address
+ MANAGER root $pool.password
+ HOST $pool.address
+ VM - dummy 0123456789ABCDEF
+ #for $console in $pool.consoles
+ VM #if $multiplex_port then '-' else $console.port # $console.instance_name $pass_encode($console.password)
+ #end for
+#end for
diff --git a/nova/console/xvp.py b/nova/console/xvp.py
new file mode 100644
index 000000000..2a76223da
--- /dev/null
+++ b/nova/console/xvp.py
@@ -0,0 +1,194 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+XVP (Xenserver VNC Proxy) driver.
+"""
+
+import fcntl
+import logging
+import os
+import signal
+import subprocess
+
+from Cheetah.Template import Template
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import utils
+
+flags.DEFINE_string('console_xvp_conf_template',
+ utils.abspath('console/xvp.conf.template'),
+ 'XVP conf template')
+flags.DEFINE_string('console_xvp_conf',
+ '/etc/xvp.conf',
+ 'generated XVP conf file')
+flags.DEFINE_string('console_xvp_pid',
+ '/var/run/xvp.pid',
+ 'XVP master process pid file')
+flags.DEFINE_string('console_xvp_log',
+ '/var/log/xvp.log',
+ 'XVP log file')
+flags.DEFINE_integer('console_xvp_multiplex_port',
+ 5900,
+ "port for XVP to multiplex VNC connections on")
+FLAGS = flags.FLAGS
+
+
+class XVPConsoleProxy(object):
+ """Sets up XVP config, and manages xvp daemon"""
+
+ def __init__(self):
+ self.xvpconf_template = open(FLAGS.console_xvp_conf_template).read()
+ self.host = FLAGS.host # default, set by manager.
+ super(XVPConsoleProxy, self).__init__()
+
+ @property
+ def console_type(self):
+ return "vnc+xvp"
+
+ def get_port(self, context):
+ """get available port for consoles that need one"""
+ #TODO(mdragon): implement port selection for non multiplex ports,
+ # we are not using that, but someone else may want
+ # it.
+ return FLAGS.console_xvp_multiplex_port
+
+ def setup_console(self, context, console):
+ """Sets up actual proxies"""
+ self._rebuild_xvp_conf(context.elevated())
+
+ def teardown_console(self, context, console):
+ """Tears down actual proxies"""
+ self._rebuild_xvp_conf(context.elevated())
+
+ def init_host(self):
+ """Start up any config'ed consoles on start"""
+ ctxt = context.get_admin_context()
+ self._rebuild_xvp_conf(ctxt)
+
+ def fix_pool_password(self, password):
+ """Trim password to length, and encode"""
+ return self._xvp_encrypt(password, is_pool_password=True)
+
+ def fix_console_password(self, password):
+ """Trim password to length, and encode"""
+ return self._xvp_encrypt(password)
+
+ def generate_password(self, length=8):
+ """Returns random console password"""
+ return os.urandom(length * 2).encode('base64')[:length]
+
+ def _rebuild_xvp_conf(self, context):
+ logging.debug("Rebuilding xvp conf")
+ pools = [pool for pool in
+ db.console_pool_get_all_by_host_type(context, self.host,
+ self.console_type)
+ if pool['consoles']]
+ if not pools:
+ logging.debug("No console pools!")
+ self._xvp_stop()
+ return
+ conf_data = {'multiplex_port': FLAGS.console_xvp_multiplex_port,
+ 'pools': pools,
+ 'pass_encode': self.fix_console_password}
+ config = str(Template(self.xvpconf_template, searchList=[conf_data]))
+ self._write_conf(config)
+ self._xvp_restart()
+
+ def _write_conf(self, config):
+ logging.debug('Re-wrote %s' % FLAGS.console_xvp_conf)
+ with open(FLAGS.console_xvp_conf, 'w') as cfile:
+ cfile.write(config)
+
+ def _xvp_stop(self):
+ logging.debug("Stopping xvp")
+ pid = self._xvp_pid()
+ if not pid:
+ return
+ try:
+ os.kill(pid, signal.SIGTERM)
+ except OSError:
+ #if it's already not running, no problem.
+ pass
+
+ def _xvp_start(self):
+ if self._xvp_check_running():
+ return
+ logging.debug("Starting xvp")
+ try:
+ utils.execute('xvp -p %s -c %s -l %s' %
+ (FLAGS.console_xvp_pid,
+ FLAGS.console_xvp_conf,
+ FLAGS.console_xvp_log))
+ except exception.ProcessExecutionError, err:
+ logging.error("Error starting xvp: %s" % err)
+
+ def _xvp_restart(self):
+ logging.debug("Restarting xvp")
+ if not self._xvp_check_running():
+ logging.debug("xvp not running...")
+ self._xvp_start()
+ else:
+ pid = self._xvp_pid()
+ os.kill(pid, signal.SIGUSR1)
+
+ def _xvp_pid(self):
+ try:
+ with open(FLAGS.console_xvp_pid, 'r') as pidfile:
+ pid = int(pidfile.read())
+ except IOError:
+ return None
+ except ValueError:
+ return None
+ return pid
+
+ def _xvp_check_running(self):
+ pid = self._xvp_pid()
+ if not pid:
+ return False
+ try:
+ os.kill(pid, 0)
+ except OSError:
+ return False
+ return True
+
+ def _xvp_encrypt(self, password, is_pool_password=False):
+ """Call xvp to obfuscate passwords for config file.
+
+ Args:
+ - password: the password to encode, max 8 char for vm passwords,
+ and 16 chars for pool passwords. passwords will
+ be trimmed to max len before encoding.
+ - is_pool_password: True if this is the XenServer api password
+ False if it's a VM console password
+ (xvp uses different keys and max lengths for pool passwords)
+
+ Note that xvp's obfuscation should not be considered 'real' encryption.
+ It simply DES encrypts the passwords with static keys plainly viewable
+ in the xvp source code."""
+ maxlen = 8
+ flag = '-e'
+ if is_pool_password:
+ maxlen = 16
+ flag = '-x'
+ #xvp will blow up on passwords that are too long (mdragon)
+ password = password[:maxlen]
+ out, err = utils.execute('xvp %s' % flag, process_input=password)
+ return out.strip()
diff --git a/nova/crypto.py b/nova/crypto.py
index b8405552d..a34b940f5 100644
--- a/nova/crypto.py
+++ b/nova/crypto.py
@@ -24,7 +24,6 @@ Includes root and intermediate CAs, SSH key_pairs and x509 certificates.
import base64
import gettext
import hashlib
-import logging
import os
import shutil
import struct
@@ -39,8 +38,10 @@ gettext.install('nova', unicode=1)
from nova import context
from nova import db
from nova import flags
+from nova import log as logging
+LOG = logging.getLogger("nova.crypto")
FLAGS = flags.FLAGS
flags.DEFINE_string('ca_file', 'cacert.pem', _('Filename of root CA'))
flags.DEFINE_string('key_file',
@@ -254,7 +255,7 @@ def _sign_csr(csr_text, ca_folder):
csrfile = open(inbound, "w")
csrfile.write(csr_text)
csrfile.close()
- logging.debug(_("Flags path: %s") % ca_folder)
+ LOG.debug(_("Flags path: %s"), ca_folder)
start = os.getcwd()
# Change working dir to CA
os.chdir(ca_folder)
diff --git a/nova/db/api.py b/nova/db/api.py
index 0fa5eb1e8..cf84157bc 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -81,6 +81,11 @@ def service_get(context, service_id):
return IMPL.service_get(context, service_id)
+def service_get_all(context):
+ """Get a list of all services on any machine on any topic of any type"""
+ return IMPL.service_get_all(context)
+
+
def service_get_all_by_topic(context, topic):
"""Get all compute services for a given topic."""
return IMPL.service_get_all_by_topic(context, topic)
@@ -772,6 +777,13 @@ def security_group_rule_get_by_security_group(context, security_group_id):
security_group_id)
+def security_group_rule_get_by_security_group_grantee(context,
+ security_group_id):
+ """Get all rules that grant access to the given security group."""
+ return IMPL.security_group_rule_get_by_security_group_grantee(context,
+ security_group_id)
+
+
def security_group_rule_destroy(context, security_group_rule_id):
"""Deletes a security group rule."""
return IMPL.security_group_rule_destroy(context, security_group_rule_id)
@@ -894,3 +906,57 @@ def host_get_networks(context, host):
"""
return IMPL.host_get_networks(context, host)
+
+
+##################
+
+
+def console_pool_create(context, values):
+ """Create console pool."""
+ return IMPL.console_pool_create(context, values)
+
+
+def console_pool_get(context, pool_id):
+ """Get a console pool."""
+ return IMPL.console_pool_get(context, pool_id)
+
+
+def console_pool_get_by_host_type(context, compute_host, proxy_host,
+ console_type):
+ """Fetch a console pool for a given proxy host, compute host, and type."""
+ return IMPL.console_pool_get_by_host_type(context,
+ compute_host,
+ proxy_host,
+ console_type)
+
+
+def console_pool_get_all_by_host_type(context, host, console_type):
+ """Fetch all pools for given proxy host and type."""
+ return IMPL.console_pool_get_all_by_host_type(context,
+ host,
+ console_type)
+
+
+def console_create(context, values):
+ """Create a console."""
+ return IMPL.console_create(context, values)
+
+
+def console_delete(context, console_id):
+ """Delete a console."""
+ return IMPL.console_delete(context, console_id)
+
+
+def console_get_by_pool_instance(context, pool_id, instance_id):
+ """Get console entry for a given instance and pool."""
+ return IMPL.console_get_by_pool_instance(context, pool_id, instance_id)
+
+
+def console_get_all_by_instance(context, instance_id):
+ """Get consoles for a given instance."""
+ return IMPL.console_get_all_by_instance(context, instance_id)
+
+
+def console_get(context, console_id, instance_id=None):
+ """Get a specific console (possibly on a given instance)."""
+ return IMPL.console_get(context, console_id, instance_id)
diff --git a/nova/db/sqlalchemy/__init__.py b/nova/db/sqlalchemy/__init__.py
index 22aa1cfe6..501373942 100644
--- a/nova/db/sqlalchemy/__init__.py
+++ b/nova/db/sqlalchemy/__init__.py
@@ -19,16 +19,17 @@
"""
SQLAlchemy database backend
"""
-import logging
import time
from sqlalchemy.exc import OperationalError
from nova import flags
+from nova import log as logging
from nova.db.sqlalchemy import models
FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.db.sqlalchemy')
for i in xrange(FLAGS.sql_max_retries):
@@ -39,5 +40,6 @@ for i in xrange(FLAGS.sql_max_retries):
models.register_models()
break
except OperationalError:
- logging.exception(_("Data store is unreachable."
- " Trying again in %d seconds.") % FLAGS.sql_retry_interval)
+ LOG.exception(_("Data store %s is unreachable."
+ " Trying again in %d seconds."),
+ FLAGS.sql_connection, FLAGS.sql_retry_interval)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 45427597a..4561fa219 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -135,6 +135,18 @@ def service_get(context, service_id, session=None):
@require_admin_context
+def service_get_all(context, session=None):
+ if not session:
+ session = get_session()
+
+ result = session.query(models.Service).\
+ filter_by(deleted=can_read_deleted(context)).\
+ all()
+
+ return result
+
+
+@require_admin_context
def service_get_all_by_topic(context, topic):
session = get_session()
return session.query(models.Service).\
@@ -650,7 +662,7 @@ def instance_get(context, instance_id, session=None):
if is_admin_context(context):
result = session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
- options(joinedload('security_groups')).\
+ options(joinedload_all('security_groups.rules')).\
options(joinedload('volumes')).\
filter_by(id=instance_id).\
filter_by(deleted=can_read_deleted(context)).\
@@ -658,7 +670,7 @@ def instance_get(context, instance_id, session=None):
elif is_user_context(context):
result = session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
- options(joinedload('security_groups')).\
+ options(joinedload_all('security_groups.rules')).\
options(joinedload('volumes')).\
filter_by(project_id=context.project_id).\
filter_by(id=instance_id).\
@@ -1579,6 +1591,44 @@ def security_group_rule_get(context, security_group_rule_id, session=None):
@require_context
+def security_group_rule_get_by_security_group(context, security_group_id,
+ session=None):
+ if not session:
+ session = get_session()
+ if is_admin_context(context):
+ result = session.query(models.SecurityGroupIngressRule).\
+ filter_by(deleted=can_read_deleted(context)).\
+ filter_by(parent_group_id=security_group_id).\
+ all()
+ else:
+ # TODO(vish): Join to group and check for project_id
+ result = session.query(models.SecurityGroupIngressRule).\
+ filter_by(deleted=False).\
+ filter_by(parent_group_id=security_group_id).\
+ all()
+ return result
+
+
+@require_context
+def security_group_rule_get_by_security_group_grantee(context,
+ security_group_id,
+ session=None):
+ if not session:
+ session = get_session()
+ if is_admin_context(context):
+ result = session.query(models.SecurityGroupIngressRule).\
+ filter_by(deleted=can_read_deleted(context)).\
+ filter_by(group_id=security_group_id).\
+ all()
+ else:
+ result = session.query(models.SecurityGroupIngressRule).\
+ filter_by(deleted=False).\
+ filter_by(group_id=security_group_id).\
+ all()
+ return result
+
+
+@require_context
def security_group_rule_create(context, values):
security_group_rule_ref = models.SecurityGroupIngressRule()
security_group_rule_ref.update(values)
@@ -1813,3 +1863,111 @@ def host_get_networks(context, host):
filter_by(deleted=False).\
filter_by(host=host).\
all()
+
+
+##################
+
+
+def console_pool_create(context, values):
+ pool = models.ConsolePool()
+ pool.update(values)
+ pool.save()
+ return pool
+
+
+def console_pool_get(context, pool_id):
+ session = get_session()
+ result = session.query(models.ConsolePool).\
+ filter_by(deleted=False).\
+ filter_by(id=pool_id).\
+ first()
+ if not result:
+ raise exception.NotFound(_("No console pool with id %(pool_id)s") %
+ {'pool_id': pool_id})
+
+ return result
+
+
+def console_pool_get_by_host_type(context, compute_host, host,
+ console_type):
+ session = get_session()
+ result = session.query(models.ConsolePool).\
+ filter_by(host=host).\
+ filter_by(console_type=console_type).\
+ filter_by(compute_host=compute_host).\
+ filter_by(deleted=False).\
+ options(joinedload('consoles')).\
+ first()
+ if not result:
+ raise exception.NotFound(_('No console pool of type %(type)s '
+ 'for compute host %(compute_host)s '
+ 'on proxy host %(host)s') %
+ {'type': console_type,
+ 'compute_host': compute_host,
+ 'host': host})
+ return result
+
+
+def console_pool_get_all_by_host_type(context, host, console_type):
+ session = get_session()
+ return session.query(models.ConsolePool).\
+ filter_by(host=host).\
+ filter_by(console_type=console_type).\
+ filter_by(deleted=False).\
+ options(joinedload('consoles')).\
+ all()
+
+
+def console_create(context, values):
+ console = models.Console()
+ console.update(values)
+ console.save()
+ return console
+
+
+def console_delete(context, console_id):
+ session = get_session()
+ with session.begin():
+ # consoles are meant to be transient. (mdragon)
+ session.execute('delete from consoles '
+ 'where id=:id', {'id': console_id})
+
+
+def console_get_by_pool_instance(context, pool_id, instance_id):
+ session = get_session()
+ result = session.query(models.Console).\
+ filter_by(pool_id=pool_id).\
+ filter_by(instance_id=instance_id).\
+ options(joinedload('pool')).\
+ first()
+ if not result:
+ raise exception.NotFound(_('No console for instance %(instance_id)s '
+ 'in pool %(pool_id)s') %
+ {'instance_id': instance_id,
+ 'pool_id': pool_id})
+ return result
+
+
+def console_get_all_by_instance(context, instance_id):
+ session = get_session()
+ results = session.query(models.Console).\
+ filter_by(instance_id=instance_id).\
+ options(joinedload('pool')).\
+ all()
+ return results
+
+
+def console_get(context, console_id, instance_id=None):
+ session = get_session()
+ query = session.query(models.Console).\
+ filter_by(id=console_id)
+ if instance_id:
+ query = query.filter_by(instance_id=instance_id)
+ result = query.options(joinedload('pool')).first()
+ if not result:
+ idesc = (_("on instance %s") % instance_id) if instance_id else ""
+ raise exception.NotFound(_("No console with id %(console_id)s"
+ " %(instance)s") %
+ {'instance': idesc,
+ 'console_id': console_id})
+ return result
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 1ed366127..2a966448c 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -540,6 +540,31 @@ class FloatingIp(BASE, NovaBase):
host = Column(String(255)) # , ForeignKey('hosts.id'))
+class ConsolePool(BASE, NovaBase):
+ """Represents pool of consoles on the same physical node."""
+ __tablename__ = 'console_pools'
+ id = Column(Integer, primary_key=True)
+ address = Column(String(255))
+ username = Column(String(255))
+ password = Column(String(255))
+ console_type = Column(String(255))
+ public_hostname = Column(String(255))
+ host = Column(String(255))
+ compute_host = Column(String(255))
+
+
+class Console(BASE, NovaBase):
+ """Represents a console session for an instance."""
+ __tablename__ = 'consoles'
+ id = Column(Integer, primary_key=True)
+ instance_name = Column(String(255))
+ instance_id = Column(Integer)
+ password = Column(String(255))
+ port = Column(Integer, nullable=True)
+ pool_id = Column(Integer, ForeignKey('console_pools.id'))
+ pool = relationship(ConsolePool, backref=backref('consoles'))
+
+
def register_models():
"""Register Models and create metadata.
@@ -552,7 +577,7 @@ def register_models():
Volume, ExportDevice, IscsiTarget, FixedIp, FloatingIp,
Network, SecurityGroup, SecurityGroupIngressRule,
SecurityGroupInstanceAssociation, AuthToken, User,
- Project, Certificate) # , Image, Host
+ Project, Certificate, ConsolePool, Console) # , Image, Host
engine = create_engine(FLAGS.sql_connection, echo=False)
for model in models:
model.metadata.create_all(engine)
diff --git a/nova/exception.py b/nova/exception.py
index 277033e0f..7680e534a 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -21,9 +21,8 @@ Nova base exception handling, including decorator for re-raising
Nova-type exceptions. SHOULD include dedicated exception logging.
"""
-import logging
-import sys
-import traceback
+from nova import log as logging
+LOG = logging.getLogger('nova.exception')
class ProcessExecutionError(IOError):
@@ -84,7 +83,7 @@ def wrap_exception(f):
except Exception, e:
if not isinstance(e, Error):
#exc_type, exc_value, exc_traceback = sys.exc_info()
- logging.exception(_('Uncaught exception'))
+ LOG.exception(_('Uncaught exception'))
#logging.error(traceback.extract_stack(exc_traceback))
raise Error(str(e))
raise
diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py
index 79d8b894d..7c2d7177b 100644
--- a/nova/fakerabbit.py
+++ b/nova/fakerabbit.py
@@ -18,12 +18,16 @@
"""Based a bit on the carrot.backeds.queue backend... but a lot better."""
-import logging
import Queue as queue
from carrot.backends import base
from eventlet import greenthread
+from nova import log as logging
+
+
+LOG = logging.getLogger("nova.fakerabbit")
+
EXCHANGES = {}
QUEUES = {}
@@ -41,12 +45,12 @@ class Exchange(object):
self._routes = {}
def publish(self, message, routing_key=None):
- logging.debug(_('(%s) publish (key: %s) %s'),
- self.name, routing_key, message)
+ LOG.debug(_('(%s) publish (key: %s) %s'),
+ self.name, routing_key, message)
routing_key = routing_key.split('.')[0]
if routing_key in self._routes:
for f in self._routes[routing_key]:
- logging.debug(_('Publishing to route %s'), f)
+ LOG.debug(_('Publishing to route %s'), f)
f(message, routing_key=routing_key)
def bind(self, callback, routing_key):
@@ -76,19 +80,19 @@ class Backend(base.BaseBackend):
def queue_declare(self, queue, **kwargs):
global QUEUES
if queue not in QUEUES:
- logging.debug(_('Declaring queue %s'), queue)
+ LOG.debug(_('Declaring queue %s'), queue)
QUEUES[queue] = Queue(queue)
def exchange_declare(self, exchange, type, *args, **kwargs):
global EXCHANGES
if exchange not in EXCHANGES:
- logging.debug(_('Declaring exchange %s'), exchange)
+ LOG.debug(_('Declaring exchange %s'), exchange)
EXCHANGES[exchange] = Exchange(exchange, type)
def queue_bind(self, queue, exchange, routing_key, **kwargs):
global EXCHANGES
global QUEUES
- logging.debug(_('Binding %s to %s with key %s'),
+ LOG.debug(_('Binding %s to %s with key %s'),
queue, exchange, routing_key)
EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key)
@@ -113,7 +117,7 @@ class Backend(base.BaseBackend):
content_type=content_type,
content_encoding=content_encoding)
message.result = True
- logging.debug(_('Getting from %s: %s'), queue, message)
+ LOG.debug(_('Getting from %s: %s'), queue, message)
return message
def prepare_message(self, message_data, delivery_mode,
diff --git a/nova/flags.py b/nova/flags.py
index 4b7334927..76ab2f788 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -29,8 +29,6 @@ import sys
import gflags
-from nova import utils
-
class FlagValues(gflags.FlagValues):
"""Extension of gflags.FlagValues that allows undefined and runtime flags.
@@ -202,10 +200,22 @@ def DECLARE(name, module_string, flag_values=FLAGS):
"%s not defined by %s" % (name, module_string))
+def _get_my_ip():
+ """Returns the actual ip of the local machine."""
+ try:
+ csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ csock.connect(('8.8.8.8', 80))
+ (addr, port) = csock.getsockname()
+ csock.close()
+ return addr
+ except socket.gaierror as ex:
+ return "127.0.0.1"
+
+
# __GLOBAL FLAGS ONLY__
# Define any app-specific flags in their own files, docs at:
-# http://code.google.com/p/python-gflags/source/browse/trunk/gflags.py#39
-
+# http://code.google.com/p/python-gflags/source/browse/trunk/gflags.py#a9
+DEFINE_string('my_ip', _get_my_ip(), 'host ip address')
DEFINE_list('region_list',
[],
'list of region=url pairs separated by commas')
@@ -213,16 +223,25 @@ DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake')
DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID')
DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key')
DEFINE_integer('glance_port', 9292, 'glance port')
-DEFINE_string('glance_host', utils.get_my_ip(), 'glance host')
+DEFINE_string('glance_host', '$my_ip', 'glance host')
DEFINE_integer('s3_port', 3333, 's3 port')
-DEFINE_string('s3_host', utils.get_my_ip(), 's3 host (for infrastructure)')
-DEFINE_string('s3_dmz', utils.get_my_ip(), 's3 dmz ip (for instances)')
+DEFINE_string('s3_host', '$my_ip', 's3 host (for infrastructure)')
+DEFINE_string('s3_dmz', '$my_ip', 's3 dmz ip (for instances)')
DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on')
+DEFINE_string('console_topic', 'console',
+ 'the topic console proxy nodes listen on')
DEFINE_string('scheduler_topic', 'scheduler',
'the topic scheduler nodes listen on')
DEFINE_string('volume_topic', 'volume', 'the topic volume nodes listen on')
DEFINE_string('network_topic', 'network', 'the topic network nodes listen on')
-
+DEFINE_string('ajax_console_proxy_topic', 'ajax_proxy',
+ 'the topic ajax proxy nodes listen on')
+DEFINE_string('ajax_console_proxy_url',
+ 'http://127.0.0.1:8000',
+ 'location of ajax console proxy, \
+ in the form "http://127.0.0.1:8000"')
+DEFINE_string('ajax_console_proxy_port',
+ 8000, 'port that ajax_console_proxy binds')
DEFINE_bool('verbose', False, 'show debug output')
DEFINE_boolean('fake_rabbit', False, 'use a fake rabbit')
DEFINE_bool('fake_network', False,
@@ -236,10 +255,12 @@ DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval')
DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts')
DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to')
DEFINE_string('ec2_prefix', 'http', 'prefix for ec2')
-DEFINE_string('cc_host', utils.get_my_ip(), 'ip of api server')
-DEFINE_string('cc_dmz', utils.get_my_ip(), 'internal ip of api server')
+DEFINE_string('os_prefix', 'http', 'prefix for openstack')
+DEFINE_string('cc_host', '$my_ip', 'ip of api server')
+DEFINE_string('cc_dmz', '$my_ip', 'internal ip of api server')
DEFINE_integer('cc_port', 8773, 'cloud controller port')
DEFINE_string('ec2_suffix', '/services/Cloud', 'suffix for ec2')
+DEFINE_string('os_suffix', '/v1.0/', 'suffix for openstack')
DEFINE_string('default_project', 'openstack', 'default project for openstack')
DEFINE_string('default_image', 'ami-11111',
@@ -271,6 +292,8 @@ DEFINE_integer('sql_retry_interval', 10, 'sql connection retry interval')
DEFINE_string('compute_manager', 'nova.compute.manager.ComputeManager',
'Manager for compute')
+DEFINE_string('console_manager', 'nova.console.manager.ConsoleProxyManager',
+ 'Manager for console proxy')
DEFINE_string('network_manager', 'nova.network.manager.VlanManager',
'Manager for network')
DEFINE_string('volume_manager', 'nova.volume.manager.VolumeManager',
diff --git a/nova/image/glance.py b/nova/image/glance.py
index cb3936df1..a3a2f4308 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -19,19 +19,17 @@
import httplib
import json
-import logging
import urlparse
-import webob.exc
-
-from nova import utils
-from nova import flags
from nova import exception
-import nova.image.service
+from nova import flags
+from nova import log as logging
+from nova.image import service
-FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.image.glance')
+FLAGS = flags.FLAGS
flags.DEFINE_string('glance_teller_address', 'http://127.0.0.1',
'IP address or URL where Glance\'s Teller service resides')
flags.DEFINE_string('glance_teller_port', '9191',
@@ -77,8 +75,8 @@ class ParallaxClient(object):
data = json.loads(res.read())['images']
return data
else:
- logging.warn(_("Parallax returned HTTP error %d from "
- "request for /images"), res.status_int)
+ LOG.warn(_("Parallax returned HTTP error %d from "
+ "request for /images"), res.status_int)
return []
finally:
c.close()
@@ -96,8 +94,8 @@ class ParallaxClient(object):
data = json.loads(res.read())['images']
return data
else:
- logging.warn(_("Parallax returned HTTP error %d from "
- "request for /images/detail"), res.status_int)
+ LOG.warn(_("Parallax returned HTTP error %d from "
+ "request for /images/detail"), res.status_int)
return []
finally:
c.close()
@@ -165,7 +163,7 @@ class ParallaxClient(object):
c.close()
-class GlanceImageService(nova.image.service.BaseImageService):
+class GlanceImageService(service.BaseImageService):
"""Provides storage and retrieval of disk image objects within Glance."""
def __init__(self):
diff --git a/nova/log.py b/nova/log.py
new file mode 100644
index 000000000..c1428c051
--- /dev/null
+++ b/nova/log.py
@@ -0,0 +1,254 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Nova logging handler.
+
+This module adds to logging functionality by adding the option to specify
+a context object when calling the various log methods. If the context object
+is not specified, default formatting is used.
+
+It also allows setting of formatting information through flags.
+"""
+
+
+import cStringIO
+import json
+import logging
+import logging.handlers
+import traceback
+
+from nova import flags
+from nova import version
+
+
+FLAGS = flags.FLAGS
+
+flags.DEFINE_string('logging_context_format_string',
+ '(%(name)s %(nova_version)s): %(levelname)s '
+ '[%(request_id)s %(user)s '
+ '%(project)s] %(message)s',
+ 'format string to use for log messages')
+
+flags.DEFINE_string('logging_default_format_string',
+ '(%(name)s %(nova_version)s): %(levelname)s [N/A] '
+ '%(message)s',
+ 'format string to use for log messages')
+
+flags.DEFINE_string('logging_debug_format_suffix',
+ 'from %(processName)s (pid=%(process)d) %(funcName)s'
+ ' %(pathname)s:%(lineno)d',
+ 'data to append to log format when level is DEBUG')
+
+flags.DEFINE_string('logging_exception_prefix',
+ '(%(name)s): TRACE: ',
+ 'prefix each line of exception output with this format')
+
+flags.DEFINE_list('default_log_levels',
+ ['amqplib=WARN',
+ 'sqlalchemy=WARN',
+ 'eventlet.wsgi.server=WARN'],
+ 'list of logger=LEVEL pairs')
+
+flags.DEFINE_bool('use_syslog', False, 'output to syslog')
+flags.DEFINE_string('logfile', None, 'output to named file')
+
+
+# A list of things we want to replicate from logging.
+# levels
+CRITICAL = logging.CRITICAL
+FATAL = logging.FATAL
+ERROR = logging.ERROR
+WARNING = logging.WARNING
+WARN = logging.WARN
+INFO = logging.INFO
+DEBUG = logging.DEBUG
+NOTSET = logging.NOTSET
+# methods
+getLogger = logging.getLogger
+debug = logging.debug
+info = logging.info
+warning = logging.warning
+warn = logging.warn
+error = logging.error
+exception = logging.exception
+critical = logging.critical
+log = logging.log
+# handlers
+StreamHandler = logging.StreamHandler
+FileHandler = logging.FileHandler
+# logging.SysLogHandler is nicer than logging.logging.handler.SysLogHandler.
+SysLogHandler = logging.handlers.SysLogHandler
+
+
+# our new audit level
+AUDIT = logging.INFO + 1
+logging.addLevelName(AUDIT, 'AUDIT')
+
+
+def _dictify_context(context):
+ if context == None:
+ return None
+ if not isinstance(context, dict) \
+ and getattr(context, 'to_dict', None):
+ context = context.to_dict()
+ return context
+
+
+def basicConfig():
+ logging.basicConfig()
+ for handler in logging.root.handlers:
+ handler.setFormatter(_formatter)
+ if FLAGS.verbose:
+ logging.root.setLevel(logging.DEBUG)
+ if FLAGS.use_syslog:
+ syslog = SysLogHandler(address='/dev/log')
+ syslog.setFormatter(_formatter)
+ logging.root.addHandler(syslog)
+ if FLAGS.logfile:
+ logfile = FileHandler(FLAGS.logfile)
+ logfile.setFormatter(_formatter)
+ logging.root.addHandler(logfile)
+
+
+class NovaLogger(logging.Logger):
+ """
+ NovaLogger manages request context and formatting.
+
+    This becomes the class that is instantiated by logging.getLogger.
+ """
+ def __init__(self, name, level=NOTSET):
+ level_name = self._get_level_from_flags(name, FLAGS)
+ level = globals()[level_name]
+ logging.Logger.__init__(self, name, level)
+
+ def _get_level_from_flags(self, name, FLAGS):
+ # if exactly "nova", or a child logger, honor the verbose flag
+ if (name == "nova" or name.startswith("nova.")) and FLAGS.verbose:
+ return 'DEBUG'
+ for pair in FLAGS.default_log_levels:
+ logger, _sep, level = pair.partition('=')
+ # NOTE(todd): if we set a.b, we want a.b.c to have the same level
+ # (but not a.bc, so we check the dot)
+ if name == logger:
+ return level
+ if name.startswith(logger) and name[len(logger)] == '.':
+ return level
+ return 'INFO'
+
+ def _log(self, level, msg, args, exc_info=None, extra=None, context=None):
+ """Extract context from any log call"""
+ if not extra:
+ extra = {}
+ if context:
+ extra.update(_dictify_context(context))
+ extra.update({"nova_version": version.version_string_with_vcs()})
+ logging.Logger._log(self, level, msg, args, exc_info, extra)
+
+ def addHandler(self, handler):
+ """Each handler gets our custom formatter"""
+ handler.setFormatter(_formatter)
+ logging.Logger.addHandler(self, handler)
+
+ def audit(self, msg, *args, **kwargs):
+ """Shortcut for our AUDIT level"""
+ if self.isEnabledFor(AUDIT):
+ self._log(AUDIT, msg, args, **kwargs)
+
+ def exception(self, msg, *args, **kwargs):
+ """Logging.exception doesn't handle kwargs, so breaks context"""
+ if not kwargs.get('exc_info'):
+ kwargs['exc_info'] = 1
+ self.error(msg, *args, **kwargs)
+ # NOTE(todd): does this really go here, or in _log ?
+ extra = kwargs.get('extra')
+ if not extra:
+ return
+ env = extra.get('environment')
+ if env:
+ env = env.copy()
+ for k in env.keys():
+ if not isinstance(env[k], str):
+ env.pop(k)
+ message = "Environment: %s" % json.dumps(env)
+ kwargs.pop('exc_info')
+ self.error(message, **kwargs)
+
+logging.setLoggerClass(NovaLogger)
+
+
+class NovaRootLogger(NovaLogger):
+ pass
+
+if not isinstance(logging.root, NovaRootLogger):
+ logging.root = NovaRootLogger("nova.root", WARNING)
+ NovaLogger.root = logging.root
+ NovaLogger.manager.root = logging.root
+
+
+class NovaFormatter(logging.Formatter):
+ """
+ A nova.context.RequestContext aware formatter configured through flags.
+
+    The flags used to set format strings are: logging_context_format_string
+ and logging_default_format_string. You can also specify
+ logging_debug_format_suffix to append extra formatting if the log level is
+ debug.
+
+ For information about what variables are available for the formatter see:
+ http://docs.python.org/library/logging.html#formatter
+ """
+
+ def format(self, record):
+ """Uses contextstring if request_id is set, otherwise default"""
+ if record.__dict__.get('request_id', None):
+ self._fmt = FLAGS.logging_context_format_string
+ else:
+ self._fmt = FLAGS.logging_default_format_string
+ if record.levelno == logging.DEBUG \
+ and FLAGS.logging_debug_format_suffix:
+ self._fmt += " " + FLAGS.logging_debug_format_suffix
+        # Cache this on the record, Logger will respect our formatted copy
+ if record.exc_info:
+ record.exc_text = self.formatException(record.exc_info, record)
+ return logging.Formatter.format(self, record)
+
+ def formatException(self, exc_info, record=None):
+ """Format exception output with FLAGS.logging_exception_prefix"""
+ if not record:
+ return logging.Formatter.formatException(self, exc_info)
+ stringbuffer = cStringIO.StringIO()
+ traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
+ None, stringbuffer)
+ lines = stringbuffer.getvalue().split("\n")
+ stringbuffer.close()
+ formatted_lines = []
+ for line in lines:
+ pl = FLAGS.logging_exception_prefix % record.__dict__
+ fl = "%s%s" % (pl, line)
+ formatted_lines.append(fl)
+ return "\n".join(formatted_lines)
+
+_formatter = NovaFormatter()
+
+
+def audit(msg, *args, **kwargs):
+ """Shortcut for logging to root log with sevrity 'AUDIT'."""
+ if len(logging.root.handlers) == 0:
+ basicConfig()
+ logging.root.log(AUDIT, msg, *args, **kwargs)
diff --git a/nova/network/api.py b/nova/network/api.py
index cbd912047..bf43acb51 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -20,15 +20,15 @@
Handles all requests relating to instances (guest vms).
"""
-import logging
-
from nova import db
from nova import flags
+from nova import log as logging
from nova import quota
from nova import rpc
from nova.db import base
FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.network')
class API(base.Base):
@@ -36,7 +36,7 @@ class API(base.Base):
def allocate_floating_ip(self, context):
if quota.allowed_floating_ips(context, 1) < 1:
- logging.warn(_("Quota exceeeded for %s, tried to allocate "
+ LOG.warn(_("Quota exceeeded for %s, tried to allocate "
"address"),
context.project_id)
raise quota.QuotaError(_("Address quota exceeded. You cannot "
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index 931a89554..3743fc7e8 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -17,16 +17,17 @@
Implements vlans, bridges, and iptables rules using linux utilities.
"""
-import logging
import os
-# TODO(ja): does the definition of network_path belong here?
-
from nova import db
from nova import flags
+from nova import log as logging
from nova import utils
+LOG = logging.getLogger("nova.linux_net")
+
+
def _bin_file(script):
"""Return the absolute path to scipt in the bin directory"""
return os.path.abspath(os.path.join(__file__, "../../../bin", script))
@@ -45,7 +46,7 @@ flags.DEFINE_string('vlan_interface', 'eth0',
'network device for vlans')
flags.DEFINE_string('dhcpbridge', _bin_file('nova-dhcpbridge'),
'location of nova-dhcpbridge')
-flags.DEFINE_string('routing_source_ip', utils.get_my_ip(),
+flags.DEFINE_string('routing_source_ip', '$my_ip',
'Public IP of network host')
flags.DEFINE_bool('use_nova_chains', False,
'use the nova_ routing chains instead of default')
@@ -172,7 +173,7 @@ def ensure_vlan(vlan_num):
"""Create a vlan unless it already exists"""
interface = "vlan%s" % vlan_num
if not _device_exists(interface):
- logging.debug(_("Starting VLAN inteface %s"), interface)
+ LOG.debug(_("Starting VLAN inteface %s"), interface)
_execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD")
_execute("sudo vconfig add %s %s" % (FLAGS.vlan_interface, vlan_num))
_execute("sudo ifconfig %s up" % interface)
@@ -182,7 +183,7 @@ def ensure_vlan(vlan_num):
def ensure_bridge(bridge, interface, net_attrs=None):
"""Create a bridge unless it already exists"""
if not _device_exists(bridge):
- logging.debug(_("Starting Bridge interface for %s"), interface)
+ LOG.debug(_("Starting Bridge interface for %s"), interface)
_execute("sudo brctl addbr %s" % bridge)
_execute("sudo brctl setfd %s 0" % bridge)
# _execute("sudo brctl setageing %s 10" % bridge)
@@ -208,6 +209,8 @@ def ensure_bridge(bridge, interface, net_attrs=None):
_confirm_rule("FORWARD", "--in-interface %s -j ACCEPT" % bridge)
_confirm_rule("FORWARD", "--out-interface %s -j ACCEPT" % bridge)
+ _execute("sudo iptables -N nova-local", check_exit_code=False)
+ _confirm_rule("FORWARD", "-j nova-local")
def get_dhcp_hosts(context, network_id):
@@ -248,9 +251,9 @@ def update_dhcp(context, network_id):
_execute('sudo kill -HUP %d' % pid)
return
except Exception as exc: # pylint: disable-msg=W0703
- logging.debug(_("Hupping dnsmasq threw %s"), exc)
+ LOG.debug(_("Hupping dnsmasq threw %s"), exc)
else:
- logging.debug(_("Pid %d is stale, relaunching dnsmasq"), pid)
+ LOG.debug(_("Pid %d is stale, relaunching dnsmasq"), pid)
# FLAGFILE and DNSMASQ_INTERFACE in env
env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile,
@@ -270,7 +273,7 @@ def _host_dhcp(fixed_ip_ref):
def _execute(cmd, *args, **kwargs):
"""Wrapper around utils._execute for fake_network"""
if FLAGS.fake_network:
- logging.debug("FAKE NET: %s", cmd)
+ LOG.debug("FAKE NET: %s", cmd)
return "fake", 0
else:
return utils.execute(cmd, *args, **kwargs)
@@ -328,7 +331,7 @@ def _stop_dnsmasq(network):
try:
_execute('sudo kill -TERM %d' % pid)
except Exception as exc: # pylint: disable-msg=W0703
- logging.debug(_("Killing dnsmasq threw %s"), exc)
+ LOG.debug(_("Killing dnsmasq threw %s"), exc)
def _dhcp_file(bridge, kind):
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 16aa8f895..c75ecc671 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -45,7 +45,6 @@ topologies. All of the network commands are issued to a subclass of
"""
import datetime
-import logging
import math
import socket
@@ -55,11 +54,13 @@ from nova import context
from nova import db
from nova import exception
from nova import flags
+from nova import log as logging
from nova import manager
from nova import utils
from nova import rpc
+LOG = logging.getLogger("nova.network.manager")
FLAGS = flags.FLAGS
flags.DEFINE_string('flat_network_bridge', 'br100',
'Bridge for simple network instances')
@@ -73,7 +74,7 @@ flags.DEFINE_string('flat_network_dhcp_start', '10.0.0.2',
'Dhcp start for FlatDhcp')
flags.DEFINE_integer('vlan_start', 100, 'First VLAN for private networks')
flags.DEFINE_integer('num_networks', 1000, 'Number of networks to support')
-flags.DEFINE_string('vpn_ip', utils.get_my_ip(),
+flags.DEFINE_string('vpn_ip', '$my_ip',
'Public IP for the cloudpipe VPN servers')
flags.DEFINE_integer('vpn_start', 1000, 'First Vpn port for private networks')
flags.DEFINE_integer('network_size', 256,
@@ -131,7 +132,7 @@ class NetworkManager(manager.Manager):
def set_network_host(self, context, network_id):
"""Safely sets the host of the network."""
- logging.debug(_("setting network host"))
+ LOG.debug(_("setting network host"), context=context)
host = self.db.network_set_host(context,
network_id,
self.host)
@@ -186,7 +187,7 @@ class NetworkManager(manager.Manager):
def lease_fixed_ip(self, context, mac, address):
"""Called by dhcp-bridge when ip is leased."""
- logging.debug("Leasing IP %s", address)
+ LOG.debug(_("Leasing IP %s"), address, context=context)
fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
instance_ref = fixed_ip_ref['instance']
if not instance_ref:
@@ -201,12 +202,12 @@ class NetworkManager(manager.Manager):
{'leased': True,
'updated_at': now})
if not fixed_ip_ref['allocated']:
- logging.warn(_("IP %s leased that was already deallocated"),
- address)
+ LOG.warn(_("IP %s leased that was already deallocated"), address,
+ context=context)
def release_fixed_ip(self, context, mac, address):
"""Called by dhcp-bridge when ip is released."""
- logging.debug("Releasing IP %s", address)
+ LOG.debug("Releasing IP %s", address, context=context)
fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
instance_ref = fixed_ip_ref['instance']
if not instance_ref:
@@ -216,7 +217,8 @@ class NetworkManager(manager.Manager):
raise exception.Error(_("IP %s released from bad mac %s vs %s") %
(address, instance_ref['mac_address'], mac))
if not fixed_ip_ref['leased']:
- logging.warn(_("IP %s released that was not leased"), address)
+ LOG.warn(_("IP %s released that was not leased"), address,
+ context=context)
self.db.fixed_ip_update(context,
fixed_ip_ref['address'],
{'leased': False})
@@ -437,7 +439,7 @@ class VlanManager(NetworkManager):
self.host,
time)
if num:
- logging.debug(_("Dissassociated %s stale fixed ip(s)"), num)
+ LOG.debug(_("Dissassociated %s stale fixed ip(s)"), num)
def init_host(self):
"""Do any initialization that needs to be run if this is a
diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py
index 52257f69f..bc26fd3c5 100644
--- a/nova/objectstore/handler.py
+++ b/nova/objectstore/handler.py
@@ -39,7 +39,6 @@ S3 client with this module::
import datetime
import json
-import logging
import multiprocessing
import os
import urllib
@@ -54,12 +53,14 @@ from twisted.web import static
from nova import context
from nova import exception
from nova import flags
+from nova import log as logging
from nova import utils
from nova.auth import manager
from nova.objectstore import bucket
from nova.objectstore import image
+LOG = logging.getLogger('nova.objectstore.handler')
FLAGS = flags.FLAGS
flags.DEFINE_string('s3_listen_host', '', 'Host to listen on.')
@@ -132,9 +133,11 @@ def get_context(request):
request.uri,
headers=request.getAllHeaders(),
check_type='s3')
- return context.RequestContext(user, project)
+ rv = context.RequestContext(user, project)
+ LOG.audit(_("Authenticated request"), context=rv)
+ return rv
except exception.Error as ex:
- logging.debug(_("Authentication Failure: %s"), ex)
+ LOG.debug(_("Authentication Failure: %s"), ex)
raise exception.NotAuthorized()
@@ -176,7 +179,7 @@ class S3(ErrorHandlingResource):
def render_GET(self, request): # pylint: disable-msg=R0201
"""Renders the GET request for a list of buckets as XML"""
- logging.debug('List of buckets requested')
+ LOG.debug(_('List of buckets requested'), context=request.context)
buckets = [b for b in bucket.Bucket.all() \
if b.is_authorized(request.context)]
@@ -203,7 +206,7 @@ class BucketResource(ErrorHandlingResource):
def render_GET(self, request):
"Returns the keys for the bucket resource"""
- logging.debug("List keys for bucket %s", self.name)
+ LOG.debug(_("List keys for bucket %s"), self.name)
try:
bucket_object = bucket.Bucket(self.name)
@@ -211,6 +214,8 @@ class BucketResource(ErrorHandlingResource):
return error.NoResource(message="No such bucket").render(request)
if not bucket_object.is_authorized(request.context):
+ LOG.audit(_("Unauthorized attempt to access bucket %s"),
+ self.name, context=request.context)
raise exception.NotAuthorized()
prefix = get_argument(request, "prefix", u"")
@@ -227,8 +232,8 @@ class BucketResource(ErrorHandlingResource):
def render_PUT(self, request):
"Creates the bucket resource"""
- logging.debug(_("Creating bucket %s"), self.name)
- logging.debug("calling bucket.Bucket.create(%r, %r)",
+ LOG.debug(_("Creating bucket %s"), self.name)
+ LOG.debug("calling bucket.Bucket.create(%r, %r)",
self.name,
request.context)
bucket.Bucket.create(self.name, request.context)
@@ -237,10 +242,12 @@ class BucketResource(ErrorHandlingResource):
def render_DELETE(self, request):
"""Deletes the bucket resource"""
- logging.debug(_("Deleting bucket %s"), self.name)
+ LOG.debug(_("Deleting bucket %s"), self.name)
bucket_object = bucket.Bucket(self.name)
if not bucket_object.is_authorized(request.context):
+ LOG.audit(_("Unauthorized attempt to delete bucket %s"),
+ self.name, context=request.context)
raise exception.NotAuthorized()
bucket_object.delete()
@@ -261,11 +268,12 @@ class ObjectResource(ErrorHandlingResource):
Raises NotAuthorized if user in request context is not
authorized to delete the object.
"""
- logging.debug(_("Getting object: %s / %s"),
- self.bucket.name,
- self.name)
+ LOG.debug(_("Getting object: %s / %s"), self.bucket.name, self.name)
if not self.bucket.is_authorized(request.context):
+ LOG.audit(_("Unauthorized attempt to get object %s from bucket "
+ "%s"), self.name, self.bucket.name,
+ context=request.context)
raise exception.NotAuthorized()
obj = self.bucket[urllib.unquote(self.name)]
@@ -281,11 +289,12 @@ class ObjectResource(ErrorHandlingResource):
Raises NotAuthorized if user in request context is not
authorized to delete the object.
"""
- logging.debug(_("Putting object: %s / %s"),
- self.bucket.name,
- self.name)
+ LOG.debug(_("Putting object: %s / %s"), self.bucket.name, self.name)
if not self.bucket.is_authorized(request.context):
+ LOG.audit(_("Unauthorized attempt to upload object %s to bucket "
+ "%s"),
+ self.name, self.bucket.name, context=request.context)
raise exception.NotAuthorized()
key = urllib.unquote(self.name)
@@ -302,11 +311,13 @@ class ObjectResource(ErrorHandlingResource):
authorized to delete the object.
"""
- logging.debug(_("Deleting object: %s / %s"),
- self.bucket.name,
- self.name)
+ LOG.debug(_("Deleting object: %s / %s"), self.bucket.name, self.name,
+ context=request.context)
if not self.bucket.is_authorized(request.context):
+ LOG.audit("Unauthorized attempt to delete object %s from "
+ "bucket %s", self.name, self.bucket.name,
+ context=request.context)
raise exception.NotAuthorized()
del self.bucket[urllib.unquote(self.name)]
@@ -379,13 +390,21 @@ class ImagesResource(resource.Resource):
image_path = os.path.join(FLAGS.images_path, image_id)
if not image_path.startswith(FLAGS.images_path) or \
os.path.exists(image_path):
+ LOG.audit(_("Not authorized to upload image: invalid directory "
+ "%s"),
+ image_path, context=request.context)
raise exception.NotAuthorized()
bucket_object = bucket.Bucket(image_location.split("/")[0])
if not bucket_object.is_authorized(request.context):
+ LOG.audit(_("Not authorized to upload image: unauthorized "
+ "bucket %s"), bucket_object.name,
+ context=request.context)
raise exception.NotAuthorized()
+ LOG.audit(_("Starting image upload: %s"), image_id,
+ context=request.context)
p = multiprocessing.Process(target=image.Image.register_aws_image,
args=(image_id, image_location, request.context))
p.start()
@@ -398,17 +417,21 @@ class ImagesResource(resource.Resource):
image_id = get_argument(request, 'image_id', u'')
image_object = image.Image(image_id)
if not image_object.is_authorized(request.context):
- logging.debug(_("not authorized for render_POST in images"))
+ LOG.audit(_("Not authorized to update attributes of image %s"),
+ image_id, context=request.context)
raise exception.NotAuthorized()
operation = get_argument(request, 'operation', u'')
if operation:
# operation implies publicity toggle
- logging.debug(_("handling publicity toggle"))
- image_object.set_public(operation == 'add')
+ newstatus = (operation == 'add')
+ LOG.audit(_("Toggling publicity flag of image %s %r"), image_id,
+ newstatus, context=request.context)
+ image_object.set_public(newstatus)
else:
# other attributes imply update
- logging.debug(_("update user fields"))
+ LOG.audit(_("Updating user fields on image %s"), image_id,
+ context=request.context)
clean_args = {}
for arg in request.args.keys():
clean_args[arg] = request.args[arg][0]
@@ -421,9 +444,12 @@ class ImagesResource(resource.Resource):
image_object = image.Image(image_id)
if not image_object.is_authorized(request.context):
+ LOG.audit(_("Unauthorized attempt to delete image %s"),
+ image_id, context=request.context)
raise exception.NotAuthorized()
image_object.delete()
+ LOG.audit(_("Deleted image: %s"), image_id, context=request.context)
request.setResponseCode(204)
return ''
diff --git a/nova/rpc.py b/nova/rpc.py
index 844088348..49b11602b 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -22,7 +22,6 @@ No fan-out support yet.
"""
import json
-import logging
import sys
import time
import traceback
@@ -36,13 +35,12 @@ from nova import context
from nova import exception
from nova import fakerabbit
from nova import flags
+from nova import log as logging
from nova import utils
FLAGS = flags.FLAGS
-
-LOG = logging.getLogger('amqplib')
-LOG.setLevel(logging.DEBUG)
+LOG = logging.getLogger('nova.rpc')
class Connection(carrot_connection.BrokerConnection):
@@ -91,15 +89,16 @@ class Consumer(messaging.Consumer):
self.failed_connection = False
break
except: # Catching all because carrot sucks
- logging.exception(_("AMQP server on %s:%d is unreachable."
- " Trying again in %d seconds.") % (
- FLAGS.rabbit_host,
- FLAGS.rabbit_port,
- FLAGS.rabbit_retry_interval))
+ LOG.exception(_("AMQP server on %s:%d is unreachable."
+ " Trying again in %d seconds.") % (
+ FLAGS.rabbit_host,
+ FLAGS.rabbit_port,
+ FLAGS.rabbit_retry_interval))
self.failed_connection = True
if self.failed_connection:
- logging.exception(_("Unable to connect to AMQP server"
- " after %d tries. Shutting down.") % FLAGS.rabbit_max_retries)
+ LOG.exception(_("Unable to connect to AMQP server "
+ "after %d tries. Shutting down."),
+ FLAGS.rabbit_max_retries)
sys.exit(1)
def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False):
@@ -116,14 +115,14 @@ class Consumer(messaging.Consumer):
self.declare()
super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks)
if self.failed_connection:
- logging.error(_("Reconnected to queue"))
+ LOG.error(_("Reconnected to queue"))
self.failed_connection = False
# NOTE(vish): This is catching all errors because we really don't
# exceptions to be logged 10 times a second if some
# persistent failure occurs.
except Exception: # pylint: disable-msg=W0703
if not self.failed_connection:
- logging.exception(_("Failed to fetch message from queue"))
+ LOG.exception(_("Failed to fetch message from queue"))
self.failed_connection = True
def attach_to_eventlet(self):
@@ -193,6 +192,7 @@ class AdapterConsumer(TopicConsumer):
if msg_id:
msg_reply(msg_id, rval, None)
except Exception as e:
+ logging.exception("Exception during message handling")
if msg_id:
msg_reply(msg_id, None, sys.exc_info())
return
@@ -242,8 +242,8 @@ def msg_reply(msg_id, reply=None, failure=None):
if failure:
message = str(failure[1])
tb = traceback.format_exception(*failure)
- logging.error(_("Returning exception %s to caller"), message)
- logging.error(tb)
+ LOG.error(_("Returning exception %s to caller"), message)
+ LOG.error(tb)
failure = (failure[0].__name__, str(failure[1]), tb)
conn = Connection.instance(True)
publisher = DirectPublisher(connection=conn, msg_id=msg_id)
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 44e21f2fd..a4d6dd574 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -21,15 +21,16 @@
Scheduler Service
"""
-import logging
import functools
from nova import db
from nova import flags
+from nova import log as logging
from nova import manager
from nova import rpc
from nova import utils
+LOG = logging.getLogger('nova.scheduler.manager')
FLAGS = flags.FLAGS
flags.DEFINE_string('scheduler_driver',
'nova.scheduler.chance.ChanceScheduler',
@@ -65,4 +66,4 @@ class SchedulerManager(manager.Manager):
db.queue_get_for(context, topic, host),
{"method": method,
"args": kwargs})
- logging.debug(_("Casting to %s %s for %s"), topic, host, method)
+ LOG.debug(_("Casting to %s %s for %s"), topic, host, method)
diff --git a/nova/service.py b/nova/service.py
index 7203430c6..523c1a8d7 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -21,7 +21,6 @@ Generic Node baseclass for all workers that run on hosts
"""
import inspect
-import logging
import os
import sys
import time
@@ -35,10 +34,10 @@ from sqlalchemy.exc import OperationalError
from nova import context
from nova import db
from nova import exception
+from nova import log as logging
from nova import flags
from nova import rpc
from nova import utils
-from nova.db.sqlalchemy import models
FLAGS = flags.FLAGS
@@ -155,7 +154,7 @@ class Service(object):
report_interval = FLAGS.report_interval
if not periodic_interval:
periodic_interval = FLAGS.periodic_interval
- logging.warn(_("Starting %s node"), topic)
+ logging.audit(_("Starting %s node"), topic)
service_obj = cls(host, binary, topic, manager,
report_interval, periodic_interval)
@@ -209,29 +208,28 @@ class Service(object):
logging.exception(_("model server went away"))
try:
+ # NOTE(vish): This is late-loaded to make sure that the
+ # database is not created before flags have
+ # been loaded.
+ from nova.db.sqlalchemy import models
models.register_models()
except OperationalError:
- logging.exception(_("Data store is unreachable."
- " Trying again in %d seconds.") %
- FLAGS.sql_retry_interval)
+ logging.exception(_("Data store %s is unreachable."
+ " Trying again in %d seconds.") %
+ (FLAGS.sql_connection,
+ FLAGS.sql_retry_interval))
time.sleep(FLAGS.sql_retry_interval)
def serve(*services):
- argv = FLAGS(sys.argv)
+ FLAGS(sys.argv)
+ logging.basicConfig()
if not services:
services = [Service.create()]
name = '_'.join(x.binary for x in services)
- logging.debug("Serving %s" % name)
-
- logging.getLogger('amqplib').setLevel(logging.WARN)
-
- if FLAGS.verbose:
- logging.getLogger().setLevel(logging.DEBUG)
- else:
- logging.getLogger().setLevel(logging.WARNING)
+ logging.debug(_("Serving %s"), name)
logging.debug(_("Full set of FLAGS:"))
for flag in FLAGS:
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index 291a0e468..194304e79 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -107,7 +107,7 @@ def stub_out_rate_limiting(stubs):
def stub_out_networking(stubs):
def get_my_ip():
return '127.0.0.1'
- stubs.Set(nova.utils, 'get_my_ip', get_my_ip)
+ stubs.Set(nova.flags, '_get_my_ip', get_my_ip)
def stub_out_compute_api_snapshot(stubs):
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py
index 0f274bd15..00ca739a5 100644
--- a/nova/tests/api/openstack/test_images.py
+++ b/nova/tests/api/openstack/test_images.py
@@ -22,7 +22,6 @@ and as a WSGI layer
import json
import datetime
-import logging
import unittest
import stubout
@@ -173,6 +172,7 @@ class ImageControllerWithGlanceServiceTest(unittest.TestCase):
IMAGE_FIXTURES = [
{'id': '23g2ogk23k4hhkk4k42l',
+ 'imageId': '23g2ogk23k4hhkk4k42l',
'name': 'public image #1',
'created_at': str(datetime.datetime.utcnow()),
'updated_at': str(datetime.datetime.utcnow()),
@@ -182,6 +182,7 @@ class ImageControllerWithGlanceServiceTest(unittest.TestCase):
'status': 'available',
'image_type': 'kernel'},
{'id': 'slkduhfas73kkaskgdas',
+ 'imageId': 'slkduhfas73kkaskgdas',
'name': 'public image #2',
'created_at': str(datetime.datetime.utcnow()),
'updated_at': str(datetime.datetime.utcnow()),
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index 6e611a55d..0396daf98 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -133,6 +133,12 @@ class ServersTest(unittest.TestCase):
def queue_get_for(context, *args):
return 'network_topic'
+ def kernel_ramdisk_mapping(*args, **kwargs):
+ return (1, 1)
+
+ def image_id_from_hash(*args, **kwargs):
+ return 2
+
self.stubs.Set(nova.db.api, 'project_get_network', project_get_network)
self.stubs.Set(nova.db.api, 'instance_create', instance_create)
self.stubs.Set(nova.rpc, 'cast', fake_method)
@@ -142,6 +148,10 @@ class ServersTest(unittest.TestCase):
self.stubs.Set(nova.db.api, 'queue_get_for', queue_get_for)
self.stubs.Set(nova.network.manager.VlanManager, 'allocate_fixed_ip',
fake_method)
+ self.stubs.Set(nova.api.openstack.servers.Controller,
+ "_get_kernel_ramdisk_from_image", kernel_ramdisk_mapping)
+ self.stubs.Set(nova.api.openstack.common,
+ "get_image_id_from_image_hash", image_id_from_hash)
body = dict(server=dict(
name='server_test', imageId=2, flavorId=2, metadata={},
diff --git a/nova/tests/api/openstack/test_sharedipgroups.py b/nova/tests/api/openstack/test_shared_ip_groups.py
index d199951d8..c2fc3a203 100644
--- a/nova/tests/api/openstack/test_sharedipgroups.py
+++ b/nova/tests/api/openstack/test_shared_ip_groups.py
@@ -19,7 +19,7 @@ import unittest
import stubout
-from nova.api.openstack import sharedipgroups
+from nova.api.openstack import shared_ip_groups
class SharedIpGroupsTest(unittest.TestCase):
diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py
index ceac17adb..da86e6e11 100644
--- a/nova/tests/objectstore_unittest.py
+++ b/nova/tests/objectstore_unittest.py
@@ -23,7 +23,6 @@ Unittets for S3 objectstore clone.
import boto
import glob
import hashlib
-import logging
import os
import shutil
import tempfile
@@ -63,7 +62,6 @@ class ObjectStoreTestCase(test.TestCase):
self.flags(buckets_path=os.path.join(OSS_TEMPDIR, 'buckets'),
images_path=os.path.join(OSS_TEMPDIR, 'images'),
ca_path=os.path.join(os.path.dirname(__file__), 'CA'))
- logging.getLogger().setLevel(logging.DEBUG)
self.auth_manager = manager.AuthManager()
self.auth_manager.create_user('user1')
diff --git a/nova/tests/test_access.py b/nova/tests/test_access.py
index 58fdea3b5..0929903cf 100644
--- a/nova/tests/test_access.py
+++ b/nova/tests/test_access.py
@@ -17,7 +17,6 @@
# under the License.
import unittest
-import logging
import webob
from nova import context
diff --git a/nova/tests/test_auth.py b/nova/tests/test_auth.py
index 15d40bc53..35ffffb67 100644
--- a/nova/tests/test_auth.py
+++ b/nova/tests/test_auth.py
@@ -16,17 +16,18 @@
# License for the specific language governing permissions and limitations
# under the License.
-import logging
from M2Crypto import X509
import unittest
from nova import crypto
from nova import flags
+from nova import log as logging
from nova import test
from nova.auth import manager
from nova.api.ec2 import cloud
FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.auth_unittest')
class user_generator(object):
@@ -211,12 +212,12 @@ class AuthManagerTestCase(object):
# NOTE(vish): Setup runs genroot.sh if it hasn't been run
cloud.CloudController().setup()
_key, cert_str = crypto.generate_x509_cert(user.id, project.id)
- logging.debug(cert_str)
+ LOG.debug(cert_str)
full_chain = crypto.fetch_ca(project_id=project.id, chain=True)
int_cert = crypto.fetch_ca(project_id=project.id, chain=False)
cloud_cert = crypto.fetch_ca()
- logging.debug("CA chain:\n\n =====\n%s\n\n=====" % full_chain)
+ LOG.debug("CA chain:\n\n =====\n%s\n\n=====", full_chain)
signed_cert = X509.load_cert_string(cert_str)
chain_cert = X509.load_cert_string(full_chain)
int_cert = X509.load_cert_string(int_cert)
@@ -331,7 +332,7 @@ class AuthManagerLdapTestCase(AuthManagerTestCase, test.TestCase):
test.TestCase.__init__(self, *args, **kwargs)
import nova.auth.fakeldap as fakeldap
if FLAGS.flush_db:
- logging.info("Flushing datastore")
+ LOG.info("Flushing datastore")
r = fakeldap.Store.instance()
r.flushdb()
diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index ba58fab59..8e43eec00 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -18,7 +18,6 @@
from base64 import b64decode
import json
-import logging
from M2Crypto import BIO
from M2Crypto import RSA
import os
@@ -31,6 +30,7 @@ from nova import context
from nova import crypto
from nova import db
from nova import flags
+from nova import log as logging
from nova import rpc
from nova import service
from nova import test
@@ -41,6 +41,7 @@ from nova.objectstore import image
FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.cloud')
# Temp dirs for working with image attributes through the cloud controller
# (stole this from objectstore_unittest.py)
@@ -56,7 +57,6 @@ class CloudTestCase(test.TestCase):
images_path=IMAGES_PATH)
self.conn = rpc.Connection.instance()
- logging.getLogger().setLevel(logging.DEBUG)
# set up our cloud
self.cloud = cloud.CloudController()
@@ -133,6 +133,23 @@ class CloudTestCase(test.TestCase):
db.volume_destroy(self.context, vol1['id'])
db.volume_destroy(self.context, vol2['id'])
+ def test_describe_instances(self):
+ """Makes sure describe_instances works and filters results."""
+ inst1 = db.instance_create(self.context, {'reservation_id': 'a'})
+ inst2 = db.instance_create(self.context, {'reservation_id': 'a'})
+ result = self.cloud.describe_instances(self.context)
+ result = result['reservationSet'][0]
+ self.assertEqual(len(result['instancesSet']), 2)
+ instance_id = cloud.id_to_ec2_id(inst2['id'])
+ result = self.cloud.describe_instances(self.context,
+ instance_id=[instance_id])
+ result = result['reservationSet'][0]
+ self.assertEqual(len(result['instancesSet']), 1)
+ self.assertEqual(result['instancesSet'][0]['instanceId'],
+ instance_id)
+ db.instance_destroy(self.context, inst1['id'])
+ db.instance_destroy(self.context, inst2['id'])
+
def test_console_output(self):
image_id = FLAGS.default_image
instance_type = FLAGS.default_instance_type
@@ -141,7 +158,6 @@ class CloudTestCase(test.TestCase):
'instance_type': instance_type,
'max_count': max_count}
rv = self.cloud.run_instances(self.context, **kwargs)
- print rv
instance_id = rv['instancesSet'][0]['instanceId']
output = self.cloud.get_console_output(context=self.context,
instance_id=[instance_id])
@@ -151,6 +167,19 @@ class CloudTestCase(test.TestCase):
greenthread.sleep(0.3)
rv = self.cloud.terminate_instances(self.context, [instance_id])
+    def test_ajax_console(self):
+        kwargs = {'image_id': FLAGS.default_image}
+        rv = self.cloud.run_instances(self.context, **kwargs)
+        instance_id = rv['instancesSet'][0]['instanceId']
+        output = self.cloud.get_console_output(context=self.context,
+                                               instance_id=[instance_id])
+        self.assertEquals(b64decode(output['output']),
+                          'http://fakeajaxconsole.com/?token=FAKETOKEN')
+        # TODO(soren): We need this until we can stop polling in the rpc code
+        # for unit tests.
+        greenthread.sleep(0.3)
+        rv = self.cloud.terminate_instances(self.context, [instance_id])
+
def test_key_generation(self):
result = self._create_key('test')
private_key = result['private_key']
@@ -179,7 +208,7 @@ class CloudTestCase(test.TestCase):
def test_run_instances(self):
if FLAGS.connection_type == 'fake':
- logging.debug("Can't test instances without a real virtual env.")
+ LOG.debug(_("Can't test instances without a real virtual env."))
return
image_id = FLAGS.default_image
instance_type = FLAGS.default_instance_type
@@ -191,25 +220,25 @@ class CloudTestCase(test.TestCase):
# TODO: check for proper response
instance_id = rv['reservationSet'][0].keys()[0]
instance = rv['reservationSet'][0][instance_id][0]
- logging.debug("Need to watch instance %s until it's running..." %
- instance['instance_id'])
+ LOG.debug(_("Need to watch instance %s until it's running..."),
+ instance['instance_id'])
while True:
greenthread.sleep(1)
info = self.cloud._get_instance(instance['instance_id'])
- logging.debug(info['state'])
+ LOG.debug(info['state'])
if info['state'] == power_state.RUNNING:
break
self.assert_(rv)
- if connection_type != 'fake':
+ if FLAGS.connection_type != 'fake':
time.sleep(45) # Should use boto for polling here
for reservations in rv['reservationSet']:
# for res_id in reservations.keys():
- # logging.debug(reservations[res_id])
+ # LOG.debug(reservations[res_id])
# for instance in reservations[res_id]:
for instance in reservations[reservations.keys()[0]]:
instance_id = instance['instance_id']
- logging.debug("Terminating instance %s" % instance_id)
+ LOG.debug(_("Terminating instance %s"), instance_id)
rv = self.compute.terminate_instance(instance_id)
def test_instance_update_state(self):
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 534493dfe..52660ee74 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -20,25 +20,25 @@ Tests For Compute
"""
import datetime
-import logging
from nova import compute
from nova import context
from nova import db
from nova import exception
from nova import flags
+from nova import log as logging
from nova import test
from nova import utils
from nova.auth import manager
FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.compute')
class ComputeTestCase(test.TestCase):
"""Test case for compute"""
def setUp(self):
- logging.getLogger().setLevel(logging.DEBUG)
super(ComputeTestCase, self).setUp()
self.flags(connection_type='fake',
stub_network=True,
@@ -101,13 +101,13 @@ class ComputeTestCase(test.TestCase):
self.compute.run_instance(self.context, instance_id)
instances = db.instance_get_all(context.get_admin_context())
- logging.info(_("Running instances: %s"), instances)
+ LOG.info(_("Running instances: %s"), instances)
self.assertEqual(len(instances), 1)
self.compute.terminate_instance(self.context, instance_id)
instances = db.instance_get_all(context.get_admin_context())
- logging.info(_("After terminating instances: %s"), instances)
+ LOG.info(_("After terminating instances: %s"), instances)
self.assertEqual(len(instances), 0)
def test_run_terminate_timestamps(self):
@@ -169,6 +169,16 @@ class ComputeTestCase(test.TestCase):
self.assert_(console)
self.compute.terminate_instance(self.context, instance_id)
+ def test_ajax_console(self):
+ """Make sure we can get console output from instance"""
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+
+ console = self.compute.get_ajax_console(self.context,
+ instance_id)
+ self.assert_(console)
+ self.compute.terminate_instance(self.context, instance_id)
+
def test_run_instance_existing(self):
"""Ensure failure when running an instance that already exists"""
instance_id = self._create_instance()
diff --git a/nova/tests/test_console.py b/nova/tests/test_console.py
new file mode 100644
index 000000000..31b5ca79c
--- /dev/null
+++ b/nova/tests/test_console.py
@@ -0,0 +1,129 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Openstack, LLC.
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Tests For Console proxy.
+"""
+
+import datetime
+import logging
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import test
+from nova import utils
+from nova.auth import manager
+from nova.console import manager as console_manager
+
+FLAGS = flags.FLAGS
+
+
+class ConsoleTestCase(test.TestCase):
+ """Test case for console proxy"""
+ def setUp(self):
+ logging.getLogger().setLevel(logging.DEBUG)
+ super(ConsoleTestCase, self).setUp()
+ self.flags(console_driver='nova.console.fake.FakeConsoleProxy',
+ stub_compute=True)
+ self.console = utils.import_object(FLAGS.console_manager)
+ self.manager = manager.AuthManager()
+ self.user = self.manager.create_user('fake', 'fake', 'fake')
+ self.project = self.manager.create_project('fake', 'fake', 'fake')
+ self.context = context.get_admin_context()
+ self.host = 'test_compute_host'
+
+ def tearDown(self):
+ self.manager.delete_user(self.user)
+ self.manager.delete_project(self.project)
+ super(ConsoleTestCase, self).tearDown()
+
+ def _create_instance(self):
+ """Create a test instance"""
+ inst = {}
+ #inst['host'] = self.host
+ #inst['name'] = 'instance-1234'
+ inst['image_id'] = 'ami-test'
+ inst['reservation_id'] = 'r-fakeres'
+ inst['launch_time'] = '10'
+ inst['user_id'] = self.user.id
+ inst['project_id'] = self.project.id
+ inst['instance_type'] = 'm1.tiny'
+ inst['mac_address'] = utils.generate_mac()
+ inst['ami_launch_index'] = 0
+ return db.instance_create(self.context, inst)['id']
+
+ def test_get_pool_for_instance_host(self):
+ pool = self.console.get_pool_for_instance_host(self.context, self.host)
+ self.assertEqual(pool['compute_host'], self.host)
+
+ def test_get_pool_creates_new_pool_if_needed(self):
+ self.assertRaises(exception.NotFound,
+ db.console_pool_get_by_host_type,
+ self.context,
+ self.host,
+ self.console.host,
+ self.console.driver.console_type)
+ pool = self.console.get_pool_for_instance_host(self.context,
+ self.host)
+ pool2 = db.console_pool_get_by_host_type(self.context,
+ self.host,
+ self.console.host,
+ self.console.driver.console_type)
+ self.assertEqual(pool['id'], pool2['id'])
+
+ def test_get_pool_does_not_create_new_pool_if_exists(self):
+ pool_info = {'address': '127.0.0.1',
+ 'username': 'test',
+ 'password': '1234pass',
+ 'host': self.console.host,
+ 'console_type': self.console.driver.console_type,
+ 'compute_host': 'sometesthostname'}
+ new_pool = db.console_pool_create(self.context, pool_info)
+ pool = self.console.get_pool_for_instance_host(self.context,
+ 'sometesthostname')
+ self.assertEqual(pool['id'], new_pool['id'])
+
+ def test_add_console(self):
+ instance_id = self._create_instance()
+ self.console.add_console(self.context, instance_id)
+ instance = db.instance_get(self.context, instance_id)
+ pool = db.console_pool_get_by_host_type(self.context,
+ instance['host'],
+ self.console.host,
+ self.console.driver.console_type)
+
+ console_instances = [con['instance_id'] for con in pool.consoles]
+ self.assert_(instance_id in console_instances)
+
+ def test_add_console_does_not_duplicate(self):
+ instance_id = self._create_instance()
+ cons1 = self.console.add_console(self.context, instance_id)
+ cons2 = self.console.add_console(self.context, instance_id)
+ self.assertEqual(cons1, cons2)
+
+ def test_remove_console(self):
+ instance_id = self._create_instance()
+ console_id = self.console.add_console(self.context, instance_id)
+ self.console.remove_console(self.context, console_id)
+
+ self.assertRaises(exception.NotFound,
+ db.console_get,
+ self.context,
+ console_id)
diff --git a/nova/tests/test_log.py b/nova/tests/test_log.py
new file mode 100644
index 000000000..beb1d97cf
--- /dev/null
+++ b/nova/tests/test_log.py
@@ -0,0 +1,110 @@
+import cStringIO
+
+from nova import context
+from nova import log
+from nova import test
+
+
+def _fake_context():
+ return context.RequestContext(1, 1)
+
+
+class RootLoggerTestCase(test.TrialTestCase):
+ def setUp(self):
+ super(RootLoggerTestCase, self).setUp()
+ self.log = log.logging.root
+
+ def tearDown(self):
+ super(RootLoggerTestCase, self).tearDown()
+ log.NovaLogger.manager.loggerDict = {}
+
+ def test_is_nova_instance(self):
+ self.assert_(isinstance(self.log, log.NovaLogger))
+
+ def test_name_is_nova_root(self):
+ self.assertEqual("nova.root", self.log.name)
+
+ def test_handlers_have_nova_formatter(self):
+ formatters = []
+ for h in self.log.handlers:
+ f = h.formatter
+ if isinstance(f, log.NovaFormatter):
+ formatters.append(f)
+ self.assert_(formatters)
+ self.assertEqual(len(formatters), len(self.log.handlers))
+
+ def test_handles_context_kwarg(self):
+ self.log.info("foo", context=_fake_context())
+ self.assert_(True) # didn't raise exception
+
+ def test_module_level_methods_handle_context_arg(self):
+ log.info("foo", context=_fake_context())
+ self.assert_(True) # didn't raise exception
+
+ def test_module_level_audit_handles_context_arg(self):
+ log.audit("foo", context=_fake_context())
+ self.assert_(True) # didn't raise exception
+
+
+class NovaFormatterTestCase(test.TrialTestCase):
+ def setUp(self):
+ super(NovaFormatterTestCase, self).setUp()
+ self.flags(logging_context_format_string="HAS CONTEXT "\
+ "[%(request_id)s]: %(message)s",
+ logging_default_format_string="NOCTXT: %(message)s",
+ logging_debug_format_suffix="--DBG")
+ self.log = log.logging.root
+ self.stream = cStringIO.StringIO()
+ handler = log.StreamHandler(self.stream)
+ self.log.addHandler(handler)
+ self.log.setLevel(log.DEBUG)
+
+ def tearDown(self):
+ super(NovaFormatterTestCase, self).tearDown()
+ log.NovaLogger.manager.loggerDict = {}
+
+ def test_uncontextualized_log(self):
+ self.log.info("foo")
+ self.assertEqual("NOCTXT: foo\n", self.stream.getvalue())
+
+ def test_contextualized_log(self):
+ ctxt = _fake_context()
+ self.log.info("bar", context=ctxt)
+ expected = "HAS CONTEXT [%s]: bar\n" % ctxt.request_id
+ self.assertEqual(expected, self.stream.getvalue())
+
+ def test_debugging_log(self):
+ self.log.debug("baz")
+ self.assertEqual("NOCTXT: baz --DBG\n", self.stream.getvalue())
+
+
+class NovaLoggerTestCase(test.TrialTestCase):
+ def setUp(self):
+ super(NovaLoggerTestCase, self).setUp()
+ self.flags(default_log_levels=["nova-test=AUDIT"], verbose=False)
+ self.log = log.getLogger('nova-test')
+
+ def tearDown(self):
+ super(NovaLoggerTestCase, self).tearDown()
+ log.NovaLogger.manager.loggerDict = {}
+
+ def test_has_level_from_flags(self):
+ self.assertEqual(log.AUDIT, self.log.level)
+
+ def test_child_log_has_level_of_parent_flag(self):
+ l = log.getLogger('nova-test.foo')
+ self.assertEqual(log.AUDIT, l.level)
+
+
+class VerboseLoggerTestCase(test.TrialTestCase):
+ def setUp(self):
+ super(VerboseLoggerTestCase, self).setUp()
+ self.flags(default_log_levels=["nova.test=AUDIT"], verbose=True)
+ self.log = log.getLogger('nova.test')
+
+ def tearDown(self):
+ super(VerboseLoggerTestCase, self).tearDown()
+ log.NovaLogger.manager.loggerDict = {}
+
+ def test_will_be_verbose_if_named_nova_and_verbose_flag_set(self):
+ self.assertEqual(log.DEBUG, self.log.level)
diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py
index 96473ac7c..349e20f84 100644
--- a/nova/tests/test_network.py
+++ b/nova/tests/test_network.py
@@ -20,18 +20,18 @@ Unit Tests for network code
"""
import IPy
import os
-import logging
from nova import context
from nova import db
from nova import exception
from nova import flags
-from nova import service
+from nova import log as logging
from nova import test
from nova import utils
from nova.auth import manager
FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.network')
class NetworkTestCase(test.TestCase):
@@ -45,7 +45,6 @@ class NetworkTestCase(test.TestCase):
fake_network=True,
network_size=16,
num_networks=5)
- logging.getLogger().setLevel(logging.DEBUG)
self.manager = manager.AuthManager()
self.user = self.manager.create_user('netuser', 'netuser', 'netuser')
self.projects = []
@@ -328,7 +327,7 @@ def lease_ip(private_ip):
'TESTING': '1',
'FLAGFILE': FLAGS.dhcpbridge_flagfile}
(out, err) = utils.execute(cmd, addl_env=env)
- logging.debug("ISSUE_IP: %s, %s ", out, err)
+ LOG.debug("ISSUE_IP: %s, %s ", out, err)
def release_ip(private_ip):
@@ -344,4 +343,4 @@ def release_ip(private_ip):
'TESTING': '1',
'FLAGFILE': FLAGS.dhcpbridge_flagfile}
(out, err) = utils.execute(cmd, addl_env=env)
- logging.debug("RELEASE_IP: %s, %s ", out, err)
+ LOG.debug("RELEASE_IP: %s, %s ", out, err)
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
index b5f9f30ef..9548a8c13 100644
--- a/nova/tests/test_quota.py
+++ b/nova/tests/test_quota.py
@@ -16,11 +16,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import logging
-
from nova import context
from nova import db
-from nova import exception
from nova import flags
from nova import quota
from nova import test
@@ -35,7 +32,6 @@ FLAGS = flags.FLAGS
class QuotaTestCase(test.TestCase):
def setUp(self):
- logging.getLogger().setLevel(logging.DEBUG)
super(QuotaTestCase, self).setUp()
self.flags(connection_type='fake',
quota_instances=2,
diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py
index 6ea2edcab..85593ab46 100644
--- a/nova/tests/test_rpc.py
+++ b/nova/tests/test_rpc.py
@@ -18,15 +18,16 @@
"""
Unit Tests for remote procedure calls using queue
"""
-import logging
from nova import context
from nova import flags
+from nova import log as logging
from nova import rpc
from nova import test
FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.rpc')
class RpcTestCase(test.TestCase):
@@ -85,12 +86,12 @@ class RpcTestCase(test.TestCase):
@staticmethod
def echo(context, queue, value):
"""Calls echo in the passed queue"""
- logging.debug("Nested received %s, %s", queue, value)
+ LOG.debug(_("Nested received %s, %s"), queue, value)
ret = rpc.call(context,
queue,
{"method": "echo",
"args": {"value": value}})
- logging.debug("Nested return %s", ret)
+ LOG.debug(_("Nested return %s"), ret)
return value
nested = Nested()
@@ -115,13 +116,13 @@ class TestReceiver(object):
@staticmethod
def echo(context, value):
"""Simply returns whatever value is sent in"""
- logging.debug("Received %s", value)
+ LOG.debug(_("Received %s"), value)
return value
@staticmethod
def context(context, value):
"""Returns dictionary version of context"""
- logging.debug("Received %s", context)
+ LOG.debug(_("Received %s"), context)
return context.to_dict()
@staticmethod
diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py
index 4aa489d08..afdc89ba2 100644
--- a/nova/tests/test_virt.py
+++ b/nova/tests/test_virt.py
@@ -208,8 +208,141 @@ class LibvirtConnTestCase(test.TestCase):
self.manager.delete_user(self.user)
-class NWFilterTestCase(test.TestCase):
+class IptablesFirewallTestCase(test.TestCase):
+ def setUp(self):
+ super(IptablesFirewallTestCase, self).setUp()
+ self.manager = manager.AuthManager()
+ self.user = self.manager.create_user('fake', 'fake', 'fake',
+ admin=True)
+ self.project = self.manager.create_project('fake', 'fake', 'fake')
+ self.context = context.RequestContext('fake', 'fake')
+ self.network = utils.import_object(FLAGS.network_manager)
+ self.fw = libvirt_conn.IptablesFirewallDriver()
+
+ def tearDown(self):
+ self.manager.delete_project(self.project)
+ self.manager.delete_user(self.user)
+ super(IptablesFirewallTestCase, self).tearDown()
+
+ def _p(self, *args, **kwargs):
+ if 'iptables-restore' in args:
+ print ' '.join(args), kwargs['stdin']
+ if 'iptables-save' in args:
+ return
+
+ in_rules = [
+ '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
+ '*filter',
+ ':INPUT ACCEPT [969615:281627771]',
+ ':FORWARD ACCEPT [0:0]',
+ ':OUTPUT ACCEPT [915599:63811649]',
+ ':nova-block-ipv4 - [0:0]',
+ '-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT ',
+ '-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT ',
+ '-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT ',
+ '-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
+ '-A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
+ ',ESTABLISHED -j ACCEPT ',
+ '-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
+ '-A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
+ '-A FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable ',
+ '-A FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable ',
+ 'COMMIT',
+ '# Completed on Mon Dec 6 11:54:13 2010',
+ ]
+
+ def test_static_filters(self):
+ self.fw.execute = self._p
+ instance_ref = db.instance_create(self.context,
+ {'user_id': 'fake',
+ 'project_id': 'fake'})
+ ip = '10.11.12.13'
+
+ network_ref = db.project_get_network(self.context,
+ 'fake')
+
+ fixed_ip = {'address': ip,
+ 'network_id': network_ref['id']}
+
+ admin_ctxt = context.get_admin_context()
+ db.fixed_ip_create(admin_ctxt, fixed_ip)
+ db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
+ 'instance_id': instance_ref['id']})
+
+ secgroup = db.security_group_create(admin_ctxt,
+ {'user_id': 'fake',
+ 'project_id': 'fake',
+ 'name': 'testgroup',
+ 'description': 'test group'})
+
+ db.security_group_rule_create(admin_ctxt,
+ {'parent_group_id': secgroup['id'],
+ 'protocol': 'icmp',
+ 'from_port': -1,
+ 'to_port': -1,
+ 'cidr': '192.168.11.0/24'})
+
+ db.security_group_rule_create(admin_ctxt,
+ {'parent_group_id': secgroup['id'],
+ 'protocol': 'icmp',
+ 'from_port': 8,
+ 'to_port': -1,
+ 'cidr': '192.168.11.0/24'})
+
+ db.security_group_rule_create(admin_ctxt,
+ {'parent_group_id': secgroup['id'],
+ 'protocol': 'tcp',
+ 'from_port': 80,
+ 'to_port': 81,
+ 'cidr': '192.168.10.0/24'})
+
+ db.instance_add_security_group(admin_ctxt, instance_ref['id'],
+ secgroup['id'])
+ instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
+
+ self.fw.add_instance(instance_ref)
+
+ out_rules = self.fw.modify_rules(self.in_rules)
+
+ in_rules = filter(lambda l: not l.startswith('#'), self.in_rules)
+ for rule in in_rules:
+ if not 'nova' in rule:
+ self.assertTrue(rule in out_rules,
+ 'Rule went missing: %s' % rule)
+
+ instance_chain = None
+ for rule in out_rules:
+ # This is pretty crude, but it'll do for now
+ if '-d 10.11.12.13 -j' in rule:
+ instance_chain = rule.split(' ')[-1]
+ break
+ self.assertTrue(instance_chain, "The instance chain wasn't added")
+
+ security_group_chain = None
+ for rule in out_rules:
+ # This is pretty crude, but it'll do for now
+ if '-A %s -j' % instance_chain in rule:
+ security_group_chain = rule.split(' ')[-1]
+ break
+ self.assertTrue(security_group_chain,
+ "The security group chain wasn't added")
+
+ self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -j ACCEPT' % \
+ security_group_chain in out_rules,
+ "ICMP acceptance rule wasn't added")
+
+ self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -m icmp --icmp-type'
+ ' 8 -j ACCEPT' % security_group_chain in out_rules,
+ "ICMP Echo Request acceptance rule wasn't added")
+
+ self.assertTrue('-A %s -p tcp -s 192.168.10.0/24 -m multiport '
+ '--dports 80:81 -j ACCEPT' % security_group_chain \
+ in out_rules,
+ "TCP port 80/81 acceptance rule wasn't added")
+
+
+class NWFilterTestCase(test.TestCase):
def setUp(self):
super(NWFilterTestCase, self).setUp()
@@ -224,7 +357,8 @@ class NWFilterTestCase(test.TestCase):
self.fake_libvirt_connection = Mock()
- self.fw = libvirt_conn.NWFilterFirewall(self.fake_libvirt_connection)
+ self.fw = libvirt_conn.NWFilterFirewall(
+ lambda: self.fake_libvirt_connection)
def tearDown(self):
self.manager.delete_project(self.project)
@@ -337,7 +471,7 @@ class NWFilterTestCase(test.TestCase):
self.security_group.id)
instance = db.instance_get(self.context, inst_id)
- self.fw.setup_base_nwfilters()
- self.fw.setup_nwfilters_for_instance(instance)
+ self.fw.setup_basic_filtering(instance)
+ self.fw.prepare_instance_filter(instance)
_ensure_all_called()
self.teardown_security_group()
diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py
index b13455fb0..b40ca004b 100644
--- a/nova/tests/test_volume.py
+++ b/nova/tests/test_volume.py
@@ -19,23 +19,23 @@
Tests for Volume Code.
"""
-import logging
from nova import context
from nova import exception
from nova import db
from nova import flags
+from nova import log as logging
from nova import test
from nova import utils
FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.volume')
class VolumeTestCase(test.TestCase):
"""Test Case for volumes."""
def setUp(self):
- logging.getLogger().setLevel(logging.DEBUG)
super(VolumeTestCase, self).setUp()
self.compute = utils.import_object(FLAGS.compute_manager)
self.flags(connection_type='fake')
@@ -159,7 +159,7 @@ class VolumeTestCase(test.TestCase):
volume_id)
self.assert_(iscsi_target not in targets)
targets.append(iscsi_target)
- logging.debug("Target %s allocated", iscsi_target)
+ LOG.debug(_("Target %s allocated"), iscsi_target)
total_slots = FLAGS.iscsi_num_targets
for _index in xrange(total_slots):
volume_id = self._create_volume()
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index 55f751f11..292bd9ba9 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -41,9 +41,33 @@ def stubout_instance_snapshot(stubs):
rv = done.wait()
return rv
+ def fake_loop(self):
+ pass
+
stubs.Set(xenapi_conn.XenAPISession, 'wait_for_task',
fake_wait_for_task)
+ stubs.Set(xenapi_conn.XenAPISession, '_stop_loop', fake_loop)
+
+ from nova.virt.xenapi.fake import create_vdi
+ name_label = "instance-%s" % instance_id
+ #TODO: create fake SR record
+ sr_ref = "fakesr"
+ vdi_ref = create_vdi(name_label=name_label, read_only=False,
+ sr_ref=sr_ref, sharable=False)
+ vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
+ vdi_uuid = vdi_rec['uuid']
+ return vdi_uuid
+
+ stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image)
+
+ def fake_parse_xmlrpc_value(val):
+ return val
+
+ stubs.Set(xenapi_conn, '_parse_xmlrpc_value', fake_parse_xmlrpc_value)
+
+ def fake_wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
+ original_parent_uuid):
from nova.virt.xenapi.fake import create_vdi
name_label = "instance-%s" % instance_id
#TODO: create fake SR record
diff --git a/nova/twistd.py b/nova/twistd.py
index 29be9c4e1..556271999 100644
--- a/nova/twistd.py
+++ b/nova/twistd.py
@@ -22,7 +22,6 @@ manage pid files and support syslogging.
"""
import gflags
-import logging
import os
import signal
import sys
@@ -34,6 +33,7 @@ from twisted.python import runtime
from twisted.python import usage
from nova import flags
+from nova import log as logging
if runtime.platformType == "win32":
@@ -234,22 +234,12 @@ def serve(filename):
OptionsClass = WrapTwistedOptions(TwistdServerOptions)
options = OptionsClass()
argv = options.parseOptions()
- logging.getLogger('amqplib').setLevel(logging.WARN)
FLAGS.python = filename
FLAGS.no_save = True
if not FLAGS.pidfile:
FLAGS.pidfile = '%s.pid' % name
elif FLAGS.pidfile.endswith('twistd.pid'):
FLAGS.pidfile = FLAGS.pidfile.replace('twistd.pid', '%s.pid' % name)
- # NOTE(vish): if we're running nodaemon, redirect the log to stdout
- if FLAGS.nodaemon and not FLAGS.logfile:
- FLAGS.logfile = "-"
- if not FLAGS.logfile:
- FLAGS.logfile = '%s.log' % name
- elif FLAGS.logfile.endswith('twistd.log'):
- FLAGS.logfile = FLAGS.logfile.replace('twistd.log', '%s.log' % name)
- if FLAGS.logdir:
- FLAGS.logfile = os.path.join(FLAGS.logdir, FLAGS.logfile)
if not FLAGS.prefix:
FLAGS.prefix = name
elif FLAGS.prefix.endswith('twisted'):
@@ -270,19 +260,10 @@ def serve(filename):
print 'usage: %s [options] [start|stop|restart]' % argv[0]
sys.exit(1)
- formatter = logging.Formatter(
- '(%(name)s): %(levelname)s %(message)s')
- handler = logging.StreamHandler(log.StdioOnnaStick())
- handler.setFormatter(formatter)
- logging.getLogger().addHandler(handler)
-
- if FLAGS.verbose:
- logging.getLogger().setLevel(logging.DEBUG)
- else:
- logging.getLogger().setLevel(logging.WARNING)
-
+ logging.basicConfig()
logging.debug(_("Full set of FLAGS:"))
for flag in FLAGS:
logging.debug("%s : %s" % (flag, FLAGS.get(flag, None)))
+ logging.audit(_("Starting %s"), name)
twistd.runApp(options)
diff --git a/nova/utils.py b/nova/utils.py
index 15112faa2..45adb7b38 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -22,7 +22,6 @@ System-level utilities and helper functions.
import datetime
import inspect
-import logging
import os
import random
import subprocess
@@ -37,8 +36,10 @@ from eventlet import greenthread
from nova import exception
from nova.exception import ProcessExecutionError
+from nova import log as logging
+LOG = logging.getLogger("nova.utils")
TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
@@ -109,7 +110,7 @@ def vpn_ping(address, port, timeout=0.05, session_id=None):
def fetchfile(url, target):
- logging.debug(_("Fetching %s") % url)
+ LOG.debug(_("Fetching %s") % url)
# c = pycurl.Curl()
# fp = open(target, "wb")
# c.setopt(c.URL, url)
@@ -121,7 +122,7 @@ def fetchfile(url, target):
def execute(cmd, process_input=None, addl_env=None, check_exit_code=True):
- logging.debug(_("Running cmd (subprocess): %s"), cmd)
+ LOG.debug(_("Running cmd (subprocess): %s"), cmd)
env = os.environ.copy()
if addl_env:
env.update(addl_env)
@@ -134,7 +135,7 @@ def execute(cmd, process_input=None, addl_env=None, check_exit_code=True):
result = obj.communicate()
obj.stdin.close()
if obj.returncode:
- logging.debug(_("Result was %s") % (obj.returncode))
+ LOG.debug(_("Result was %s") % (obj.returncode))
if check_exit_code and obj.returncode != 0:
(stdout, stderr) = result
raise ProcessExecutionError(exit_code=obj.returncode,
@@ -152,6 +153,11 @@ def abspath(s):
return os.path.join(os.path.dirname(__file__), s)
+def novadir():
+ import nova
+    return os.path.abspath(nova.__file__).split('nova/__init__.py')[0]
+
+
def default_flagfile(filename='nova.conf'):
for arg in sys.argv:
if arg.find('flagfile') != -1:
@@ -167,12 +173,12 @@ def default_flagfile(filename='nova.conf'):
def debug(arg):
- logging.debug('debug in callback: %s', arg)
+ LOG.debug(_('debug in callback: %s'), arg)
return arg
def runthis(prompt, cmd, check_exit_code=True):
- logging.debug(_("Running %s") % (cmd))
+ LOG.debug(_("Running %s"), (cmd))
rv, err = execute(cmd, check_exit_code=check_exit_code)
@@ -194,19 +200,6 @@ def last_octet(address):
return int(address.split(".")[-1])
-def get_my_ip():
- """Returns the actual ip of the local machine."""
- try:
- csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- csock.connect(('8.8.8.8', 80))
- (addr, port) = csock.getsockname()
- csock.close()
- return addr
- except socket.gaierror as ex:
- logging.warn(_("Couldn't get IP, using 127.0.0.1 %s"), ex)
- return "127.0.0.1"
-
-
def utcnow():
"""Overridable version of datetime.datetime.utcnow."""
if utcnow.override_time:
@@ -296,7 +289,7 @@ class LazyPluggable(object):
fromlist = backend
self.__backend = __import__(name, None, None, fromlist)
- logging.info('backend %s', self.__backend)
+ LOG.debug(_('backend %s'), self.__backend)
return self.__backend
def __getattr__(self, key):
diff --git a/nova/version.py b/nova/version.py
new file mode 100644
index 000000000..7b27acb6a
--- /dev/null
+++ b/nova/version.py
@@ -0,0 +1,46 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+try:
+ from nova.vcsversion import version_info
+except ImportError:
+ version_info = {'branch_nick': u'LOCALBRANCH',
+ 'revision_id': 'LOCALREVISION',
+ 'revno': 0}
+
+NOVA_VERSION = ['2011', '1']
+YEAR, COUNT = NOVA_VERSION
+
+FINAL = False # This becomes true at Release Candidate time
+
+
+def canonical_version_string():
+ return '.'.join([YEAR, COUNT])
+
+
+def version_string():
+ if FINAL:
+ return canonical_version_string()
+ else:
+ return '%s-dev' % (canonical_version_string(),)
+
+
+def vcs_version_string():
+ return "%s:%s" % (version_info['branch_nick'], version_info['revision_id'])
+
+
+def version_string_with_vcs():
+ return "%s-%s" % (canonical_version_string(), vcs_version_string())
diff --git a/nova/virt/connection.py b/nova/virt/connection.py
index 846423afe..13181b730 100644
--- a/nova/virt/connection.py
+++ b/nova/virt/connection.py
@@ -19,16 +19,17 @@
"""Abstraction of the underlying virtualization API."""
-import logging
import sys
from nova import flags
+from nova import log as logging
from nova.virt import fake
from nova.virt import libvirt_conn
from nova.virt import xenapi_conn
from nova.virt import hyperv
+LOG = logging.getLogger("nova.virt.connection")
FLAGS = flags.FLAGS
@@ -69,6 +70,6 @@ def get_connection(read_only=False):
raise Exception('Unknown connection type "%s"' % t)
if conn is None:
- logging.error(_('Failed to open connection to the hypervisor'))
+ LOG.error(_('Failed to open connection to the hypervisor'))
sys.exit(1)
return conn
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 32541f5b4..9186d885e 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -289,6 +289,14 @@ class FakeConnection(object):
def get_console_output(self, instance):
return 'FAKE CONSOLE OUTPUT'
+ def get_ajax_console(self, instance):
+ return 'http://fakeajaxconsole.com/?token=FAKETOKEN'
+
+ def get_console_pool_info(self, console_type):
+ return {'address': '127.0.0.1',
+ 'username': 'fakeuser',
+ 'password': 'fakepassword'}
+
class FakeInstance(object):
diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py
index 4b9f6f946..30dc1c79b 100644
--- a/nova/virt/hyperv.py
+++ b/nova/virt/hyperv.py
@@ -61,11 +61,11 @@ Using the Python WMI library:
"""
import os
-import logging
import time
from nova import exception
from nova import flags
+from nova import log as logging
from nova.auth import manager
from nova.compute import power_state
from nova.virt import images
@@ -76,6 +76,9 @@ wmi = None
FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.virt.hyperv')
+
+
HYPERV_POWER_STATE = {
3: power_state.SHUTDOWN,
2: power_state.RUNNING,
@@ -89,7 +92,7 @@ REQ_POWER_STATE = {
'Reboot': 10,
'Reset': 11,
'Paused': 32768,
- 'Suspended': 32769
+ 'Suspended': 32769,
}
@@ -112,7 +115,7 @@ class HyperVConnection(object):
def init_host(self):
#FIXME(chiradeep): implement this
- logging.debug(_('In init host'))
+ LOG.debug(_('In init host'))
pass
def list_instances(self):
@@ -142,11 +145,11 @@ class HyperVConnection(object):
self._create_disk(instance['name'], vhdfile)
self._create_nic(instance['name'], instance['mac_address'])
- logging.debug(_('Starting VM %s '), instance.name)
+ LOG.debug(_('Starting VM %s '), instance.name)
self._set_vm_state(instance['name'], 'Enabled')
- logging.info(_('Started VM %s '), instance.name)
+ LOG.info(_('Started VM %s '), instance.name)
except Exception as exn:
- logging.error(_('spawn vm failed: %s'), exn)
+ LOG.exception(_('spawn vm failed: %s'), exn)
self.destroy(instance)
def _create_vm(self, instance):
@@ -165,7 +168,7 @@ class HyperVConnection(object):
if not success:
raise Exception(_('Failed to create VM %s'), instance.name)
- logging.debug(_('Created VM %s...'), instance.name)
+ LOG.debug(_('Created VM %s...'), instance.name)
vm = self._conn.Msvm_ComputerSystem(ElementName=instance.name)[0]
vmsettings = vm.associators(
@@ -182,7 +185,7 @@ class HyperVConnection(object):
(job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
vm.path_(), [memsetting.GetText_(1)])
- logging.debug(_('Set memory for vm %s...'), instance.name)
+ LOG.debug(_('Set memory for vm %s...'), instance.name)
procsetting = vmsetting.associators(
wmi_result_class='Msvm_ProcessorSettingData')[0]
vcpus = long(instance['vcpus'])
@@ -192,12 +195,12 @@ class HyperVConnection(object):
(job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
vm.path_(), [procsetting.GetText_(1)])
- logging.debug(_('Set vcpus for vm %s...'), instance.name)
+ LOG.debug(_('Set vcpus for vm %s...'), instance.name)
def _create_disk(self, vm_name, vhdfile):
"""Create a disk and attach it to the vm"""
- logging.debug(_('Creating disk for %s by attaching disk file %s'),
- vm_name, vhdfile)
+ LOG.debug(_('Creating disk for %s by attaching disk file %s'),
+ vm_name, vhdfile)
#Find the IDE controller for the vm.
vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
vm = vms[0]
@@ -224,7 +227,7 @@ class HyperVConnection(object):
raise Exception(_('Failed to add diskdrive to VM %s'),
vm_name)
diskdrive_path = new_resources[0]
- logging.debug(_('New disk drive path is %s'), diskdrive_path)
+ LOG.debug(_('New disk drive path is %s'), diskdrive_path)
#Find the default VHD disk object.
vhddefault = self._conn.query(
"SELECT * FROM Msvm_ResourceAllocationSettingData \
@@ -243,11 +246,11 @@ class HyperVConnection(object):
if new_resources is None:
raise Exception(_('Failed to add vhd file to VM %s'),
vm_name)
- logging.info(_('Created disk for %s'), vm_name)
+ LOG.info(_('Created disk for %s'), vm_name)
def _create_nic(self, vm_name, mac):
"""Create a (emulated) nic and attach it to the vm"""
- logging.debug(_('Creating nic for %s '), vm_name)
+ LOG.debug(_('Creating nic for %s '), vm_name)
#Find the vswitch that is connected to the physical nic.
vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
extswitch = self._find_external_network()
@@ -266,11 +269,11 @@ class HyperVConnection(object):
(new_port, ret_val) = switch_svc.CreateSwitchPort(vm_name, vm_name,
"", extswitch.path_())
if ret_val != 0:
- logging.error(_('Failed creating a port on the external vswitch'))
+ LOG.error(_('Failed creating a port on the external vswitch'))
raise Exception(_('Failed creating port for %s'),
vm_name)
- logging.debug(_("Created switch port %s on switch %s"),
- vm_name, extswitch.path_())
+ LOG.debug(_("Created switch port %s on switch %s"),
+ vm_name, extswitch.path_())
#Connect the new nic to the new port.
new_nic_data.Connection = [new_port]
new_nic_data.ElementName = vm_name + ' nic'
@@ -281,7 +284,7 @@ class HyperVConnection(object):
if new_resources is None:
raise Exception(_('Failed to add nic to VM %s'),
vm_name)
- logging.info(_("Created nic for %s "), vm_name)
+ LOG.info(_("Created nic for %s "), vm_name)
def _add_virt_resource(self, res_setting_data, target_vm):
"""Add a new resource (disk/nic) to the VM"""
@@ -314,10 +317,10 @@ class HyperVConnection(object):
time.sleep(0.1)
job = self._conn.Msvm_ConcreteJob(InstanceID=inst_id)[0]
if job.JobState != WMI_JOB_STATE_COMPLETED:
- logging.debug(_("WMI job failed: %s"), job.ErrorSummaryDescription)
+ LOG.debug(_("WMI job failed: %s"), job.ErrorSummaryDescription)
return False
- logging.debug(_("WMI job succeeded: %s, Elapsed=%s "), job.Description,
- job.ElapsedTime)
+ LOG.debug(_("WMI job succeeded: %s, Elapsed=%s "), job.Description,
+ job.ElapsedTime)
return True
def _find_external_network(self):
@@ -352,7 +355,7 @@ class HyperVConnection(object):
def destroy(self, instance):
"""Destroy the VM. Also destroy the associated VHD disk files"""
- logging.debug(_("Got request to destroy vm %s"), instance.name)
+ LOG.debug(_("Got request to destroy vm %s"), instance.name)
vm = self._lookup(instance.name)
if vm is None:
return
@@ -383,7 +386,7 @@ class HyperVConnection(object):
vhdfile = self._cim_conn.CIM_DataFile(Name=disk)
for vf in vhdfile:
vf.Delete()
- logging.debug(_("Del: disk %s vm %s"), vhdfile, instance.name)
+ LOG.debug(_("Del: disk %s vm %s"), vhdfile, instance.name)
def get_info(self, instance_id):
"""Get information about the VM"""
@@ -399,12 +402,12 @@ class HyperVConnection(object):
summary_info = vs_man_svc.GetSummaryInformation(
[4, 100, 103, 105], settings_paths)[1]
info = summary_info[0]
- logging.debug(_("Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, \
+ LOG.debug(_("Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, \
cpu_time=%s"), instance_id,
- str(HYPERV_POWER_STATE[info.EnabledState]),
- str(info.MemoryUsage),
- str(info.NumberOfProcessors),
- str(info.UpTime))
+ str(HYPERV_POWER_STATE[info.EnabledState]),
+ str(info.MemoryUsage),
+ str(info.NumberOfProcessors),
+ str(info.UpTime))
return {'state': HYPERV_POWER_STATE[info.EnabledState],
'max_mem': info.MemoryUsage,
@@ -438,11 +441,11 @@ class HyperVConnection(object):
#already in the state requested
success = True
if success:
- logging.info(_("Successfully changed vm state of %s to %s"),
- vm_name, req_state)
+ LOG.info(_("Successfully changed vm state of %s to %s"), vm_name,
+ req_state)
else:
- logging.error(_("Failed to change vm state of %s to %s"),
- vm_name, req_state)
+ LOG.error(_("Failed to change vm state of %s to %s"), vm_name,
+ req_state)
raise Exception(_("Failed to change vm state of %s to %s"),
vm_name, req_state)
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 2d03da4b4..ecf0e5efb 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -21,7 +21,6 @@
Handling of VM disk images.
"""
-import logging
import os.path
import shutil
import sys
@@ -30,6 +29,7 @@ import urllib2
import urlparse
from nova import flags
+from nova import log as logging
from nova import utils
from nova.auth import manager
from nova.auth import signer
@@ -40,6 +40,8 @@ FLAGS = flags.FLAGS
flags.DEFINE_bool('use_s3', True,
'whether to get images from s3 or use local copy')
+LOG = logging.getLogger('nova.virt.images')
+
def fetch(image, path, user, project):
if FLAGS.use_s3:
@@ -65,7 +67,7 @@ def _fetch_image_no_curl(url, path, headers):
urlopened = urllib2.urlopen(request)
urlretrieve(urlopened, path)
- logging.debug(_("Finished retreving %s -- placed in %s"), url, path)
+ LOG.debug(_("Finished retrieving %s -- placed in %s"), url, path)
def _fetch_s3_image(image, path, user, project):
@@ -89,7 +91,7 @@ def _fetch_s3_image(image, path, user, project):
else:
cmd = ['/usr/bin/curl', '--fail', '--silent', url]
for (k, v) in headers.iteritems():
- cmd += ['-H', '%s: %s' % (k, v)]
+ cmd += ['-H', '\'%s: %s\'' % (k, v)]
cmd += ['-o', path]
cmd_out = ' '.join(cmd)
diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template
index 3fb2243da..2eb7d9488 100644
--- a/nova/virt/libvirt.xml.template
+++ b/nova/virt/libvirt.xml.template
@@ -71,9 +71,22 @@
#end if
</filterref>
</interface>
+
+ <!-- The order is significant here. File must be defined first -->
<serial type="file">
<source path='${basepath}/console.log'/>
<target port='1'/>
</serial>
+
+ <console type='pty' tty='/dev/pts/2'>
+ <source path='/dev/pts/2'/>
+ <target port='0'/>
+ </console>
+
+ <serial type='pty'>
+ <source path='/dev/pts/2'/>
+ <target port='0'/>
+ </serial>
+
</devices>
</domain>
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 00edfbdc8..655c55fa1 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -36,9 +36,13 @@ Supports KVM, QEMU, UML, and XEN.
"""
-import logging
import os
import shutil
+import random
+import subprocess
+import uuid
+from xml.dom import minidom
+
from eventlet import greenthread
from eventlet import event
@@ -50,6 +54,7 @@ from nova import context
from nova import db
from nova import exception
from nova import flags
+from nova import log as logging
from nova import utils
#from nova.api import context
from nova.auth import manager
@@ -62,6 +67,7 @@ libvirt = None
libxml2 = None
Template = None
+LOG = logging.getLogger('nova.virt.libvirt_conn')
FLAGS = flags.FLAGS
# TODO(vish): These flags should probably go into a shared location
@@ -85,6 +91,12 @@ flags.DEFINE_string('libvirt_uri',
flags.DEFINE_bool('allow_project_net_traffic',
True,
'Whether to allow in project network traffic')
+flags.DEFINE_string('ajaxterm_portrange',
+ '10000-12000',
+ 'Range of ports that ajaxterm should randomly try to bind')
+flags.DEFINE_string('firewall_driver',
+ 'nova.virt.libvirt_conn.IptablesFirewallDriver',
+ 'Firewall driver (defaults to iptables)')
def get_connection(read_only):
@@ -124,16 +136,24 @@ class LibvirtConnection(object):
self._wrapped_conn = None
self.read_only = read_only
+ self.nwfilter = NWFilterFirewall(self._get_connection)
+
+ if not FLAGS.firewall_driver:
+ self.firewall_driver = self.nwfilter
+ self.nwfilter.handle_security_groups = True
+ else:
+ self.firewall_driver = utils.import_object(FLAGS.firewall_driver)
+
def init_host(self):
- NWFilterFirewall(self._conn).setup_base_nwfilters()
+ pass
- @property
- def _conn(self):
+ def _get_connection(self):
if not self._wrapped_conn or not self._test_connection():
- logging.debug(_('Connecting to libvirt: %s') % self.libvirt_uri)
+ LOG.debug(_('Connecting to libvirt: %s'), self.libvirt_uri)
self._wrapped_conn = self._connect(self.libvirt_uri,
self.read_only)
return self._wrapped_conn
+ _conn = property(_get_connection)
def _test_connection(self):
try:
@@ -142,7 +162,7 @@ class LibvirtConnection(object):
except libvirt.libvirtError as e:
if e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and \
e.get_error_domain() == libvirt.VIR_FROM_REMOTE:
- logging.debug(_('Connection to libvirt broke'))
+ LOG.debug(_('Connection to libvirt broke'))
return False
raise
@@ -214,8 +234,8 @@ class LibvirtConnection(object):
def _cleanup(self, instance):
target = os.path.join(FLAGS.instances_path, instance['name'])
- logging.info(_('instance %s: deleting instance files %s'),
- instance['name'], target)
+ LOG.info(_('instance %s: deleting instance files %s'),
+ instance['name'], target)
if os.path.exists(target):
shutil.rmtree(target)
@@ -279,10 +299,10 @@ class LibvirtConnection(object):
db.instance_set_state(context.get_admin_context(),
instance['id'], state)
if state == power_state.RUNNING:
- logging.debug(_('instance %s: rebooted'), instance['name'])
+ LOG.debug(_('instance %s: rebooted'), instance['name'])
timer.stop()
except Exception, exn:
- logging.error(_('_wait_for_reboot failed: %s'), exn)
+ LOG.exception(_('_wait_for_reboot failed: %s'), exn)
db.instance_set_state(context.get_admin_context(),
instance['id'],
power_state.SHUTDOWN)
@@ -325,10 +345,10 @@ class LibvirtConnection(object):
state = self.get_info(instance['name'])['state']
db.instance_set_state(None, instance['id'], state)
if state == power_state.RUNNING:
- logging.debug(_('instance %s: rescued'), instance['name'])
+ LOG.debug(_('instance %s: rescued'), instance['name'])
timer.stop()
except Exception, exn:
- logging.error(_('_wait_for_rescue failed: %s'), exn)
+ LOG.exception(_('_wait_for_rescue failed: %s'), exn)
db.instance_set_state(None,
instance['id'],
power_state.SHUTDOWN)
@@ -350,10 +370,13 @@ class LibvirtConnection(object):
instance['id'],
power_state.NOSTATE,
'launching')
- NWFilterFirewall(self._conn).setup_nwfilters_for_instance(instance)
+
+ self.nwfilter.setup_basic_filtering(instance)
+ self.firewall_driver.prepare_instance_filter(instance)
self._create_image(instance, xml)
self._conn.createXML(xml, 0)
- logging.debug(_("instance %s: is running"), instance['name'])
+ LOG.debug(_("instance %s: is running"), instance['name'])
+ self.firewall_driver.apply_instance_filter(instance)
timer = utils.LoopingCall(f=None)
@@ -363,11 +386,11 @@ class LibvirtConnection(object):
db.instance_set_state(context.get_admin_context(),
instance['id'], state)
if state == power_state.RUNNING:
- logging.debug(_('instance %s: booted'), instance['name'])
+ LOG.debug(_('instance %s: booted'), instance['name'])
timer.stop()
except:
- logging.exception(_('instance %s: failed to boot'),
- instance['name'])
+ LOG.exception(_('instance %s: failed to boot'),
+ instance['name'])
db.instance_set_state(context.get_admin_context(),
instance['id'],
power_state.SHUTDOWN)
@@ -377,11 +400,11 @@ class LibvirtConnection(object):
return timer.start(interval=0.5, now=True)
def _flush_xen_console(self, virsh_output):
- logging.info('virsh said: %r' % (virsh_output,))
+ LOG.info(_('virsh said: %r'), virsh_output)
virsh_output = virsh_output[0].strip()
if virsh_output.startswith('/dev/'):
- logging.info(_('cool, it\'s a device'))
+ LOG.info(_('cool, it\'s a device'))
out, err = utils.execute("sudo dd if=%s iflag=nonblock" %
virsh_output, check_exit_code=False)
return out
@@ -389,7 +412,7 @@ class LibvirtConnection(object):
return ''
def _append_to_file(self, data, fpath):
- logging.info(_('data: %r, fpath: %r') % (data, fpath))
+ LOG.info(_('data: %r, fpath: %r'), data, fpath)
fp = open(fpath, 'a+')
fp.write(data)
return fpath
@@ -397,7 +420,7 @@ class LibvirtConnection(object):
def _dump_file(self, fpath):
fp = open(fpath, 'r+')
contents = fp.read()
- logging.info('Contents: %r' % (contents,))
+ LOG.info(_('Contents of file %s: %r'), fpath, contents)
return contents
@exception.wrap_exception
@@ -418,6 +441,43 @@ class LibvirtConnection(object):
return self._dump_file(fpath)
+ @exception.wrap_exception
+ def get_ajax_console(self, instance):
+ def get_open_port():
+ start_port, end_port = FLAGS.ajaxterm_portrange.split("-")
+ for i in xrange(0, 100): # don't loop forever
+ port = random.randint(int(start_port), int(end_port))
+ # netcat will exit with 0 only if the port is in use,
+ # so a nonzero return value implies it is unused
+ cmd = 'netcat 0.0.0.0 %s -w 1 </dev/null || echo free' % (port)
+ stdout, stderr = utils.execute(cmd)
+ if stdout.strip() == 'free':
+ return port
+ raise Exception(_('Unable to find an open port'))
+
+ def get_pty_for_instance(instance_name):
+ virt_dom = self._conn.lookupByName(instance_name)
+ xml = virt_dom.XMLDesc(0)
+ dom = minidom.parseString(xml)
+
+ for serial in dom.getElementsByTagName('serial'):
+ if serial.getAttribute('type') == 'pty':
+ source = serial.getElementsByTagName('source')[0]
+ return source.getAttribute('path')
+
+ port = get_open_port()
+ token = str(uuid.uuid4())
+ host = instance['host']
+
+ ajaxterm_cmd = 'sudo socat - %s' \
+ % get_pty_for_instance(instance['name'])
+
+ cmd = '%s/tools/ajaxterm/ajaxterm.py --command "%s" -t %s -p %s' \
+ % (utils.novadir(), ajaxterm_cmd, token, port)
+
+ subprocess.Popen(cmd, shell=True)
+ return {'token': token, 'host': host, 'port': port}
+
def _create_image(self, inst, libvirt_xml, prefix='', disk_images=None):
# syntactic nicety
basepath = lambda fname = '', prefix = prefix: os.path.join(
@@ -431,7 +491,7 @@ class LibvirtConnection(object):
# TODO(termie): these are blocking calls, it would be great
# if they weren't.
- logging.info(_('instance %s: Creating image'), inst['name'])
+ LOG.info(_('instance %s: Creating image'), inst['name'])
f = open(basepath('libvirt.xml'), 'w')
f.write(libvirt_xml)
f.close()
@@ -487,10 +547,10 @@ class LibvirtConnection(object):
'dns': network_ref['dns']}
if key or net:
if key:
- logging.info(_('instance %s: injecting key into image %s'),
+ LOG.info(_('instance %s: injecting key into image %s'),
inst['name'], inst.image_id)
if net:
- logging.info(_('instance %s: injecting net into image %s'),
+ LOG.info(_('instance %s: injecting net into image %s'),
inst['name'], inst.image_id)
try:
disk.inject_data(basepath('disk-raw'), key, net,
@@ -498,9 +558,9 @@ class LibvirtConnection(object):
execute=execute)
except Exception as e:
# This could be a windows image, or a vmdk format disk
- logging.warn(_('instance %s: ignoring error injecting data'
- ' into image %s (%s)'),
- inst['name'], inst.image_id, e)
+ LOG.warn(_('instance %s: ignoring error injecting data'
+ ' into image %s (%s)'),
+ inst['name'], inst.image_id, e)
if inst['kernel_id']:
if os.path.exists(basepath('disk')):
@@ -526,8 +586,10 @@ class LibvirtConnection(object):
def to_xml(self, instance, rescue=False):
# TODO(termie): cache?
- logging.debug(_('instance %s: starting toXML method'),
- instance['name'])
+ LOG.debug(_('instance %s: starting toXML method'), instance['name'])
+ network = db.project_get_network(context.get_admin_context(),
+ instance['project_id'])
+ LOG.debug(_('instance %s: starting toXML method'), instance['name'])
network = db.network_get_by_instance(context.get_admin_context(),
instance['id'])
# FIXME(vish): stick this in db
@@ -569,7 +631,7 @@ class LibvirtConnection(object):
xml_info['disk'] = xml_info['basepath'] + "/disk"
xml = str(Template(self.libvirt_xml, searchList=[xml_info]))
- logging.debug(_('instance %s: finished toXML method'),
+ LOG.debug(_('instance %s: finished toXML method'),
instance['name'])
return xml
@@ -690,18 +752,63 @@ class LibvirtConnection(object):
domain = self._conn.lookupByName(instance_name)
return domain.interfaceStats(interface)
- def refresh_security_group(self, security_group_id):
- fw = NWFilterFirewall(self._conn)
- fw.ensure_security_group_filter(security_group_id)
+ def get_console_pool_info(self, console_type):
+ #TODO(mdragon): console proxy should be implemented for libvirt,
+ # in case someone wants to use it with kvm or
+ # such. For now return fake data.
+ return {'address': '127.0.0.1',
+ 'username': 'fakeuser',
+ 'password': 'fakepassword'}
+
+ def refresh_security_group_rules(self, security_group_id):
+ self.firewall_driver.refresh_security_group_rules(security_group_id)
+
+ def refresh_security_group_members(self, security_group_id):
+ self.firewall_driver.refresh_security_group_members(security_group_id)
+
+class FirewallDriver(object):
+ def prepare_instance_filter(self, instance):
+ """Prepare filters for the instance.
-class NWFilterFirewall(object):
+ At this point, the instance isn't running yet."""
+ raise NotImplementedError()
+
+ def apply_instance_filter(self, instance):
+ """Apply instance filter.
+
+ Once this method returns, the instance should be firewalled
+ appropriately. This method should as far as possible be a
+ no-op. It's vastly preferred to get everything set up in
+ prepare_instance_filter.
+ """
+ raise NotImplementedError()
+
+ def refresh_security_group_rules(self, security_group_id):
+ """Refresh security group rules from data store
+
+ Gets called when a rule has been added to or removed from
+ the security group."""
+ raise NotImplementedError()
+
+ def refresh_security_group_members(self, security_group_id):
+ """Refresh security group members from data store
+
+ Gets called when an instance gets added to or removed from
+ the security group."""
+ raise NotImplementedError()
+
+
+class NWFilterFirewall(FirewallDriver):
"""
This class implements a network filtering mechanism versatile
enough for EC2 style Security Group filtering by leveraging
libvirt's nwfilter.
First, all instances get a filter ("nova-base-filter") applied.
+ This filter provides some basic security such as protection against
+ MAC spoofing, IP spoofing, and ARP spoofing.
+
This filter drops all incoming ipv4 and ipv6 connections.
Outgoing connections are never blocked.
@@ -735,38 +842,79 @@ class NWFilterFirewall(object):
(*) This sentence brought to you by the redundancy department of
redundancy.
+
"""
def __init__(self, get_connection):
- self._conn = get_connection
-
- nova_base_filter = '''<filter name='nova-base' chain='root'>
- <uuid>26717364-50cf-42d1-8185-29bf893ab110</uuid>
- <filterref filter='no-mac-spoofing'/>
- <filterref filter='no-ip-spoofing'/>
- <filterref filter='no-arp-spoofing'/>
- <filterref filter='allow-dhcp-server'/>
- <filterref filter='nova-allow-dhcp-server'/>
- <filterref filter='nova-base-ipv4'/>
- <filterref filter='nova-base-ipv6'/>
- </filter>'''
-
- nova_dhcp_filter = '''<filter name='nova-allow-dhcp-server' chain='ipv4'>
- <uuid>891e4787-e5c0-d59b-cbd6-41bc3c6b36fc</uuid>
- <rule action='accept' direction='out'
- priority='100'>
- <udp srcipaddr='0.0.0.0'
- dstipaddr='255.255.255.255'
- srcportstart='68'
- dstportstart='67'/>
- </rule>
- <rule action='accept' direction='in'
- priority='100'>
- <udp srcipaddr='$DHCPSERVER'
- srcportstart='67'
- dstportstart='68'/>
- </rule>
- </filter>'''
+ self._libvirt_get_connection = get_connection
+ self.static_filters_configured = False
+ self.handle_security_groups = False
+
+ def _get_connection(self):
+ return self._libvirt_get_connection()
+ _conn = property(_get_connection)
+
+ def nova_dhcp_filter(self):
+ """The standard allow-dhcp-server filter is an <ip> one, so it uses
+ ebtables to allow traffic through. Without a corresponding rule in
+ iptables, it'll get blocked anyway."""
+
+ return '''<filter name='nova-allow-dhcp-server' chain='ipv4'>
+ <uuid>891e4787-e5c0-d59b-cbd6-41bc3c6b36fc</uuid>
+ <rule action='accept' direction='out'
+ priority='100'>
+ <udp srcipaddr='0.0.0.0'
+ dstipaddr='255.255.255.255'
+ srcportstart='68'
+ dstportstart='67'/>
+ </rule>
+ <rule action='accept' direction='in'
+ priority='100'>
+ <udp srcipaddr='$DHCPSERVER'
+ srcportstart='67'
+ dstportstart='68'/>
+ </rule>
+ </filter>'''
+
+ def setup_basic_filtering(self, instance):
+ """Set up basic filtering (MAC, IP, and ARP spoofing protection)"""
+ LOG.info(_('called setup_basic_filtering in nwfilter'))
+
+ if self.handle_security_groups:
+ # No point in setting up a filter set that we'll be overriding
+ # anyway.
+ return
+
+ LOG.info(_('ensuring static filters'))
+ self._ensure_static_filters()
+
+ instance_filter_name = self._instance_filter_name(instance)
+ self._define_filter(self._filter_container(instance_filter_name,
+ ['nova-base']))
+
+ def _ensure_static_filters(self):
+ if self.static_filters_configured:
+ return
+
+ self._define_filter(self._filter_container('nova-base',
+ ['no-mac-spoofing',
+ 'no-ip-spoofing',
+ 'no-arp-spoofing',
+ 'allow-dhcp-server']))
+ self._define_filter(self.nova_base_ipv4_filter)
+ self._define_filter(self.nova_base_ipv6_filter)
+ self._define_filter(self.nova_dhcp_filter)
+ self._define_filter(self.nova_vpn_filter)
+ if FLAGS.allow_project_net_traffic:
+ self._define_filter(self.nova_project_filter)
+
+ self.static_filters_configured = True
+
+ def _filter_container(self, name, filters):
+ xml = '''<filter name='%s' chain='root'>%s</filter>''' % (
+ name,
+ ''.join(["<filterref filter='%s'/>" % (f,) for f in filters]))
+ return xml
nova_vpn_filter = '''<filter name='nova-vpn' chain='root'>
<uuid>2086015e-cf03-11df-8c5d-080027c27973</uuid>
@@ -780,7 +928,7 @@ class NWFilterFirewall(object):
retval = "<filter name='nova-base-ipv4' chain='ipv4'>"
for protocol in ['tcp', 'udp', 'icmp']:
for direction, action, priority in [('out', 'accept', 399),
- ('inout', 'drop', 400)]:
+ ('in', 'drop', 400)]:
retval += """<rule action='%s' direction='%s' priority='%d'>
<%s />
</rule>""" % (action, direction,
@@ -792,7 +940,7 @@ class NWFilterFirewall(object):
retval = "<filter name='nova-base-ipv6' chain='ipv6'>"
for protocol in ['tcp', 'udp', 'icmp']:
for direction, action, priority in [('out', 'accept', 399),
- ('inout', 'drop', 400)]:
+ ('in', 'drop', 400)]:
retval += """<rule action='%s' direction='%s' priority='%d'>
<%s-ipv6 />
</rule>""" % (action, direction,
@@ -816,43 +964,49 @@ class NWFilterFirewall(object):
# execute in a native thread and block current greenthread until done
tpool.execute(self._conn.nwfilterDefineXML, xml)
- def setup_base_nwfilters(self):
- self._define_filter(self.nova_base_ipv4_filter)
- self._define_filter(self.nova_base_ipv6_filter)
- self._define_filter(self.nova_dhcp_filter)
- self._define_filter(self.nova_base_filter)
- self._define_filter(self.nova_vpn_filter)
- if FLAGS.allow_project_net_traffic:
- self._define_filter(self.nova_project_filter)
-
- def setup_nwfilters_for_instance(self, instance):
+ def prepare_instance_filter(self, instance):
"""
Creates an NWFilter for the given instance. In the process,
it makes sure the filters for the security groups as well as
the base filter are all in place.
"""
- nwfilter_xml = ("<filter name='nova-instance-%s' "
- "chain='root'>\n") % instance['name']
-
if instance['image_id'] == FLAGS.vpn_image_id:
- nwfilter_xml += " <filterref filter='nova-vpn' />\n"
+ base_filter = 'nova-vpn'
else:
- nwfilter_xml += " <filterref filter='nova-base' />\n"
+ base_filter = 'nova-base'
+
+ instance_filter_name = self._instance_filter_name(instance)
+ instance_secgroup_filter_name = '%s-secgroup' % (instance_filter_name,)
+ instance_filter_children = [base_filter, instance_secgroup_filter_name]
+ instance_secgroup_filter_children = ['nova-base-ipv4',
+ 'nova-base-ipv6',
+ 'nova-allow-dhcp-server']
+
+ ctxt = context.get_admin_context()
if FLAGS.allow_project_net_traffic:
- nwfilter_xml += " <filterref filter='nova-project' />\n"
+ instance_filter_children += ['nova-project']
- for security_group in instance.security_groups:
- self.ensure_security_group_filter(security_group['id'])
+ for security_group in db.security_group_get_by_instance(ctxt,
+ instance['id']):
- nwfilter_xml += (" <filterref filter='nova-secgroup-%d' "
- "/>\n") % security_group['id']
- nwfilter_xml += "</filter>"
+ self.refresh_security_group_rules(security_group['id'])
- self._define_filter(nwfilter_xml)
+ instance_secgroup_filter_children += [('nova-secgroup-%s' %
+ security_group['id'])]
- def ensure_security_group_filter(self, security_group_id):
+ self._define_filter(
+ self._filter_container(instance_secgroup_filter_name,
+ instance_secgroup_filter_children))
+
+ self._define_filter(
+ self._filter_container(instance_filter_name,
+ instance_filter_children))
+
+ return
+
+ def refresh_security_group_rules(self, security_group_id):
return self._define_filter(
self.security_group_to_nwfilter_xml(security_group_id))
@@ -870,9 +1024,9 @@ class NWFilterFirewall(object):
rule_xml += "dstportstart='%s' dstportend='%s' " % \
(rule.from_port, rule.to_port)
elif rule.protocol == 'icmp':
- logging.info('rule.protocol: %r, rule.from_port: %r, '
- 'rule.to_port: %r' %
- (rule.protocol, rule.from_port, rule.to_port))
+ LOG.info('rule.protocol: %r, rule.from_port: %r, '
+ 'rule.to_port: %r', rule.protocol,
+ rule.from_port, rule.to_port)
if rule.from_port != -1:
rule_xml += "type='%s' " % rule.from_port
if rule.to_port != -1:
@@ -883,3 +1037,162 @@ class NWFilterFirewall(object):
xml = "<filter name='nova-secgroup-%s' chain='ipv4'>%s</filter>" % \
(security_group_id, rule_xml,)
return xml
+
+ def _instance_filter_name(self, instance):
+ return 'nova-instance-%s' % instance['name']
+
+
+class IptablesFirewallDriver(FirewallDriver):
+ def __init__(self, execute=None):
+ self.execute = execute or utils.execute
+ self.instances = set()
+
+ def apply_instance_filter(self, instance):
+ """No-op. Everything is done in prepare_instance_filter"""
+ pass
+
+ def remove_instance(self, instance):
+ self.instances.remove(instance)
+
+ def add_instance(self, instance):
+ self.instances.add(instance)
+
+ def prepare_instance_filter(self, instance):
+ self.add_instance(instance)
+ self.apply_ruleset()
+
+ def apply_ruleset(self):
+ current_filter, _ = self.execute('sudo iptables-save -t filter')
+ current_lines = current_filter.split('\n')
+ new_filter = self.modify_rules(current_lines)
+ self.execute('sudo iptables-restore',
+ process_input='\n'.join(new_filter))
+
+ def modify_rules(self, current_lines):
+ ctxt = context.get_admin_context()
+ # Remove any trace of nova rules.
+ new_filter = filter(lambda l: 'nova-' not in l, current_lines)
+
+ seen_chains = False
+ for rules_index in range(len(new_filter)):
+ if not seen_chains:
+ if new_filter[rules_index].startswith(':'):
+ seen_chains = True
+ elif seen_chains == 1:
+ if not new_filter[rules_index].startswith(':'):
+ break
+
+ our_chains = [':nova-ipv4-fallback - [0:0]']
+ our_rules = ['-A nova-ipv4-fallback -j DROP']
+
+ our_chains += [':nova-local - [0:0]']
+ our_rules += ['-A FORWARD -j nova-local']
+
+ security_groups = set()
+ # Add our chains
+ # First, we add instance chains and rules
+ for instance in self.instances:
+ chain_name = self._instance_chain_name(instance)
+ ip_address = self._ip_for_instance(instance)
+
+ our_chains += [':%s - [0:0]' % chain_name]
+
+ # Jump to the per-instance chain
+ our_rules += ['-A nova-local -d %s -j %s' % (ip_address,
+ chain_name)]
+
+ # Always drop invalid packets
+ our_rules += ['-A %s -m state --state '
+ 'INVALID -j DROP' % (chain_name,)]
+
+ # Allow established connections
+ our_rules += ['-A %s -m state --state '
+ 'ESTABLISHED,RELATED -j ACCEPT' % (chain_name,)]
+
+ # Jump to each security group chain in turn
+ for security_group in \
+ db.security_group_get_by_instance(ctxt,
+ instance['id']):
+ security_groups.add(security_group)
+
+ sg_chain_name = self._security_group_chain_name(security_group)
+
+ our_rules += ['-A %s -j %s' % (chain_name, sg_chain_name)]
+
+ # Allow DHCP responses
+ dhcp_server = self._dhcp_server_for_instance(instance)
+            our_rules += ['-A %s -s %s -p udp --sport 67 --dport 68 '
+                          '-j ACCEPT' % (chain_name, dhcp_server)]
+
+ # If nothing matches, jump to the fallback chain
+ our_rules += ['-A %s -j nova-ipv4-fallback' % (chain_name,)]
+
+ # then, security group chains and rules
+ for security_group in security_groups:
+ chain_name = self._security_group_chain_name(security_group)
+ our_chains += [':%s - [0:0]' % chain_name]
+
+ rules = \
+ db.security_group_rule_get_by_security_group(ctxt,
+ security_group['id'])
+
+ for rule in rules:
+                LOG.info('%r', rule)
+ args = ['-A', chain_name, '-p', rule.protocol]
+
+ if rule.cidr:
+ args += ['-s', rule.cidr]
+ else:
+ # Eventually, a mechanism to grant access for security
+ # groups will turn up here. It'll use ipsets.
+ continue
+
+ if rule.protocol in ['udp', 'tcp']:
+ if rule.from_port == rule.to_port:
+ args += ['--dport', '%s' % (rule.from_port,)]
+ else:
+ args += ['-m', 'multiport',
+ '--dports', '%s:%s' % (rule.from_port,
+ rule.to_port)]
+ elif rule.protocol == 'icmp':
+ icmp_type = rule.from_port
+ icmp_code = rule.to_port
+
+ if icmp_type == -1:
+ icmp_type_arg = None
+ else:
+ icmp_type_arg = '%s' % icmp_type
+ if not icmp_code == -1:
+ icmp_type_arg += '/%s' % icmp_code
+
+ if icmp_type_arg:
+ args += ['-m', 'icmp', '--icmp-type', icmp_type_arg]
+
+ args += ['-j ACCEPT']
+ our_rules += [' '.join(args)]
+
+ new_filter[rules_index:rules_index] = our_rules
+ new_filter[rules_index:rules_index] = our_chains
+        LOG.info('new_filter: %s', '\n'.join(new_filter))
+ return new_filter
+
+ def refresh_security_group_members(self, security_group):
+ pass
+
+ def refresh_security_group_rules(self, security_group):
+ self.apply_ruleset()
+
+ def _security_group_chain_name(self, security_group):
+ return 'nova-sg-%s' % (security_group['id'],)
+
+ def _instance_chain_name(self, instance):
+ return 'nova-inst-%s' % (instance['id'],)
+
+ def _ip_for_instance(self, instance):
+ return db.instance_get_fixed_address(context.get_admin_context(),
+ instance['id'])
+
+ def _dhcp_server_for_instance(self, instance):
+ network = db.project_get_network(context.get_admin_context(),
+ instance['project_id'])
+ return network['gateway']
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index aa4026f97..96d8f5fc8 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -52,12 +52,12 @@ A fake XenAPI SDK.
import datetime
-import logging
import uuid
from pprint import pformat
from nova import exception
+from nova import log as logging
_CLASSES = ['host', 'network', 'session', 'SR', 'VBD',\
@@ -65,9 +65,11 @@ _CLASSES = ['host', 'network', 'session', 'SR', 'VBD',\
_db_content = {}
+LOG = logging.getLogger("nova.virt.xenapi.fake")
+
def log_db_contents(msg=None):
- logging.debug(_("%s: _db_content => %s"), msg or "", pformat(_db_content))
+ LOG.debug(_("%s: _db_content => %s"), msg or "", pformat(_db_content))
def reset():
@@ -242,9 +244,9 @@ class SessionBase(object):
full_params = (self._session,) + params
meth = getattr(self, methodname, None)
if meth is None:
- logging.warn('Raising NotImplemented')
+ LOG.debug(_('Raising NotImplemented'))
raise NotImplementedError(
- 'xenapi.fake does not have an implementation for %s' %
+ _('xenapi.fake does not have an implementation for %s') %
methodname)
return meth(*full_params)
@@ -278,12 +280,12 @@ class SessionBase(object):
if impl is not None:
def callit(*params):
- logging.warn('Calling %s %s', name, impl)
+ LOG.debug(_('Calling %s %s'), name, impl)
self._check_session(params)
return impl(*params)
return callit
if self._is_gettersetter(name, True):
- logging.warn('Calling getter %s', name)
+ LOG.debug(_('Calling getter %s'), name)
return lambda *params: self._getter(name, params)
elif self._is_create(name):
return lambda *params: self._create(name, params)
@@ -333,10 +335,10 @@ class SessionBase(object):
field in _db_content[cls][ref]):
return _db_content[cls][ref][field]
- logging.error('Raising NotImplemented')
+        LOG.debug(_('Raising NotImplemented'))
raise NotImplementedError(
- 'xenapi.fake does not have an implementation for %s or it has '
- 'been called with the wrong number of arguments' % name)
+ _('xenapi.fake does not have an implementation for %s or it has '
+ 'been called with the wrong number of arguments') % name)
def _setter(self, name, params):
self._check_session(params)
@@ -351,7 +353,7 @@ class SessionBase(object):
field in _db_content[cls][ref]):
_db_content[cls][ref][field] = val
- logging.warn('Raising NotImplemented')
+ LOG.debug(_('Raising NotImplemented'))
raise NotImplementedError(
'xenapi.fake does not have an implementation for %s or it has '
'been called with the wrong number of arguments or the database '
@@ -399,7 +401,7 @@ class SessionBase(object):
self._session not in _db_content['session']):
raise Failure(['HANDLE_INVALID', 'session', self._session])
if len(params) == 0 or params[0] != self._session:
- logging.warn('Raising NotImplemented')
+ LOG.debug(_('Raising NotImplemented'))
raise NotImplementedError('Call to XenAPI without using .xenapi')
def _check_arg_count(self, params, expected):
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 9d1b51848..a91c8ea27 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -19,7 +19,6 @@ Helper methods for operations related to the management of VM records and
their attributes like VDIs, VIFs, as well as their lookup functions.
"""
-import logging
import pickle
import urllib
from xml.dom import minidom
@@ -27,6 +26,7 @@ from xml.dom import minidom
from eventlet import event
from nova import exception
from nova import flags
+from nova import log as logging
from nova import utils
from nova.auth.manager import AuthManager
from nova.compute import instance_types
@@ -37,6 +37,7 @@ from nova.virt.xenapi.volume_utils import StorageError
FLAGS = flags.FLAGS
+LOG = logging.getLogger("nova.virt.xenapi.vm_utils")
XENAPI_POWER_STATE = {
'Halted': power_state.SHUTDOWN,
@@ -121,9 +122,9 @@ class VMHelper(HelperBase):
rec['HVM_boot_params'] = {'order': 'dc'}
rec['platform'] = {'acpi': 'true', 'apic': 'true',
'pae': 'true', 'viridian': 'true'}
- logging.debug('Created VM %s...', instance.name)
+ LOG.debug(_('Created VM %s...'), instance.name)
vm_ref = session.call_xenapi('VM.create', rec)
- logging.debug(_('Created VM %s as %s.'), instance.name, vm_ref)
+ LOG.debug(_('Created VM %s as %s.'), instance.name, vm_ref)
return vm_ref
@classmethod
@@ -143,10 +144,9 @@ class VMHelper(HelperBase):
vbd_rec['qos_algorithm_type'] = ''
vbd_rec['qos_algorithm_params'] = {}
vbd_rec['qos_supported_algorithms'] = []
- logging.debug(_('Creating VBD for VM %s, VDI %s ... '),
- vm_ref, vdi_ref)
+ LOG.debug(_('Creating VBD for VM %s, VDI %s ... '), vm_ref, vdi_ref)
vbd_ref = session.call_xenapi('VBD.create', vbd_rec)
- logging.debug(_('Created VBD %s for VM %s, VDI %s.'), vbd_ref, vm_ref,
+ LOG.debug(_('Created VBD %s for VM %s, VDI %s.'), vbd_ref, vm_ref,
vdi_ref)
return vbd_ref
@@ -161,7 +161,7 @@ class VMHelper(HelperBase):
if vbd_rec['userdevice'] == str(number):
return vbd
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise StorageError(_('VBD not found in instance %s') % vm_ref)
@classmethod
@@ -170,7 +170,7 @@ class VMHelper(HelperBase):
try:
vbd_ref = session.call_xenapi('VBD.unplug', vbd_ref)
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
if exc.details[0] != 'DEVICE_ALREADY_DETACHED':
raise StorageError(_('Unable to unplug VBD %s') % vbd_ref)
@@ -183,7 +183,7 @@ class VMHelper(HelperBase):
#with Josh Kearney
session.wait_for_task(0, task)
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise StorageError(_('Unable to destroy VBD %s') % vbd_ref)
@classmethod
@@ -199,11 +199,11 @@ class VMHelper(HelperBase):
vif_rec['other_config'] = {}
vif_rec['qos_algorithm_type'] = ''
vif_rec['qos_algorithm_params'] = {}
- logging.debug(_('Creating VIF for VM %s, network %s.'), vm_ref,
- network_ref)
+ LOG.debug(_('Creating VIF for VM %s, network %s.'), vm_ref,
+ network_ref)
vif_ref = session.call_xenapi('VIF.create', vif_rec)
- logging.debug(_('Created VIF %s for VM %s, network %s.'), vif_ref,
- vm_ref, network_ref)
+ LOG.debug(_('Created VIF %s for VM %s, network %s.'), vif_ref,
+ vm_ref, network_ref)
return vif_ref
@classmethod
@@ -213,8 +213,7 @@ class VMHelper(HelperBase):
"""
#TODO(sirp): Add quiesce and VSS locking support when Windows support
# is added
- logging.debug(_("Snapshotting VM %s with label '%s'..."),
- vm_ref, label)
+ LOG.debug(_("Snapshotting VM %s with label '%s'..."), vm_ref, label)
vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref)
vm_vdi_uuid = vm_vdi_rec["uuid"]
@@ -227,8 +226,8 @@ class VMHelper(HelperBase):
template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1]
template_vdi_uuid = template_vdi_rec["uuid"]
- logging.debug(_('Created snapshot %s from VM %s.'), template_vm_ref,
- vm_ref)
+ LOG.debug(_('Created snapshot %s from VM %s.'), template_vm_ref,
+ vm_ref)
parent_uuid = wait_for_vhd_coalesce(
session, instance_id, sr_ref, vm_vdi_ref, original_parent_uuid)
@@ -241,8 +240,7 @@ class VMHelper(HelperBase):
""" Requests that the Glance plugin bundle the specified VDIs and
push them into Glance using the specified human-friendly name.
"""
- logging.debug(_("Asking xapi to upload %s as '%s'"),
- vdi_uuids, image_name)
+ LOG.debug(_("Asking xapi to upload %s as '%s'"), vdi_uuids, image_name)
params = {'vdi_uuids': vdi_uuids,
'image_name': image_name,
@@ -260,7 +258,7 @@ class VMHelper(HelperBase):
"""
url = images.image_url(image)
access = AuthManager().get_access_key(user, project)
- logging.debug("Asking xapi to fetch %s as %s", url, access)
+ LOG.debug(_("Asking xapi to fetch %s as %s"), url, access)
fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel'
args = {}
args['src_url'] = url
@@ -278,7 +276,7 @@ class VMHelper(HelperBase):
@classmethod
def lookup_image(cls, session, vdi_ref):
- logging.debug("Looking up vdi %s for PV kernel", vdi_ref)
+ LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
fn = "is_vdi_pv"
args = {}
args['vdi-ref'] = vdi_ref
@@ -289,7 +287,7 @@ class VMHelper(HelperBase):
pv = True
elif pv_str.lower() == 'false':
pv = False
- logging.debug("PV Kernel in VDI:%d", pv)
+ LOG.debug(_("PV Kernel in VDI:%d"), pv)
return pv
@classmethod
@@ -317,10 +315,9 @@ class VMHelper(HelperBase):
vdi = session.get_xenapi().VBD.get_VDI(vbd)
# Test valid VDI
record = session.get_xenapi().VDI.get_record(vdi)
- logging.debug(_('VDI %s is still available'),
- record['uuid'])
+ LOG.debug(_('VDI %s is still available'), record['uuid'])
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
else:
vdis.append(vdi)
if len(vdis) > 0:
@@ -331,10 +328,10 @@ class VMHelper(HelperBase):
@classmethod
def compile_info(cls, record):
"""Fill record with VM status information"""
- logging.info(_("(VM_UTILS) xenserver vm state -> |%s|"),
- record['power_state'])
- logging.info(_("(VM_UTILS) xenapi power_state -> |%s|"),
- XENAPI_POWER_STATE[record['power_state']])
+ LOG.info(_("(VM_UTILS) xenserver vm state -> |%s|"),
+ record['power_state'])
+ LOG.info(_("(VM_UTILS) xenapi power_state -> |%s|"),
+ XENAPI_POWER_STATE[record['power_state']])
return {'state': XENAPI_POWER_STATE[record['power_state']],
'max_mem': long(record['memory_static_max']) >> 10,
'mem': long(record['memory_dynamic_max']) >> 10,
@@ -360,7 +357,9 @@ class VMHelper(HelperBase):
if i >= 3 and i <= 11:
ref = node.childNodes
# Name and Value
- diags[ref[0].firstChild.data] = ref[6].firstChild.data
+ if len(ref) > 6:
+ diags[ref[0].firstChild.data] = \
+ ref[6].firstChild.data
return diags
except cls.XenAPI.Failure as e:
return {"Unable to retrieve diagnostics": e}
@@ -388,11 +387,9 @@ def get_vhd_parent(session, vdi_rec):
"""
if 'vhd-parent' in vdi_rec['sm_config']:
parent_uuid = vdi_rec['sm_config']['vhd-parent']
- #NOTE(sirp): changed xenapi -> get_xenapi()
parent_ref = session.get_xenapi().VDI.get_by_uuid(parent_uuid)
parent_rec = session.get_xenapi().VDI.get_record(parent_ref)
- #NOTE(sirp): changed log -> logging
- logging.debug(_("VHD %s has parent %s"), vdi_rec['uuid'], parent_ref)
+ LOG.debug(_("VHD %s has parent %s"), vdi_rec['uuid'], parent_ref)
return parent_ref, parent_rec
else:
return None
@@ -409,7 +406,7 @@ def get_vhd_parent_uuid(session, vdi_ref):
def scan_sr(session, instance_id, sr_ref):
- logging.debug(_("Re-scanning SR %s"), sr_ref)
+ LOG.debug(_("Re-scanning SR %s"), sr_ref)
task = session.call_xenapi('Async.SR.scan', sr_ref)
session.wait_for_task(instance_id, task)
@@ -433,10 +430,9 @@ def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
scan_sr(session, instance_id, sr_ref)
parent_uuid = get_vhd_parent_uuid(session, vdi_ref)
if original_parent_uuid and (parent_uuid != original_parent_uuid):
- logging.debug(
- _("Parent %s doesn't match original parent %s, "
- "waiting for coalesce..."),
- parent_uuid, original_parent_uuid)
+ LOG.debug(_("Parent %s doesn't match original parent %s, "
+ "waiting for coalesce..."), parent_uuid,
+ original_parent_uuid)
else:
done.send(parent_uuid)
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index b6d620782..7e3585991 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -20,10 +20,10 @@ Management class for VM-related functions (spawn, reboot, etc).
"""
import json
-import logging
from nova import db
from nova import context
+from nova import log as logging
from nova import exception
from nova import utils
@@ -33,6 +33,9 @@ from nova.virt.xenapi.network_utils import NetworkHelper
from nova.virt.xenapi.vm_utils import VMHelper
from nova.virt.xenapi.vm_utils import ImageType
+XenAPI = None
+LOG = logging.getLogger("nova.virt.xenapi.vmops")
+
class VMOps(object):
"""
@@ -93,10 +96,9 @@ class VMOps(object):
if network_ref:
VMHelper.create_vif(self._session, vm_ref,
network_ref, instance.mac_address)
- logging.debug(_('Starting VM %s...'), vm_ref)
+ LOG.debug(_('Starting VM %s...'), vm_ref)
self._session.call_xenapi('VM.start', vm_ref, False, False)
- logging.info(_('Spawning VM %s created %s.'), instance.name,
- vm_ref)
+ LOG.info(_('Spawning VM %s created %s.'), instance.name, vm_ref)
# NOTE(armando): Do we really need to do this in virt?
timer = utils.LoopingCall(f=None)
@@ -107,12 +109,12 @@ class VMOps(object):
db.instance_set_state(context.get_admin_context(),
instance['id'], state)
if state == power_state.RUNNING:
- logging.debug(_('Instance %s: booted'), instance['name'])
+ LOG.debug(_('Instance %s: booted'), instance['name'])
timer.stop()
except Exception, exc:
- logging.warn(exc)
- logging.exception(_('instance %s: failed to boot'),
- instance['name'])
+ LOG.warn(exc)
+ LOG.exception(_('instance %s: failed to boot'),
+ instance['name'])
db.instance_set_state(context.get_admin_context(),
instance['id'],
power_state.SHUTDOWN)
@@ -205,7 +207,7 @@ class VMOps(object):
task = self._session.call_xenapi('Async.VM.hard_shutdown', vm)
self._session.wait_for_task(instance.id, task)
except self.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
# Disk clean-up
if vdis:
@@ -214,20 +216,20 @@ class VMOps(object):
task = self._session.call_xenapi('Async.VDI.destroy', vdi)
self._session.wait_for_task(instance.id, task)
except self.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
# VM Destroy
try:
task = self._session.call_xenapi('Async.VM.destroy', vm)
self._session.wait_for_task(instance.id, task)
except self.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
def _wait_with_callback(self, instance_id, task, callback):
ret = None
try:
ret = self._session.wait_for_task(instance_id, task)
except self.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
callback(ret)
def pause(self, instance, callback):
@@ -282,6 +284,11 @@ class VMOps(object):
# TODO: implement this to fix pylint!
return 'FAKE CONSOLE OUTPUT of instance'
+ def get_ajax_console(self, instance):
+ """Return link to instance's ajax console"""
+ # TODO: implement this!
+ return 'http://fakeajaxconsole/fake_url'
+
def list_from_xenstore(self, vm, path):
"""Runs the xenstore-ls command to get a listing of all records
from 'path' downward. Returns a dict with the sub-paths as keys,
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index 4bbc41b03..0cd15b950 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -21,16 +21,17 @@ and storage repositories
import re
import string
-import logging
from nova import db
from nova import context
from nova import exception
from nova import flags
+from nova import log as logging
from nova import utils
from nova.virt.xenapi import HelperBase
FLAGS = flags.FLAGS
+LOG = logging.getLogger("nova.virt.xenapi.volume_utils")
class StorageError(Exception):
@@ -53,7 +54,7 @@ class VolumeHelper(HelperBase):
"""
sr_ref = session.get_xenapi().SR.get_by_name_label(label)
if len(sr_ref) == 0:
- logging.debug('Introducing %s...', label)
+ LOG.debug(_('Introducing %s...'), label)
record = {}
if 'chapuser' in info and 'chappassword' in info:
record = {'target': info['targetHost'],
@@ -70,10 +71,10 @@ class VolumeHelper(HelperBase):
session.get_xenapi_host(),
record,
'0', label, description, 'iscsi', '', False, {})
- logging.debug('Introduced %s as %s.', label, sr_ref)
+ LOG.debug(_('Introduced %s as %s.'), label, sr_ref)
return sr_ref
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise StorageError(_('Unable to create Storage Repository'))
else:
return sr_ref[0]
@@ -85,32 +86,32 @@ class VolumeHelper(HelperBase):
vdi_ref = session.get_xenapi().VBD.get_VDI(vbd_ref)
sr_ref = session.get_xenapi().VDI.get_SR(vdi_ref)
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise StorageError(_('Unable to find SR from VBD %s') % vbd_ref)
return sr_ref
@classmethod
def destroy_iscsi_storage(cls, session, sr_ref):
"""Forget the SR whilst preserving the state of the disk"""
- logging.debug("Forgetting SR %s ... ", sr_ref)
+ LOG.debug(_("Forgetting SR %s ... "), sr_ref)
pbds = []
try:
pbds = session.get_xenapi().SR.get_PBDs(sr_ref)
except cls.XenAPI.Failure, exc:
- logging.warn('Ignoring exception %s when getting PBDs for %s',
- exc, sr_ref)
+ LOG.warn(_('Ignoring exception %s when getting PBDs for %s'),
+ exc, sr_ref)
for pbd in pbds:
try:
session.get_xenapi().PBD.unplug(pbd)
except cls.XenAPI.Failure, exc:
- logging.warn('Ignoring exception %s when unplugging PBD %s',
- exc, pbd)
+ LOG.warn(_('Ignoring exception %s when unplugging PBD %s'),
+ exc, pbd)
try:
session.get_xenapi().SR.forget(sr_ref)
- logging.debug("Forgetting SR %s done.", sr_ref)
+ LOG.debug(_("Forgetting SR %s done."), sr_ref)
except cls.XenAPI.Failure, exc:
- logging.warn('Ignoring exception %s when forgetting SR %s',
- exc, sr_ref)
+ LOG.warn(_('Ignoring exception %s when forgetting SR %s'), exc,
+ sr_ref)
@classmethod
def introduce_vdi(cls, session, sr_ref):
@@ -118,12 +119,12 @@ class VolumeHelper(HelperBase):
try:
vdis = session.get_xenapi().SR.get_VDIs(sr_ref)
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise StorageError(_('Unable to introduce VDI on SR %s') % sr_ref)
try:
vdi_rec = session.get_xenapi().VDI.get_record(vdis[0])
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise StorageError(_('Unable to get record'
' of VDI %s on') % vdis[0])
else:
@@ -141,7 +142,7 @@ class VolumeHelper(HelperBase):
vdi_rec['xenstore_data'],
vdi_rec['sm_config'])
except cls.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise StorageError(_('Unable to introduce VDI for SR %s')
% sr_ref)
@@ -165,11 +166,8 @@ class VolumeHelper(HelperBase):
target_host = _get_target_host(iscsi_portal)
target_port = _get_target_port(iscsi_portal)
target_iqn = _get_iqn(iscsi_name, volume_id)
- logging.debug('(vol_id,number,host,port,iqn): (%s,%s,%s,%s)',
- volume_id,
- target_host,
- target_port,
- target_iqn)
+    LOG.debug(_('(vol_id,host,port,iqn): (%s,%s,%s,%s)'),
+              volume_id, target_host, target_port, target_iqn)
if (device_number < 0) or \
(volume_id is None) or \
(target_host is None) or \
@@ -196,7 +194,7 @@ class VolumeHelper(HelperBase):
elif re.match('^[0-9]+$', mountpoint):
return string.atoi(mountpoint, 10)
else:
- logging.warn('Mountpoint cannot be translated: %s', mountpoint)
+ LOG.warn(_('Mountpoint cannot be translated: %s'), mountpoint)
return -1
@@ -257,7 +255,7 @@ def _get_target(volume_id):
"sendtargets -p %s" %
volume_ref['host'])
except exception.ProcessExecutionError, exc:
- logging.warn(exc)
+ LOG.exception(exc)
else:
targets = r.splitlines()
if len(_e) == 0 and len(targets) == 1:
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index fdeb2506c..189f968c6 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -17,14 +17,17 @@
"""
Management class for Storage-related functions (attach, detach, etc).
"""
-import logging
from nova import exception
+from nova import log as logging
from nova.virt.xenapi.vm_utils import VMHelper
from nova.virt.xenapi.volume_utils import VolumeHelper
from nova.virt.xenapi.volume_utils import StorageError
+LOG = logging.getLogger("nova.virt.xenapi.volumeops")
+
+
class VolumeOps(object):
"""
Management class for Volume-related tasks
@@ -45,8 +48,8 @@ class VolumeOps(object):
raise exception.NotFound(_('Instance %s not found')
% instance_name)
# NOTE: No Resource Pool concept so far
- logging.debug(_("Attach_volume: %s, %s, %s"),
- instance_name, device_path, mountpoint)
+ LOG.debug(_("Attach_volume: %s, %s, %s"),
+ instance_name, device_path, mountpoint)
# Create the iSCSI SR, and the PDB through which hosts access SRs.
# But first, retrieve target info, like Host, IQN, LUN and SCSIID
vol_rec = VolumeHelper.parse_volume_info(device_path, mountpoint)
@@ -61,7 +64,7 @@ class VolumeOps(object):
try:
vdi_ref = VolumeHelper.introduce_vdi(self._session, sr_ref)
except StorageError, exc:
- logging.warn(exc)
+ LOG.exception(exc)
VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
raise Exception(_('Unable to create VDI on SR %s for instance %s')
% (sr_ref,
@@ -73,7 +76,7 @@ class VolumeOps(object):
vol_rec['deviceNumber'],
False)
except self.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
raise Exception(_('Unable to use SR %s for instance %s')
% (sr_ref,
@@ -84,13 +87,13 @@ class VolumeOps(object):
vbd_ref)
self._session.wait_for_task(vol_rec['deviceNumber'], task)
except self.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.exception(exc)
VolumeHelper.destroy_iscsi_storage(self._session,
sr_ref)
raise Exception(_('Unable to attach volume to instance %s')
% instance_name)
- logging.info(_('Mountpoint %s attached to instance %s'),
- mountpoint, instance_name)
+ LOG.info(_('Mountpoint %s attached to instance %s'),
+ mountpoint, instance_name)
def detach_volume(self, instance_name, mountpoint):
"""Detach volume storage to VM instance"""
@@ -100,13 +103,13 @@ class VolumeOps(object):
raise exception.NotFound(_('Instance %s not found')
% instance_name)
# Detach VBD from VM
- logging.debug(_("Detach_volume: %s, %s"), instance_name, mountpoint)
+ LOG.debug(_("Detach_volume: %s, %s"), instance_name, mountpoint)
device_number = VolumeHelper.mountpoint_to_number(mountpoint)
try:
vbd_ref = VMHelper.find_vbd_by_number(self._session,
vm_ref, device_number)
except StorageError, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise Exception(_('Unable to locate volume %s') % mountpoint)
else:
try:
@@ -114,13 +117,13 @@ class VolumeOps(object):
vbd_ref)
VMHelper.unplug_vbd(self._session, vbd_ref)
except StorageError, exc:
- logging.warn(exc)
+ LOG.exception(exc)
raise Exception(_('Unable to detach volume %s') % mountpoint)
try:
VMHelper.destroy_vbd(self._session, vbd_ref)
except StorageError, exc:
- logging.warn(exc)
+ LOG.exception(exc)
# Forget SR
VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
- logging.info(_('Mountpoint %s detached from instance %s'),
- mountpoint, instance_name)
+ LOG.info(_('Mountpoint %s detached from instance %s'),
+ mountpoint, instance_name)
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index c48f5b7cb..45d0738a5 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -51,8 +51,8 @@ reactor thread if the VM.get_by_name_label or VM.get_record calls block.
:iqn_prefix: IQN Prefix, e.g. 'iqn.2010-10.org.openstack'
"""
-import logging
import sys
+import urlparse
import xmlrpclib
from eventlet import event
@@ -62,9 +62,14 @@ from nova import context
from nova import db
from nova import utils
from nova import flags
+from nova import log as logging
from nova.virt.xenapi.vmops import VMOps
from nova.virt.xenapi.volumeops import VolumeOps
+
+LOG = logging.getLogger("nova.virt.xenapi")
+
+
FLAGS = flags.FLAGS
flags.DEFINE_string('xenapi_connection_url',
@@ -176,6 +181,10 @@ class XenAPIConnection(object):
"""Return snapshot of console"""
return self._vmops.get_console_output(instance)
+ def get_ajax_console(self, instance):
+ """Return link to instance's ajax console"""
+ return self._vmops.get_ajax_console(instance)
+
def attach_volume(self, instance_name, device_path, mountpoint):
"""Attach volume storage to VM instance"""
return self._volumeops.attach_volume(instance_name,
@@ -186,6 +195,12 @@ class XenAPIConnection(object):
"""Detach volume storage to VM instance"""
return self._volumeops.detach_volume(instance_name, mountpoint)
+ def get_console_pool_info(self, console_type):
+ xs_url = urlparse.urlparse(FLAGS.xenapi_connection_url)
+ return {'address': xs_url.netloc,
+ 'username': FLAGS.xenapi_connection_username,
+ 'password': FLAGS.xenapi_connection_password}
+
class XenAPISession(object):
"""The session to invoke XenAPI SDK calls"""
@@ -194,6 +209,7 @@ class XenAPISession(object):
self.XenAPI = self.get_imported_xenapi()
self._session = self._create_session(url)
self._session.login_with_password(user, pw)
+ self.loop = None
def get_imported_xenapi(self):
"""Stubout point. This can be replaced with a mock xenapi module."""
@@ -230,14 +246,20 @@ class XenAPISession(object):
def wait_for_task(self, id, task):
"""Return the result of the given task. The task is polled
- until it completes."""
+ until it completes. Not re-entrant."""
done = event.Event()
- loop = utils.LoopingCall(self._poll_task, id, task, done)
- loop.start(FLAGS.xenapi_task_poll_interval, now=True)
+ self.loop = utils.LoopingCall(self._poll_task, id, task, done)
+ self.loop.start(FLAGS.xenapi_task_poll_interval, now=True)
rv = done.wait()
- loop.stop()
+ self.loop.stop()
return rv
+ def _stop_loop(self):
+ """Stop polling for task to finish."""
+ #NOTE(sandy-walsh) Had to break this call out to support unit tests.
+ if self.loop:
+ self.loop.stop()
+
def _create_session(self, url):
"""Stubout point. This can be replaced with a mock session."""
return self.XenAPI.Session(url)
@@ -256,7 +278,7 @@ class XenAPISession(object):
return
elif status == "success":
result = self._session.xenapi.task.get_result(task)
- logging.info(_("Task [%s] %s status: success %s") % (
+ LOG.info(_("Task [%s] %s status: success %s") % (
name,
task,
result))
@@ -264,7 +286,7 @@ class XenAPISession(object):
else:
error_info = self._session.xenapi.task.get_error_info(task)
action["error"] = str(error_info)
- logging.warn(_("Task [%s] %s status: %s %s") % (
+ LOG.warn(_("Task [%s] %s status: %s %s") % (
name,
task,
status,
@@ -272,15 +294,16 @@ class XenAPISession(object):
done.send_exception(self.XenAPI.Failure(error_info))
db.instance_action_create(context.get_admin_context(), action)
except self.XenAPI.Failure, exc:
- logging.warn(exc)
+ LOG.warn(exc)
done.send_exception(*sys.exc_info())
+ self._stop_loop()
def _unwrap_plugin_exceptions(self, func, *args, **kwargs):
"""Parse exception details"""
try:
return func(*args, **kwargs)
except self.XenAPI.Failure, exc:
- logging.debug(_("Got exception: %s"), exc)
+ LOG.debug(_("Got exception: %s"), exc)
if (len(exc.details) == 4 and
exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and
exc.details[2] == 'Failure'):
@@ -293,7 +316,7 @@ class XenAPISession(object):
else:
raise
except xmlrpclib.ProtocolError, exc:
- logging.debug(_("Got exception: %s"), exc)
+ LOG.debug(_("Got exception: %s"), exc)
raise
diff --git a/nova/volume/api.py b/nova/volume/api.py
index 2d7fe3762..ce4831cc3 100644
--- a/nova/volume/api.py
+++ b/nova/volume/api.py
@@ -21,11 +21,11 @@ Handles all requests relating to volumes.
"""
import datetime
-import logging
from nova import db
from nova import exception
from nova import flags
+from nova import log as logging
from nova import quota
from nova import rpc
from nova.db import base
@@ -33,16 +33,18 @@ from nova.db import base
FLAGS = flags.FLAGS
flags.DECLARE('storage_availability_zone', 'nova.volume.manager')
+LOG = logging.getLogger('nova.volume')
+
class API(base.Base):
"""API for interacting with the volume manager."""
def create(self, context, size, name, description):
if quota.allowed_volumes(context, 1, size) < 1:
- logging.warn("Quota exceeeded for %s, tried to create %sG volume",
+ LOG.warn(_("Quota exceeeded for %s, tried to create %sG volume"),
context.project_id, size)
- raise quota.QuotaError("Volume quota exceeded. You cannot "
- "create a volume of size %s" % size)
+ raise quota.QuotaError(_("Volume quota exceeded. You cannot "
+ "create a volume of size %s") % size)
options = {
'size': size,
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
index 8353b9712..6bc925f3e 100644
--- a/nova/volume/driver.py
+++ b/nova/volume/driver.py
@@ -20,15 +20,15 @@ Drivers for volumes.
"""
-import logging
-import os
import time
from nova import exception
from nova import flags
+from nova import log as logging
from nova import utils
+LOG = logging.getLogger("nova.volume.driver")
FLAGS = flags.FLAGS
flags.DEFINE_string('volume_group', 'nova-volumes',
'Name for the VG that will contain exported volumes')
@@ -73,13 +73,15 @@ class VolumeDriver(object):
tries = tries + 1
if tries >= FLAGS.num_shell_tries:
raise
- logging.exception(_("Recovering from a failed execute."
- "Try number %s"), tries)
+ LOG.exception(_("Recovering from a failed execute. "
+ "Try number %s"), tries)
time.sleep(tries ** 2)
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met"""
- if not os.path.isdir("/dev/%s" % FLAGS.volume_group):
+ out, err = self._execute("sudo vgs --noheadings -o name")
+ volume_groups = out.split()
+ if not FLAGS.volume_group in volume_groups:
raise exception.Error(_("volume group %s doesn't exist")
% FLAGS.volume_group)
@@ -205,7 +207,7 @@ class FakeAOEDriver(AOEDriver):
@staticmethod
def fake_execute(cmd, *_args, **_kwargs):
"""Execute that simply logs the command."""
- logging.debug(_("FAKE AOE: %s"), cmd)
+ LOG.debug(_("FAKE AOE: %s"), cmd)
return (None, None)
@@ -310,5 +312,5 @@ class FakeISCSIDriver(ISCSIDriver):
@staticmethod
def fake_execute(cmd, *_args, **_kwargs):
"""Execute that simply logs the command."""
- logging.debug(_("FAKE ISCSI: %s"), cmd)
+ LOG.debug(_("FAKE ISCSI: %s"), cmd)
return (None, None)
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
index 966334c50..6348539c5 100644
--- a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -42,17 +42,18 @@ intact.
"""
-import logging
import datetime
from nova import context
from nova import exception
from nova import flags
+from nova import log as logging
from nova import manager
from nova import utils
+LOG = logging.getLogger('nova.volume.manager')
FLAGS = flags.FLAGS
flags.DEFINE_string('storage_availability_zone',
'nova',
@@ -81,7 +82,7 @@ class VolumeManager(manager.Manager):
self.driver.check_for_setup_error()
ctxt = context.get_admin_context()
volumes = self.db.volume_get_all_by_host(ctxt, self.host)
- logging.debug(_("Re-exporting %s volumes"), len(volumes))
+ LOG.debug(_("Re-exporting %s volumes"), len(volumes))
for volume in volumes:
self.driver.ensure_export(ctxt, volume)
@@ -89,7 +90,7 @@ class VolumeManager(manager.Manager):
"""Creates and exports the volume."""
context = context.elevated()
volume_ref = self.db.volume_get(context, volume_id)
- logging.info(_("volume %s: creating"), volume_ref['name'])
+ LOG.info(_("volume %s: creating"), volume_ref['name'])
self.db.volume_update(context,
volume_id,
@@ -98,18 +99,18 @@ class VolumeManager(manager.Manager):
# before passing it to the driver.
volume_ref['host'] = self.host
- logging.debug(_("volume %s: creating lv of size %sG"),
- volume_ref['name'], volume_ref['size'])
+ LOG.debug(_("volume %s: creating lv of size %sG"), volume_ref['name'],
+ volume_ref['size'])
self.driver.create_volume(volume_ref)
- logging.debug(_("volume %s: creating export"), volume_ref['name'])
+ LOG.debug(_("volume %s: creating export"), volume_ref['name'])
self.driver.create_export(context, volume_ref)
now = datetime.datetime.utcnow()
self.db.volume_update(context,
volume_ref['id'], {'status': 'available',
'launched_at': now})
- logging.debug(_("volume %s: created successfully"), volume_ref['name'])
+ LOG.debug(_("volume %s: created successfully"), volume_ref['name'])
return volume_id
def delete_volume(self, context, volume_id):
@@ -120,12 +121,12 @@ class VolumeManager(manager.Manager):
raise exception.Error(_("Volume is still attached"))
if volume_ref['host'] != self.host:
raise exception.Error(_("Volume is not local to this node"))
- logging.debug(_("volume %s: removing export"), volume_ref['name'])
+ LOG.debug(_("volume %s: removing export"), volume_ref['name'])
self.driver.remove_export(context, volume_ref)
- logging.debug(_("volume %s: deleting"), volume_ref['name'])
+ LOG.debug(_("volume %s: deleting"), volume_ref['name'])
self.driver.delete_volume(volume_ref)
self.db.volume_destroy(context, volume_id)
- logging.debug(_("volume %s: deleted successfully"), volume_ref['name'])
+ LOG.debug(_("volume %s: deleted successfully"), volume_ref['name'])
return True
def setup_compute_volume(self, context, volume_id):
diff --git a/nova/wsgi.py b/nova/wsgi.py
index b5d6b96c1..e999f76a3 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -22,7 +22,6 @@ Utility methods for working with WSGI servers
"""
import json
-import logging
import sys
from xml.dom import minidom
@@ -35,18 +34,30 @@ import webob
import webob.dec
import webob.exc
+from nova import log as logging
-logging.getLogger("routes.middleware").addHandler(logging.StreamHandler())
+
+class WritableLogger(object):
+ """A thin wrapper that responds to `write` and logs."""
+
+ def __init__(self, logger, level=logging.DEBUG):
+ self.logger = logger
+ self.level = level
+
+ def write(self, msg):
+ self.logger.log(self.level, msg)
class Server(object):
"""Server class to manage multiple WSGI sockets and applications."""
def __init__(self, threads=1000):
+ logging.basicConfig()
self.pool = eventlet.GreenPool(threads)
def start(self, application, port, host='0.0.0.0', backlog=128):
"""Run a WSGI server with the given application."""
+ logging.audit(_("Starting %s on %s:%s"), sys.argv[0], host, port)
socket = eventlet.listen((host, port), backlog=backlog)
self.pool.spawn_n(self._run, application, socket)
@@ -59,7 +70,9 @@ class Server(object):
def _run(self, application, socket):
"""Start a WSGI server in a new green thread."""
- eventlet.wsgi.server(socket, application, custom_pool=self.pool)
+ logger = logging.getLogger('eventlet.wsgi.server')
+ eventlet.wsgi.server(socket, application, custom_pool=self.pool,
+ log=WritableLogger(logger))
class Application(object):
diff --git a/setup.cfg b/setup.cfg
index 14dcb5c8e..9c0a331e3 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -8,3 +8,17 @@ tag_build =
tag_date = 0
tag_svn_revision = 0
+[compile_catalog]
+directory = locale
+domain = nova
+
+[update_catalog]
+domain = nova
+output_dir = locale
+input_file = locale/nova.pot
+
+[extract_messages]
+keywords = _ l_ lazy_gettext
+mapping_file = babel.cfg
+output_file = locale/nova.pot
+
diff --git a/setup.py b/setup.py
index 1abf4d9fe..3608ff805 100644
--- a/setup.py
+++ b/setup.py
@@ -24,6 +24,15 @@ from setuptools.command.sdist import sdist
from sphinx.setup_command import BuildDoc
from nova.utils import parse_mailmap, str_dict_replace
+from nova import version
+
+if os.path.isdir('.bzr'):
+ with open("nova/vcsversion.py", 'w') as version_file:
+ vcs_cmd = subprocess.Popen(["bzr", "version-info", "--python"],
+ stdout=subprocess.PIPE)
+ vcsversion = vcs_cmd.communicate()[0]
+ version_file.write(vcsversion)
+
class local_BuildDoc(BuildDoc):
def run(self):
@@ -48,14 +57,25 @@ class local_sdist(sdist):
changelog_file.write(str_dict_replace(changelog, mailmap))
sdist.run(self)
+nova_cmdclass= { 'sdist': local_sdist,
+ 'build_sphinx' : local_BuildDoc }
+
+try:
+ from babel.messages import frontend as babel
+ nova_cmdclass['compile_catalog'] = babel.compile_catalog
+ nova_cmdclass['extract_messages'] = babel.extract_messages
+ nova_cmdclass['init_catalog'] = babel.init_catalog
+ nova_cmdclass['update_catalog'] = babel.update_catalog
+except:
+ pass
+
setup(name='nova',
- version='2011.1',
+ version=version.canonical_version_string(),
description='cloud computing fabric controller',
author='OpenStack',
author_email='nova@lists.launchpad.net',
url='http://www.openstack.org/',
- cmdclass={ 'sdist': local_sdist,
- 'build_sphinx' : local_BuildDoc },
+ cmdclass=nova_cmdclass,
packages=find_packages(exclude=['bin', 'smoketests']),
include_package_data=True,
test_suite='nose.collector',
@@ -64,9 +84,11 @@ setup(name='nova',
'bin/nova-dhcpbridge',
'bin/nova-import-canonical-imagestore',
'bin/nova-instancemonitor',
+ 'bin/nova-logspool',
'bin/nova-manage',
'bin/nova-network',
'bin/nova-objectstore',
'bin/nova-scheduler',
+ 'bin/nova-spoolsentry',
'bin/nova-volume',
'tools/nova-debug'])
diff --git a/smoketests/admin_smoketests.py b/smoketests/admin_smoketests.py
index 50bb3fa2e..1ef1c1425 100644
--- a/smoketests/admin_smoketests.py
+++ b/smoketests/admin_smoketests.py
@@ -19,10 +19,17 @@
import os
import random
import sys
-import time
import unittest
import zipfile
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+ os.pardir,
+ os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+ sys.path.insert(0, possible_topdir)
+
from nova import adminclient
from smoketests import flags
from smoketests import base
diff --git a/smoketests/user_smoketests.py b/smoketests/user_smoketests.py
index d29e3aea3..578c0722e 100644
--- a/smoketests/user_smoketests.py
+++ b/smoketests/user_smoketests.py
@@ -24,6 +24,14 @@ import sys
import time
import unittest
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+ os.pardir,
+ os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+ sys.path.insert(0, possible_topdir)
+
from smoketests import flags
from smoketests import base
@@ -40,6 +48,7 @@ flags.DEFINE_string('bundle_image', 'openwrt-x86-ext2.image',
TEST_PREFIX = 'test%s' % int (random.random()*1000000)
TEST_BUCKET = '%s_bucket' % TEST_PREFIX
TEST_KEY = '%s_key' % TEST_PREFIX
+TEST_GROUP = '%s_group' % TEST_PREFIX
TEST_DATA = {}
@@ -137,7 +146,7 @@ class InstanceTests(UserSmokeTestCase):
self.data['instance_id'] = reservation.instances[0].id
def test_003_instance_runs_within_60_seconds(self):
- reservations = self.conn.get_all_instances([data['instance_id']])
+ reservations = self.conn.get_all_instances([self.data['instance_id']])
instance = reservations[0].instances[0]
# allow 60 seconds to exit pending with IP
for x in xrange(60):
@@ -207,7 +216,7 @@ class InstanceTests(UserSmokeTestCase):
def test_999_tearDown(self):
self.delete_key_pair(self.conn, TEST_KEY)
if self.data.has_key('instance_id'):
- self.conn.terminate_instances([data['instance_id']])
+ self.conn.terminate_instances([self.data['instance_id']])
class VolumeTests(UserSmokeTestCase):
@@ -319,8 +328,80 @@ class VolumeTests(UserSmokeTestCase):
self.conn.delete_key_pair(TEST_KEY)
+class SecurityGroupTests(UserSmokeTestCase):
+
+ def __public_instance_is_accessible(self):
+ id_url = "latest/meta-data/instance-id"
+ options = "-s --max-time 1"
+ command = "curl %s %s/%s" % (options, self.data['public_ip'], id_url)
+ instance_id = commands.getoutput(command).strip()
+ if not instance_id:
+ return False
+ if instance_id != self.data['instance_id']:
+ raise Exception("Wrong instance id")
+ return True
+
+ def test_001_can_create_security_group(self):
+ self.conn.create_security_group(TEST_GROUP, description='test')
+
+ groups = self.conn.get_all_security_groups()
+ self.assertTrue(TEST_GROUP in [group.name for group in groups])
+
+ def test_002_can_launch_instance_in_security_group(self):
+ self.create_key_pair(self.conn, TEST_KEY)
+ reservation = self.conn.run_instances(FLAGS.test_image,
+ key_name=TEST_KEY,
+ security_groups=[TEST_GROUP],
+ instance_type='m1.tiny')
+
+ self.data['instance_id'] = reservation.instances[0].id
+
+ def test_003_can_authorize_security_group_ingress(self):
+ self.assertTrue(self.conn.authorize_security_group(TEST_GROUP,
+ ip_protocol='tcp',
+ from_port=80,
+ to_port=80))
+
+ def test_004_can_access_instance_over_public_ip(self):
+ result = self.conn.allocate_address()
+ self.assertTrue(hasattr(result, 'public_ip'))
+ self.data['public_ip'] = result.public_ip
+
+ result = self.conn.associate_address(self.data['instance_id'],
+ self.data['public_ip'])
+ start_time = time.time()
+ while not self.__public_instance_is_accessible():
+ # 1 minute to launch
+ if time.time() - start_time > 60:
+ raise Exception("Timeout")
+ time.sleep(1)
+
+ def test_005_can_revoke_security_group_ingress(self):
+ self.assertTrue(self.conn.revoke_security_group(TEST_GROUP,
+ ip_protocol='tcp',
+ from_port=80,
+ to_port=80))
+ start_time = time.time()
+ while self.__public_instance_is_accessible():
+ # 1 minute to teardown
+ if time.time() - start_time > 60:
+ raise Exception("Timeout")
+ time.sleep(1)
+
+
+ def test_999_tearDown(self):
+ self.conn.delete_key_pair(TEST_KEY)
+ self.conn.delete_security_group(TEST_GROUP)
+ groups = self.conn.get_all_security_groups()
+ self.assertFalse(TEST_GROUP in [group.name for group in groups])
+ self.conn.terminate_instances([self.data['instance_id']])
+ self.assertTrue(self.conn.release_address(self.data['public_ip']))
+
+
if __name__ == "__main__":
suites = {'image': unittest.makeSuite(ImageTests),
'instance': unittest.makeSuite(InstanceTests),
- 'volume': unittest.makeSuite(VolumeTests)}
+ 'security_group': unittest.makeSuite(SecurityGroupTests),
+ 'volume': unittest.makeSuite(VolumeTests)
+ }
sys.exit(base.run_tests(suites))
diff --git a/tools/ajaxterm/README.txt b/tools/ajaxterm/README.txt
new file mode 100644
index 000000000..4b0ae99af
--- /dev/null
+++ b/tools/ajaxterm/README.txt
@@ -0,0 +1,120 @@
+= [http://antony.lesuisse.org/qweb/trac/wiki/AjaxTerm Ajaxterm] =
+
+Ajaxterm is a web based terminal. It was totally inspired and works almost
+exactly like http://anyterm.org/ except it's much easier to install (see
+comparison with anyterm below).
+
+Ajaxterm written in python (and some AJAX javascript for client side) and depends only on python2.3 or better.[[BR]]
+Ajaxterm is '''very simple to install''' on Linux, MacOS X, FreeBSD, Solaris, cygwin and any Unix that runs python2.3.[[BR]]
+Ajaxterm was written by Antony Lesuisse (email: al AT udev.org), License Public Domain.
+
+Use the [/qweb/forum/viewforum.php?id=2 Forum], if you have any question or remark.
+
+== News ==
+
+ * 2006-10-29: v0.10 allow space in login, cgi launch fix, redhat init
+ * 2006-07-12: v0.9 change uid, daemon fix (Daniel Fischer)
+ * 2006-07-04: v0.8 add login support to ssh (Sven Geggus), change max width to 256
+ * 2006-05-31: v0.7 minor fixes, daemon option
+ * 2006-05-23: v0.6 Applied debian and gentoo patches, renamed to Ajaxterm, default port 8022
+
+== Download and Install ==
+
+ * Release: [/qweb/files/Ajaxterm-0.10.tar.gz Ajaxterm-0.10.tar.gz]
+ * Browse src: [/qweb/trac/browser/trunk/ajaxterm/ ajaxterm/]
+
+To install Ajaxterm issue the following commands:
+{{{
+wget http://antony.lesuisse.org/qweb/files/Ajaxterm-0.10.tar.gz
+tar zxvf Ajaxterm-0.10.tar.gz
+cd Ajaxterm-0.10
+./ajaxterm.py
+}}}
+Then point your browser to this URL : http://localhost:8022/
+
+== Screenshot ==
+
+{{{
+#!html
+<center><img src="/qweb/trac/attachment/wiki/AjaxTerm/scr.png?format=raw" alt="ajaxterm screenshot" style=""/></center>
+}}}
+
+== Documentation and Caveats ==
+
+ * Ajaxterm only supports latin1; if you use Ubuntu or any LANG==en_US.UTF-8 distribution don't forget to "unset LANG".
+
+ * If run as root ajaxterm will run /bin/login, otherwise it will run ssh
+ localhost. To use another command use the -c option.
+
+ * By default Ajaxterm only listens on 127.0.0.1:8022. For remote access, it is
+ strongly recommended to use '''https SSL/TLS''', and that is simple to
+ configure if you use the apache web server using mod_proxy.[[BR]][[BR]]
+ Using ssl will also speed up ajaxterm (probably because of keepalive).[[BR]][[BR]]
+ Here is a configuration example:
+
+{{{
+ Listen 443
+ NameVirtualHost *:443
+
+ <VirtualHost *:443>
+ ServerName localhost
+ SSLEngine On
+ SSLCertificateKeyFile ssl/apache.pem
+ SSLCertificateFile ssl/apache.pem
+
+ ProxyRequests Off
+ <Proxy *>
+ Order deny,allow
+ Allow from all
+ </Proxy>
+ ProxyPass /ajaxterm/ http://localhost:8022/
+ ProxyPassReverse /ajaxterm/ http://localhost:8022/
+ </VirtualHost>
+}}}
+
+ * Using GET HTTP request seems to speed up ajaxterm, just click on GET in the
+ interface, but be warned that your keystrokes might be logged (by apache or
+ any proxy). I usually enable it after the login.
+
+ * Ajaxterm commandline usage:
+
+{{{
+usage: ajaxterm.py [options]
+
+options:
+ -h, --help show this help message and exit
+ -pPORT, --port=PORT Set the TCP port (default: 8022)
+ -cCMD, --command=CMD set the command (default: /bin/login or ssh localhost)
+ -l, --log log requests to stderr (default: quiet mode)
+ -d, --daemon run as daemon in the background
+ -PPIDFILE, --pidfile=PIDFILE
+ set the pidfile (default: /var/run/ajaxterm.pid)
+ -iINDEX_FILE, --index=INDEX_FILE
+ default index file (default: ajaxterm.html)
+ -uUID, --uid=UID Set the daemon's user id
+}}}
+
+ * Ajaxterm was first written as a demo for qweb (my web framework), but
+ actually doesn't use many features of qweb.
+
+ * Compared to anyterm:
+ * There are no partial updates, ajaxterm updates either all the screen or
+ nothing. That make the code simpler and I also think it's faster. HTTP
+ replies are always gzencoded. When used in 80x25 mode, almost all of
+ them are below the 1500 bytes (size of an ethernet frame) and we just
+ replace the screen with the reply (no javascript string handling).
+ * Ajaxterm polls the server for updates with an exponentially growing
+ timeout when the screen hasn't changed. The timeout is also reset as
+ soon as a key is pressed. Anyterm blocks on a pending request and uses a
+ parallel connection for keypresses. The anyterm approach is better
+ when there aren't any keypresses.
+
+ * Ajaxterm files are released in the Public Domain, (except [http://sarissa.sourceforge.net/doc/ sarissa*] which are LGPL).
+
+== TODO ==
+
+ * insert mode ESC [ 4 h
+ * change size x,y from gui (sending signal)
+ * vt102 graphic codepage
+ * use innerHTML or prototype instead of sarissa
+
diff --git a/tools/ajaxterm/ajaxterm.1 b/tools/ajaxterm/ajaxterm.1
new file mode 100644
index 000000000..46f2acb33
--- /dev/null
+++ b/tools/ajaxterm/ajaxterm.1
@@ -0,0 +1,35 @@
+.TH ajaxterm "1" "May 2006" "ajaxterm 0.5" "User commands"
+.SH NAME
+ajaxterm \- Web based terminal written in python
+
+.SH DESCRIPTION
+\fBajaxterm\fR is a web based terminal written in python and some AJAX
+javascript for client side.
+It can be used from almost any web browser and even works through firewalls.
+
+.SH USAGE
+\fBajaxterm.py\fR [options]
+
+.SH OPTIONS
+A summary of the options supported by \fBajaxterm\fR is included below.
+ \fB-h, --help\fR show this help message and exit
+ \fB-pPORT, --port=PORT\fR Set the TCP port (default: 8022)
+ \fB-cCMD, --command=CMD\fR set the command (default: /bin/login or ssh localhost)
+ \fB-l, --log\fR log requests to stderr (default: quiet mode)
+
+.SH AUTHOR
+Antony Lesuisse <al@udev.org>
+
+This manual page was written for the Debian system by
+Julien Valroff <julien@kirya.net> (but may be used by others).
+
+.SH "REPORTING BUGS"
+Report any bugs to the author: Antony Lesuisse <al@udev.org>
+
+.SH COPYRIGHT
+Copyright Antony Lesuisse <al@udev.org>
+
+.SH SEE ALSO
+- \fBajaxterm\fR wiki page: http://antony.lesuisse.org/qweb/trac/wiki/AjaxTerm
+.br
+- \fBajaxterm\fR forum: http://antony.lesuisse.org/qweb/forum/viewforum.php?id=2
diff --git a/tools/ajaxterm/ajaxterm.css b/tools/ajaxterm/ajaxterm.css
new file mode 100644
index 000000000..b9a5f8771
--- /dev/null
+++ b/tools/ajaxterm/ajaxterm.css
@@ -0,0 +1,64 @@
+pre.stat {
+ margin: 0px;
+ padding: 4px;
+ display: block;
+ font-family: monospace;
+ white-space: pre;
+ background-color: black;
+ border-top: 1px solid black;
+ color: white;
+}
+pre.stat span {
+ padding: 0px;
+}
+pre.stat .on {
+ background-color: #080;
+ font-weight: bold;
+ color: white;
+ cursor: pointer;
+}
+pre.stat .off {
+ background-color: #888;
+ font-weight: bold;
+ color: white;
+ cursor: pointer;
+}
+pre.term {
+ margin: 0px;
+ padding: 4px;
+ display: block;
+ font-family: monospace;
+ white-space: pre;
+ background-color: black;
+ border-top: 1px solid white;
+ color: #eee;
+}
+pre.term span.f0 { color: #000; }
+pre.term span.f1 { color: #b00; }
+pre.term span.f2 { color: #0b0; }
+pre.term span.f3 { color: #bb0; }
+pre.term span.f4 { color: #00b; }
+pre.term span.f5 { color: #b0b; }
+pre.term span.f6 { color: #0bb; }
+pre.term span.f7 { color: #bbb; }
+pre.term span.f8 { color: #666; }
+pre.term span.f9 { color: #f00; }
+pre.term span.f10 { color: #0f0; }
+pre.term span.f11 { color: #ff0; }
+pre.term span.f12 { color: #00f; }
+pre.term span.f13 { color: #f0f; }
+pre.term span.f14 { color: #0ff; }
+pre.term span.f15 { color: #fff; }
+pre.term span.b0 { background-color: #000; }
+pre.term span.b1 { background-color: #b00; }
+pre.term span.b2 { background-color: #0b0; }
+pre.term span.b3 { background-color: #bb0; }
+pre.term span.b4 { background-color: #00b; }
+pre.term span.b5 { background-color: #b0b; }
+pre.term span.b6 { background-color: #0bb; }
+pre.term span.b7 { background-color: #bbb; }
+
+body { background-color: #888; }
+#term {
+ float: left;
+}
diff --git a/tools/ajaxterm/ajaxterm.html b/tools/ajaxterm/ajaxterm.html
new file mode 100644
index 000000000..7fdef5e94
--- /dev/null
+++ b/tools/ajaxterm/ajaxterm.html
@@ -0,0 +1,25 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html>
+<head>
+ <title>Ajaxterm</title>
+ <meta http-equiv="content-type" content="text/html; charset=UTF-8"/>
+ <link rel="stylesheet" type="text/css" href="ajaxterm.css"/>
+ <script type="text/javascript" src="sarissa.js"></script>
+ <script type="text/javascript" src="sarissa_dhtml.js"></script>
+ <script type="text/javascript" src="ajaxterm.js"></script>
+ <script type="text/javascript">
+ /*
+ ajaxterm.py creates a random session_id to demultiplex multiple connections,
+ and to add a layer of security - in its shipping form, ajaxterm accepted any session_id
+ and was susceptible to an easy exploit
+ */
+ SESSION_ID = '$session_id';
+ window.onload=function() {
+ t=ajaxterm.Terminal("term",80,25);
+ };
+ </script>
+</head>
+<body>
+<div id="term"></div>
+</body>
+</html>
diff --git a/tools/ajaxterm/ajaxterm.js b/tools/ajaxterm/ajaxterm.js
new file mode 100644
index 000000000..32b401930
--- /dev/null
+++ b/tools/ajaxterm/ajaxterm.js
@@ -0,0 +1,279 @@
+ajaxterm={};
+ajaxterm.Terminal_ctor=function(id,width,height) {
+ var ie=0;
+ if(window.ActiveXObject)
+ ie=1;
+ var sid=""+SESSION_ID;
+ var query0="s="+sid+"&w="+width+"&h="+height;
+ var query1=query0+"&c=1&k=";
+ var buf="";
+ var timeout;
+ var error_timeout;
+ var keybuf=[];
+ var sending=0;
+ var rmax=1;
+
+ var div=document.getElementById(id);
+ var dstat=document.createElement('pre');
+ var sled=document.createElement('span');
+ var opt_get=document.createElement('a');
+ var opt_color=document.createElement('a');
+ var opt_paste=document.createElement('a');
+ var sdebug=document.createElement('span');
+ var dterm=document.createElement('div');
+
+ function debug(s) {
+ sdebug.innerHTML=s;
+ }
+ function error() {
+ sled.className='off';
+ debug("Connection lost timeout ts:"+((new Date).getTime()));
+ }
+ function opt_add(opt,name) {
+ opt.className='off';
+ opt.innerHTML=' '+name+' ';
+ dstat.appendChild(opt);
+ dstat.appendChild(document.createTextNode(' '));
+ }
+ function do_get(event) {
+ opt_get.className=(opt_get.className=='off')?'on':'off';
+ debug('GET '+opt_get.className);
+ }
+ function do_color(event) {
+ var o=opt_color.className=(opt_color.className=='off')?'on':'off';
+ if(o=='on')
+ query1=query0+"&c=1&k=";
+ else
+ query1=query0+"&k=";
+ debug('Color '+opt_color.className);
+ }
+ function mozilla_clipboard() {
+ // mozilla sucks
+ try {
+ netscape.security.PrivilegeManager.enablePrivilege("UniversalXPConnect");
+ } catch (err) {
+ debug('Access denied, <a href="http://kb.mozillazine.org/Granting_JavaScript_access_to_the_clipboard" target="_blank">more info</a>');
+ return undefined;
+ }
+ var clip = Components.classes["@mozilla.org/widget/clipboard;1"].createInstance(Components.interfaces.nsIClipboard);
+ var trans = Components.classes["@mozilla.org/widget/transferable;1"].createInstance(Components.interfaces.nsITransferable);
+ if (!clip || !trans) {
+ return undefined;
+ }
+ trans.addDataFlavor("text/unicode");
+ clip.getData(trans,clip.kGlobalClipboard);
+ var str=new Object();
+ var strLength=new Object();
+ try {
+ trans.getTransferData("text/unicode",str,strLength);
+ } catch(err) {
+ return "";
+ }
+ if (str) {
+ str=str.value.QueryInterface(Components.interfaces.nsISupportsString);
+ }
+ if (str) {
+ return str.data.substring(0,strLength.value / 2);
+ } else {
+ return "";
+ }
+ }
+ function do_paste(event) {
+ var p=undefined;
+ if (window.clipboardData) {
+ p=window.clipboardData.getData("Text");
+ } else if(window.netscape) {
+ p=mozilla_clipboard();
+ }
+ if (p) {
+ debug('Pasted');
+ queue(encodeURIComponent(p));
+ } else {
+ }
+ }
+ function update() {
+// debug("ts: "+((new Date).getTime())+" rmax:"+rmax);
+ if(sending==0) {
+ sending=1;
+ sled.className='on';
+ var r=new XMLHttpRequest();
+ var send="";
+ while(keybuf.length>0) {
+ send+=keybuf.pop();
+ }
+ var query=query1+send;
+ if(opt_get.className=='on') {
+ r.open("GET","u?"+query,true);
+ if(ie) {
+ r.setRequestHeader("If-Modified-Since", "Sat, 1 Jan 2000 00:00:00 GMT");
+ }
+ } else {
+ r.open("POST","u",true);
+ }
+ r.setRequestHeader('Content-Type','application/x-www-form-urlencoded');
+ r.onreadystatechange = function () {
+// debug("xhr:"+((new Date).getTime())+" state:"+r.readyState+" status:"+r.status+" statusText:"+r.statusText);
+ if (r.readyState==4) {
+ if(r.status==200) {
+ window.clearTimeout(error_timeout);
+ de=r.responseXML.documentElement;
+ if(de.tagName=="pre") {
+ if(ie) {
+ Sarissa.updateContentFromNode(de, dterm);
+ } else {
+ Sarissa.updateContentFromNode(de, dterm);
+// old=div.firstChild;
+// div.replaceChild(de,old);
+ }
+ rmax=100;
+ } else {
+ rmax*=2;
+ if(rmax>2000)
+ rmax=2000;
+ }
+ sending=0;
+ sled.className='off';
+ timeout=window.setTimeout(update,rmax);
+ } else {
+ debug("Connection error status:"+r.status);
+ }
+ }
+ }
+ error_timeout=window.setTimeout(error,5000);
+ if(opt_get.className=='on') {
+ r.send(null);
+ } else {
+ r.send(query);
+ }
+ }
+ }
+ function queue(s) {
+ keybuf.unshift(s);
+ if(sending==0) {
+ window.clearTimeout(timeout);
+ timeout=window.setTimeout(update,1);
+ }
+ }
+ function keypress(ev) {
+ if (!ev) var ev=window.event;
+// s="kp keyCode="+ev.keyCode+" which="+ev.which+" shiftKey="+ev.shiftKey+" ctrlKey="+ev.ctrlKey+" altKey="+ev.altKey;
+// debug(s);
+// return false;
+// else { if (!ev.ctrlKey || ev.keyCode==17) { return; }
+ var kc;
+ var k="";
+ if (ev.keyCode)
+ kc=ev.keyCode;
+ if (ev.which)
+ kc=ev.which;
+ if (ev.altKey) {
+ if (kc>=65 && kc<=90)
+ kc+=32;
+ if (kc>=97 && kc<=122) {
+ k=String.fromCharCode(27)+String.fromCharCode(kc);
+ }
+ } else if (ev.ctrlKey) {
+ if (kc>=65 && kc<=90) k=String.fromCharCode(kc-64); // Ctrl-A..Z
+ else if (kc>=97 && kc<=122) k=String.fromCharCode(kc-96); // Ctrl-A..Z
+ else if (kc==54) k=String.fromCharCode(30); // Ctrl-^
+ else if (kc==109) k=String.fromCharCode(31); // Ctrl-_
+ else if (kc==219) k=String.fromCharCode(27); // Ctrl-[
+ else if (kc==220) k=String.fromCharCode(28); // Ctrl-\
+ else if (kc==221) k=String.fromCharCode(29); // Ctrl-]
+ else if (kc==219) k=String.fromCharCode(29); // Ctrl-]
+ else if (kc==219) k=String.fromCharCode(0); // Ctrl-@
+ } else if (ev.which==0) {
+ if (kc==9) k=String.fromCharCode(9); // Tab
+ else if (kc==8) k=String.fromCharCode(127); // Backspace
+ else if (kc==27) k=String.fromCharCode(27); // Escape
+ else {
+ if (kc==33) k="[5~"; // PgUp
+ else if (kc==34) k="[6~"; // PgDn
+ else if (kc==35) k="[4~"; // End
+ else if (kc==36) k="[1~"; // Home
+ else if (kc==37) k="[D"; // Left
+ else if (kc==38) k="[A"; // Up
+ else if (kc==39) k="[C"; // Right
+ else if (kc==40) k="[B"; // Down
+ else if (kc==45) k="[2~"; // Ins
+ else if (kc==46) k="[3~"; // Del
+ else if (kc==112) k="[[A"; // F1
+ else if (kc==113) k="[[B"; // F2
+ else if (kc==114) k="[[C"; // F3
+ else if (kc==115) k="[[D"; // F4
+ else if (kc==116) k="[[E"; // F5
+ else if (kc==117) k="[17~"; // F6
+ else if (kc==118) k="[18~"; // F7
+ else if (kc==119) k="[19~"; // F8
+ else if (kc==120) k="[20~"; // F9
+ else if (kc==121) k="[21~"; // F10
+ else if (kc==122) k="[23~"; // F11
+ else if (kc==123) k="[24~"; // F12
+ if (k.length) {
+ k=String.fromCharCode(27)+k;
+ }
+ }
+ } else {
+ if (kc==8)
+ k=String.fromCharCode(127); // Backspace
+ else
+ k=String.fromCharCode(kc);
+ }
+ if(k.length) {
+// queue(encodeURIComponent(k));
+ if(k=="+") {
+ queue("%2B");
+ } else {
+ queue(escape(k));
+ }
+ }
+ ev.cancelBubble=true;
+ if (ev.stopPropagation) ev.stopPropagation();
+ if (ev.preventDefault) ev.preventDefault();
+ return false;
+ }
+ function keydown(ev) {
+ if (!ev) var ev=window.event;
+ if (ie) {
+// s="kd keyCode="+ev.keyCode+" which="+ev.which+" shiftKey="+ev.shiftKey+" ctrlKey="+ev.ctrlKey+" altKey="+ev.altKey;
+// debug(s);
+ o={9:1,8:1,27:1,33:1,34:1,35:1,36:1,37:1,38:1,39:1,40:1,45:1,46:1,112:1,
+ 113:1,114:1,115:1,116:1,117:1,118:1,119:1,120:1,121:1,122:1,123:1};
+ if (o[ev.keyCode] || ev.ctrlKey || ev.altKey) {
+ ev.which=0;
+ return keypress(ev);
+ }
+ }
+ }
+ function init() {
+ sled.appendChild(document.createTextNode('\xb7'));
+ sled.className='off';
+ dstat.appendChild(sled);
+ dstat.appendChild(document.createTextNode(' '));
+ opt_add(opt_color,'Colors');
+ opt_color.className='on';
+ opt_add(opt_get,'GET');
+ opt_add(opt_paste,'Paste');
+ dstat.appendChild(sdebug);
+ dstat.className='stat';
+ div.appendChild(dstat);
+ div.appendChild(dterm);
+ if(opt_color.addEventListener) {
+ opt_get.addEventListener('click',do_get,true);
+ opt_color.addEventListener('click',do_color,true);
+ opt_paste.addEventListener('click',do_paste,true);
+ } else {
+ opt_get.attachEvent("onclick", do_get);
+ opt_color.attachEvent("onclick", do_color);
+ opt_paste.attachEvent("onclick", do_paste);
+ }
+ document.onkeypress=keypress;
+ document.onkeydown=keydown;
+ timeout=window.setTimeout(update,100);
+ }
+ init();
+}
+ajaxterm.Terminal=function(id,width,height) {
+ return new this.Terminal_ctor(id,width,height);
+}
+
diff --git a/tools/ajaxterm/ajaxterm.py b/tools/ajaxterm/ajaxterm.py
new file mode 100755
index 000000000..bf27b264a
--- /dev/null
+++ b/tools/ajaxterm/ajaxterm.py
@@ -0,0 +1,586 @@
+#!/usr/bin/env python
+
+""" Ajaxterm """
+
+import array,cgi,fcntl,glob,mimetypes,optparse,os,pty,random,re,signal,select,sys,threading,time,termios,struct,pwd
+
+os.chdir(os.path.normpath(os.path.dirname(__file__)))
+# Optional: Add QWeb in sys path
+sys.path[0:0]=glob.glob('../../python')
+
+import qweb
+import string, subprocess, uuid
+
+global g_server
+TIMEOUT=300
+
+class Terminal:
+ def __init__(self,width=80,height=24):
+ self.width=width
+ self.height=height
+ self.init()
+ self.reset()
+ def init(self):
+ self.esc_seq={
+ "\x00": None,
+ "\x05": self.esc_da,
+ "\x07": None,
+ "\x08": self.esc_0x08,
+ "\x09": self.esc_0x09,
+ "\x0a": self.esc_0x0a,
+ "\x0b": self.esc_0x0a,
+ "\x0c": self.esc_0x0a,
+ "\x0d": self.esc_0x0d,
+ "\x0e": None,
+ "\x0f": None,
+ "\x1b#8": None,
+ "\x1b=": None,
+ "\x1b>": None,
+ "\x1b(0": None,
+ "\x1b(A": None,
+ "\x1b(B": None,
+ "\x1b[c": self.esc_da,
+ "\x1b[0c": self.esc_da,
+ "\x1b]R": None,
+ "\x1b7": self.esc_save,
+ "\x1b8": self.esc_restore,
+ "\x1bD": None,
+ "\x1bE": None,
+ "\x1bH": None,
+ "\x1bM": self.esc_ri,
+ "\x1bN": None,
+ "\x1bO": None,
+ "\x1bZ": self.esc_da,
+ "\x1ba": None,
+ "\x1bc": self.reset,
+ "\x1bn": None,
+ "\x1bo": None,
+ }
+ for k,v in self.esc_seq.items():
+ if v==None:
+ self.esc_seq[k]=self.esc_ignore
+ # regex
+ d={
+ r'\[\??([0-9;]*)([@ABCDEFGHJKLMPXacdefghlmnqrstu`])' : self.csi_dispatch,
+ r'\]([^\x07]+)\x07' : self.esc_ignore,
+ }
+ self.esc_re=[]
+ for k,v in d.items():
+ self.esc_re.append((re.compile('\x1b'+k),v))
+ # define csi sequences
+ self.csi_seq={
+ '@': (self.csi_at,[1]),
+ '`': (self.csi_G,[1]),
+ 'J': (self.csi_J,[0]),
+ 'K': (self.csi_K,[0]),
+ }
+ for i in [i[4] for i in dir(self) if i.startswith('csi_') and len(i)==5]:
+ if not self.csi_seq.has_key(i):
+ self.csi_seq[i]=(getattr(self,'csi_'+i),[1])
+ # Init 0-256 to latin1 and html translation table
+ self.trl1=""
+ for i in range(256):
+ if i<32:
+ self.trl1+=" "
+ elif i<127 or i>160:
+ self.trl1+=chr(i)
+ else:
+ self.trl1+="?"
+ self.trhtml=""
+ for i in range(256):
+ if i==0x0a or (i>32 and i<127) or i>160:
+ self.trhtml+=chr(i)
+ elif i<=32:
+ self.trhtml+="\xa0"
+ else:
+ self.trhtml+="?"
+ def reset(self,s=""):
+ self.scr=array.array('i',[0x000700]*(self.width*self.height))
+ self.st=0
+ self.sb=self.height-1
+ self.cx_bak=self.cx=0
+ self.cy_bak=self.cy=0
+ self.cl=0
+ self.sgr=0x000700
+ self.buf=""
+ self.outbuf=""
+ self.last_html=""
+ def peek(self,y1,x1,y2,x2):
+ return self.scr[self.width*y1+x1:self.width*y2+x2]
+ def poke(self,y,x,s):
+ pos=self.width*y+x
+ self.scr[pos:pos+len(s)]=s
+ def zero(self,y1,x1,y2,x2):
+ w=self.width*(y2-y1)+x2-x1+1
+ z=array.array('i',[0x000700]*w)
+ self.scr[self.width*y1+x1:self.width*y2+x2+1]=z
+ def scroll_up(self,y1,y2):
+ self.poke(y1,0,self.peek(y1+1,0,y2,self.width))
+ self.zero(y2,0,y2,self.width-1)
+ def scroll_down(self,y1,y2):
+ self.poke(y1+1,0,self.peek(y1,0,y2-1,self.width))
+ self.zero(y1,0,y1,self.width-1)
+ def scroll_right(self,y,x):
+ self.poke(y,x+1,self.peek(y,x,y,self.width))
+ self.zero(y,x,y,x)
+ def cursor_down(self):
+ if self.cy>=self.st and self.cy<=self.sb:
+ self.cl=0
+ q,r=divmod(self.cy+1,self.sb+1)
+ if q:
+ self.scroll_up(self.st,self.sb)
+ self.cy=self.sb
+ else:
+ self.cy=r
+ def cursor_right(self):
+ q,r=divmod(self.cx+1,self.width)
+ if q:
+ self.cl=1
+ else:
+ self.cx=r
+ def echo(self,c):
+ if self.cl:
+ self.cursor_down()
+ self.cx=0
+ self.scr[(self.cy*self.width)+self.cx]=self.sgr|ord(c)
+ self.cursor_right()
+ def esc_0x08(self,s):
+ self.cx=max(0,self.cx-1)
+ def esc_0x09(self,s):
+ x=self.cx+8
+ q,r=divmod(x,8)
+ self.cx=(q*8)%self.width
+ def esc_0x0a(self,s):
+ self.cursor_down()
+ def esc_0x0d(self,s):
+ self.cl=0
+ self.cx=0
+ def esc_save(self,s):
+ self.cx_bak=self.cx
+ self.cy_bak=self.cy
+ def esc_restore(self,s):
+ self.cx=self.cx_bak
+ self.cy=self.cy_bak
+ self.cl=0
+ def esc_da(self,s):
+ self.outbuf="\x1b[?6c"
+ def esc_ri(self,s):
+ self.cy=max(self.st,self.cy-1)
+ if self.cy==self.st:
+ self.scroll_down(self.st,self.sb)
+ def esc_ignore(self,*s):
+ pass
+# print "term:ignore: %s"%repr(s)
+ def csi_dispatch(self,seq,mo):
+ # CSI sequences
+ s=mo.group(1)
+ c=mo.group(2)
+ f=self.csi_seq.get(c,None)
+ if f:
+ try:
+ l=[min(int(i),1024) for i in s.split(';') if len(i)<4]
+ except ValueError:
+ l=[]
+ if len(l)==0:
+ l=f[1]
+ f[0](l)
+# else:
+# print 'csi ignore',c,l
+ def csi_at(self,l):
+ for i in range(l[0]):
+ self.scroll_right(self.cy,self.cx)
+ def csi_A(self,l):
+ self.cy=max(self.st,self.cy-l[0])
+ def csi_B(self,l):
+ self.cy=min(self.sb,self.cy+l[0])
+ def csi_C(self,l):
+ self.cx=min(self.width-1,self.cx+l[0])
+ self.cl=0
+ def csi_D(self,l):
+ self.cx=max(0,self.cx-l[0])
+ self.cl=0
+ def csi_E(self,l):
+ self.csi_B(l)
+ self.cx=0
+ self.cl=0
+ def csi_F(self,l):
+ self.csi_A(l)
+ self.cx=0
+ self.cl=0
+ def csi_G(self,l):
+ self.cx=min(self.width,l[0])-1
+ def csi_H(self,l):
+ if len(l)<2: l=[1,1]
+ self.cx=min(self.width,l[1])-1
+ self.cy=min(self.height,l[0])-1
+ self.cl=0
+ def csi_J(self,l):
+ if l[0]==0:
+ self.zero(self.cy,self.cx,self.height-1,self.width-1)
+ elif l[0]==1:
+ self.zero(0,0,self.cy,self.cx)
+ elif l[0]==2:
+ self.zero(0,0,self.height-1,self.width-1)
+ def csi_K(self,l):
+ if l[0]==0:
+ self.zero(self.cy,self.cx,self.cy,self.width-1)
+ elif l[0]==1:
+ self.zero(self.cy,0,self.cy,self.cx)
+ elif l[0]==2:
+ self.zero(self.cy,0,self.cy,self.width-1)
+ def csi_L(self,l):
+ for i in range(l[0]):
+ if self.cy<self.sb:
+ self.scroll_down(self.cy,self.sb)
+ def csi_M(self,l):
+ if self.cy>=self.st and self.cy<=self.sb:
+ for i in range(l[0]):
+ self.scroll_up(self.cy,self.sb)
+ def csi_P(self,l):
+ w,cx,cy=self.width,self.cx,self.cy
+ end=self.peek(cy,cx,cy,w)
+ self.csi_K([0])
+ self.poke(cy,cx,end[l[0]:])
+ def csi_X(self,l):
+ self.zero(self.cy,self.cx,self.cy,self.cx+l[0])
+ def csi_a(self,l):
+ self.csi_C(l)
+ def csi_c(self,l):
+ #'\x1b[?0c' 0-8 cursor size
+ pass
+ def csi_d(self,l):
+ self.cy=min(self.height,l[0])-1
+ def csi_e(self,l):
+ self.csi_B(l)
+ def csi_f(self,l):
+ self.csi_H(l)
+ def csi_h(self,l):
+ if l[0]==4:
+ pass
+# print "insert on"
+ def csi_l(self,l):
+ if l[0]==4:
+ pass
+# print "insert off"
+ def csi_m(self,l):
+ for i in l:
+ if i==0 or i==39 or i==49 or i==27:
+ self.sgr=0x000700
+ elif i==1:
+ self.sgr=(self.sgr|0x000800)
+ elif i==7:
+ self.sgr=0x070000
+ elif i>=30 and i<=37:
+ c=i-30
+ self.sgr=(self.sgr&0xff08ff)|(c<<8)
+ elif i>=40 and i<=47:
+ c=i-40
+ self.sgr=(self.sgr&0x00ffff)|(c<<16)
+# else:
+# print "CSI sgr ignore",l,i
+# print 'sgr: %r %x'%(l,self.sgr)
+ def csi_r(self,l):
+ if len(l)<2: l=[0,self.height]
+ self.st=min(self.height-1,l[0]-1)
+ self.sb=min(self.height-1,l[1]-1)
+ self.sb=max(self.st,self.sb)
+ def csi_s(self,l):
+ self.esc_save(0)
+ def csi_u(self,l):
+ self.esc_restore(0)
+ def escape(self):
+ e=self.buf
+ if len(e)>32:
+# print "error %r"%e
+ self.buf=""
+ elif e in self.esc_seq:
+ self.esc_seq[e](e)
+ self.buf=""
+ else:
+ for r,f in self.esc_re:
+ mo=r.match(e)
+ if mo:
+ f(e,mo)
+ self.buf=""
+ break
+# if self.buf=='': print "ESC %r\n"%e
+ def write(self,s):
+ for i in s:
+ if len(self.buf) or (i in self.esc_seq):
+ self.buf+=i
+ self.escape()
+ elif i == '\x1b':
+ self.buf+=i
+ else:
+ self.echo(i)
+ def read(self):
+ b=self.outbuf
+ self.outbuf=""
+ return b
+ def dump(self):
+ r=''
+ for i in self.scr:
+ r+=chr(i&255)
+ return r
+ def dumplatin1(self):
+ return self.dump().translate(self.trl1)
+ def dumphtml(self,color=1):
+ h=self.height
+ w=self.width
+ r=""
+ span=""
+ span_bg,span_fg=-1,-1
+ for i in range(h*w):
+ q,c=divmod(self.scr[i],256)
+ if color:
+ bg,fg=divmod(q,256)
+ else:
+ bg,fg=0,7
+ if i==self.cy*w+self.cx:
+ bg,fg=1,7
+ if (bg!=span_bg or fg!=span_fg or i==h*w-1):
+ if len(span):
+ r+='<span class="f%d b%d">%s</span>'%(span_fg,span_bg,cgi.escape(span.translate(self.trhtml)))
+ span=""
+ span_bg,span_fg=bg,fg
+ span+=chr(c)
+ if i%w==w-1:
+ span+='\n'
+ r='<?xml version="1.0" encoding="ISO-8859-1"?><pre class="term">%s</pre>'%r
+ if self.last_html==r:
+ return '<?xml version="1.0"?><idem></idem>'
+ else:
+ self.last_html=r
+# print self
+ return r
+ def __repr__(self):
+ d=self.dumplatin1()
+ r=""
+ for i in range(self.height):
+ r+="|%s|\n"%d[self.width*i:self.width*(i+1)]
+ return r
+
+class SynchronizedMethod:
+ def __init__(self,lock,orig):
+ self.lock=lock
+ self.orig=orig
+ def __call__(self,*l):
+ self.lock.acquire()
+ r=self.orig(*l)
+ self.lock.release()
+ return r
+
+class Multiplex:
+ def __init__(self,cmd=None):
+ signal.signal(signal.SIGCHLD, signal.SIG_IGN)
+ self.cmd=cmd
+ self.proc={}
+ self.lock=threading.RLock()
+ self.thread=threading.Thread(target=self.loop)
+ self.alive=1
+ self.lastActivity=time.time()
+ # synchronize methods
+ for name in ['create','fds','proc_read','proc_write','dump','die','run']:
+ orig=getattr(self,name)
+ setattr(self,name,SynchronizedMethod(self.lock,orig))
+ self.thread.start()
+ def create(self,w=80,h=25):
+ pid,fd=pty.fork()
+ if pid==0:
+ try:
+ fdl=[int(i) for i in os.listdir('/proc/self/fd')]
+ except OSError:
+ fdl=range(256)
+ for i in [i for i in fdl if i>2]:
+ try:
+ os.close(i)
+ except OSError:
+ pass
+ if self.cmd:
+ cmd=['/bin/sh','-c',self.cmd]
+ elif os.getuid()==0:
+ cmd=['/bin/login']
+ else:
+ sys.stdout.write("Login: ")
+ login=sys.stdin.readline().strip()
+ if re.match('^[0-9A-Za-z-_. ]+$',login):
+ cmd=['ssh']
+ cmd+=['-oPreferredAuthentications=keyboard-interactive,password']
+ cmd+=['-oNoHostAuthenticationForLocalhost=yes']
+ cmd+=['-oLogLevel=FATAL']
+ cmd+=['-F/dev/null','-l',login,'localhost']
+ else:
+ os._exit(0)
+ env={}
+ env["COLUMNS"]=str(w)
+ env["LINES"]=str(h)
+ env["TERM"]="linux"
+ env["PATH"]=os.environ['PATH']
+ os.execvpe(cmd[0],cmd,env)
+ else:
+ fcntl.fcntl(fd, fcntl.F_SETFL, os.O_NONBLOCK)
+ # python bug http://python.org/sf/1112949 on amd64
+ fcntl.ioctl(fd, struct.unpack('i',struct.pack('I',termios.TIOCSWINSZ))[0], struct.pack("HHHH",h,w,0,0))
+ self.proc[fd]={'pid':pid,'term':Terminal(w,h),'buf':'','time':time.time()}
+ return fd
+ def die(self):
+ self.alive=0
+ def run(self):
+ return self.alive
+ def fds(self):
+ return self.proc.keys()
+ def proc_kill(self,fd):
+ if fd in self.proc:
+ self.proc[fd]['time']=0
+ t=time.time()
+ for i in self.proc.keys():
+ t0=self.proc[i]['time']
+ if (t-t0)>TIMEOUT:
+ try:
+ os.close(i)
+ os.kill(self.proc[i]['pid'],signal.SIGTERM)
+ except (IOError,OSError):
+ pass
+ del self.proc[i]
+ def proc_read(self,fd):
+ try:
+ t=self.proc[fd]['term']
+ t.write(os.read(fd,65536))
+ reply=t.read()
+ if reply:
+ os.write(fd,reply)
+ self.proc[fd]['time']=time.time()
+ except (KeyError,IOError,OSError):
+ self.proc_kill(fd)
+ def proc_write(self,fd,s):
+ try:
+ os.write(fd,s)
+ except (IOError,OSError):
+ self.proc_kill(fd)
+ def dump(self,fd,color=1):
+ try:
+ return self.proc[fd]['term'].dumphtml(color)
+ except KeyError:
+ return False
+ def loop(self):
+ while self.run():
+ fds=self.fds()
+ i,o,e=select.select(fds, [], [], 1.0)
+ if time.time() - self.lastActivity > TIMEOUT:
+ global g_server
+ g_server.shutdown()
+ for fd in i:
+ self.proc_read(fd)
+ if len(i):
+ time.sleep(0.002)
+ for i in self.proc.keys():
+ try:
+ os.close(i)
+ os.kill(self.proc[i]['pid'],signal.SIGTERM)
+ except (IOError,OSError):
+ pass
+
+class AjaxTerm:
+ def __init__(self,cmd=None,index_file='ajaxterm.html',token=None):
+ self.files={}
+ self.token=token
+ for i in ['css','html','js']:
+ for j in glob.glob('*.%s'%i):
+ self.files[j]=file(j).read()
+ self.files['index']=file(index_file).read()
+ self.mime = mimetypes.types_map.copy()
+ self.mime['.html']= 'text/html; charset=UTF-8'
+ self.multi = Multiplex(cmd)
+ self.session = {}
+ def __call__(self, environ, start_response):
+ req = qweb.QWebRequest(environ, start_response,session=None)
+ if req.PATH_INFO.endswith('/u'):
+ s=req.REQUEST["s"]
+ k=req.REQUEST["k"]
+ c=req.REQUEST["c"]
+ w=req.REQUEST.int("w")
+ h=req.REQUEST.int("h")
+ if s in self.session:
+ term=self.session[s]
+ else:
+ raise Exception('Not Authorized')
+ # The original code below was insecure, because it allowed unauthorized sessions to be created
+ # if not (w>2 and w<256 and h>2 and h<100):
+ # w,h=80,25
+ # term=self.session[s]=self.multi.create(w,h)
+ if k:
+ self.multi.proc_write(term,k)
+ time.sleep(0.002)
+ self.multi.lastActivity = time.time();
+ dump=self.multi.dump(term,c)
+ req.response_headers['Content-Type']='text/xml'
+ if isinstance(dump,str):
+ req.write(dump)
+ req.response_gzencode=1
+ else:
+ del self.session[s]
+ req.write('<?xml version="1.0"?><idem></idem>')
+# print "sessions %r"%self.session
+ else:
+ n=os.path.basename(req.PATH_INFO)
+ if n in self.files:
+ req.response_headers['Content-Type'] = self.mime.get(os.path.splitext(n)[1].lower(), 'application/octet-stream')
+ req.write(self.files[n])
+ elif req.REQUEST['token'] == self.token:
+ req.response_headers['Content-Type'] = 'text/html; charset=UTF-8'
+ session_id = str(uuid.uuid4())
+ req.write(string.Template(self.files['index']).substitute(session_id=session_id))
+ term=self.session[session_id]=self.multi.create(80,25)
+ else:
+ raise Exception("Not Authorized")
+ return req
+
+def main():
+ parser = optparse.OptionParser()
+ parser.add_option("-p", "--port", dest="port", default="8022", help="Set the TCP port (default: 8022)")
+ parser.add_option("-c", "--command", dest="cmd", default=None,help="set the command (default: /bin/login or ssh 0.0.0.0)")
+ parser.add_option("-l", "--log", action="store_true", dest="log",default=0,help="log requests to stderr (default: quiet mode)")
+ parser.add_option("-d", "--daemon", action="store_true", dest="daemon", default=0, help="run as daemon in the background")
+ parser.add_option("-P", "--pidfile",dest="pidfile",default="/var/run/ajaxterm.pid",help="set the pidfile (default: /var/run/ajaxterm.pid)")
+ parser.add_option("-i", "--index", dest="index_file", default="ajaxterm.html",help="default index file (default: ajaxterm.html)")
+ parser.add_option("-u", "--uid", dest="uid", help="Set the daemon's user id")
+ parser.add_option("-t", "--token", dest="token", help="Set authorization token")
+ (o, a) = parser.parse_args()
+ if o.daemon:
+ pid=os.fork()
+ if pid == 0:
+ #os.setsid() ?
+ os.setpgrp()
+ nullin = file('/dev/null', 'r')
+ nullout = file('/dev/null', 'w')
+ os.dup2(nullin.fileno(), sys.stdin.fileno())
+ os.dup2(nullout.fileno(), sys.stdout.fileno())
+ os.dup2(nullout.fileno(), sys.stderr.fileno())
+ if os.getuid()==0 and o.uid:
+ try:
+ os.setuid(int(o.uid))
+ except:
+ os.setuid(pwd.getpwnam(o.uid).pw_uid)
+ else:
+ try:
+ file(o.pidfile,'w+').write(str(pid)+'\n')
+ except:
+ pass
+ print 'AjaxTerm at http://0.0.0.0:%s/ pid: %d' % (o.port,pid)
+ sys.exit(0)
+ else:
+ print 'AjaxTerm at http://0.0.0.0:%s/' % o.port
+ at=AjaxTerm(o.cmd,o.index_file,o.token)
+# f=lambda:os.system('firefox http://localhost:%s/&'%o.port)
+# qweb.qweb_wsgi_autorun(at,ip='localhost',port=int(o.port),threaded=0,log=o.log,callback_ready=None)
+ try:
+ global g_server
+ g_server = qweb.QWebWSGIServer(at,ip='0.0.0.0',port=int(o.port),threaded=0,log=o.log)
+ g_server.serve_forever()
+ except KeyboardInterrupt,e:
+ sys.excepthook(*sys.exc_info())
+ at.multi.die()
+
+if __name__ == '__main__':
+ main()
+
diff --git a/tools/ajaxterm/configure b/tools/ajaxterm/configure
new file mode 100755
index 000000000..45391f484
--- /dev/null
+++ b/tools/ajaxterm/configure
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+
+import optparse,os
+
+parser = optparse.OptionParser()
+parser.add_option("", "--prefix", dest="prefix",default="/usr/local",help="installation prefix (default: /usr/local)")
+parser.add_option("", "--confdir", dest="confdir", default="/etc",help="configuration files directory prefix (default: /etc)")
+parser.add_option("", "--port", dest="port", default="8022", help="set the listening TCP port (default: 8022)")
+parser.add_option("", "--command", dest="cmd", default=None,help="set the command (default: /bin/login or ssh localhost)")
+(o, a) = parser.parse_args()
+
+print "Configuring prefix=",o.prefix," port=",o.port
+
+etc=o.confdir
+port=o.port
+cmd=o.cmd
+bin=os.path.join(o.prefix,"bin")
+lib=os.path.join(o.prefix,"share/ajaxterm")
+man=os.path.join(o.prefix,"share/man/man1")
+
+file("ajaxterm.bin","w").write(file("configure.ajaxterm.bin").read()%locals())
+file("Makefile","w").write(file("configure.makefile").read()%locals())
+
+if os.path.isfile("/etc/gentoo-release"):
+ file("ajaxterm.initd","w").write(file("configure.initd.gentoo").read()%locals())
+elif os.path.isfile("/etc/fedora-release") or os.path.isfile("/etc/redhat-release"):
+ file("ajaxterm.initd","w").write(file("configure.initd.redhat").read()%locals())
+else:
+ file("ajaxterm.initd","w").write(file("configure.initd.debian").read()%locals())
+
+os.system("chmod a+x ajaxterm.bin")
+os.system("chmod a+x ajaxterm.initd")
diff --git a/tools/ajaxterm/configure.ajaxterm.bin b/tools/ajaxterm/configure.ajaxterm.bin
new file mode 100644
index 000000000..4d1f5a98f
--- /dev/null
+++ b/tools/ajaxterm/configure.ajaxterm.bin
@@ -0,0 +1,2 @@
+#!/bin/sh
+PYTHONPATH=%(lib)s exec %(lib)s/ajaxterm.py $@
diff --git a/tools/ajaxterm/configure.initd.debian b/tools/ajaxterm/configure.initd.debian
new file mode 100644
index 000000000..901082707
--- /dev/null
+++ b/tools/ajaxterm/configure.initd.debian
@@ -0,0 +1,33 @@
+#!/bin/sh
+
+PATH=/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin
+DAEMON=%(bin)s/ajaxterm
+PORT=%(port)s
+PIDFILE=/var/run/ajaxterm.pid
+
+[ -x "$DAEMON" ] || exit 0
+
+#. /lib/lsb/init-functions
+
+case "$1" in
+ start)
+ echo "Starting ajaxterm on port $PORT"
+ start-stop-daemon --start --pidfile $PIDFILE --exec $DAEMON -- --daemon --port=$PORT --uid=nobody || return 2
+ ;;
+ stop)
+ echo "Stopping ajaxterm"
+ start-stop-daemon --stop --pidfile $PIDFILE
+ rm -f $PIDFILE
+ ;;
+ restart|force-reload)
+ $0 stop
+ sleep 1
+ $0 start
+ ;;
+ *)
+ echo "Usage: $SCRIPTNAME {start|stop|restart|force-reload}" >&2
+ exit 3
+ ;;
+esac
+
+:
diff --git a/tools/ajaxterm/configure.initd.gentoo b/tools/ajaxterm/configure.initd.gentoo
new file mode 100644
index 000000000..ac28ef0b6
--- /dev/null
+++ b/tools/ajaxterm/configure.initd.gentoo
@@ -0,0 +1,27 @@
+#!/sbin/runscript
+
+# AjaxTerm Gentoo script, 08 May 2006 Mark Gillespie
+
+DAEMON=%(bin)s/ajaxterm
+PORT=%(port)s
+PIDFILE=/var/run/ajaxterm.pid
+
+depend()
+{
+ need net
+}
+
+start()
+{
+ ebegin "Starting AjaxTerm on port $PORT"
+ start-stop-daemon --start --pidfile $PIDFILE --exec $DAEMON -- --daemon --port=$PORT --uid=nobody
+ eend $?
+}
+
+stop()
+{
+ ebegin "Stopping AjaxTerm"
+ start-stop-daemon --stop --pidfile $PIDFILE
+ rm -f $PIDFILE
+ eend $?
+}
diff --git a/tools/ajaxterm/configure.initd.redhat b/tools/ajaxterm/configure.initd.redhat
new file mode 100644
index 000000000..5c9788574
--- /dev/null
+++ b/tools/ajaxterm/configure.initd.redhat
@@ -0,0 +1,75 @@
+#
+# ajaxterm Startup script for ajaxterm
+#
+# chkconfig: - 99 99
+# description: Ajaxterm is a web-based terminal emulator served over HTTP
+# processname: ajaxterm
+# pidfile: /var/run/ajaxterm.pid
+# version: 1.0 Kevin Reichhart - ajaxterminit at lastname dot org
+
+# Source function library.
+. /etc/rc.d/init.d/functions
+
+if [ -f /etc/sysconfig/ajaxterm ]; then
+ . /etc/sysconfig/ajaxterm
+fi
+
+ajaxterm=/usr/local/bin/ajaxterm
+prog=ajaxterm
+pidfile=${PIDFILE-/var/run/ajaxterm.pid}
+lockfile=${LOCKFILE-/var/lock/subsys/ajaxterm}
+port=${PORT-8022}
+user=${xUSER-nobody}
+RETVAL=0
+
+
+start() {
+ echo -n $"Starting $prog: "
+ daemon $ajaxterm --daemon --port=$port --uid=$user $OPTIONS
+ RETVAL=$?
+ echo
+ [ $RETVAL = 0 ] && touch ${lockfile}
+ return $RETVAL
+}
+stop() {
+ echo -n $"Stopping $prog: "
+ killproc $ajaxterm
+ RETVAL=$?
+ echo
+ [ $RETVAL = 0 ] && rm -f ${lockfile} ${pidfile}
+}
+reload() {
+ echo -n $"Reloading $prog: "
+ killproc $ajaxterm -HUP
+ RETVAL=$?
+ echo
+}
+
+# See how we were called.
+case "$1" in
+ start)
+ start
+ ;;
+ stop)
+ stop
+ ;;
+ status)
+ status python ajaxterm
+ RETVAL=$?
+ ;;
+ restart)
+ stop
+ start
+ ;;
+ condrestart)
+ if [ -f ${pidfile} ] ; then
+ stop
+ start
+ fi
+ ;;
+ *)
+ echo $"Usage: $prog {start|stop|restart|condrestart}"
+ exit 1
+esac
+
+exit $RETVAL
diff --git a/tools/ajaxterm/configure.makefile b/tools/ajaxterm/configure.makefile
new file mode 100644
index 000000000..6bd80853d
--- /dev/null
+++ b/tools/ajaxterm/configure.makefile
@@ -0,0 +1,20 @@
+build:
+ true
+
+install:
+ install -d "%(bin)s"
+ install -d "%(lib)s"
+ install ajaxterm.bin "%(bin)s/ajaxterm"
+ install ajaxterm.initd "%(etc)s/init.d/ajaxterm"
+ install -m 644 ajaxterm.css ajaxterm.html ajaxterm.js qweb.py sarissa.js sarissa_dhtml.js "%(lib)s"
+ install -m 755 ajaxterm.py "%(lib)s"
+ gzip --best -c ajaxterm.1 > ajaxterm.1.gz
+ install -d "%(man)s"
+ install ajaxterm.1.gz "%(man)s"
+
+clean:
+ rm ajaxterm.bin
+ rm ajaxterm.initd
+ rm ajaxterm.1.gz
+ rm Makefile
+
diff --git a/tools/ajaxterm/qweb.py b/tools/ajaxterm/qweb.py
new file mode 100644
index 000000000..20c509230
--- /dev/null
+++ b/tools/ajaxterm/qweb.py
@@ -0,0 +1,1356 @@
+#!/usr/bin/python2.3
+#
+# vim:set et ts=4 fdc=0 fdn=2 fdl=0:
+#
+# There are no blank lines between blocks because I use folding from:
+# http://www.vim.org/scripts/script.php?script_id=515
+#
+
+"""= QWeb Framework =
+
+== What is QWeb ? ==
+
+QWeb is a python based [http://www.python.org/doc/peps/pep-0333/ WSGI]
+compatible web framework, it provides an infrastructure to quickly build web
+applications consisting of:
+
+ * A lightweight request handler (QWebRequest)
+ * An xml templating engine (QWebXml and QWebHtml)
+ * A simple name based controller (qweb_control)
+ * A standalone WSGI Server (QWebWSGIServer)
+ * A cgi and fastcgi WSGI wrapper (taken from flup)
+ * A startup function that starts cgi, fastcgi or standalone according to the
+   environment (qweb_autorun).
+
+QWeb applications are runnable in standalone mode (from commandline), via
+FastCGI, Regular CGI or by any python WSGI compliant server.
+
+QWeb doesn't provide any database access but it integrates nicely with ORMs
+such as SQLObject, SQLAlchemy or plain DB-API.
+
+Written by Antony Lesuisse (email al AT udev.org)
+
+Homepage: http://antony.lesuisse.org/qweb/trac/
+
+Forum: [http://antony.lesuisse.org/qweb/forum/viewforum.php?id=1 Forum]
+
+== Quick Start (for Linux, MacOS X and cygwin) ==
+
+Make sure you have at least python 2.3 installed and run the following commands:
+
+{{{
+$ wget http://antony.lesuisse.org/qweb/files/QWeb-0.7.tar.gz
+$ tar zxvf QWeb-0.7.tar.gz
+$ cd QWeb-0.7/examples/blog
+$ ./blog.py
+}}}
+
+And point your browser to http://localhost:8080/
+
+You may also try AjaxTerm which uses qweb request handler.
+
+== Download ==
+
+ * Version 0.7:
+ * Source [/qweb/files/QWeb-0.7.tar.gz QWeb-0.7.tar.gz]
+ * Python 2.3 Egg [/qweb/files/QWeb-0.7-py2.3.egg QWeb-0.7-py2.3.egg]
+ * Python 2.4 Egg [/qweb/files/QWeb-0.7-py2.4.egg QWeb-0.7-py2.4.egg]
+
+ * [/qweb/trac/browser Browse the source repository]
+
+== Documentation ==
+
+ * [/qweb/trac/browser/trunk/README.txt?format=raw Read the included documentation]
+ * QwebTemplating
+
+== Mailing-list ==
+
+ * Forum: [http://antony.lesuisse.org/qweb/forum/viewforum.php?id=1 Forum]
+ * No mailing-list exists yet, discussion should happen on: [http://mail.python.org/mailman/listinfo/web-sig web-sig] [http://mail.python.org/pipermail/web-sig/ archives]
+
+QWeb Components:
+----------------
+
+QWeb also features a simple components api, that enables developers to easily
+produce reusable components.
+
+Default qweb components:
+
+ - qweb_static:
+ A qweb component to serve static content from the filesystem or from
+ zipfiles.
+
+ - qweb_dbadmin:
+ scaffolding for sqlobject
+
+License
+-------
+qweb/fcgi.py which is BSD-like from saddi.com.
+Everything else is put in the public domain.
+
+
+TODO
+----
+ Announce QWeb to python-announce-list@python.org web-sig@python.org
+ qweb_core
+ rename request methods into
+ request_save_files
+ response_404
+ response_redirect
+ response_download
+ request callback_generator, callback_function ?
+ wsgi callback_server_local
+ xml tags explicitly call render_attributes(t_att)?
+ priority form-checkbox over t-value (for t-option)
+
+"""
+
+import BaseHTTPServer,SocketServer,Cookie
+import cgi,datetime,email,email.Message,errno,gzip,os,random,re,socket,sys,tempfile,time,types,urllib,urlparse,xml.dom
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+try:
+ import cStringIO as StringIO
+except ImportError:
+ import StringIO
+
+#----------------------------------------------------------
+# Qweb Xml t-raw t-esc t-if t-foreach t-set t-call t-trim
+#----------------------------------------------------------
+class QWebEval:
+ def __init__(self,data):
+ self.data=data
+ def __getitem__(self,expr):
+ if self.data.has_key(expr):
+ return self.data[expr]
+ r=None
+ try:
+ r=eval(expr,self.data)
+ except NameError,e:
+ pass
+ except AttributeError,e:
+ pass
+ except Exception,e:
+ print "qweb: expression error '%s' "%expr,e
+ if self.data.has_key("__builtins__"):
+ del self.data["__builtins__"]
+ return r
+ def eval_object(self,expr):
+ return self[expr]
+ def eval_str(self,expr):
+ if expr=="0":
+ return self.data[0]
+ if isinstance(self[expr],unicode):
+ return self[expr].encode("utf8")
+ return str(self[expr])
+ def eval_format(self,expr):
+ try:
+ return str(expr%self)
+ except:
+ return "qweb: format error '%s' "%expr
+# if isinstance(r,unicode):
+# return r.encode("utf8")
+ def eval_bool(self,expr):
+ if self.eval_object(expr):
+ return 1
+ else:
+ return 0
+class QWebXml:
+ """QWeb Xml templating engine
+
+ The templating engine use a very simple syntax, "magic" xml attributes, to
+    produce any kind of textual output (even non-xml).
+
+ QWebXml:
+ the template engine core implements the basic magic attributes:
+
+ t-att t-raw t-esc t-if t-foreach t-set t-call t-trim
+
+ """
+ def __init__(self,x=None,zipname=None):
+ self.node=xml.dom.Node
+ self._t={}
+ self._render_tag={}
+ prefix='render_tag_'
+ for i in [j for j in dir(self) if j.startswith(prefix)]:
+ name=i[len(prefix):].replace('_','-')
+ self._render_tag[name]=getattr(self.__class__,i)
+
+ self._render_att={}
+ prefix='render_att_'
+ for i in [j for j in dir(self) if j.startswith(prefix)]:
+ name=i[len(prefix):].replace('_','-')
+ self._render_att[name]=getattr(self.__class__,i)
+
+ if x!=None:
+ if zipname!=None:
+ import zipfile
+ zf=zipfile.ZipFile(zipname, 'r')
+ self.add_template(zf.read(x))
+ else:
+ self.add_template(x)
+ def register_tag(self,tag,func):
+ self._render_tag[tag]=func
+ def add_template(self,x):
+ if hasattr(x,'documentElement'):
+ dom=x
+ elif x.startswith("<?xml"):
+ import xml.dom.minidom
+ dom=xml.dom.minidom.parseString(x)
+ else:
+ import xml.dom.minidom
+ dom=xml.dom.minidom.parse(x)
+ for n in dom.documentElement.childNodes:
+ if n.nodeName=="t":
+ self._t[str(n.getAttribute("t-name"))]=n
+ def get_template(self,name):
+ return self._t[name]
+
+ def eval_object(self,expr,v):
+ return QWebEval(v).eval_object(expr)
+ def eval_str(self,expr,v):
+ return QWebEval(v).eval_str(expr)
+ def eval_format(self,expr,v):
+ return QWebEval(v).eval_format(expr)
+ def eval_bool(self,expr,v):
+ return QWebEval(v).eval_bool(expr)
+
+ def render(self,tname,v={},out=None):
+ if self._t.has_key(tname):
+ return self.render_node(self._t[tname],v)
+ else:
+ return 'qweb: template "%s" not found'%tname
+ def render_node(self,e,v):
+ r=""
+ if e.nodeType==self.node.TEXT_NODE or e.nodeType==self.node.CDATA_SECTION_NODE:
+ r=e.data.encode("utf8")
+ elif e.nodeType==self.node.ELEMENT_NODE:
+ pre=""
+ g_att=""
+ t_render=None
+ t_att={}
+ for (an,av) in e.attributes.items():
+ an=str(an)
+ if isinstance(av,types.UnicodeType):
+ av=av.encode("utf8")
+ else:
+ av=av.nodeValue.encode("utf8")
+ if an.startswith("t-"):
+ for i in self._render_att:
+ if an[2:].startswith(i):
+ g_att+=self._render_att[i](self,e,an,av,v)
+ break
+ else:
+ if self._render_tag.has_key(an[2:]):
+ t_render=an[2:]
+ t_att[an[2:]]=av
+ else:
+ g_att+=' %s="%s"'%(an,cgi.escape(av,1));
+ if t_render:
+ if self._render_tag.has_key(t_render):
+ r=self._render_tag[t_render](self,e,t_att,g_att,v)
+ else:
+ r=self.render_element(e,g_att,v,pre,t_att.get("trim",0))
+ return r
+ def render_element(self,e,g_att,v,pre="",trim=0):
+ g_inner=[]
+ for n in e.childNodes:
+ g_inner.append(self.render_node(n,v))
+ name=str(e.nodeName)
+ inner="".join(g_inner)
+ if trim==0:
+ pass
+ elif trim=='left':
+ inner=inner.lstrip()
+ elif trim=='right':
+ inner=inner.rstrip()
+ elif trim=='both':
+ inner=inner.strip()
+ if name=="t":
+ return inner
+ elif len(inner):
+ return "<%s%s>%s%s</%s>"%(name,g_att,pre,inner,name)
+ else:
+ return "<%s%s/>"%(name,g_att)
+
+ # Attributes
+ def render_att_att(self,e,an,av,v):
+ if an.startswith("t-attf-"):
+ att,val=an[7:],self.eval_format(av,v)
+ elif an.startswith("t-att-"):
+ att,val=(an[6:],self.eval_str(av,v))
+ else:
+ att,val=self.eval_object(av,v)
+ return ' %s="%s"'%(att,cgi.escape(val,1))
+
+ # Tags
+ def render_tag_raw(self,e,t_att,g_att,v):
+ return self.eval_str(t_att["raw"],v)
+ def render_tag_rawf(self,e,t_att,g_att,v):
+ return self.eval_format(t_att["rawf"],v)
+ def render_tag_esc(self,e,t_att,g_att,v):
+ return cgi.escape(self.eval_str(t_att["esc"],v))
+ def render_tag_escf(self,e,t_att,g_att,v):
+ return cgi.escape(self.eval_format(t_att["escf"],v))
+ def render_tag_foreach(self,e,t_att,g_att,v):
+ expr=t_att["foreach"]
+ enum=self.eval_object(expr,v)
+ if enum!=None:
+ var=t_att.get('as',expr).replace('.','_')
+ d=v.copy()
+ size=-1
+ if isinstance(enum,types.ListType):
+ size=len(enum)
+ elif isinstance(enum,types.TupleType):
+ size=len(enum)
+ elif hasattr(enum,'count'):
+ size=enum.count()
+ d["%s_size"%var]=size
+ d["%s_all"%var]=enum
+ index=0
+ ru=[]
+ for i in enum:
+ d["%s_value"%var]=i
+ d["%s_index"%var]=index
+ d["%s_first"%var]=index==0
+ d["%s_even"%var]=index%2
+ d["%s_odd"%var]=(index+1)%2
+ d["%s_last"%var]=index+1==size
+ if index%2:
+ d["%s_parity"%var]='odd'
+ else:
+ d["%s_parity"%var]='even'
+ if isinstance(i,types.DictType):
+ d.update(i)
+ else:
+ d[var]=i
+ ru.append(self.render_element(e,g_att,d))
+ index+=1
+ return "".join(ru)
+ else:
+ return "qweb: t-foreach %s not found."%expr
+ def render_tag_if(self,e,t_att,g_att,v):
+ if self.eval_bool(t_att["if"],v):
+ return self.render_element(e,g_att,v)
+ else:
+ return ""
+ def render_tag_call(self,e,t_att,g_att,v):
+ # TODO t-prefix
+ if t_att.has_key("import"):
+ d=v
+ else:
+ d=v.copy()
+ d[0]=self.render_element(e,g_att,d)
+ return self.render(t_att["call"],d)
+ def render_tag_set(self,e,t_att,g_att,v):
+ if t_att.has_key("eval"):
+ v[t_att["set"]]=self.eval_object(t_att["eval"],v)
+ else:
+ v[t_att["set"]]=self.render_element(e,g_att,v)
+ return ""
+
+#----------------------------------------------------------
+# QWeb HTML (+deprecated QWebFORM and QWebOLD)
+#----------------------------------------------------------
class QWebURL:
    """URL helper that builds hrefs relative to the current request path.

    assert req.PATH_INFO== "/site/admin/page_edit"
    u = QWebURL(root_path="/site/",req_path=req.PATH_INFO)
    s=u.href("user/login",{'a':'1'})
    assert s=="../user/login?a=1"
    """
    def __init__(self, root_path="/", req_path="/",defpath="",defparam=None):
        # BUG FIX: defparam used to default to a mutable {} shared by every
        # instance created without the argument; use a None sentinel instead.
        self.defpath=defpath
        self.defparam={} if defparam is None else defparam
        self.root_path=root_path
        self.req_path=req_path
        # path components of the current request, minus the leaf
        self.req_list=req_path.split("/")[:-1]
        self.req_len=len(self.req_list)
    def decode(self,s):
        """Parse a query string into a dict (last value wins)."""
        h={}
        for k,v in cgi.parse_qsl(s,1):
            h[k]=v
        return h
    def encode(self,h):
        """Encode a dict as an URL query string."""
        return urllib.urlencode(h.items())
    def request(self,req):
        return req.REQUEST
    def copy(self,path=None,param=None):
        """Return a new QWebURL with overridden default path/params."""
        npath=self.defpath
        if path:
            npath=path
        nparam=self.defparam.copy()
        if param:
            nparam.update(param)
        return QWebURL(self.root_path,self.req_path,npath,nparam)
    def path(self,path=''):
        """Return *path* (under root_path) relative to the request path."""
        if not path:
            path=self.defpath
        pl=(self.root_path+path).split('/')
        i=0
        # find the longest common prefix with the request path
        for i in range(min(len(pl), self.req_len)):
            if pl[i]!=self.req_list[i]:
                break
        else:
            i+=1
        dd=self.req_len-i
        if dd<0:
            dd=0
        return '/'.join(['..']*dd+pl[i:])
    def href(self,path='',arg=None):
        """Return a relative href with default params merged with *arg*."""
        p=self.path(path)
        tmp=self.defparam.copy()
        tmp.update(arg or {})
        s=self.encode(tmp)
        if len(s):
            return p+"?"+s
        else:
            return p
    def form(self,path='',arg=None):
        """Return (action, hidden-inputs-html) for an HTML form."""
        p=self.path(path)
        tmp=self.defparam.copy()
        tmp.update(arg or {})
        r=''.join(['<input type="hidden" name="%s" value="%s"/>'%(k,cgi.escape(str(v),1)) for k,v in tmp.items()])
        return (p,r)
class QWebField:
    """One form field: its definition, submitted input and validity state."""
    def __init__(self, name=None, default="", check=None):
        """Create a field; it starts out valid until input says otherwise."""
        self.name, self.default, self.check = name, default, check
        # optional knobs, tweaked when fields are built from a template
        self.type = None
        self.trim, self.required = 1, 1
        self.cssvalid, self.cssinvalid = "form_valid", "form_invalid"
        # back-reference, filled in by the form's add_field()
        self.form = None
        # per-request state, filled in while processing input
        self.input = self.css = self.value = None
        self.valid = self.invalid = None
        self.validate(1)
    def validate(self, val=1, update=1):
        """Mark the field valid (truthy *val*) or invalid; refresh the form."""
        if val:
            self.valid, self.invalid, self.css = 1, 0, self.cssvalid
        else:
            self.valid, self.invalid, self.css = 0, 1, self.cssinvalid
        if update and self.form:
            self.form.update()
    def invalidate(self, update=1):
        """Shortcut for validate(0)."""
        self.validate(0, update)
class QWebForm:
    """Form-processing helper built from t-form-* template attributes.

    Collects QWebField objects, feeds them the submitted arguments and
    exposes aggregate state: submitted, valid, invalid, missing, error.
    """
    class QWebFormF:
        # attribute bag: fields are also exposed as form.f.<name>
        pass
    def __init__(self,e=None,arg=None,default=None):
        self.fields={}
        # all fields have been submitted
        self.submitted=False
        self.missing=[]
        # at least one field is invalid or missing
        self.invalid=False
        self.error=[]
        # alias kept in sync with self.error for backward compatibility
        self.errors=self.error
        # all fields have been submitted and are valid
        self.valid=False
        # fields under self.f for convenience
        self.f=self.QWebFormF()
        if e:
            self.add_template(e)
        # assume that the fields are done with the template
        if default:
            self.set_default(default,e==None)
        if arg!=None:
            self.process_input(arg)
    def __getitem__(self,k):
        return self.fields[k]
    def set_default(self,default,add_missing=1):
        """Apply default values; optionally create fields for unknown keys."""
        for k,v in default.items():
            if k in self.fields:
                self.fields[k].default=str(v)
            elif add_missing:
                self.add_field(QWebField(k,v))
    def add_field(self,f):
        self.fields[f.name]=f
        f.form=self
        setattr(self.f,f.name,f)
    def add_template(self,e):
        """Walk a DOM element tree creating fields for t-form-* attributes."""
        att={}
        for (an,av) in e.attributes.items():
            an=str(an)
            if an.startswith("t-"):
                att[an[2:]]=av.encode("utf8")
        for i in ["form-text", "form-password", "form-radio", "form-checkbox", "form-select","form-textarea"]:
            if i in att:
                name=att[i].split(".")[-1]
                default=att.get("default","")
                check=att.get("check",None)
                f=QWebField(name,default,check)
                if i=="form-textarea":
                    f.type="textarea"
                    f.trim=0
                if i=="form-checkbox":
                    f.type="checkbox"
                    f.required=0
                self.add_field(f)
        for n in e.childNodes:
            if n.nodeType==n.ELEMENT_NODE:
                self.add_template(n)
    def process_input(self,arg):
        """Feed submitted values to the fields and run their checks."""
        for f in self.fields.values():
            if f.name in arg:
                f.input=arg[f.name]
                f.value=f.input
                if f.trim:
                    f.input=f.input.strip()
                f.validate(1,False)
                if f.check==None:
                    continue
                elif callable(f.check):
                    pass
                elif isinstance(f.check,str):
                    # named checks expand to /regex/ patterns
                    v=f.check
                    if f.check=="email":
                        v=r"/^[^@#!& ]+@[A-Za-z0-9-][.A-Za-z0-9-]{0,64}\.[A-Za-z]{2,5}$/"
                    if f.check=="date":
                        v=r"/^(19|20)\d\d-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])$/"
                    if not re.match(v[1:-1],f.input):
                        f.validate(0,False)
            else:
                f.value=f.default
        self.update()
    def validate_all(self,val=1):
        for f in self.fields.values():
            f.validate(val,0)
        self.update()
    def invalidate_all(self):
        self.validate_all(0)
    def update(self):
        """Recompute aggregate state from the fields.

        BUG FIX: the original reset a misspelled ``self.errors`` list while
        appending to ``self.error``/``self.missing``, so both accumulated
        duplicates across calls; both are now reset here.
        """
        self.submitted=True
        self.valid=True
        self.missing=[]
        self.error=[]
        self.errors=self.error
        for f in self.fields.values():
            if f.required and f.input==None:
                self.submitted=False
                self.valid=False
                self.missing.append(f.name)
            if f.invalid:
                self.valid=False
                self.error.append(f.name)
        # invalid means: submitted but not valid
        self.invalid=self.submitted and self.valid==False
    def collect(self):
        """Return {field name: value}."""
        d={}
        for f in self.fields.values():
            d[f.name]=f.value
        return d
class QWebURLEval(QWebEval):
    """QWebEval variant whose string results are URL-quoted (for t-href & co)."""
    def __init__(self,data):
        QWebEval.__init__(self,data)
    def __getitem__(self,expr):
        # evaluate via QWebEval, then quote strings for safe URL embedding
        r=QWebEval.__getitem__(self,expr)
        if isinstance(r,str):
            return urllib.quote_plus(r)
        else:
            return r
class QWebHtml(QWebXml):
    """QWebHtml
    QWebURL:
    QWebField:
    QWebForm:
    QWebHtml:
    an extended template engine, with a few utility class to easily produce
    HTML, handle URLs and process forms, it adds the following magic attributes:

    t-href t-action t-form-text t-form-password t-form-textarea t-form-radio
    t-form-checkbox t-form-select t-option t-selected t-checked t-pager

    # explication URL:
    # v['tableurl']=QWebUrl({p=afdmin,saar=,orderby=,des=,mlink;meta_active=})
    # t-href="tableurl?desc=1"
    #
    # explication FORM: t-if="form.valid()"
    # Foreach i
    #   email: <input type="text" t-esc-name="i" t-esc-value="form[i].value" t-esc-class="form[i].css"/>
    #   <input type="radio" name="spamtype" t-esc-value="i" t-selected="i==form.f.spamtype.value"/>
    #   <option t-esc-value="cc" t-selected="cc==form.f.country.value"><t t-esc="cname"></option>
    # Simple forms:
    #   <input t-form-text="form.email" t-check="email"/>
    #   <input t-form-password="form.email" t-check="email"/>
    #   <input t-form-radio="form.email" />
    #   <input t-form-checkbox="form.email" />
    #   <textarea t-form-textarea="form.email" t-check="email"/>
    #   <select t-form-select="form.email"/>
    #     <option t-value="1">
    #   <input t-form-radio="form.spamtype" t-value="1"/> Cars
    #   <input t-form-radio="form.spamtype" t-value="2"/> Sprt
    """
    # QWebForm from a template
    def form(self,tname,arg=None,default=None):
        # build a QWebForm from the t-form-* fields declared in template *tname*
        form=QWebForm(self._t[tname],arg,default)
        return form

    # HTML Att
    def eval_url(self,av,v):
        # split an URL expression "urlvar/path?query" into (urlvar, path, args)
        s=QWebURLEval(v).eval_format(av)
        a=s.split('?',1)
        arg={}
        if len(a)>1:
            for k,v in cgi.parse_qsl(a[1],1):
                arg[k]=v
        b=a[0].split('/',1)
        path=''
        if len(b)>1:
            path=b[1]
        u=b[0]
        return u,path,arg
    def render_att_url_(self,e,an,av,v):
        # common helper for URL attributes: emits ' name="href"'
        u,path,arg=self.eval_url(av,v)
        if not isinstance(v.get(u,0),QWebURL):
            out='qweb: missing url %r %r %r'%(u,path,arg)
        else:
            out=v[u].href(path,arg)
        return ' %s="%s"'%(an[6:],cgi.escape(out,1))
    def render_att_href(self,e,an,av,v):
        return self.render_att_url_(e,"t-url-href",av,v)
    def render_att_checked(self,e,an,av,v):
        # emits checked="checked" / selected="selected" when the expression is true
        if self.eval_bool(av,v):
            return ' %s="%s"'%(an[2:],an[2:])
        else:
            return ''
    def render_att_selected(self,e,an,av,v):
        return self.render_att_checked(e,an,av,v)

    # HTML Tags forms
    def render_tag_rawurl(self,e,t_att,g_att,v):
        # t-rawurl: emit the computed href unescaped
        u,path,arg=self.eval_url(t_att["rawurl"],v)
        return v[u].href(path,arg)
    def render_tag_escurl(self,e,t_att,g_att,v):
        # t-escurl: emit the computed href HTML-escaped
        u,path,arg=self.eval_url(t_att["escurl"],v)
        return cgi.escape(v[u].href(path,arg))
    def render_tag_action(self,e,t_att,g_att,v):
        # t-action: set the form action and inject hidden inputs for the params
        u,path,arg=self.eval_url(t_att["action"],v)
        if not isinstance(v.get(u,0),QWebURL):
            action,input=('qweb: missing url %r %r %r'%(u,path,arg),'')
        else:
            action,input=v[u].form(path,arg)
        g_att+=' action="%s"'%action
        return self.render_element(e,g_att,v,input)
    def render_tag_form_text(self,e,t_att,g_att,v):
        f=self.eval_object(t_att["form-text"],v)
        g_att+=' type="text" name="%s" value="%s" class="%s"'%(f.name,cgi.escape(f.value,1),f.css)
        return self.render_element(e,g_att,v)
    def render_tag_form_password(self,e,t_att,g_att,v):
        f=self.eval_object(t_att["form-password"],v)
        g_att+=' type="password" name="%s" value="%s" class="%s"'%(f.name,cgi.escape(f.value,1),f.css)
        return self.render_element(e,g_att,v)
    def render_tag_form_textarea(self,e,t_att,g_att,v):
        # textareas carry their value in the body, not in a value attribute
        type="textarea"
        f=self.eval_object(t_att["form-textarea"],v)
        g_att+=' name="%s" class="%s"'%(f.name,f.css)
        r="<%s%s>%s</%s>"%(type,g_att,cgi.escape(f.value,1),type)
        return r
    def render_tag_form_radio(self,e,t_att,g_att,v):
        f=self.eval_object(t_att["form-radio"],v)
        val=t_att["value"]
        g_att+=' type="radio" name="%s" value="%s"'%(f.name,val)
        if f.value==val:
            g_att+=' checked="checked"'
        return self.render_element(e,g_att,v)
    def render_tag_form_checkbox(self,e,t_att,g_att,v):
        f=self.eval_object(t_att["form-checkbox"],v)
        val=t_att["value"]
        g_att+=' type="checkbox" name="%s" value="%s"'%(f.name,val)
        if f.value==val:
            g_att+=' checked="checked"'
        return self.render_element(e,g_att,v)
    def render_tag_form_select(self,e,t_att,g_att,v):
        f=self.eval_object(t_att["form-select"],v)
        g_att+=' name="%s" class="%s"'%(f.name,f.css)
        return self.render_element(e,g_att,v)
    def render_tag_option(self,e,t_att,g_att,v):
        # look up the field on the enclosing t-form-select element
        f=self.eval_object(e.parentNode.getAttribute("t-form-select"),v)
        val=t_att["option"]
        g_att+=' value="%s"'%(val)
        if f.value==val:
            g_att+=' selected="selected"'
        return self.render_element(e,g_att,v)

    # HTML Tags others
    def render_tag_pager(self,e,t_att,g_att,v):
        """Compute pagination variables (<prefix>_tot_page, _win_page0, _list,
        ...) and merge them into the rendering dict; renders nothing itself.
        NOTE(review): the /-divisions rely on Python 2 integer division.
        """
        pre=t_att["pager"]
        total=int(self.eval_str(t_att["total"],v))
        start=int(self.eval_str(t_att["start"],v))
        step=int(self.eval_str(t_att.get("step","100"),v))
        scope=int(self.eval_str(t_att.get("scope","5"),v))
        # Compute Pager
        p=pre+"_"
        d={}
        d[p+"tot_size"]=total
        d[p+"tot_page"]=tot_page=total/step
        d[p+"win_start0"]=total and start
        d[p+"win_start1"]=total and start+1
        d[p+"win_end0"]=max(0,min(start+step-1,total-1))
        d[p+"win_end1"]=min(start+step,total)
        d[p+"win_page0"]=win_page=start/step
        d[p+"win_page1"]=win_page+1
        d[p+"prev"]=(win_page!=0)
        d[p+"prev_start"]=(win_page-1)*step
        d[p+"next"]=(tot_page>=win_page+1)
        d[p+"next_start"]=(win_page+1)*step
        # page links window of +/- scope pages around the current one
        l=[]
        begin=win_page-scope
        end=win_page+scope
        if begin<0:
            end-=begin
        if end>tot_page:
            begin-=(end-tot_page)
        i=max(0,begin)
        while i<=min(end,tot_page) and total!=step:
            l.append( { p+"page0":i, p+"page1":i+1, p+"start":i*step, p+"sel":(win_page==i) })
            i+=1
        d[p+"active"]=len(l)>1
        d[p+"list"]=l
        # Update v
        v.update(d)
        return ""
+
+#----------------------------------------------------------
+# QWeb Simple Controller
+#----------------------------------------------------------
def qweb_control(self,jump='main',p=[]):
    """qweb_control(self,jump='main',p=[]):
    A simple function to handle the controller part of your application.  It
    dispatches control to the *jump* handler, while ensuring that every
    prefix handler has been called first.

    qweb_control replaces '/' with '_' and strips '_' from the jump argument,
    then calls, in order:

        name1
        name1_name2
        name1_name2_name3

    Returns 0 when no handler matches *jump*, 1 otherwise.  A handler that
    returns a string redirects dispatch to that new jump target.
    """
    # NOTE(review): the mutable default p=[] is shared between calls but is
    # only ever read (f(*p)), so it is harmless here.
    jump=jump.replace('/','_').strip('_')
    if not hasattr(self,jump):
        return 0
    done={}
    todo=[]
    while 1:
        if jump!=None:
            # expand "a_b_c" into the not-yet-visited prefixes a, a_b, a_b_c
            tmp=""
            todo=[]
            for i in jump.split("_"):
                tmp+=i+"_";
                if not done.has_key(tmp[:-1]):
                    todo.append(tmp[:-1])
            jump=None
        elif len(todo):
            # call the next prefix handler; a string result becomes a new jump
            i=todo.pop(0)
            done[i]=1
            if hasattr(self,i):
                f=getattr(self,i)
                r=f(*p)
                if isinstance(r,types.StringType):
                    jump=r
        else:
            break
    return 1
+
+#----------------------------------------------------------
+# QWeb WSGI Request handler
+#----------------------------------------------------------
class QWebSession(dict):
    """Dict-like session stored as a pickle file under a temp directory.

    The session id is taken from a cookie (preferred) or from the query
    string; otherwise a fresh random id is generated.  Expired session
    files are garbage-collected opportunistically (probability per request).
    """
    def __init__(self,environ,**kw):
        dict.__init__(self)
        # configuration defaults, overridable as keyword arguments and
        # exposed as self.session_<name>
        default={
            "path" : tempfile.gettempdir(),
            "cookie_name" : "QWEBSID",
            "cookie_lifetime" : 0,
            "cookie_path" : '/',
            "cookie_domain" : '',
            "limit_cache" : 1,
            "probability" : 0.01,
            "maxlifetime" : 3600,
            "disable" : 0,
        }
        for k,v in default.items():
            setattr(self,'session_%s'%k,kw.get(k,v))
        # Try to find session
        self.session_found_cookie=0
        self.session_found_url=0
        self.session_found=0
        self.session_orig=""
        # Try cookie
        c=Cookie.SimpleCookie()
        c.load(environ.get('HTTP_COOKIE', ''))
        if self.session_cookie_name in c:
            sid=c[self.session_cookie_name].value[:64]
            # only accept well-formed hex ids that load successfully
            if re.match('[a-f0-9]+$',sid) and self.session_load(sid):
                self.session_id=sid
                self.session_found_cookie=1
                self.session_found=1
        # Try URL
        if not self.session_found_cookie:
            mo=re.search('&%s=([a-f0-9]+)'%self.session_cookie_name,environ.get('QUERY_STRING',''))
            if mo and self.session_load(mo.group(1)):
                self.session_id=mo.group(1)
                self.session_found_url=1
                self.session_found=1
        # New session
        if not self.session_found:
            self.session_id='%032x'%random.randint(1,2**128)
        self.session_trans_sid="&amp;%s=%s"%(self.session_cookie_name,self.session_id)
        # Clean old session
        if random.random() < self.session_probability:
            self.session_clean()
    def session_get_headers(self):
        """Persist the session if needed and return the HTTP headers to send."""
        h=[]
        if (not self.session_disable) and (len(self) or len(self.session_orig)):
            self.session_save()
        if not self.session_found_cookie:
            c=Cookie.SimpleCookie()
            c[self.session_cookie_name] = self.session_id
            c[self.session_cookie_name]['path'] = self.session_cookie_path
            if self.session_cookie_domain:
                c[self.session_cookie_name]['domain'] = self.session_cookie_domain
#           if self.session_cookie_lifetime:
#               c[self.session_cookie_name]['expires'] = TODO date localtime or not, datetime.datetime(1970, 1, 1)
            h.append(("Set-Cookie", c[self.session_cookie_name].OutputString()))
        if self.session_limit_cache:
            h.append(('Cache-Control','no-store, no-cache, must-revalidate, post-check=0, pre-check=0'))
            h.append(('Expires','Thu, 19 Nov 1981 08:52:00 GMT'))
            h.append(('Pragma','no-cache'))
        return h
    def session_load(self,sid):
        """Load session *sid* from disk; return 1 on success, None otherwise."""
        fname=os.path.join(self.session_path,'qweb_sess_%s'%sid)
        try:
            # binary mode: the file is written as a binary pickle
            orig=open(fname,'rb').read()
            d=pickle.loads(orig)
        except Exception:
            # best effort: a missing or corrupt session file means "no session"
            return
        self.session_orig=orig
        self.update(d)
        return 1
    def session_save(self):
        """Write the session atomically (temp file + rename), if it changed."""
        if not os.path.isdir(self.session_path):
            os.makedirs(self.session_path)
        fname=os.path.join(self.session_path,'qweb_sess_%s'%self.session_id)
        try:
            oldtime=os.path.getmtime(fname)
        except (OSError,IOError):
            # BUG FIX: "except OSError,IOError:" bound the exception to the
            # name IOError instead of catching both exception types
            oldtime=0
        dump=pickle.dumps(self.copy())
        # rewrite when the content changed, or periodically to refresh mtime
        if (dump != self.session_orig) or (time.time() > oldtime+self.session_maxlifetime/4):
            tmpname=os.path.join(self.session_path,'qweb_sess_%s_%x'%(self.session_id,random.randint(1,2**32)))
            f=open(tmpname,'wb')
            f.write(dump)
            f.close()
            # rename over an existing file is not atomic on win32
            if sys.platform=='win32' and os.path.isfile(fname):
                os.remove(fname)
            os.rename(tmpname,fname)
    def session_clean(self):
        """Remove session files older than maxlifetime (best effort)."""
        t=time.time()
        try:
            for i in [os.path.join(self.session_path,i) for i in os.listdir(self.session_path) if i.startswith('qweb_sess_')]:
                if (t > os.path.getmtime(i)+self.session_maxlifetime):
                    os.unlink(i)
        except (OSError,IOError):
            # same comma-vs-tuple fix as in session_save
            pass
class QWebSessionMem(QWebSession):
    """QWebSession variant kept in a process-global dict instead of on disk."""
    def session_load(self,sid):
        # lazily create the module-level store on first use
        global _qweb_sessions
        if not "_qweb_sessions" in globals():
            _qweb_sessions={}
        if _qweb_sessions.has_key(sid):
            self.session_orig=_qweb_sessions[sid]
            self.update(self.session_orig)
            return 1
    def session_save(self):
        global _qweb_sessions
        if not "_qweb_sessions" in globals():
            _qweb_sessions={}
        # store a shallow copy so later mutations need an explicit save
        _qweb_sessions[self.session_id]=self.copy()
class QWebSessionService:
    """WSGI middleware stub meant to inject a session into the environ."""
    def __init__(self, wsgiapp, url_rewrite=0):
        self.wsgiapp = wsgiapp
        # tag=attribute pairs that would need rewriting for URL-carried sids
        self.url_rewrite_tags = "a=href,area=href,frame=src,form=,fieldset="
    def __call__(self, environ, start_response):
        # TODO: use QWebSession to provide environ["qweb.session"]
        return self.wsgiapp(environ, start_response)
class QWebDict(dict):
    """A dict for request variables: missing keys read as the empty string."""
    def __init__(self, *p):
        dict.__init__(self, *p)
    def __getitem__(self, key):
        return self.get(key, "")
    def int(self, key):
        """Return the value coerced to int, or 0 when absent or non-numeric."""
        raw = self.get(key, "0")
        try:
            return int(raw)
        except ValueError:
            return 0
class QWebListDict(dict):
    """A dict of lists: missing keys read as []; appendlist accumulates."""
    def __init__(self, *p):
        dict.__init__(self, *p)
    def __getitem__(self, key):
        return self.get(key, [])
    def appendlist(self, key, val):
        # create the list on first append
        if key in self:
            self[key].append(val)
        else:
            self[key] = [val]
    def get_qwebdict(self):
        """Flatten to a QWebDict, keeping the last value of each key."""
        d = QWebDict()
        for k, v in self.items():
            d[k] = v[-1]
        return d
class QWebRequest:
    """QWebRequest a WSGI request handler.

    QWebRequest is a WSGI request handler that feature GET, POST and POST
    multipart methods, handles cookies and headers and provide a dict-like
    SESSION Object (either on the filesystem or in memory).

    It is constructed with the environ and start_response WSGI arguments:

    req=qweb.QWebRequest(environ, start_response)

    req has the following attributes :

    req.environ standard WSGI dict (CGI and wsgi ones)

    Some CGI vars as attributes from environ for convenience:

    req.SCRIPT_NAME
    req.PATH_INFO
    req.REQUEST_URI

    Some computed value (also for convenience)

    req.FULL_URL full URL reconstructed (http://host/query)
    req.FULL_PATH (URL path before ?querystring)

    Dict constructed from querystring and POST datas, PHP-like.

    req.GET contains GET vars
    req.POST contains POST vars
    req.REQUEST contains merge of GET and POST
    req.FILES contains uploaded files
    req.GET_LIST req.POST_LIST req.REQUEST_LIST req.FILES_LIST multiple arguments versions
    req.debug() returns an HTML dump of those vars

    A dict-like session object.

    req.SESSION the session start when the dict is not empty.

    Attribute for handling the response

    req.response_headers dict-like to set headers
    req.response_cookies a SimpleCookie to set cookies
    req.response_status a string to set the status like '200 OK'

    req.write() to write to the buffer

    req itself is an iterable object with the buffer, it will also call
    start_response automatically before returning anything via the iterator.

    To make it short, it means that you may use

    return req

    at the end of your request handling to return the response to any WSGI
    application server.
    """
    #
    # This class contains part ripped from colubrid (with the permission of
    # mitsuhiko) see http://wsgiarea.pocoo.org/colubrid/
    #
    # - the class HttpHeaders
    # - the method load_post_data (tuned version)
    #
    class HttpHeaders(object):
        # ordered, case-insensitive, multi-value header container
        def __init__(self):
            self.data = [('Content-Type', 'text/html')]
        def __setitem__(self, key, value):
            self.set(key, value)
        def __delitem__(self, key):
            self.remove(key)
        def __contains__(self, key):
            key = key.lower()
            for k, v in self.data:
                if k.lower() == key:
                    return True
            return False
        def add(self, key, value):
            # append without removing previous values of the same header
            self.data.append((key, value))
        def remove(self, key, count=-1):
            # drop up to *count* matching headers (-1 means all of them)
            removed = 0
            data = []
            for _key, _value in self.data:
                if _key.lower() != key.lower():
                    if count > -1:
                        if removed >= count:
                            break
                        else:
                            removed += 1
                    data.append((_key, _value))
            self.data = data
        def clear(self):
            self.data = []
        def set(self, key, value):
            # replace any existing value for *key*
            self.remove(key)
            self.add(key, value)
        def get(self, key=False, httpformat=False):
            # return all headers, or those matching *key*; optionally as text
            if not key:
                result = self.data
            else:
                result = []
                for _key, _value in self.data:
                    if _key.lower() == key.lower():
                        result.append((_key, _value))
            if httpformat:
                return '\n'.join(['%s: %s' % item for item in result])
            return result
    def load_post_data(self,environ,POST,FILES):
        """Read the request body, filling POST and FILES; return the raw data."""
        length = int(environ['CONTENT_LENGTH'])
        DATA = environ['wsgi.input'].read(length)
        if environ.get('CONTENT_TYPE', '').startswith('multipart'):
            # rebuild a MIME message so the email package can split the parts
            lines = ['Content-Type: %s' % environ.get('CONTENT_TYPE', '')]
            for key, value in environ.items():
                if key.startswith('HTTP_'):
                    lines.append('%s: %s' % (key, value))
            raw = '\r\n'.join(lines) + '\r\n\r\n' + DATA
            msg = email.message_from_string(raw)
            for sub in msg.get_payload():
                if not isinstance(sub, email.Message.Message):
                    continue
                name_dict = cgi.parse_header(sub['Content-Disposition'])[1]
                if 'filename' in name_dict:
                    # Nested MIME Messages are not supported'
                    if type([]) == type(sub.get_payload()):
                        continue
                    if not name_dict['filename'].strip():
                        continue
                    filename = name_dict['filename']
                    # why not keep all the filename? because IE always send 'C:\documents and settings\blub\blub.png'
                    filename = filename[filename.rfind('\\') + 1:]
                    if 'Content-Type' in sub:
                        content_type = sub['Content-Type']
                    else:
                        content_type = None
                    s = { "name":filename, "type":content_type, "data":sub.get_payload() }
                    FILES.appendlist(name_dict['name'], s)
                else:
                    POST.appendlist(name_dict['name'], sub.get_payload())
        else:
            POST.update(cgi.parse_qs(DATA,keep_blank_values=1))
        return DATA

    def __init__(self,environ,start_response,session=QWebSession):
        self.environ=environ
        self.start_response=start_response
        self.buffer=[]

        self.SCRIPT_NAME = environ.get('SCRIPT_NAME', '')
        self.PATH_INFO = environ.get('PATH_INFO', '')
        # extensions:
        self.FULL_URL = environ['FULL_URL'] = self.get_full_url(environ)
        # REQUEST_URI is optional, fake it if absent
        if not environ.has_key("REQUEST_URI"):
            environ["REQUEST_URI"]=urllib.quote(self.SCRIPT_NAME+self.PATH_INFO)
            if environ.get('QUERY_STRING'):
                environ["REQUEST_URI"]+='?'+environ['QUERY_STRING']
        self.REQUEST_URI = environ["REQUEST_URI"]
        # full quote url path before the ?
        self.FULL_PATH = environ['FULL_PATH'] = self.REQUEST_URI.split('?')[0]

        self.request_cookies=Cookie.SimpleCookie()
        self.request_cookies.load(environ.get('HTTP_COOKIE', ''))

        self.response_started=False
        self.response_gzencode=False
        self.response_cookies=Cookie.SimpleCookie()
        # to delete a cookie use: c[key]['expires'] = datetime.datetime(1970, 1, 1)
        self.response_headers=self.HttpHeaders()
        self.response_status="200 OK"

        self.php=None
        if self.environ.has_key("php"):
            # embedded in a PHP-like container: reuse its request/session data
            self.php=environ["php"]
            self.SESSION=self.php._SESSION
            self.GET=self.php._GET
            self.POST=self.php._POST
            self.REQUEST=self.php._ARG
            self.FILES=self.php._FILES
        else:
            # session may be an instance, a factory class, or falsy to disable
            if isinstance(session,QWebSession):
                self.SESSION=session
            elif session:
                self.SESSION=session(environ)
            else:
                self.SESSION=None
            self.GET_LIST=QWebListDict(cgi.parse_qs(environ.get('QUERY_STRING', ''),keep_blank_values=1))
            self.POST_LIST=QWebListDict()
            self.FILES_LIST=QWebListDict()
            self.REQUEST_LIST=QWebListDict(self.GET_LIST)
            if environ['REQUEST_METHOD'] == 'POST':
                self.DATA=self.load_post_data(environ,self.POST_LIST,self.FILES_LIST)
                self.REQUEST_LIST.update(self.POST_LIST)
            # single-value (last wins) views of the *_LIST dicts
            self.GET=self.GET_LIST.get_qwebdict()
            self.POST=self.POST_LIST.get_qwebdict()
            self.FILES=self.FILES_LIST.get_qwebdict()
            self.REQUEST=self.REQUEST_LIST.get_qwebdict()
    def get_full_url(environ):
        # taken from PEP 333
        if 'FULL_URL' in environ:
            return environ['FULL_URL']
        url = environ['wsgi.url_scheme']+'://'
        if environ.get('HTTP_HOST'):
            url += environ['HTTP_HOST']
        else:
            url += environ['SERVER_NAME']
            if environ['wsgi.url_scheme'] == 'https':
                if environ['SERVER_PORT'] != '443':
                    url += ':' + environ['SERVER_PORT']
            else:
                if environ['SERVER_PORT'] != '80':
                    url += ':' + environ['SERVER_PORT']
        if environ.has_key('REQUEST_URI'):
            url += environ['REQUEST_URI']
        else:
            url += urllib.quote(environ.get('SCRIPT_NAME', ''))
            url += urllib.quote(environ.get('PATH_INFO', ''))
            if environ.get('QUERY_STRING'):
                url += '?' + environ['QUERY_STRING']
        return url
    get_full_url=staticmethod(get_full_url)
    def save_files(self):
        # materialize uploaded FILES to named temporary files (v["tmp_name"])
        for k,v in self.FILES.items():
            if not v.has_key("tmp_file"):
                f=tempfile.NamedTemporaryFile()
                f.write(v["data"])
                f.flush()
                v["tmp_file"]=f
                v["tmp_name"]=f.name
    def debug(self):
        """Return an HTML table dump of request, session and environ vars."""
        body=''
        for name,d in [
            ("GET",self.GET), ("POST",self.POST), ("REQUEST",self.REQUEST), ("FILES",self.FILES),
            ("GET_LIST",self.GET_LIST), ("POST_LIST",self.POST_LIST), ("REQUEST_LIST",self.REQUEST_LIST), ("FILES_LIST",self.FILES_LIST),
            ("SESSION",self.SESSION), ("environ",self.environ),
        ]:
            body+='<table border="1" width="100%" align="center">\n'
            body+='<tr><th colspan="2" align="center">%s</th></tr>\n'%name
            keys=d.keys()
            keys.sort()
            body+=''.join(['<tr><td>%s</td><td>%s</td></tr>\n'%(k,cgi.escape(repr(d[k]))) for k in keys])
            body+='</table><br><br>\n\n'
        return body
    def write(self,s):
        self.buffer.append(s)
    def echo(self,*s):
        # write any number of values, coerced to str
        self.buffer.extend([str(i) for i in s])
    def response(self):
        """Finalize the response: close temp files, optionally gzip the body,
        send status and headers exactly once, and return the body buffer."""
        if not self.response_started:
            if not self.php:
                for k,v in self.FILES.items():
                    if v.has_key("tmp_file"):
                        try:
                            v["tmp_file"].close()
                        except OSError:
                            pass
            if self.response_gzencode and self.environ.get('HTTP_ACCEPT_ENCODING','').find('gzip')!=-1:
                zbuf=StringIO.StringIO()
                zfile=gzip.GzipFile(mode='wb', fileobj=zbuf)
                zfile.write(''.join(self.buffer))
                zfile.close()
                zbuf=zbuf.getvalue()
                self.buffer=[zbuf]
                self.response_headers['Content-Encoding']="gzip"
                self.response_headers['Content-Length']=str(len(zbuf))
            headers = self.response_headers.get()
            if isinstance(self.SESSION, QWebSession):
                headers.extend(self.SESSION.session_get_headers())
            headers.extend([('Set-Cookie', self.response_cookies[i].OutputString()) for i in self.response_cookies])
            self.start_response(self.response_status, headers)
            self.response_started=True
        return self.buffer
    def __iter__(self):
        return self.response().__iter__()
    def http_redirect(self,url,permanent=1):
        # 301 for permanent, 302 otherwise
        if permanent:
            self.response_status="301 Moved Permanently"
        else:
            self.response_status="302 Found"
        self.response_headers["Location"]=url
    def http_404(self,msg="<h1>404 Not Found</h1>"):
        self.response_status="404 Not Found"
        if msg:
            self.write(msg)
    def http_download(self,fname,fstr,partial=0):
        """Set headers to serve *fstr* as a file download named *fname*."""
# allow fstr to be a file-like object
# if parital:
#   say accept ranages
#   parse range headers...
#   if range:
#       header("HTTP/1.1 206 Partial Content");
#       header("Content-Range: bytes $offset-".($fsize-1)."/".$fsize);
#       header("Content-Length: ".($fsize-$offset));
#       fseek($fd,$offset);
#   else:
        self.response_headers["Content-Type"]="application/octet-stream"
        self.response_headers["Content-Disposition"]="attachment; filename=\"%s\""%fname
        self.response_headers["Content-Transfer-Encoding"]="binary"
        self.response_headers["Content-Length"]="%d"%len(fstr)
        self.write(fstr)
+
+#----------------------------------------------------------
+# QWeb WSGI HTTP Server to run any WSGI app
+# autorun, run an app as FCGI or CGI otherwise launch the server
+#----------------------------------------------------------
+class QWebWSGIHandler(BaseHTTPServer.BaseHTTPRequestHandler):
+ def log_message(self,*p):
+ if self.server.log:
+ return BaseHTTPServer.BaseHTTPRequestHandler.log_message(self,*p)
+ def address_string(self):
+ return self.client_address[0]
+ def start_response(self,status,headers):
+ l=status.split(' ',1)
+ self.send_response(int(l[0]),l[1])
+ ctype_sent=0
+ for i in headers:
+ if i[0].lower()=="content-type":
+ ctype_sent=1
+ self.send_header(*i)
+ if not ctype_sent:
+ self.send_header("Content-type", "text/html")
+ self.end_headers()
+ return self.write
+ def write(self,data):
+ try:
+ self.wfile.write(data)
+ except (socket.error, socket.timeout),e:
+ print e
+ def bufferon(self):
+ if not getattr(self,'wfile_buf',0):
+ self.wfile_buf=1
+ self.wfile_bak=self.wfile
+ self.wfile=StringIO.StringIO()
+ def bufferoff(self):
+ if self.wfile_buf:
+ buf=self.wfile
+ self.wfile=self.wfile_bak
+ self.write(buf.getvalue())
+ self.wfile_buf=0
+ def serve(self,type):
+ path_info, parameters, query = urlparse.urlparse(self.path)[2:5]
+ environ = {
+ 'wsgi.version': (1,0),
+ 'wsgi.url_scheme': 'http',
+ 'wsgi.input': self.rfile,
+ 'wsgi.errors': sys.stderr,
+ 'wsgi.multithread': 0,
+ 'wsgi.multiprocess': 0,
+ 'wsgi.run_once': 0,
+ 'REQUEST_METHOD': self.command,
+ 'SCRIPT_NAME': '',
+ 'QUERY_STRING': query,
+ 'CONTENT_TYPE': self.headers.get('Content-Type', ''),
+ 'CONTENT_LENGTH': self.headers.get('Content-Length', ''),
+ 'REMOTE_ADDR': self.client_address[0],
+ 'REMOTE_PORT': str(self.client_address[1]),
+ 'SERVER_NAME': self.server.server_address[0],
+ 'SERVER_PORT': str(self.server.server_address[1]),
+ 'SERVER_PROTOCOL': self.request_version,
+ # extention
+ 'FULL_PATH': self.path,
+ 'qweb.mode': 'standalone',
+ }
+ if path_info:
+ environ['PATH_INFO'] = urllib.unquote(path_info)
+ for key, value in self.headers.items():
+ environ['HTTP_' + key.upper().replace('-', '_')] = value
+ # Hack to avoid may TCP packets
+ self.bufferon()
+ appiter=self.server.wsgiapp(environ, self.start_response)
+ for data in appiter:
+ self.write(data)
+ self.bufferoff()
+ self.bufferoff()
+ def do_GET(self):
+ self.serve('GET')
+ def do_POST(self):
+ self.serve('GET')
class QWebWSGIServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
    """ QWebWSGIServer
    qweb_wsgi_autorun(wsgiapp,ip='127.0.0.1',port=8080,threaded=1)
    A WSGI HTTP server, threaded or not, and a function to automatically run
    your app according to the environment (either standalone, CGI or FastCGI).

    This feature is called QWeb autorun. To use it on your application, add
    the following lines at the end of the main application python file:

    if __name__ == '__main__':
        qweb.qweb_wsgi_autorun(your_wsgi_app)

    this function will select the appropriate running mode according to the
    calling environment (http-server, FastCGI or CGI).
    """
    def __init__(self, wsgiapp, ip, port, threaded=1, log=1):
        BaseHTTPServer.HTTPServer.__init__(self, (ip, port), QWebWSGIHandler)
        self.wsgiapp = wsgiapp
        self.threaded = threaded
        self.log = log
    def process_request(self,*p):
        # dispatch to the threading mixin only when threading is enabled
        if self.threaded:
            return SocketServer.ThreadingMixIn.process_request(self,*p)
        else:
            return BaseHTTPServer.HTTPServer.process_request(self,*p)
def qweb_wsgi_autorun(wsgiapp,ip='127.0.0.1',port=8080,threaded=1,log=1,callback_ready=None):
    """Run *wsgiapp* in the mode matching the calling environment:
    FastCGI when fd 0 is a socket, CGI when REQUEST_METHOD is set,
    otherwise a standalone QWebWSGIServer listening on ip:port.
    """
    if sys.platform=='win32':
        fcgi=0
    else:
        fcgi=1
        # FastCGI supervisors hand the process a listening socket on fd 0;
        # a plain terminal/pipe raises ENOTSOCK on getpeername()
        sock = socket.fromfd(0, socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.getpeername()
        except socket.error, e:
            if e[0] == errno.ENOTSOCK:
                fcgi=0
    if fcgi or os.environ.has_key('REQUEST_METHOD'):
        # NOTE(review): "import fcgi" rebinds the local flag name to the module
        import fcgi
        fcgi.WSGIServer(wsgiapp,multithreaded=False).run()
    else:
        if log:
            print 'Serving on %s:%d'%(ip,port)
        s=QWebWSGIServer(wsgiapp,ip=ip,port=port,threaded=threaded,log=log)
        if callback_ready:
            callback_ready()
        try:
            s.serve_forever()
        except KeyboardInterrupt,e:
            sys.excepthook(*sys.exc_info())
+
+#----------------------------------------------------------
+# Qweb Documentation
+#----------------------------------------------------------
def qweb_doc():
    """Return the module documentation: the module docstring followed by the
    docstring of every public class and function.

    BUG FIX: an unreachable "print qweb_doc()" statement that followed the
    return was removed.
    """
    body=__doc__
    for i in [QWebXml ,QWebHtml ,QWebForm ,QWebURL ,qweb_control ,QWebRequest ,QWebSession ,QWebWSGIServer ,qweb_wsgi_autorun]:
        n=i.__name__
        d=i.__doc__
        body+='\n\n%s\n%s\n\n%s'%(n,'-'*len(n),d)
    return body
+
+#
diff --git a/tools/ajaxterm/sarissa.js b/tools/ajaxterm/sarissa.js
new file mode 100644
index 000000000..6d13aa2e2
--- /dev/null
+++ b/tools/ajaxterm/sarissa.js
@@ -0,0 +1,647 @@
+/**
+ * ====================================================================
+ * About
+ * ====================================================================
+ * Sarissa is an ECMAScript library acting as a cross-browser wrapper for native XML APIs.
+ * The library supports Gecko based browsers like Mozilla and Firefox,
+ * Internet Explorer (5.5+ with MSXML3.0+), Konqueror, Safari and a little of Opera
+ * @version 0.9.6.1
+ * @author: Manos Batsis, mailto: mbatsis at users full stop sourceforge full stop net
+ * ====================================================================
+ * Licence
+ * ====================================================================
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 or
+ * the GNU Lesser General Public License version 2.1 as published by
+ * the Free Software Foundation (your choice between the two).
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License or GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * or GNU Lesser General Public License along with this program; if not,
+ * write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * or visit http://www.gnu.org
+ *
+ */
+/**
+ * <p>Sarissa is a utility class. Provides "static" methods for DOMDocument and
+ * XMLHTTP objects, DOM Node serialization to XML strings and other goodies.</p>
+ * @constructor
+ */
+function Sarissa(){};
+/** @private */
+Sarissa.PARSED_OK = "Document contains no parsing errors";
+/**
+ * Tells you whether transformNode and transformNodeToObject are available. This functionality
+ * is contained in sarissa_ieemu_xslt.js and is deprecated. If you want to control XSLT transformations
+ * use the XSLTProcessor
+ * @deprecated
+ * @type boolean
+ */
+Sarissa.IS_ENABLED_TRANSFORM_NODE = false;
+/**
+ * tells you whether XMLHttpRequest (or equivalent) is available
+ * @type boolean
+ */
+Sarissa.IS_ENABLED_XMLHTTP = false;
+/**
+ * tells you whether selectNodes/selectSingleNode is available
+ * @type boolean
+ */
+Sarissa.IS_ENABLED_SELECT_NODES = false;
+var _sarissa_iNsCounter = 0;
+var _SARISSA_IEPREFIX4XSLPARAM = "";
+var _SARISSA_HAS_DOM_IMPLEMENTATION = document.implementation && true;
+var _SARISSA_HAS_DOM_CREATE_DOCUMENT = _SARISSA_HAS_DOM_IMPLEMENTATION && document.implementation.createDocument;
+var _SARISSA_HAS_DOM_FEATURE = _SARISSA_HAS_DOM_IMPLEMENTATION && document.implementation.hasFeature;
+var _SARISSA_IS_MOZ = _SARISSA_HAS_DOM_CREATE_DOCUMENT && _SARISSA_HAS_DOM_FEATURE;
+var _SARISSA_IS_SAFARI = (navigator.userAgent && navigator.vendor && (navigator.userAgent.toLowerCase().indexOf("applewebkit") != -1 || navigator.vendor.indexOf("Apple") != -1));
+var _SARISSA_IS_IE = document.all && window.ActiveXObject && navigator.userAgent.toLowerCase().indexOf("msie") > -1 && navigator.userAgent.toLowerCase().indexOf("opera") == -1;
+if(!window.Node || !window.Node.ELEMENT_NODE){
+ var Node = {ELEMENT_NODE: 1, ATTRIBUTE_NODE: 2, TEXT_NODE: 3, CDATA_SECTION_NODE: 4, ENTITY_REFERENCE_NODE: 5, ENTITY_NODE: 6, PROCESSING_INSTRUCTION_NODE: 7, COMMENT_NODE: 8, DOCUMENT_NODE: 9, DOCUMENT_TYPE_NODE: 10, DOCUMENT_FRAGMENT_NODE: 11, NOTATION_NODE: 12};
+};
+
+// IE initialization
+if(_SARISSA_IS_IE){
+ // for XSLT parameter names, prefix needed by IE
+ _SARISSA_IEPREFIX4XSLPARAM = "xsl:";
+ // used to store the most recent ProgID available out of the above
+ var _SARISSA_DOM_PROGID = "";
+ var _SARISSA_XMLHTTP_PROGID = "";
+ /**
+ * Called when the Sarissa_xx.js file is parsed, to pick most recent
+ * ProgIDs for IE, then gets destroyed.
+ * @param idList an array of MSXML PROGIDs from which the most recent will be picked for a given object
+ * @param enabledList an array of arrays where each array has two items; the index of the PROGID for which a certain feature is enabled
+ */
+ pickRecentProgID = function (idList, enabledList){
+ // found progID flag
+ var bFound = false;
+ for(var i=0; i < idList.length && !bFound; i++){
+ try{
+ var oDoc = new ActiveXObject(idList[i]);
+ o2Store = idList[i];
+ bFound = true;
+ for(var j=0;j<enabledList.length;j++)
+ if(i <= enabledList[j][1])
+ Sarissa["IS_ENABLED_"+enabledList[j][0]] = true;
+ }catch (objException){
+ // trap; try next progID
+ };
+ };
+ if (!bFound)
+ throw "Could not retreive a valid progID of Class: " + idList[idList.length-1]+". (original exception: "+e+")";
+ idList = null;
+ return o2Store;
+ };
+ // pick best available MSXML progIDs
+ _SARISSA_DOM_PROGID = pickRecentProgID(["Msxml2.DOMDocument.5.0", "Msxml2.DOMDocument.4.0", "Msxml2.DOMDocument.3.0", "MSXML2.DOMDocument", "MSXML.DOMDocument", "Microsoft.XMLDOM"], [["SELECT_NODES", 2],["TRANSFORM_NODE", 2]]);
+ _SARISSA_XMLHTTP_PROGID = pickRecentProgID(["Msxml2.XMLHTTP.5.0", "Msxml2.XMLHTTP.4.0", "MSXML2.XMLHTTP.3.0", "MSXML2.XMLHTTP", "Microsoft.XMLHTTP"], [["XMLHTTP", 4]]);
+ _SARISSA_THREADEDDOM_PROGID = pickRecentProgID(["Msxml2.FreeThreadedDOMDocument.5.0", "MSXML2.FreeThreadedDOMDocument.4.0", "MSXML2.FreeThreadedDOMDocument.3.0"]);
+ _SARISSA_XSLTEMPLATE_PROGID = pickRecentProgID(["Msxml2.XSLTemplate.5.0", "Msxml2.XSLTemplate.4.0", "MSXML2.XSLTemplate.3.0"], [["XSLTPROC", 2]]);
+ // we dont need this anymore
+ pickRecentProgID = null;
+ //============================================
+ // Factory methods (IE)
+ //============================================
+ // see non-IE version
+ Sarissa.getDomDocument = function(sUri, sName){
+ var oDoc = new ActiveXObject(_SARISSA_DOM_PROGID);
+ // if a root tag name was provided, we need to load it in the DOM
+ // object
+ if (sName){
+ // if needed, create an artificial namespace prefix the way Moz
+ // does
+ if (sUri){
+ oDoc.loadXML("<a" + _sarissa_iNsCounter + ":" + sName + " xmlns:a" + _sarissa_iNsCounter + "=\"" + sUri + "\" />");
+ // don't use the same prefix again
+ ++_sarissa_iNsCounter;
+ }
+ else
+ oDoc.loadXML("<" + sName + "/>");
+ };
+ return oDoc;
+ };
+ // see non-IE version
+ Sarissa.getParseErrorText = function (oDoc) {
+ var parseErrorText = Sarissa.PARSED_OK;
+ if(oDoc.parseError != 0){
+ parseErrorText = "XML Parsing Error: " + oDoc.parseError.reason +
+ "\nLocation: " + oDoc.parseError.url +
+ "\nLine Number " + oDoc.parseError.line + ", Column " +
+ oDoc.parseError.linepos +
+ ":\n" + oDoc.parseError.srcText +
+ "\n";
+ for(var i = 0; i < oDoc.parseError.linepos;i++){
+ parseErrorText += "-";
+ };
+ parseErrorText += "^\n";
+ };
+ return parseErrorText;
+ };
+ // see non-IE version
+ Sarissa.setXpathNamespaces = function(oDoc, sNsSet) {
+ oDoc.setProperty("SelectionLanguage", "XPath");
+ oDoc.setProperty("SelectionNamespaces", sNsSet);
+ };
+ /**
+ * Basic implementation of Mozilla's XSLTProcessor for IE.
+ * Reuses the same XSLT stylesheet for multiple transforms
+ * @constructor
+ */
+ XSLTProcessor = function(){
+ this.template = new ActiveXObject(_SARISSA_XSLTEMPLATE_PROGID);
+ this.processor = null;
+ };
+ /**
+ * Imports the given XSLT DOM and compiles it to a reusable transform
+ * @argument xslDoc The XSLT DOMDocument to import
+ */
+ XSLTProcessor.prototype.importStylesheet = function(xslDoc){
+ // convert stylesheet to free threaded
+ var converted = new ActiveXObject(_SARISSA_THREADEDDOM_PROGID);
+ converted.loadXML(xslDoc.xml);
+ this.template.stylesheet = converted;
+ this.processor = this.template.createProcessor();
+ // (re)set default param values
+ this.paramsSet = new Array();
+ };
+ /**
+ * Transform the given XML DOM
+ * @argument sourceDoc The XML DOMDocument to transform
+ * @return The transformation result as a DOM Document
+ */
+ XSLTProcessor.prototype.transformToDocument = function(sourceDoc){
+ this.processor.input = sourceDoc;
+ var outDoc = new ActiveXObject(_SARISSA_DOM_PROGID);
+ this.processor.output = outDoc;
+ this.processor.transform();
+ return outDoc;
+ };
+ /**
+ * Set global XSLT parameter of the imported stylesheet
+ * @argument nsURI The parameter namespace URI
+ * @argument name The parameter base name
+ * @argument value The new parameter value
+ */
+ XSLTProcessor.prototype.setParameter = function(nsURI, name, value){
+ /* nsURI is optional but cannot be null */
+ if(nsURI){
+ this.processor.addParameter(name, value, nsURI);
+ }else{
+ this.processor.addParameter(name, value);
+ };
+ /* update updated params for getParameter */
+ if(!this.paramsSet[""+nsURI]){
+ this.paramsSet[""+nsURI] = new Array();
+ };
+ this.paramsSet[""+nsURI][name] = value;
+ };
+ /**
+ * Gets a parameter if previously set by setParameter. Returns null
+ * otherwise
+ * @argument name The parameter base name
+ * @argument value The new parameter value
+ * @return The parameter value if previously set by setParameter, null otherwise
+ */
+ XSLTProcessor.prototype.getParameter = function(nsURI, name){
+ nsURI = nsURI || "";
+ if(nsURI in this.paramsSet && name in this.paramsSet[nsURI]){
+ return this.paramsSet[nsURI][name];
+ }else{
+ return null;
+ };
+ };
+}
+else{ /* end IE initialization, try to deal with real browsers now ;-) */
+ if(_SARISSA_HAS_DOM_CREATE_DOCUMENT){
+ /**
+ * <p>Ensures the document was loaded correctly, otherwise sets the
+ * parseError to -1 to indicate something went wrong. Internal use</p>
+ * @private
+ */
+ Sarissa.__handleLoad__ = function(oDoc){
+ if (!oDoc.documentElement || oDoc.documentElement.tagName == "parsererror")
+ oDoc.parseError = -1;
+ Sarissa.__setReadyState__(oDoc, 4);
+ };
+ /**
+ * <p>Attached by an event handler to the load event. Internal use.</p>
+ * @private
+ */
+ _sarissa_XMLDocument_onload = function(){
+ Sarissa.__handleLoad__(this);
+ };
+ /**
+ * <p>Sets the readyState property of the given DOM Document object.
+ * Internal use.</p>
+ * @private
+ * @argument oDoc the DOM Document object to fire the
+ * readystatechange event
+ * @argument iReadyState the number to change the readystate property to
+ */
+ Sarissa.__setReadyState__ = function(oDoc, iReadyState){
+ oDoc.readyState = iReadyState;
+ if (oDoc.onreadystatechange != null && typeof oDoc.onreadystatechange == "function")
+ oDoc.onreadystatechange();
+ };
+ Sarissa.getDomDocument = function(sUri, sName){
+ var oDoc = document.implementation.createDocument(sUri?sUri:"", sName?sName:"", null);
+ oDoc.addEventListener("load", _sarissa_XMLDocument_onload, false);
+ return oDoc;
+ };
+ if(false && window.XMLDocument){
+ /**
+ * <p>Emulate IE's onreadystatechange attribute</p>
+ */
+ XMLDocument.prototype.onreadystatechange = null;
+ /**
+ * <p>Emulates IE's readyState property, which always gives an integer from 0 to 4:</p>
+ * <ul><li>1 == LOADING,</li>
+ * <li>2 == LOADED,</li>
+ * <li>3 == INTERACTIVE,</li>
+ * <li>4 == COMPLETED</li></ul>
+ */
+ XMLDocument.prototype.readyState = 0;
+ /**
+ * <p>Emulate IE's parseError attribute</p>
+ */
+ XMLDocument.prototype.parseError = 0;
+
+ // NOTE: setting async to false will only work with documents
+ // called over HTTP (meaning a server), not the local file system,
+ // unless you are using Moz 1.4+.
+ // BTW the try>catch block is for 1.4; I haven't found a way to check if
+ // the property is implemented without
+ // causing an error and I dont want to use user agent stuff for that...
+ var _SARISSA_SYNC_NON_IMPLEMENTED = false;// ("async" in XMLDocument.prototype) ? false: true;
+ /**
+ * <p>Keeps a handle to the original load() method. Internal use and only
+ * if Mozilla version is lower than 1.4</p>
+ * @private
+ */
+ XMLDocument.prototype._sarissa_load = XMLDocument.prototype.load;
+
+ /**
+ * <p>Overrides the original load method to provide synchronous loading for
+ * Mozilla versions prior to 1.4, using an XMLHttpRequest object (if
+ * async is set to false)</p>
+ * @returns the DOM Object as it was before the load() call (may be empty)
+ */
+ XMLDocument.prototype.load = function(sURI) {
+ var oDoc = document.implementation.createDocument("", "", null);
+ Sarissa.copyChildNodes(this, oDoc);
+ this.parseError = 0;
+ Sarissa.__setReadyState__(this, 1);
+ try {
+ if(this.async == false && _SARISSA_SYNC_NON_IMPLEMENTED) {
+ var tmp = new XMLHttpRequest();
+ tmp.open("GET", sURI, false);
+ tmp.send(null);
+ Sarissa.__setReadyState__(this, 2);
+ Sarissa.copyChildNodes(tmp.responseXML, this);
+ Sarissa.__setReadyState__(this, 3);
+ }
+ else {
+ this._sarissa_load(sURI);
+ };
+ }
+ catch (objException) {
+ this.parseError = -1;
+ }
+ finally {
+ if(this.async == false){
+ Sarissa.__handleLoad__(this);
+ };
+ };
+ return oDoc;
+ };
+
+
+ }//if(window.XMLDocument)
+ else if(document.implementation && document.implementation.hasFeature && document.implementation.hasFeature('LS', '3.0')){
+ Document.prototype.async = true;
+ Document.prototype.onreadystatechange = null;
+ Document.prototype.parseError = 0;
+ Document.prototype.load = function(sURI) {
+ var parser = document.implementation.createLSParser(this.async ? document.implementation.MODE_ASYNCHRONOUS : document.implementation.MODE_SYNCHRONOUS, null);
+ if(this.async){
+ var self = this;
+ parser.addEventListener("load",
+ function(e) {
+ self.readyState = 4;
+ Sarissa.copyChildNodes(e.newDocument, self.documentElement, false);
+ self.onreadystatechange.call();
+ },
+ false);
+ };
+ try {
+ var oDoc = parser.parseURI(sURI);
+ }
+ catch(e){
+ this.parseError = -1;
+ };
+ if(!this.async)
+ Sarissa.copyChildNodes(oDoc, this.documentElement, false);
+ return oDoc;
+ };
+ /**
+ * <p>Factory method to obtain a new DOM Document object</p>
+ * @argument sUri the namespace of the root node (if any)
+ * @argument sUri the local name of the root node (if any)
+ * @returns a new DOM Document
+ */
+ Sarissa.getDomDocument = function(sUri, sName){
+ return document.implementation.createDocument(sUri?sUri:"", sName?sName:"", null);
+ };
+ };
+ };//if(_SARISSA_HAS_DOM_CREATE_DOCUMENT)
+};
+//==========================================
+// Common stuff
+//==========================================
+if(!window.DOMParser){
+ /*
+ * DOMParser is a utility class, used to construct DOMDocuments from XML strings
+ * @constructor
+ */
+ DOMParser = function() {
+ };
+ if(_SARISSA_IS_SAFARI){
+ /**
+ * Construct a new DOM Document from the given XMLstring
+ * @param sXml the given XML string
+ * @param contentType the content type of the document the given string represents (one of text/xml, application/xml, application/xhtml+xml).
+ * @return a new DOM Document from the given XML string
+ */
+ DOMParser.prototype.parseFromString = function(sXml, contentType){
+ if(contentType.toLowerCase() != "application/xml"){
+ throw "Cannot handle content type: \"" + contentType + "\"";
+ };
+ var xmlhttp = new XMLHttpRequest();
+ xmlhttp.open("GET", "data:text/xml;charset=utf-8," + encodeURIComponent(str), false);
+ xmlhttp.send(null);
+ return xmlhttp.responseXML;
+ };
+ }else if(Sarissa.getDomDocument && Sarissa.getDomDocument() && "loadXML" in Sarissa.getDomDocument()){
+ DOMParser.prototype.parseFromString = function(sXml, contentType){
+ var doc = Sarissa.getDomDocument();
+ doc.loadXML(sXml);
+ return doc;
+ };
+ };
+};
+
+if(window.XMLHttpRequest){
+ Sarissa.IS_ENABLED_XMLHTTP = true;
+}
+else if(_SARISSA_IS_IE){
+ /**
+ * Emulate XMLHttpRequest
+ * @constructor
+ */
+ XMLHttpRequest = function() {
+ return new ActiveXObject(_SARISSA_XMLHTTP_PROGID);
+ };
+ Sarissa.IS_ENABLED_XMLHTTP = true;
+};
+
+if(!window.document.importNode && _SARISSA_IS_IE){
+ try{
+ /**
+ * Implements importNode for the current window document in IE using innerHTML.
+ * Testing showed that DOM was multiple times slower than innerHTML for this,
+ * sorry folks. If you encounter trouble (who knows what IE does behind innerHTML)
+ * please gimme a call.
+ * @param oNode the Node to import
+ * @param bChildren whether to include the children of oNode
+ * @returns the imported node for further use
+ */
+ window.document.importNode = function(oNode, bChildren){
+ var importNode = document.createElement("div");
+ if(bChildren)
+ importNode.innerHTML = Sarissa.serialize(oNode);
+ else
+ importNode.innerHTML = Sarissa.serialize(oNode.cloneNode(false));
+ return importNode.firstChild;
+ };
+ }catch(e){};
+};
+if(!Sarissa.getParseErrorText){
+ /**
+ * <p>Returns a human readable description of the parsing error. Useful
+ * for debugging. Tip: append the returned error string in a &lt;pre&gt;
+ * element if you want to render it.</p>
+ * <p>Many thanks to Christian Stocker for the initial patch.</p>
+ * @argument oDoc The target DOM document
+ * @returns The parsing error description of the target Document in
+ * human readable form (preformatted text)
+ */
+ Sarissa.getParseErrorText = function (oDoc){
+ var parseErrorText = Sarissa.PARSED_OK;
+ if(oDoc && oDoc.parseError && oDoc.parseError != 0){
+ /*moz*/
+ if(oDoc.documentElement.tagName == "parsererror"){
+ parseErrorText = oDoc.documentElement.firstChild.data;
+ parseErrorText += "\n" + oDoc.documentElement.firstChild.nextSibling.firstChild.data;
+ }/*konq*/
+ else{
+ parseErrorText = Sarissa.getText(oDoc.documentElement);/*.getElementsByTagName("h1")[0], false) + "\n";
+ parseErrorText += Sarissa.getText(oDoc.documentElement.getElementsByTagName("body")[0], false) + "\n";
+ parseErrorText += Sarissa.getText(oDoc.documentElement.getElementsByTagName("pre")[0], false);*/
+ };
+ };
+ return parseErrorText;
+ };
+};
+Sarissa.getText = function(oNode, deep){
+ var s = "";
+ var nodes = oNode.childNodes;
+ for(var i=0; i < nodes.length; i++){
+ var node = nodes[i];
+ var nodeType = node.nodeType;
+ if(nodeType == Node.TEXT_NODE || nodeType == Node.CDATA_SECTION_NODE){
+ s += node.data;
+ }else if(deep == true
+ && (nodeType == Node.ELEMENT_NODE
+ || nodeType == Node.DOCUMENT_NODE
+ || nodeType == Node.DOCUMENT_FRAGMENT_NODE)){
+ s += Sarissa.getText(node, true);
+ };
+ };
+ return s;
+};
+if(window.XMLSerializer){
+ /**
+ * <p>Factory method to obtain the serialization of a DOM Node</p>
+ * @returns the serialized Node as an XML string
+ */
+ Sarissa.serialize = function(oDoc){
+ var s = null;
+ if(oDoc){
+ s = oDoc.innerHTML?oDoc.innerHTML:(new XMLSerializer()).serializeToString(oDoc);
+ };
+ return s;
+ };
+}else{
+ if(Sarissa.getDomDocument && (Sarissa.getDomDocument("","foo", null)).xml){
+ // see non-IE version
+ Sarissa.serialize = function(oDoc) {
+ var s = null;
+ if(oDoc){
+ s = oDoc.innerHTML?oDoc.innerHTML:oDoc.xml;
+ };
+ return s;
+ };
+ /**
+ * Utility class to serialize DOM Node objects to XML strings
+ * @constructor
+ */
+ XMLSerializer = function(){};
+ /**
+ * Serialize the given DOM Node to an XML string
+ * @param oNode the DOM Node to serialize
+ */
+ XMLSerializer.prototype.serializeToString = function(oNode) {
+ return oNode.xml;
+ };
+ };
+};
+
+/**
+ * strips tags from a markup string
+ */
+Sarissa.stripTags = function (s) {
+ return s.replace(/<[^>]+>/g,"");
+};
+/**
+ * <p>Deletes all child nodes of the given node</p>
+ * @argument oNode the Node to empty
+ */
+Sarissa.clearChildNodes = function(oNode) {
+ // need to check for firstChild due to opera 8 bug with hasChildNodes
+ while(oNode.firstChild){
+ oNode.removeChild(oNode.firstChild);
+ };
+};
+/**
+ * <p> Copies the childNodes of nodeFrom to nodeTo</p>
+ * <p> <b>Note:</b> The second object's original content is deleted before
+ * the copy operation, unless you supply a true third parameter</p>
+ * @argument nodeFrom the Node to copy the childNodes from
+ * @argument nodeTo the Node to copy the childNodes to
+ * @argument bPreserveExisting whether to preserve the original content of nodeTo, default is false
+ */
+Sarissa.copyChildNodes = function(nodeFrom, nodeTo, bPreserveExisting) {
+ if((!nodeFrom) || (!nodeTo)){
+ throw "Both source and destination nodes must be provided";
+ };
+ if(!bPreserveExisting){
+ Sarissa.clearChildNodes(nodeTo);
+ };
+ var ownerDoc = nodeTo.nodeType == Node.DOCUMENT_NODE ? nodeTo : nodeTo.ownerDocument;
+ var nodes = nodeFrom.childNodes;
+ if(ownerDoc.importNode && (!_SARISSA_IS_IE)) {
+ for(var i=0;i < nodes.length;i++) {
+ nodeTo.appendChild(ownerDoc.importNode(nodes[i], true));
+ };
+ }
+ else{
+ for(var i=0;i < nodes.length;i++) {
+ nodeTo.appendChild(nodes[i].cloneNode(true));
+ };
+ };
+};
+
+/**
+ * <p> Moves the childNodes of nodeFrom to nodeTo</p>
+ * <p> <b>Note:</b> The second object's original content is deleted before
+ * the move operation, unless you supply a true third parameter</p>
+ * @argument nodeFrom the Node to copy the childNodes from
+ * @argument nodeTo the Node to copy the childNodes to
+ * @argument bPreserveExisting whether to preserve the original content of nodeTo, default is
+ */
+Sarissa.moveChildNodes = function(nodeFrom, nodeTo, bPreserveExisting) {
+ if((!nodeFrom) || (!nodeTo)){
+ throw "Both source and destination nodes must be provided";
+ };
+ if(!bPreserveExisting){
+ Sarissa.clearChildNodes(nodeTo);
+ };
+ var nodes = nodeFrom.childNodes;
+ // if within the same doc, just move, else copy and delete
+ if(nodeFrom.ownerDocument == nodeTo.ownerDocument){
+ while(nodeFrom.firstChild){
+ nodeTo.appendChild(nodeFrom.firstChild);
+ };
+ }else{
+ var ownerDoc = nodeTo.nodeType == Node.DOCUMENT_NODE ? nodeTo : nodeTo.ownerDocument;
+ if(ownerDoc.importNode && (!_SARISSA_IS_IE)) {
+ for(var i=0;i < nodes.length;i++) {
+ nodeTo.appendChild(ownerDoc.importNode(nodes[i], true));
+ };
+ }else{
+ for(var i=0;i < nodes.length;i++) {
+ nodeTo.appendChild(nodes[i].cloneNode(true));
+ };
+ };
+ Sarissa.clearChildNodes(nodeFrom);
+ };
+};
+
+/**
+ * <p>Serialize any object to an XML string. All properties are serialized using the property name
+ * as the XML element name. Array elements are rendered as <code>array-item</code> elements,
+ * using their index/key as the value of the <code>key</code> attribute.</p>
+ * @argument anyObject the object to serialize
+ * @argument objectName a name for that object
+ * @return the XML serialization of the given object as a string
+ */
+Sarissa.xmlize = function(anyObject, objectName, indentSpace){
+ indentSpace = indentSpace?indentSpace:'';
+ var s = indentSpace + '<' + objectName + '>';
+ var isLeaf = false;
+ if(!(anyObject instanceof Object) || anyObject instanceof Number || anyObject instanceof String
+ || anyObject instanceof Boolean || anyObject instanceof Date){
+ s += Sarissa.escape(""+anyObject);
+ isLeaf = true;
+ }else{
+ s += "\n";
+ var itemKey = '';
+ var isArrayItem = anyObject instanceof Array;
+ for(var name in anyObject){
+ s += Sarissa.xmlize(anyObject[name], (isArrayItem?"array-item key=\""+name+"\"":name), indentSpace + " ");
+ };
+ s += indentSpace;
+ };
+ return s += (objectName.indexOf(' ')!=-1?"</array-item>\n":"</" + objectName + ">\n");
+};
+
+/**
+ * Escape the characters of the given string that correspond to the five predefined XML entities
+ * @param sXml the string to escape
+ */
+Sarissa.escape = function(sXml){
+ return sXml.replace(/&/g, "&amp;")
+ .replace(/</g, "&lt;")
+ .replace(/>/g, "&gt;")
+ .replace(/"/g, "&quot;")
+ .replace(/'/g, "&apos;");
+};
+
+/**
+ * Unescape the given string. This turns the occurrences of the predefined XML
+ * entities back into the characters they represent
+ * @param sXml the string to unescape
+ */
+Sarissa.unescape = function(sXml){
+ return sXml.replace(/&apos;/g,"'")
+ .replace(/&quot;/g,"\"")
+ .replace(/&gt;/g,">")
+ .replace(/&lt;/g,"<")
+ .replace(/&amp;/g,"&");
+};
+// EOF
diff --git a/tools/ajaxterm/sarissa_dhtml.js b/tools/ajaxterm/sarissa_dhtml.js
new file mode 100644
index 000000000..2d85c817e
--- /dev/null
+++ b/tools/ajaxterm/sarissa_dhtml.js
@@ -0,0 +1,105 @@
+/**
+ * ====================================================================
+ * About
+ * ====================================================================
+ * Sarissa cross browser XML library - AJAX module
+ * @version 0.9.6.1
+ * @author: Copyright Manos Batsis, mailto: mbatsis at users full stop sourceforge full stop net
+ *
+ * This module contains some convenient AJAX tricks based on Sarissa
+ *
+ * ====================================================================
+ * Licence
+ * ====================================================================
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 or
+ * the GNU Lesser General Public License version 2.1 as published by
+ * the Free Software Foundation (your choice between the two).
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License or GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * or GNU Lesser General Public License along with this program; if not,
+ * write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * or visit http://www.gnu.org
+ *
+ */
+/**
+ * Update an element with response of a GET request on the given URL.
+ * @addon
+ * @param sFromUrl the URL to make the request to
+ * @param oTargetElement the element to update
+ * @param xsltproc (optional) the transformer to use on the returned
+ * content before updating the target element with it
+ */
+Sarissa.updateContentFromURI = function(sFromUrl, oTargetElement, xsltproc) {
+ try{
+ oTargetElement.style.cursor = "wait";
+ var xmlhttp = new XMLHttpRequest();
+ xmlhttp.open("GET", sFromUrl);
+ function sarissa_dhtml_loadHandler() {
+ if (xmlhttp.readyState == 4) {
+ oTargetElement.style.cursor = "auto";
+ Sarissa.updateContentFromNode(xmlhttp.responseXML, oTargetElement, xsltproc);
+ };
+ };
+ xmlhttp.onreadystatechange = sarissa_dhtml_loadHandler;
+ xmlhttp.send(null);
+ oTargetElement.style.cursor = "auto";
+ }
+ catch(e){
+ oTargetElement.style.cursor = "auto";
+ throw e;
+ };
+};
+
+/**
+ * Update an element's content with the given DOM node.
+ * @addon
+ * @param sFromUrl the URL to make the request to
+ * @param oTargetElement the element to update
+ * @param xsltproc (optional) the transformer to use on the given
+ * DOM node before updating the target element with it
+ */
+Sarissa.updateContentFromNode = function(oNode, oTargetElement, xsltproc) {
+ try {
+ oTargetElement.style.cursor = "wait";
+ Sarissa.clearChildNodes(oTargetElement);
+ // check for parsing errors
+ var ownerDoc = oNode.nodeType == Node.DOCUMENT_NODE?oNode:oNode.ownerDocument;
+ if(ownerDoc.parseError && ownerDoc.parseError != 0) {
+ var pre = document.createElement("pre");
+ pre.appendChild(document.createTextNode(Sarissa.getParseErrorText(ownerDoc)));
+ oTargetElement.appendChild(pre);
+ }
+ else {
+ // transform if appropriate
+ if(xsltproc) {
+ oNode = xsltproc.transformToDocument(oNode);
+ };
+ // be smart, maybe the user wants to display the source instead
+ if(oTargetElement.tagName.toLowerCase == "textarea" || oTargetElement.tagName.toLowerCase == "input") {
+ oTargetElement.value = Sarissa.serialize(oNode);
+ }
+ else {
+ // ok that was not smart; it was paranoid. Keep up the good work by trying to use DOM instead of innerHTML
+ if(oNode.nodeType == Node.DOCUMENT_NODE || oNode.ownerDocument.documentElement == oNode) {
+ oTargetElement.innerHTML = Sarissa.serialize(oNode);
+ }
+ else{
+ oTargetElement.appendChild(oTargetElement.ownerDocument.importNode(oNode, true));
+ };
+ };
+ };
+ }
+ catch(e) {
+ throw e;
+ }
+ finally{
+ oTargetElement.style.cursor = "auto";
+ };
+};
+
diff --git a/tools/euca-get-ajax-console b/tools/euca-get-ajax-console
new file mode 100755
index 000000000..37060e74f
--- /dev/null
+++ b/tools/euca-get-ajax-console
@@ -0,0 +1,164 @@
+#!/usr/bin/env python
+# pylint: disable-msg=C0103
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Euca add-on to use ajax console"""
+
+import getopt
+import os
+import sys
+
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+ os.pardir,
+ os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+ sys.path.insert(0, possible_topdir)
+
+import boto
+import nova
+from boto.ec2.connection import EC2Connection
+from euca2ools import Euca2ool, InstanceValidationError, Util, ConnectionFailed
+
+usage_string = """
+Retrieves a url to an ajax console terminal
+
+euca-get-ajax-console [-h, --help] [--version] [--debug] instance_id
+
+REQUIRED PARAMETERS
+
+instance_id: unique identifier for the instance to show the console output for.
+
+OPTIONAL PARAMETERS
+
+"""
+
+
+# This class extends boto to add AjaxConsole functionality
+class NovaEC2Connection(EC2Connection):
+
+ def get_ajax_console(self, instance_id):
+ """
+ Retrieves a console connection for the specified instance.
+
+ :type instance_id: string
+ :param instance_id: The instance ID of a running instance on the cloud.
+
+ :rtype: :class:`AjaxConsole`
+ """
+
+ class AjaxConsole:
+ def __init__(self, parent=None):
+ self.parent = parent
+ self.instance_id = None
+ self.url = None
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'instanceId':
+ self.instance_id = value
+ elif name == 'url':
+ self.url = value
+ else:
+ setattr(self, name, value)
+
+ params = {}
+ self.build_list_params(params, [instance_id], 'InstanceId')
+ return self.get_object('GetAjaxConsole', params, AjaxConsole)
+ pass
+
+
+def override_connect_ec2(aws_access_key_id=None,
+ aws_secret_access_key=None, **kwargs):
+ return NovaEC2Connection(aws_access_key_id,
+ aws_secret_access_key, **kwargs)
+
+# override boto's connect_ec2 method, so that we can use NovaEC2Connection
+boto.connect_ec2 = override_connect_ec2
+
+
+def usage(status=1):
+ print usage_string
+ Util().usage()
+ sys.exit(status)
+
+
+def version():
+ print Util().version()
+ sys.exit()
+
+
+def display_console_output(console_output):
+ print console_output.instance_id
+ print console_output.timestamp
+ print console_output.output
+
+
+def display_ajax_console_output(console_output):
+ print console_output.url
+
+
+def main():
+ try:
+ euca = Euca2ool()
+ except Exception, e:
+ print e
+ usage()
+
+ instance_id = None
+
+ for name, value in euca.opts:
+ if name in ('-h', '--help'):
+ usage(0)
+ elif name == '--version':
+ version()
+ elif name == '--debug':
+ debug = True
+
+ for arg in euca.args:
+ instance_id = arg
+ break
+
+ if instance_id:
+ try:
+ euca.validate_instance_id(instance_id)
+ except InstanceValidationError:
+ print 'Invalid instance id'
+ sys.exit(1)
+
+ try:
+ euca_conn = euca.make_connection()
+ except ConnectionFailed, e:
+ print e.message
+ sys.exit(1)
+ try:
+ console_output = euca_conn.get_ajax_console(instance_id)
+ except Exception, ex:
+ euca.display_error_and_exit('%s' % ex)
+
+ display_ajax_console_output(console_output)
+ else:
+ print 'instance_id must be specified'
+ usage()
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/install_venv.py b/tools/install_venv.py
index 32c372352..4e3941210 100644
--- a/tools/install_venv.py
+++ b/tools/install_venv.py
@@ -66,7 +66,8 @@ def check_dependencies():
# Try installing it via easy_install...
if HAS_EASY_INSTALL:
print 'Installing virtualenv via easy_install...',
- if not run_command(['which', 'easy_install']):
+ if not (run_command(['which', 'easy_install']) and
+ run_command(['easy_install', 'virtualenv'])):
die('ERROR: virtualenv not found.\n\nNova development requires virtualenv,'
' please install it using your favorite package management tool')
print 'done.'