-rw-r--r--  Authors | 2
-rw-r--r--  README | 4
-rwxr-xr-x  bin/nova-ajax-console-proxy | 137
-rwxr-xr-x  contrib/nova.sh | 2
-rw-r--r--  doc/source/adminguide/binaries.rst | 2
-rw-r--r--  doc/source/adminguide/flags.rst | 2
-rw-r--r--  doc/source/adminguide/getting.started.rst | 2
-rw-r--r--  doc/source/adminguide/index.rst | 2
-rw-r--r--  doc/source/adminguide/managing.images.rst | 2
-rw-r--r--  doc/source/adminguide/managing.instances.rst | 2
-rw-r--r--  doc/source/adminguide/managing.networks.rst | 4
-rw-r--r--  doc/source/adminguide/managing.projects.rst | 2
-rw-r--r--  doc/source/adminguide/managingsecurity.rst | 2
-rw-r--r--  doc/source/adminguide/monitoring.rst | 2
-rw-r--r--  doc/source/adminguide/multi.node.install.rst | 365
-rw-r--r--  doc/source/adminguide/network.flat.rst | 2
-rw-r--r--  doc/source/adminguide/network.vlan.rst | 5
-rw-r--r--  doc/source/adminguide/nova.manage.rst | 2
-rw-r--r--  doc/source/cloud101.rst | 8
-rw-r--r--  doc/source/community.rst | 2
-rw-r--r--  doc/source/devref/addmethod.openstackapi.rst | 2
-rw-r--r--  doc/source/devref/api.rst | 2
-rw-r--r--  doc/source/devref/architecture.rst | 2
-rw-r--r--  doc/source/devref/auth.rst | 2
-rw-r--r--  doc/source/devref/cloudpipe.rst | 2
-rw-r--r--  doc/source/devref/compute.rst | 2
-rw-r--r--  doc/source/devref/database.rst | 4
-rw-r--r--  doc/source/devref/development.environment.rst | 2
-rw-r--r--  doc/source/devref/fakes.rst | 2
-rw-r--r--  doc/source/devref/glance.rst | 2
-rw-r--r--  doc/source/devref/index.rst | 2
-rw-r--r--  doc/source/devref/network.rst | 2
-rw-r--r--  doc/source/devref/nova.rst | 2
-rw-r--r--  doc/source/devref/objectstore.rst | 2
-rw-r--r--  doc/source/devref/rabbit.rst | 5
-rw-r--r--  doc/source/devref/scheduler.rst | 2
-rw-r--r--  doc/source/devref/services.rst | 2
-rw-r--r--  doc/source/devref/volume.rst | 2
-rw-r--r--  doc/source/index.rst | 2
-rw-r--r--  doc/source/livecd.rst | 17
-rw-r--r--  doc/source/nova.concepts.rst | 8
-rw-r--r--  doc/source/object.model.rst | 26
-rw-r--r--  doc/source/quickstart.rst | 2
-rw-r--r--  doc/source/service.architecture.rst | 17
-rw-r--r--  krm_mapping.json.sample | 3
-rw-r--r--  nova/api/ec2/cloud.py | 57
-rw-r--r--  nova/api/openstack/__init__.py | 11
-rw-r--r--  nova/api/openstack/backup_schedules.py | 6
-rw-r--r--  nova/api/openstack/common.py | 24
-rw-r--r--  nova/api/openstack/images.py | 19
-rw-r--r--  nova/api/openstack/servers.py | 38
-rw-r--r--  nova/api/openstack/shared_ip_groups.py (renamed from nova/api/openstack/sharedipgroups.py) | 10
-rw-r--r--  nova/auth/ldapdriver.py | 91
-rw-r--r--  nova/auth/manager.py | 14
-rw-r--r--  nova/auth/novarc.template | 4
-rw-r--r--  nova/compute/api.py | 27
-rw-r--r--  nova/compute/manager.py | 8
-rw-r--r--  nova/db/api.py | 13
-rw-r--r--  nova/db/sqlalchemy/api.py | 17
-rw-r--r--  nova/db/sqlalchemy/models.py | 1
-rw-r--r--  nova/flags.py | 12
-rw-r--r--  nova/scheduler/zone.py | 56
-rw-r--r--  nova/service.py | 4
-rw-r--r--  nova/tests/api/openstack/test_images.py | 2
-rw-r--r--  nova/tests/api/openstack/test_servers.py | 10
-rw-r--r--  nova/tests/api/openstack/test_shared_ip_groups.py (renamed from nova/tests/api/openstack/test_sharedipgroups.py) | 2
-rw-r--r--  nova/tests/test_cloud.py | 59
-rw-r--r--  nova/tests/test_compute.py | 10
-rw-r--r--  nova/tests/test_scheduler.py | 54
-rw-r--r--  nova/tests/test_service.py | 15
-rw-r--r--  nova/tests/test_virt.py | 44
-rw-r--r--  nova/utils.py | 5
-rw-r--r--  nova/virt/fake.py | 3
-rw-r--r--  nova/virt/libvirt.xml.template | 13
-rw-r--r--  nova/virt/libvirt_conn.py | 51
-rw-r--r--  nova/virt/xenapi/vm_utils.py | 4
-rw-r--r--  nova/virt/xenapi/vmops.py | 24
-rw-r--r--  nova/virt/xenapi_conn.py | 4
-rw-r--r--  tools/ajaxterm/README.txt | 120
-rw-r--r--  tools/ajaxterm/ajaxterm.1 | 35
-rw-r--r--  tools/ajaxterm/ajaxterm.css | 64
-rw-r--r--  tools/ajaxterm/ajaxterm.html | 25
-rw-r--r--  tools/ajaxterm/ajaxterm.js | 279
-rwxr-xr-x  tools/ajaxterm/ajaxterm.py | 586
-rwxr-xr-x  tools/ajaxterm/configure | 32
-rw-r--r--  tools/ajaxterm/configure.ajaxterm.bin | 2
-rw-r--r--  tools/ajaxterm/configure.initd.debian | 33
-rw-r--r--  tools/ajaxterm/configure.initd.gentoo | 27
-rw-r--r--  tools/ajaxterm/configure.initd.redhat | 75
-rw-r--r--  tools/ajaxterm/configure.makefile | 20
-rw-r--r--  tools/ajaxterm/qweb.py | 1356
-rw-r--r--  tools/ajaxterm/sarissa.js | 647
-rw-r--r--  tools/ajaxterm/sarissa_dhtml.js | 105
-rwxr-xr-x  tools/euca-get-ajax-console | 164
-rw-r--r--  tools/install_venv.py | 3
95 files changed, 4656 insertions, 296 deletions
diff --git a/Authors b/Authors
index a482c34f0..56344957e 100644
--- a/Authors
+++ b/Authors
@@ -15,6 +15,7 @@ Eldar Nugaev <enugaev@griddynamics.com>
Eric Day <eday@oddments.org>
Ewan Mellor <ewan.mellor@citrix.com>
Hisaki Ohara <hisaki.ohara@intel.com>
+Ilya Alekseyev <ialekseev@griddynamics.com>
Jay Pipes <jaypipes@gmail.com>
Jesse Andrews <anotherjesse@gmail.com>
Joe Heck <heckj@mac.com>
@@ -24,6 +25,7 @@ Josh Kearney <josh.kearney@rackspace.com>
Joshua McKenty <jmckenty@gmail.com>
Justin Santa Barbara <justin@fathomdb.com>
Ken Pepple <ken.pepple@gmail.com>
+Lorin Hochstein <lorin@isi.edu>
Matt Dietz <matt.dietz@rackspace.com>
Michael Gundlach <michael.gundlach@rackspace.com>
Monsyne Dragon <mdragon@rackspace.com>
diff --git a/README b/README
index 851bca9db..f9334c295 100644
--- a/README
+++ b/README
@@ -1,7 +1,7 @@
The Choose Your Own Adventure README for Nova:
You have come across a cloud computing fabric controller. It has identified
- itself as "Nova." It is apparent that it maintains compatability with
+ itself as "Nova." It is apparent that it maintains compatibility with
the popular Amazon EC2 and S3 APIs.
To monitor it from a distance: follow @novacc on twitter
@@ -10,7 +10,7 @@ To tame it for use in your own cloud: read http://nova.openstack.org/getting.sta
To study its anatomy: read http://nova.openstack.org/architecture.html
-To disect it in detail: visit http://code.launchpad.net/nova
+To dissect it in detail: visit http://code.launchpad.net/nova
To taunt it with its weaknesses: use http://bugs.launchpad.net/nova
diff --git a/bin/nova-ajax-console-proxy b/bin/nova-ajax-console-proxy
new file mode 100755
index 000000000..2bc407658
--- /dev/null
+++ b/bin/nova-ajax-console-proxy
@@ -0,0 +1,137 @@
+#!/usr/bin/env python
+# pylint: disable-msg=C0103
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Ajax Console Proxy Server"""
+
+from eventlet import greenthread
+from eventlet.green import urllib2
+
+import exceptions
+import gettext
+import logging
+import os
+import sys
+import time
+import urlparse
+
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+ os.pardir,
+ os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+ sys.path.insert(0, possible_topdir)
+
+gettext.install('nova', unicode=1)
+
+from nova import flags
+from nova import log as logging
+from nova import rpc
+from nova import utils
+from nova import wsgi
+
+FLAGS = flags.FLAGS
+
+flags.DEFINE_integer('ajax_console_idle_timeout', 300,
+ 'Seconds before idle connection destroyed')
+
+LOG = logging.getLogger('nova.ajax_console_proxy')
+LOG.setLevel(logging.DEBUG)
+LOG.addHandler(logging.StreamHandler())
+
+
+class AjaxConsoleProxy(object):
+ tokens = {}
+
+ def __call__(self, env, start_response):
+ try:
+ req_url = '%s://%s%s?%s' % (env['wsgi.url_scheme'],
+ env['HTTP_HOST'],
+ env['PATH_INFO'],
+ env['QUERY_STRING'])
+ if 'HTTP_REFERER' in env:
+ auth_url = env['HTTP_REFERER']
+ else:
+ auth_url = req_url
+
+ auth_params = urlparse.parse_qs(urlparse.urlparse(auth_url).query)
+ parsed_url = urlparse.urlparse(req_url)
+
+ auth_info = AjaxConsoleProxy.tokens[auth_params['token'][0]]
+ args = auth_info['args']
+ auth_info['last_activity'] = time.time()
+
+ remote_url = ("http://%s:%s%s?token=%s" % (
+ str(args['host']),
+ str(args['port']),
+ parsed_url.path,
+ str(args['token'])))
+
+ opener = urllib2.urlopen(remote_url, env['wsgi.input'].read())
+ body = opener.read()
+ info = opener.info()
+
+ start_response("200 OK", info.dict.items())
+ return body
+ except (exceptions.KeyError):
+ if env['PATH_INFO'] != '/favicon.ico':
+ LOG.audit("Unauthorized request %s, %s"
+ % (req_url, str(env)))
+ start_response("401 NOT AUTHORIZED", [])
+ return "Not Authorized"
+ except Exception:
+ start_response("500 ERROR", [])
+ return "Server Error"
+
+ def register_listeners(self):
+ class Callback:
+ def __call__(self, data, message):
+ if data['method'] == 'authorize_ajax_console':
+ AjaxConsoleProxy.tokens[data['args']['token']] = \
+ {'args': data['args'], 'last_activity': time.time()}
+
+ conn = rpc.Connection.instance(new=True)
+ consumer = rpc.TopicConsumer(
+ connection=conn,
+ topic=FLAGS.ajax_console_proxy_topic)
+ consumer.register_callback(Callback())
+
+ def delete_expired_tokens():
+ now = time.time()
+ to_delete = []
+ for k, v in AjaxConsoleProxy.tokens.items():
+ if now - v['last_activity'] > FLAGS.ajax_console_idle_timeout:
+ to_delete.append(k)
+
+ for k in to_delete:
+ del AjaxConsoleProxy.tokens[k]
+
+ utils.LoopingCall(consumer.fetch, auto_ack=True,
+ enable_callbacks=True).start(0.1)
+ utils.LoopingCall(delete_expired_tokens).start(1)
+
+if __name__ == '__main__':
+ utils.default_flagfile()
+ FLAGS(sys.argv)
+ server = wsgi.Server()
+ acp = AjaxConsoleProxy()
+ acp.register_listeners()
+ server.start(acp, FLAGS.ajax_console_proxy_port, host='0.0.0.0')
+ server.wait()
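
The proxy above only serves requests whose token has already been announced on the ajax console topic. Below is a minimal sketch (not part of this commit) of the message a compute host would cast to register such a token, assuming the rpc.cast(context, topic, message) helper used elsewhere in this diff; the token, host, and port values are illustrative placeholders::

    # Hypothetical registration cast; names are illustrative, not from this patch.
    rpc.cast(context,
             FLAGS.ajax_console_proxy_topic,
             {'method': 'authorize_ajax_console',
              'args': {'token': token,           # opaque token embedded in the console URL
                       'host': ajaxterm_host,    # host where the ajaxterm instance listens
                       'port': ajaxterm_port}})  # port of that ajaxterm instance

The Callback defined in register_listeners() stores exactly these args, and delete_expired_tokens() drops the entry after ajax_console_idle_timeout seconds of inactivity.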
diff --git a/contrib/nova.sh b/contrib/nova.sh
index da1ba030c..e06706295 100755
--- a/contrib/nova.sh
+++ b/contrib/nova.sh
@@ -78,6 +78,7 @@ if [ "$CMD" == "install" ]; then
sudo apt-get install -y user-mode-linux kvm libvirt-bin
sudo apt-get install -y screen euca2ools vlan curl rabbitmq-server
sudo apt-get install -y lvm2 iscsitarget open-iscsi
+ sudo apt-get install -y socat
echo "ISCSITARGET_ENABLE=true" | sudo tee /etc/default/iscsitarget
sudo /etc/init.d/iscsitarget restart
sudo modprobe kvm
@@ -155,6 +156,7 @@ if [ "$CMD" == "run" ]; then
screen_it network "$NOVA_DIR/bin/nova-network"
screen_it scheduler "$NOVA_DIR/bin/nova-scheduler"
screen_it volume "$NOVA_DIR/bin/nova-volume"
+ screen_it ajax_console_proxy "$NOVA_DIR/bin/nova-ajax-console-proxy"
screen_it test ". $NOVA_DIR/novarc"
screen -S nova -x
fi
diff --git a/doc/source/adminguide/binaries.rst b/doc/source/adminguide/binaries.rst
index 25605adf9..5c50a51f1 100644
--- a/doc/source/adminguide/binaries.rst
+++ b/doc/source/adminguide/binaries.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/adminguide/flags.rst b/doc/source/adminguide/flags.rst
index 4c950aa88..072f0a1a5 100644
--- a/doc/source/adminguide/flags.rst
+++ b/doc/source/adminguide/flags.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/adminguide/getting.started.rst b/doc/source/adminguide/getting.started.rst
index 0cadeb45e..675d8e664 100644
--- a/doc/source/adminguide/getting.started.rst
+++ b/doc/source/adminguide/getting.started.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/adminguide/index.rst b/doc/source/adminguide/index.rst
index 736a154b2..e653c9e8b 100644
--- a/doc/source/adminguide/index.rst
+++ b/doc/source/adminguide/index.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/adminguide/managing.images.rst b/doc/source/adminguide/managing.images.rst
index df71db23b..c5d93a6e8 100644
--- a/doc/source/adminguide/managing.images.rst
+++ b/doc/source/adminguide/managing.images.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/adminguide/managing.instances.rst b/doc/source/adminguide/managing.instances.rst
index d97567bb2..e62352017 100644
--- a/doc/source/adminguide/managing.instances.rst
+++ b/doc/source/adminguide/managing.instances.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/adminguide/managing.networks.rst b/doc/source/adminguide/managing.networks.rst
index 38c1cba78..9eea46d70 100644
--- a/doc/source/adminguide/managing.networks.rst
+++ b/doc/source/adminguide/managing.networks.rst
@@ -1,7 +1,7 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
- Overview Sections Copyright 2010 Citrix
+ Overview Sections Copyright 2010-2011 Citrix
All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/doc/source/adminguide/managing.projects.rst b/doc/source/adminguide/managing.projects.rst
index b592e14d7..5dd7f2de9 100644
--- a/doc/source/adminguide/managing.projects.rst
+++ b/doc/source/adminguide/managing.projects.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/adminguide/managingsecurity.rst b/doc/source/adminguide/managingsecurity.rst
index 3b11b181a..7893925e7 100644
--- a/doc/source/adminguide/managingsecurity.rst
+++ b/doc/source/adminguide/managingsecurity.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/adminguide/monitoring.rst b/doc/source/adminguide/monitoring.rst
index e7766a6e7..2c93c71b5 100644
--- a/doc/source/adminguide/monitoring.rst
+++ b/doc/source/adminguide/monitoring.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/adminguide/multi.node.install.rst b/doc/source/adminguide/multi.node.install.rst
index a652e44b7..5918b0d38 100644
--- a/doc/source/adminguide/multi.node.install.rst
+++ b/doc/source/adminguide/multi.node.install.rst
@@ -1,6 +1,7 @@
..
- Copyright 2010 United States Government as represented by the
- Administrator of the National Aeronautics and Space Administration.
+ Copyright 2010-2011 United States Government as represented by the
+ Administrator of the National Aeronautics and Space Administration.
+
All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -17,36 +18,35 @@
Installing Nova on Multiple Servers
===================================
-
+
When you move beyond evaluating the technology and into building an actual
production environment, you will need to know how to configure your datacenter
and how to deploy components across your clusters. This guide should help you
through that process.
-
+
You can install multiple nodes to increase performance and availability of the OpenStack Compute installation.
-
+
This setup is based on an Ubuntu Lucid 10.04 installation with the latest updates. Most of this works around issues that need to be resolved in the installation and configuration scripts as of October 18th 2010. It also needs to eventually be generalized, but the intent here is to get the multi-node configuration bootstrapped so folks can move forward.
-
-
+
+
Requirements for a multi-node installation
------------------------------------------
-
+
* You need a real database, compatible with SQLAlchemy (mysql, postgresql) There's not a specific reason to choose one over another, it basically depends what you know. MySQL is easier to do High Availability (HA) with, but people may already know Postgres. We should document both configurations, though.
* For a recommended HA setup, consider a MySQL master/slave replication, with as many slaves as you like, and probably a heartbeat to kick one of the slaves into being a master if it dies.
* For performance optimization, split reads and writes to the database. MySQL proxy is the easiest way to make this work if running MySQL.
-
+
Assumptions
-^^^^^^^^^^^
-
+-----------
+
* Networking is configured between/through the physical machines on a single subnet.
-* Installation and execution are both performed by root user.
-
-
-
-Step 1 Use apt-get to get the latest code
------------------------------------------
+* Installation and execution are both performed by the ROOT user.
+
+
+Step 1 - Use apt-get to get the latest code
+-------------------------------------------
-1. Setup Nova PPA with https://launchpad.net/~nova-core/+archive/trunk.
+1. Set up the Nova PPA with https://launchpad.net/~nova-core/+archive/trunk. The ‘python-software-properties’ package is a prerequisite for setting up the nova package repo:
::
@@ -69,201 +69,260 @@ Step 1 Use apt-get to get the latest code
It is highly likely that there will be errors when the nova services come up since they are not yet configured. Don't worry, you're only at step 1!
Step 2 Setup configuration file (installed in /etc/nova)
----------------------------------------------------------
-
-Note: CC_ADDR=<the external IP address of your cloud controller>
-
-Nova development has consolidated all .conf files to nova.conf as of November 2010. References to specific .conf files may be ignored.
-
-#. These need to be defined in the nova.conf configuration file::
-
- --sql_connection=mysql://root:nova@$CC_ADDR/nova # location of nova sql db
- --s3_host=$CC_ADDR # This is where Nova is hosting the objectstore service, which
- # will contain the VM images and buckets
- --rabbit_host=$CC_ADDR # This is where the rabbit AMQP messaging service is hosted
- --cc_host=$CC_ADDR # This is where the the nova-api service lives
- --verbose # Optional but very helpful during initial setup
- --ec2_url=http://$CC_ADDR:8773/services/Cloud
- --network_manager=nova.network.manager.FlatManager # simple, no-vlan networking type
- --fixed_range=<network/prefix> # ip network to use for VM guests, ex 192.168.2.64/26
- --network_size=<# of addrs> # number of ip addrs to use for VM guests, ex 64
-
-#. Create a nova group::
-
- sudo addgroup nova
-
-The Nova config file should have its owner set to root:nova, and mode set to 0640, since they contain your MySQL server's root password.
+--------------------------------------------------------
+1. Nova development has consolidated all config files to nova.conf as of November 2010. There is a default set of options that are already configured in nova.conf:
+
::
+
+--daemonize=1
+--dhcpbridge_flagfile=/etc/nova/nova.conf
+--dhcpbridge=/usr/bin/nova-dhcpbridge
+--logdir=/var/log/nova
+--state_path=/var/lib/nova
+
+The following items ALSO need to be defined in /etc/nova/nova.conf. I’ve added some explanation of the variables, as comments CANNOT be in nova.conf. There seems to be an issue with nova-manage not processing the comments/whitespace correctly:
- cd /etc/nova
- chown -R root:nova .
+--sql_connection ### Location of Nova SQL DB
-Step 3 Setup the sql db
------------------------
+--s3_host ### This is where Nova is hosting the objectstore service, which will contain the VM images and buckets
-1. First you 'preseed' (using the Quick Start method :doc:`../quickstart`). Run this as root.
+--rabbit_host ### This is where the rabbit AMQP messaging service is hosted
-::
-
- sudo apt-get install bzr git-core
- sudo bash
- export MYSQL_PASS=nova
+--cc_host ### This is where the nova-api service lives
+--verbose ### Optional but very helpful during initial setup
-::
+--ec2_url ### The location to interface nova-api
- cat <<MYSQL_PRESEED | debconf-set-selections
- mysql-server-5.1 mysql-server/root_password password $MYSQL_PASS
- mysql-server-5.1 mysql-server/root_password_again password $MYSQL_PASS
- mysql-server-5.1 mysql-server/start_on_boot boolean true
- MYSQL_PRESEED
+--network_manager ### Many options here, discussed below. This is how your controller will communicate with additional Nova nodes and VMs:
-2. Install mysql
+nova.network.manager.FlatManager # Simple, no-vlan networking type
+nova.network.manager.FlatDHCPManager # Flat networking with DHCP
+nova.network.manager.VlanManager # Vlan networking with DHCP – /DEFAULT/ if no network manager is defined in nova.conf
-::
+--fixed_range=<network/prefix> ### This will be the IP network that ALL the projects for future VM guests will reside on. E.g. 192.168.0.0/12
- sudo apt-get install -y mysql-server
+--network_size=<# of addrs> ### This is the total number of IP Addrs to use for VM guests, of all projects. E.g. 5000
-4. Edit /etc/mysql/my.cnf and set this line: bind-address=0.0.0.0 and then sighup or restart mysql
+The following can be cut and pasted, then edited for your setup:
-5. create nova's db
+Note: CC_ADDR=<the external IP address of your cloud controller>
+Detailed explanation of the following example is available above.
+
::
+
+--sql_connection=mysql://root:nova@<CC_ADDR>/nova
+--s3_host=<CC_ADDR>
+--rabbit_host=<CC_ADDR>
+--cc_host=<CC_ADDR>
+--verbose
+--ec2_url=http://<CC_ADDR>:8773/services/Cloud
+--network_manager=nova.network.manager.VlanManager
+--fixed_range=<network/prefix>
+--network_size=<# of addrs>
+
+2. Create a “nova” group, and set permissions::
+
+ addgroup nova
+
+The Nova config file should have its owner set to root:nova, and its mode set to 0644, since it contains your MySQL server's root password. ::
+
+ chown -R root:nova /etc/nova
+ chmod 644 /etc/nova/nova.conf
+
+Step 3 - Setup the SQL DB (MySQL for this setup)
+------------------------------------------------
+
+1. First you 'preseed' to bypass all the installation prompts::
+
+ bash
+ MYSQL_PASS=nova
+ cat <<MYSQL_PRESEED | debconf-set-selections
+ mysql-server-5.1 mysql-server/root_password password $MYSQL_PASS
+ mysql-server-5.1 mysql-server/root_password_again password $MYSQL_PASS
+ mysql-server-5.1 mysql-server/start_on_boot boolean true
+ MYSQL_PRESEED
+
+2. Install MySQL::
+
+ apt-get install -y mysql-server
+
+3. Edit /etc/mysql/my.cnf to change ‘bind-address’ from localhost to any::
+
+ sed -i 's/127.0.0.1/0.0.0.0/g' /etc/mysql/my.cnf
+ service mysql restart
+
+4. Network Configuration
+
+If you use FlatManager (as opposed to VlanManager that we set) as your network manager, there are some additional networking changes you’ll have to make to ensure connectivity between your nodes and VMs. If you chose VlanManager or FlatDHCP, you may skip this section, as it’s set up for you automatically.
+
+Nova defaults to a bridge device named 'br100'. This needs to be created and somehow integrated into YOUR network. To keep things as simple as possible, have all the VM guests on the same network as the VM hosts (the compute nodes). To do so, set the compute node's external IP address to be on the bridge and add eth0 to that bridge. To do this, edit your network interfaces config to look like the following::
- mysql -uroot -pnova -e 'CREATE DATABASE nova;'
-
-
-6. Update the db to include user 'root'@'%'
+ < begin /etc/network/interfaces >
+ # The loopback network interface
+ auto lo
+ iface lo inet loopback
+
+ # Networking for NOVA
+ auto br100
+
+ iface br100 inet dhcp
+ bridge_ports eth0
+ bridge_stp off
+ bridge_maxwait 0
+ bridge_fd 0
+ < end /etc/network/interfaces >
+
+Next, restart networking to apply the changes::
+
+ sudo /etc/init.d/networking restart
+
+5. MySQL DB configuration:
+
+Create NOVA database::
+
+ mysql -uroot -p$MYSQL_PASS -e 'CREATE DATABASE nova;'
+
+Update the DB to include user 'root'@'%' with super user privileges::
+
+ mysql -uroot -p$MYSQL_PASS -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION;"
+
+Set the MySQL root password::
+
+ mysql -uroot -p$MYSQL_PASS -e "SET PASSWORD FOR 'root'@'%' = PASSWORD('$MYSQL_PASS');"
+
+Step 4 - Setup Nova environment
+-------------------------------
+
+These are the commands you run to set up a user and project::
+
+ /usr/bin/python /usr/bin/nova-manage user admin <user_name>
+ /usr/bin/python /usr/bin/nova-manage project create <project_name> <user_name>
+ /usr/bin/python /usr/bin/nova-manage network create <project-network> <number-of-networks-in-project> <IPs in project>
+
+Here is an example of what this looks like with real data::
+
+ /usr/bin/python /usr/bin/nova-manage user admin dub
+ /usr/bin/python /usr/bin/nova-manage project create dubproject dub
+ /usr/bin/python /usr/bin/nova-manage network create 192.168.0.0/24 1 255
+
+(I chose a /24 since that falls inside my /12 range I set in ‘fixed-range’ in nova.conf. Currently, there can only be one network, and I am using the maximum number of IPs available in a /24. You can choose any valid size that you would like.)
+
+Note: The nova-manage service assumes that the first IP address is your network (like 192.168.0.0), that the 2nd IP is your gateway (192.168.0.1), and that the broadcast is the very last IP in the range you defined (192.168.0.255). If this is not the case you will need to manually edit the sql db 'networks' table.
+
+On running this command, entries are made in the 'networks' and 'fixed_ips' tables. However, one of the networks listed in the 'networks' table needs to be marked as a bridge in order for the code to know that a bridge exists. The network is marked as bridged automatically based on the type of network manager selected. This is ONLY necessary if you chose FlatManager as your network type. More information can be found in the network configuration section above discussing setting up the bridge device.
+
+
+Step 5 - Create Nova certificates
+-----------------------------------
+
+1. Generate the certs as a zip file. These are the certs you will use to launch instances, bundle images, and perform all the other assorted API functions.
::
- mysql -u root -p nova
- GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION;
- SET PASSWORD FOR 'root'@'%' = PASSWORD('nova');
-
-7. Branch and install Nova
+ mkdir -p /root/creds
+ /usr/bin/python /usr/bin/nova-manage project zipfile $NOVA_PROJECT $NOVA_PROJECT_USER /root/creds/novacreds.zip
+
+2. Unzip them in your home directory, and add them to your environment.
::
- sudo -i
- cd ~
- export USE_MYSQL=1
- export MYSQL_PASS=nova
- git clone https://github.com/vishvananda/novascript.git
- cd novascript
- ./nova.sh branch
- ./nova.sh install
- ./nova.sh run
+ unzip /root/creds/novacreds.zip -d /root/creds/
+ cat /root/creds/novarc >> ~/.bashrc
+ source ~/.bashrc
-Step 4 Setup Nova environment
------------------------------
+Step 6 - Restart all relevant services
+--------------------------------------
-::
+Restart all six services in total, just to cover the entire spectrum::
+
+ service libvirt-bin restart; service nova-network restart; service nova-compute restart; service nova-api restart; service nova-objectstore restart; service nova-scheduler restart
- /usr/bin/python /usr/bin/nova-manage user admin <user_name>
- /usr/bin/python /usr/bin/nova-manage project create <project_name> <user_name>
- /usr/bin/python /usr/bin/nova-manage project create network
+Step 7 - Closing steps, and cleaning up
+---------------------------------------
-Note: The nova-manage service assumes that the first IP address is your network (like 192.168.0.0), that the 2nd IP is your gateway (192.168.0.1), and that the broadcast is the very last IP in the range you defined (192.168.0.255). If this is not the case you will need to manually edit the sql db 'networks' table.o.
+One of the most commonly missed configuration areas is not allowing the proper access to VMs. Use the 'euca-authorize' command to enable access. Below, you will find the commands to allow 'ping' and 'ssh' to your VMs::
-On running this command, entries are made in the 'networks' and 'fixed_ips' table. However, one of the networks listed in the 'networks' table needs to be marked as bridge in order for the code to know that a bridge exists. The Network is marked as bridged automatically based on the type of network manager selected.
+ euca-authorize -P icmp -t -1:-1 default
+ euca-authorize -P tcp -p 22 default
-More networking details to create a network bridge for flat network
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Another common issue is that you cannot ping or SSH to your instances after issuing the 'euca-authorize' commands. Something to look at is the number of 'dnsmasq' processes that are running. If you have a running instance, check to see that TWO 'dnsmasq' processes are running. If not, perform the following::
-Nova defaults to a bridge device named 'br100'. This needs to be created and somehow integrated into YOUR network. In my case, I wanted to keep things as simple as possible and have all the vm guests on the same network as the vm hosts (the compute nodes). Thus, I set the compute node's external IP address to be on the bridge and added eth0 to that bridge. To do this, edit your network interfaces config to look like the following::
+ killall dnsmasq
+ service nova-network restart
- < begin /etc/network/interfaces >
- # The loopback network interface
- auto lo
- iface lo inet loopback
+Step 8 - Testing the installation
+---------------------------------
- # Networking for NOVA
- auto br100
+You can then use `euca2ools` to test some items::
- iface br100 inet dhcp
- bridge_ports eth0
- bridge_stp off
- bridge_maxwait 0
- bridge_fd 0
- < end /etc/network/interfaces >
+ euca-describe-images
+ euca-describe-instances
+
+If you have issues with the API key, you may need to re-source your creds file::
+ . /root/creds/novarc
+
+If you don’t get any immediate errors, you’re successfully making calls to your cloud!
-Next, restart networking to apply the changes::
+Step 9 - Spinning up a VM for testing
+-------------------------------------
- sudo /etc/init.d/networking restart
+(This excerpt is from Thierry Carrez's blog, with reference to http://wiki.openstack.org/GettingImages.)
-Step 5: Create nova certs.
---------------------------
+The image that you will use here will be a ttylinux image, so this is a limited function server. You will be able to ping and SSH to this instance, but it is in no way a full production VM.
-Generate the certs as a zip file::
+Download the image, and publish to your bucket:
- mkdir creds
- sudo /usr/bin/python /usr/bin/nova-manage project zip admin admin creds/nova.zip
+::
-you can get the rc file more easily with::
+ image="ttylinux-uec-amd64-12.1_2.6.35-22_1.tar.gz"
+ wget http://smoser.brickies.net/ubuntu/ttylinux-uec/$image
+ uec-publish-tarball $image mybucket
- sudo /usr/bin/python /usr/bin/nova-manage project env admin admin creds/novarc
+This will output three references: an "emi", an "eri", and an "eki" (image, ramdisk, and kernel). The emi is the one we use to launch instances, so take note of it.
-unzip them in your home directory, and add them to your environment::
+Create a keypair to SSH to the server:
- unzip creds/nova.zip
- echo ". creds/novarc" >> ~/.bashrc
- ~/.bashrc
+::
-Step 6 Restart all relevant services
-------------------------------------
+ euca-add-keypair mykey > mykey.priv
-Restart Libvirt::
+ chmod 0600 mykey.priv
- sudo /etc/init.d/libvirt-bin restart
+Boot your instance:
-Restart relevant nova services::
+::
- sudo /etc/init.d/nova-compute restart
- sudo /etc/init.d/nova-volume restart
+ euca-run-instances $emi -k mykey -t m1.tiny
+($emi is replaced with the output from the previous command)
-.. todo:: do we still need the content below?
+Checking status, and confirming communication:
-Bare-metal Provisioning Notes
------------------------------
+Once you have booted the instance, you can check its status with the `euca-describe-instances` command. Here you can view the instance ID, IP, and current status of the VM.
-To install the base operating system you can use PXE booting.
+::
-Types of Hosts
---------------
+ euca-describe-instances
-A single machine in your cluster can act as one or more of the following types
-of host:
+Once in a "running" state, you can use your SSH key to connect:
-Nova Services
+::
-* Network
-* Compute
-* Volume
-* API
-* Objectstore
+ ssh -i mykey.priv root@$ipaddress
-Other supporting services
+When you are ready to terminate the instance, you may do so with the `euca-terminate-instances` command:
-* Message Queue
-* Database (optional)
-* Authentication database (optional)
+::
-Initial Setup
--------------
+ euca-terminate-instances $instance-id
-* Networking
-* Cloudadmin User Creation
+You can determine the instance-id with `euca-describe-instances`; the format is "i-" followed by a series of letters and numbers, e.g. i-a4g9d.
-Deployment Technologies
------------------------
+For more information on creating your own custom (production-ready) instance images, please visit http://wiki.openstack.org/GettingImages.
-Once you have machines with a base operating system installation, you can deploy
-code and configuration with your favorite tools to specify which machines in
-your cluster have which roles:
+Enjoy your new private cloud, and play responsibly!
-* Puppet
-* Chef
diff --git a/doc/source/adminguide/network.flat.rst b/doc/source/adminguide/network.flat.rst
index 1b8661a40..3d8680c6f 100644
--- a/doc/source/adminguide/network.flat.rst
+++ b/doc/source/adminguide/network.flat.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/adminguide/network.vlan.rst b/doc/source/adminguide/network.vlan.rst
index c6c4e7f91..c06ce8e8b 100644
--- a/doc/source/adminguide/network.vlan.rst
+++ b/doc/source/adminguide/network.vlan.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
@@ -91,11 +91,10 @@ These do NOT have IP addresses in the host to protect host access.
Compute nodes have iptables/ebtables entries created per project and
instance to protect against IP/MAC address spoofing and ARP poisoning.
-The network assignment to a project, and IP address assignment to a VM instance, are triggered when a user starts to run a VM instance. When running a VM instance, a user needs to specify a project for the instances, and the security groups (described in Security Groups) when the instance wants to join. If this is the first instance to be created for the project, then Nova (the cloud controller) needs to find a network controller to be the network host for the project; it then sets up a private network by finding an unused VLAN id, an unused subnet, and then the controller assigns them to the project, it also assigns a name to the project's Linux bridge, and allocating a private IP within the project's subnet for the new instance.
+The network assignment to a project, and the IP address assignment to a VM instance, are triggered when a user starts to run a VM instance. When running a VM instance, a user needs to specify a project for the instance, and the security groups (described in Security Groups) that the instance wants to join. If this is the first instance to be created for the project, then Nova (the cloud controller) needs to find a network controller to be the network host for the project; it then sets up a private network by finding an unused VLAN id and an unused subnet, assigns them to the project, assigns a name to the project's Linux bridge (br100, stored in the Nova database), and allocates a private IP within the project's subnet for the new instance.
If the instance the user wants to start is not the project's first, a subnet and a VLAN must have already been assigned to the project; therefore the system needs only to find an available IP address within the subnet and assign it to the new starting instance. If there is no private IP available within the subnet, an exception will be raised to the cloud controller, and the VM creation cannot proceed.
-.. todo:: insert the name of the Linux bridge, is it always named bridge?
External Infrastructure
-----------------------
diff --git a/doc/source/adminguide/nova.manage.rst b/doc/source/adminguide/nova.manage.rst
index 0e5c4e062..0ec67c69c 100644
--- a/doc/source/adminguide/nova.manage.rst
+++ b/doc/source/adminguide/nova.manage.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/cloud101.rst b/doc/source/cloud101.rst
index 7c79d2a70..9902ba502 100644
--- a/doc/source/cloud101.rst
+++ b/doc/source/cloud101.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
@@ -54,7 +54,7 @@ Cloud computing offers different service models depending on the capabilities a
The US-based National Institute of Standards and Technology offers definitions for cloud computing
and the service models that are emerging.
-These definitions are summarized from http://csrc.nist.gov/groups/SNS/cloud-computing/.
+These definitions are summarized from the `U.S. National Institute of Standards and Technology (NIST) cloud computing research group <http://csrc.nist.gov/groups/SNS/cloud-computing/>`_.
SaaS - Software as a Service
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -74,7 +74,6 @@ IaaS - Infrastructure as a Service
Provides infrastructure such as computer instances, network connections, and storage so that people
can run any software or operating system.
-
Types of Cloud Deployments
--------------------------
@@ -87,4 +86,5 @@ A hybrid cloud can be a deployment model, as a composition of both public and pr
Work in the Clouds
------------------
-.. todo:: What people have done/sample projects
+What have people done with cloud computing? Cloud computing can help with large-scale computing needs, or it can drive consolidation efforts by virtualizing servers to make more use of existing hardware (and possibly release old hardware from service). People also use cloud computing for collaboration because of its high availability through networked computers. Productivity suites for word processing, number crunching, email communications, and more are also available through cloud computing. Cloud computing also makes additional storage available to the cloud user, avoiding the need for additional hard drives on your desktop and enabling access to large data storage capacity online in the cloud.
+
diff --git a/doc/source/community.rst b/doc/source/community.rst
index 01ff5f055..4ae32f1eb 100644
--- a/doc/source/community.rst
+++ b/doc/source/community.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/devref/addmethod.openstackapi.rst b/doc/source/devref/addmethod.openstackapi.rst
index 4baa46e20..dde50083b 100644
--- a/doc/source/devref/addmethod.openstackapi.rst
+++ b/doc/source/devref/addmethod.openstackapi.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 OpenStack LLC
+ Copyright 2010-2011 OpenStack LLC
All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/doc/source/devref/api.rst b/doc/source/devref/api.rst
index 14181529a..35abf1ae0 100644
--- a/doc/source/devref/api.rst
+++ b/doc/source/devref/api.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/devref/architecture.rst b/doc/source/devref/architecture.rst
index 1e23e1361..233cd6f08 100644
--- a/doc/source/devref/architecture.rst
+++ b/doc/source/devref/architecture.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/devref/auth.rst b/doc/source/devref/auth.rst
index c3af3f945..830caba67 100644
--- a/doc/source/devref/auth.rst
+++ b/doc/source/devref/auth.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/devref/cloudpipe.rst b/doc/source/devref/cloudpipe.rst
index fb104c160..4f5d91e28 100644
--- a/doc/source/devref/cloudpipe.rst
+++ b/doc/source/devref/cloudpipe.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/devref/compute.rst b/doc/source/devref/compute.rst
index db9ef6f34..31cc2037f 100644
--- a/doc/source/devref/compute.rst
+++ b/doc/source/devref/compute.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/devref/database.rst b/doc/source/devref/database.rst
index 14559aa8c..a26e48705 100644
--- a/doc/source/devref/database.rst
+++ b/doc/source/devref/database.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
@@ -60,4 +60,4 @@ Tests
-----
Tests are lacking for the db api layer and for the sqlalchemy driver.
-Failures in the drivers would be dectected in other test cases, though.
+Failures in the drivers would be detected in other test cases, though.
diff --git a/doc/source/devref/development.environment.rst b/doc/source/devref/development.environment.rst
index 3de2e2287..f3c454d64 100644
--- a/doc/source/devref/development.environment.rst
+++ b/doc/source/devref/development.environment.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/devref/fakes.rst b/doc/source/devref/fakes.rst
index 0ba5d6ef2..6073447f0 100644
--- a/doc/source/devref/fakes.rst
+++ b/doc/source/devref/fakes.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/devref/glance.rst b/doc/source/devref/glance.rst
index d18f7fec6..9a1c14d58 100644
--- a/doc/source/devref/glance.rst
+++ b/doc/source/devref/glance.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/devref/index.rst b/doc/source/devref/index.rst
index 589609ace..9613ba990 100644
--- a/doc/source/devref/index.rst
+++ b/doc/source/devref/index.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/devref/network.rst b/doc/source/devref/network.rst
index d9d091494..eaf13e9ba 100644
--- a/doc/source/devref/network.rst
+++ b/doc/source/devref/network.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/devref/nova.rst b/doc/source/devref/nova.rst
index 53ce6f34f..093fbb3ee 100644
--- a/doc/source/devref/nova.rst
+++ b/doc/source/devref/nova.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/devref/objectstore.rst b/doc/source/devref/objectstore.rst
index 3ccfc8566..f140e85e9 100644
--- a/doc/source/devref/objectstore.rst
+++ b/doc/source/devref/objectstore.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/devref/rabbit.rst b/doc/source/devref/rabbit.rst
index ae0bac49d..c17a13222 100644
--- a/doc/source/devref/rabbit.rst
+++ b/doc/source/devref/rabbit.rst
@@ -1,5 +1,6 @@
..
Copyright (c) 2010 Citrix Systems, Inc.
+ All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
@@ -29,7 +30,7 @@ Nova (Austin release) uses both direct and topic-based exchanges. The architectu
..
-Nova implements RPC (both request+response, and one-way, respectively nicknamed 'rpc.call' and 'rpc.cast') over AMQP by providing an adapter class which take cares of marshalling and unmarshalling of messages into function calls. Each Nova service (for example Compute, Volume, etc.) create two queues at the initialization time, one which accepts messages with routing keys 'NODE-TYPE.NODE-ID' (for example compute.hostname) and another, which accepts messages with routing keys as generic 'NODE-TYPE' (for example compute). The former is used specifically when Nova-API needs to redirect commands to a specific node like 'euca-terminate instance'. In this case, only the compute node whose host's hypervisor is running the virtual machine can kill the instance. The API acts as a consumer when RPC calls are request/response, otherwise is acts as publisher only.
+Nova implements RPC (both request+response, and one-way, respectively nicknamed 'rpc.call' and 'rpc.cast') over AMQP by providing an adapter class which takes care of marshaling and unmarshaling of messages into function calls. Each Nova service (for example Compute, Volume, etc.) creates two queues at initialization time, one which accepts messages with routing keys 'NODE-TYPE.NODE-ID' (for example compute.hostname) and another, which accepts messages with routing keys as generic 'NODE-TYPE' (for example compute). The former is used specifically when Nova-API needs to redirect commands to a specific node like 'euca-terminate instance'. In this case, only the compute node whose host's hypervisor is running the virtual machine can kill the instance. The API acts as a consumer when RPC calls are request/response, otherwise it acts as publisher only.
Nova RPC Mappings
-----------------
@@ -39,7 +40,7 @@ The figure below shows the internals of a RabbitMQ node when a single instance i
Figure 2 shows the following internal elements:
* Topic Publisher: a Topic Publisher comes to life when an rpc.call or an rpc.cast operation is executed; this object is instantiated and used to push a message to the queuing system. Every publisher connects always to the same topic-based exchange; its life-cycle is limited to the message delivery.
- * Direct Consumer: a Direct Consumer comes to life if (an only if) a rpc.call operation is executed; this object is instantiated and used to receive a response message from the queuing system; Every consumer connects to a unique direct-based exchange via a unique exclusive queue; its life-cycle is limited to the message delivery; the exchange and queue identifiers are determined by a UUID generator, and are marshalled in the message sent by the Topic Publisher (only rpc.call operations).
+ * Direct Consumer: a Direct Consumer comes to life if (and only if) an rpc.call operation is executed; this object is instantiated and used to receive a response message from the queuing system; every consumer connects to a unique direct-based exchange via a unique exclusive queue; its life-cycle is limited to the message delivery; the exchange and queue identifiers are determined by a UUID generator, and are marshaled in the message sent by the Topic Publisher (only rpc.call operations).
* Topic Consumer: a Topic Consumer comes to life as soon as a Worker is instantiated and exists throughout its life-cycle; this object is used to receive messages from the queue and it invokes the appropriate action as defined by the Worker role. A Topic Consumer connects to the same topic-based exchange either via a shared queue or via a unique exclusive queue. Every Worker has two topic consumers, one that is addressed only during rpc.cast operations (and it connects to a shared queue whose exchange key is 'topic') and the other that is addressed only during rpc.call operations (and it connects to a unique queue whose exchange key is 'topic.host').
* Direct Publisher: a Direct Publisher comes to life only during rpc.call operations and it is instantiated to return the message required by the request/response operation. The object connects to a direct-based exchange whose identity is dictated by the incoming message.
* Topic Exchange: The Exchange is a routing table that exists in the context of a virtual host (the multi-tenancy mechanism provided by RabbitMQ); its type (such as topic vs. direct) determines the routing policy; a RabbitMQ node will have only one topic-based exchange for every topic in Nova.
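
A minimal sketch (not part of this patch) of the two RPC styles described above, assuming the call/cast helpers take (context, topic, message) as in the nova/api/ec2/cloud.py hunk later in this diff::

    from nova import rpc

    # rpc.cast: one-way; only a Topic Publisher and a Topic Consumer are involved.
    # The method and args mirror the security-group refresh cast added in this diff.
    rpc.cast(context,
             'compute.%s' % host,
             {'method': 'refresh_security_group',
              'args': {'security_group_id': group_id}})

    # rpc.call: request/response; a Direct Consumer is created behind the scenes to
    # receive the reply over a direct exchange.  'some_method' and 'some_arg' are
    # placeholders, not real Nova RPC methods.
    result = rpc.call(context,
                      'network',
                      {'method': 'some_method',
                       'args': {'some_arg': 42}})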
diff --git a/doc/source/devref/scheduler.rst b/doc/source/devref/scheduler.rst
index ab74b6ba8..066781514 100644
--- a/doc/source/devref/scheduler.rst
+++ b/doc/source/devref/scheduler.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/devref/services.rst b/doc/source/devref/services.rst
index f5bba5c12..ae237a248 100644
--- a/doc/source/devref/services.rst
+++ b/doc/source/devref/services.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/devref/volume.rst b/doc/source/devref/volume.rst
index 54a2d4f8b..c4dddb9ea 100644
--- a/doc/source/devref/volume.rst
+++ b/doc/source/devref/volume.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/index.rst b/doc/source/index.rst
index b9ba6208a..6eec09acb 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/livecd.rst b/doc/source/livecd.rst
index b355fa180..37c92c8bc 100644
--- a/doc/source/livecd.rst
+++ b/doc/source/livecd.rst
@@ -1,3 +1,20 @@
+..
+ Copyright 2010-2011 OpenStack LLC
+
+ All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+
Installing the Live CD
======================
diff --git a/doc/source/nova.concepts.rst b/doc/source/nova.concepts.rst
index fb3969a43..e9687dc98 100644
--- a/doc/source/nova.concepts.rst
+++ b/doc/source/nova.concepts.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
@@ -105,7 +105,7 @@ It is important to know that there are user-specific (sometimes called global) r
For example: A user can access api commands allowed to the netadmin role (like allocate_address) only if he has the user-specific netadmin role AND the project-specific netadmin role.
-More information about RBAC can be found in the :ref:`auth`.
+More information about RBAC can be found in :ref:`auth`.
Concept: API
------------
@@ -159,12 +159,10 @@ vpn management, and much more.
See :doc:`nova.manage` in the Administration Guide for more details.
-
Concept: Flags
--------------
-Nova uses python-gflags for a distributed command line system, and the flags can either be set when running a command at the command line or within flag files. When you install Nova packages, each Nova service gets its own flag file. For example, nova-network.conf is used for configuring the nova-network service, and so forth.
-
+Nova uses python-gflags for a distributed command line system, and the flags can either be set when running a command at the command line or within a flag file. When you install Nova packages for the Austin release, each nova service gets its own flag file. For example, nova-network.conf is used for configuring the nova-network service, and so forth. In releases after Austin (released in October 2010), all flags are set in nova.conf.
Concept: Plugins
----------------
diff --git a/doc/source/object.model.rst b/doc/source/object.model.rst
index c8d4df736..d02f151fd 100644
--- a/doc/source/object.model.rst
+++ b/doc/source/object.model.rst
@@ -1,3 +1,20 @@
+..
+ Copyright 2010-2011 OpenStack LLC
+
+ All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+
Object Model
============
@@ -25,29 +42,38 @@ Object Model
Users
-----
+Each Nova User is authorized based on their access key and secret key, assigned per-user. Read more at :doc:`/adminguide/managing.users`.
+
Projects
--------
+For Nova, access to images is based on the project. Read more at :doc:`/adminguide/managing.projects`.
Images
------
+Images are the binary files that contain the operating system an instance runs. Read more at :doc:`/adminguide/managing.images`.
Instances
---------
+Instances are running virtual servers. Read more at :doc:`/adminguide/managing.instances`.
Volumes
-------
+.. todo:: Write doc about volumes
Security Groups
---------------
+In Nova, a security group is a named collection of network access rules, like firewall policies. Read more at `Security Groups <http://nova.openstack.org/nova.concepts.html#concept-security-groups>`_.
VLANs
-----
+VLAN is the default network mode for Nova. Read more at :doc:`/adminguide/network.vlan`.
IP Addresses
------------
+Nova enables floating IP management.
\ No newline at end of file
diff --git a/doc/source/quickstart.rst b/doc/source/quickstart.rst
index fa5d96738..17c9e10a8 100644
--- a/doc/source/quickstart.rst
+++ b/doc/source/quickstart.rst
@@ -1,5 +1,5 @@
..
- Copyright 2010 United States Government as represented by the
+ Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
diff --git a/doc/source/service.architecture.rst b/doc/source/service.architecture.rst
index 28a32bec6..8fa1e3306 100644
--- a/doc/source/service.architecture.rst
+++ b/doc/source/service.architecture.rst
@@ -1,3 +1,20 @@
+..
+ Copyright 2010-2011 OpenStack LLC
+
+ All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+
Service Architecture
====================
diff --git a/krm_mapping.json.sample b/krm_mapping.json.sample
new file mode 100644
index 000000000..1ecfba635
--- /dev/null
+++ b/krm_mapping.json.sample
@@ -0,0 +1,3 @@
+{
+ "machine" : ["kernel", "ramdisk"]
+}
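The sample above maps an image identifier to a [kernel, ramdisk] pair and is read by the _get_kernel_ramdisk_from_image() helper added to nova/api/openstack/servers.py further down in this change. A hypothetical entry with made-up IDs could look like:

    {
        "ami-deadbeef" : ["aki-deadbeef", "ari-deadbeef"]
    }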
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 99a9677c4..832426b94 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -132,6 +132,21 @@ class CloudController(object):
result[key] = [line]
return result
+ def _trigger_refresh_security_group(self, context, security_group):
+ nodes = set([instance['host'] for instance in security_group.instances
+ if instance['host'] is not None])
+ for node in nodes:
+ rpc.cast(context,
+ '%s.%s' % (FLAGS.compute_topic, node),
+ {"method": "refresh_security_group",
+ "args": {"security_group_id": security_group.id}})
+
+ def _get_availability_zone_by_host(self, context, host):
+ services = db.service_get_all_by_host(context, host)
+ if len(services) > 0:
+ return services[0]['availability_zone']
+ return 'unknown zone'
+
def get_metadata(self, address):
ctxt = context.get_admin_context()
instance_ref = self.compute_api.get_all(ctxt, fixed_ip=address)
@@ -144,6 +159,8 @@ class CloudController(object):
else:
keys = ''
hostname = instance_ref['hostname']
+ host = instance_ref['host']
+ availability_zone = self._get_availability_zone_by_host(ctxt, host)
floating_ip = db.instance_get_floating_address(ctxt,
instance_ref['id'])
ec2_id = id_to_ec2_id(instance_ref['id'])
@@ -166,8 +183,7 @@ class CloudController(object):
'local-hostname': hostname,
'local-ipv4': address,
'kernel-id': instance_ref['kernel_id'],
- # TODO(vish): real zone
- 'placement': {'availability-zone': 'nova'},
+ 'placement': {'availability-zone': availability_zone},
'public-hostname': hostname,
'public-ipv4': floating_ip or '',
'public-keys': keys,
@@ -191,8 +207,26 @@ class CloudController(object):
return self._describe_availability_zones(context, **kwargs)
def _describe_availability_zones(self, context, **kwargs):
- return {'availabilityZoneInfo': [{'zoneName': 'nova',
- 'zoneState': 'available'}]}
+ enabled_services = db.service_get_all(context)
+ disabled_services = db.service_get_all(context, True)
+ available_zones = []
+ for zone in [service.availability_zone for service
+ in enabled_services]:
+ if not zone in available_zones:
+ available_zones.append(zone)
+ not_available_zones = []
+ for zone in [service.availability_zone for service in disabled_services
+ if not service['availability_zone'] in available_zones]:
+ if not zone in not_available_zones:
+ not_available_zones.append(zone)
+ result = []
+ for zone in available_zones:
+ result.append({'zoneName': zone,
+ 'zoneState': "available"})
+ for zone in not_available_zones:
+ result.append({'zoneName': zone,
+ 'zoneState': "not available"})
+ return {'availabilityZoneInfo': result}
def _describe_availability_zones_verbose(self, context, **kwargs):
rv = {'availabilityZoneInfo': [{'zoneName': 'nova',
@@ -399,8 +433,8 @@ class CloudController(object):
criteria = self._revoke_rule_args_to_dict(context, **kwargs)
if criteria == None:
- raise exception.ApiError(_("No rule for the specified "
- "parameters."))
+ raise exception.ApiError(_("Not enough parameters to build a "
+ "valid rule."))
for rule in security_group.rules:
match = True
@@ -427,6 +461,9 @@ class CloudController(object):
group_name)
values = self._revoke_rule_args_to_dict(context, **kwargs)
+ if values is None:
+ raise exception.ApiError(_("Not enough parameters to build a "
+ "valid rule."))
values['parent_group_id'] = security_group.id
if self._security_group_rule_exists(security_group, values):
@@ -498,6 +535,11 @@ class CloudController(object):
"Timestamp": now,
"output": base64.b64encode(output)}
+ def get_ajax_console(self, context, instance_id, **kwargs):
+ ec2_id = instance_id[0]
+ internal_id = ec2_id_to_id(ec2_id)
+ return self.compute_api.get_ajax_console(context, internal_id)
+
def describe_volumes(self, context, volume_id=None, **kwargs):
volumes = self.volume_api.get_all(context)
# NOTE(vish): volume_id is an optional list of volume ids to filter by.
@@ -646,6 +688,9 @@ class CloudController(object):
i['amiLaunchIndex'] = instance['launch_index']
i['displayName'] = instance['display_name']
i['displayDescription'] = instance['display_description']
+ host = instance['host']
+ zone = self._get_availability_zone_by_host(context, host)
+ i['placement'] = {'availabilityZone': zone}
if instance['reservation_id'] not in reservations:
r = {}
r['reservationId'] = instance['reservation_id']
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index 7b999c87b..f96e2af91 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -35,7 +35,7 @@ from nova.api.openstack import consoles
from nova.api.openstack import flavors
from nova.api.openstack import images
from nova.api.openstack import servers
-from nova.api.openstack import sharedipgroups
+from nova.api.openstack import shared_ip_groups
LOG = logging.getLogger('nova.api.openstack')
@@ -48,6 +48,10 @@ flags.DEFINE_string('os_api_ratelimiting',
'nova.api.openstack.ratelimiting.RateLimitingMiddleware',
'Default ratelimiting implementation for the Openstack API')
+flags.DEFINE_string('os_krm_mapping_file',
+ 'krm_mapping.json',
+ 'Location of OpenStack Flavor/OS:EC2 Kernel/Ramdisk/Machine JSON file.')
+
flags.DEFINE_bool('allow_admin_api',
False,
'When True, this API service will accept admin operations.')
@@ -110,8 +114,9 @@ class APIRouter(wsgi.Router):
collection={'detail': 'GET'})
mapper.resource("flavor", "flavors", controller=flavors.Controller(),
collection={'detail': 'GET'})
- mapper.resource("sharedipgroup", "sharedipgroups",
- controller=sharedipgroups.Controller())
+ mapper.resource("shared_ip_group", "shared_ip_groups",
+ collection={'detail': 'GET'},
+ controller=shared_ip_groups.Controller())
super(APIRouter, self).__init__(mapper)
diff --git a/nova/api/openstack/backup_schedules.py b/nova/api/openstack/backup_schedules.py
index fcc07bdd3..197125d86 100644
--- a/nova/api/openstack/backup_schedules.py
+++ b/nova/api/openstack/backup_schedules.py
@@ -15,7 +15,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+import logging
import time
+
from webob import exc
from nova import wsgi
@@ -46,8 +48,8 @@ class Controller(wsgi.Controller):
def create(self, req, server_id):
""" No actual update method required, since the existing API allows
both create and update through a POST """
- return faults.Fault(exc.HTTPNotFound())
+ return faults.Fault(exc.HTTPNotImplemented())
def delete(self, req, server_id, id):
""" Deletes an existing backup schedule """
- return faults.Fault(exc.HTTPNotFound())
+ return faults.Fault(exc.HTTPNotImplemented())
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index ac0572c96..037ed47a0 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -15,6 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+from nova import exception
+
def limited(items, req):
"""Return a slice of items according to requested offset and limit.
@@ -34,3 +36,25 @@ def limited(items, req):
limit = min(1000, limit)
range_end = offset + limit
return items[offset:range_end]
+
+
+def get_image_id_from_image_hash(image_service, context, image_hash):
+ """Given an Image ID Hash, return an objectstore Image ID.
+
+ image_service - reference to objectstore compatible image service.
+ context - security context for image service requests.
+ image_hash - hash of the image ID.
+ """
+
+ # FIX(sandy): This is terribly inefficient. It pulls all images
+ # from objectstore in order to find the match. ObjectStore
+ # should have a numeric counterpart to the string ID.
+ try:
+ items = image_service.detail(context)
+ except NotImplementedError:
+ items = image_service.index(context)
+ for image in items:
+ image_id = image['imageId']
+ if abs(hash(image_id)) == int(image_hash):
+ return image_id
+ raise exception.NotFound(image_hash)
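The hash here is simply abs(hash(image_id)), mirroring _convert_image_id_to_hash() in images.py below. A rough sketch of the round trip, with a hypothetical image ID:

    image_id = 'ami-12345'              # hypothetical objectstore image ID
    image_hash = abs(hash(image_id))    # value exposed as 'id'/'imageId' by the OpenStack API
    # get_image_id_from_image_hash() walks every image and returns the first
    # one whose abs(hash(imageId)) equals int(image_hash).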
diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py
index 0b239aab8..a5f55a489 100644
--- a/nova/api/openstack/images.py
+++ b/nova/api/openstack/images.py
@@ -15,6 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import logging
+
from webob import exc
from nova import compute
@@ -26,6 +28,7 @@ from nova.api.openstack import common
from nova.api.openstack import faults
import nova.image.service
+
FLAGS = flags.FLAGS
@@ -88,6 +91,12 @@ def _filter_keys(item, keys):
return dict((k, v) for k, v in item.iteritems() if k in keys)
+def _convert_image_id_to_hash(image):
+ image_id = abs(hash(image['imageId']))
+ image['imageId'] = image_id
+ image['id'] = image_id
+
+
class Controller(wsgi.Controller):
_serialization_metadata = {
@@ -112,6 +121,9 @@ class Controller(wsgi.Controller):
items = self._service.detail(req.environ['nova.context'])
except NotImplementedError:
items = self._service.index(req.environ['nova.context'])
+ for image in items:
+ _convert_image_id_to_hash(image)
+
items = common.limited(items, req)
items = [_translate_keys(item) for item in items]
items = [_translate_status(item) for item in items]
@@ -119,7 +131,12 @@ class Controller(wsgi.Controller):
def show(self, req, id):
"""Return data about the given image id"""
- return dict(image=self._service.show(req.environ['nova.context'], id))
+ image_id = common.get_image_id_from_image_hash(self._service,
+ req.environ['nova.context'], id)
+
+ image = self._service.show(req.environ['nova.context'], image_id)
+ _convert_image_id_to_hash(image)
+ return dict(image=image)
def delete(self, req, id):
# Only public images are supported for now.
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 764c843ac..8cbcebed2 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -15,14 +15,17 @@
# License for the specific language governing permissions and limitations
# under the License.
+import json
import traceback
from webob import exc
from nova import compute
from nova import exception
+from nova import flags
from nova import log as logging
from nova import wsgi
+from nova import utils
from nova.api.openstack import common
from nova.api.openstack import faults
from nova.auth import manager as auth_manager
@@ -35,6 +38,9 @@ LOG = logging.getLogger('server')
LOG.setLevel(logging.DEBUG)
+FLAGS = flags.FLAGS
+
+
def _translate_detail_keys(inst):
""" Coerces into dictionary format, mapping everything to Rackspace-like
attributes for return"""
@@ -44,7 +50,7 @@ def _translate_detail_keys(inst):
power_state.RUNNING: 'active',
power_state.BLOCKED: 'active',
power_state.SUSPENDED: 'suspended',
- power_state.PAUSED: 'error',
+ power_state.PAUSED: 'paused',
power_state.SHUTDOWN: 'active',
power_state.SHUTOFF: 'active',
power_state.CRASHED: 'error'}
@@ -81,6 +87,7 @@ class Controller(wsgi.Controller):
def __init__(self):
self.compute_api = compute.API()
+ self._image_service = utils.import_object(FLAGS.image_service)
super(Controller, self).__init__()
def index(self, req):
@@ -117,6 +124,18 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPNotFound())
return exc.HTTPAccepted()
+ def _get_kernel_ramdisk_from_image(self, image_id):
+ mapping_filename = FLAGS.os_krm_mapping_file
+
+ with open(mapping_filename) as f:
+ mapping = json.load(f)
+ if image_id in mapping:
+ return mapping[image_id]
+
+ raise exception.NotFound(
+ _("No entry for image '%s' in mapping file '%s'") %
+ (image_id, mapping_filename))
+
def create(self, req):
""" Creates a new server for a given user """
env = self._deserialize(req.body, req)
@@ -125,10 +144,15 @@ class Controller(wsgi.Controller):
key_pair = auth_manager.AuthManager.get_key_pairs(
req.environ['nova.context'])[0]
+ image_id = common.get_image_id_from_image_hash(self._image_service,
+ req.environ['nova.context'], env['server']['imageId'])
+ kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(image_id)
instances = self.compute_api.create(
req.environ['nova.context'],
instance_types.get_by_flavor_id(env['server']['flavorId']),
- env['server']['imageId'],
+ image_id,
+ kernel_id=kernel_id,
+ ramdisk_id=ramdisk_id,
display_name=env['server']['name'],
display_description=env['server']['name'],
key_name=key_pair['name'],
@@ -161,6 +185,7 @@ class Controller(wsgi.Controller):
""" Multi-purpose method used to reboot, rebuild, and
resize a server """
input_dict = self._deserialize(req.body, req)
+ #TODO(sandy): rebuild/resize not supported.
try:
reboot_type = input_dict['reboot']['type']
except Exception:
@@ -261,6 +286,15 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
+ def get_ajax_console(self, req, id):
+ """ Returns a url to an instance's ajaxterm console. """
+ try:
+ self.compute_api.get_ajax_console(req.environ['nova.context'],
+ int(id))
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+ return exc.HTTPAccepted()
+
def diagnostics(self, req, id):
"""Permit Admins to retrieve server diagnostics."""
ctxt = req.environ["nova.context"]
diff --git a/nova/api/openstack/sharedipgroups.py b/nova/api/openstack/shared_ip_groups.py
index 845f5bead..bd3cc23a8 100644
--- a/nova/api/openstack/sharedipgroups.py
+++ b/nova/api/openstack/shared_ip_groups.py
@@ -15,6 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import logging
+
from webob import exc
from nova import wsgi
@@ -29,7 +31,7 @@ def _translate_keys(inst):
def _translate_detail_keys(inst):
""" Coerces a shared IP group instance into proper dictionary format with
correctly mapped attributes """
- return dict(sharedIpGroup=inst)
+ return dict(sharedIpGroups=inst)
class Controller(wsgi.Controller):
@@ -54,12 +56,12 @@ class Controller(wsgi.Controller):
def delete(self, req, id):
""" Deletes a Shared IP Group """
- raise faults.Fault(exc.HTTPNotFound())
+ raise faults.Fault(exc.HTTPNotImplemented())
- def detail(self, req, id):
+ def detail(self, req):
""" Returns a complete list of Shared IP Groups """
return _translate_detail_keys({})
def create(self, req):
""" Creates a new Shared IP group """
- raise faults.Fault(exc.HTTPNotFound())
+ raise faults.Fault(exc.HTTPNotImplemented())
diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py
index c8de20028..bc53e0ec6 100644
--- a/nova/auth/ldapdriver.py
+++ b/nova/auth/ldapdriver.py
@@ -119,8 +119,7 @@ class LdapDriver(object):
def get_project(self, pid):
"""Retrieve project by id"""
- dn = 'cn=%s,%s' % (pid,
- FLAGS.ldap_project_subtree)
+ dn = self.__project_to_dn(pid)
attr = self.__find_object(dn, LdapDriver.project_pattern)
return self.__to_project(attr)
@@ -228,7 +227,8 @@ class LdapDriver(object):
('description', [description]),
(LdapDriver.project_attribute, [manager_dn]),
('member', members)]
- self.conn.add_s('cn=%s,%s' % (name, FLAGS.ldap_project_subtree), attr)
+ dn = self.__project_to_dn(name, search=False)
+ self.conn.add_s(dn, attr)
return self.__to_project(dict(attr))
def modify_project(self, project_id, manager_uid=None, description=None):
@@ -246,23 +246,22 @@ class LdapDriver(object):
manager_dn))
if description:
attr.append((self.ldap.MOD_REPLACE, 'description', description))
- self.conn.modify_s('cn=%s,%s' % (project_id,
- FLAGS.ldap_project_subtree),
- attr)
+ dn = self.__project_to_dn(project_id)
+ self.conn.modify_s(dn, attr)
def add_to_project(self, uid, project_id):
"""Add user to project"""
- dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
+ dn = self.__project_to_dn(project_id)
return self.__add_to_group(uid, dn)
def remove_from_project(self, uid, project_id):
"""Remove user from project"""
- dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
+ dn = self.__project_to_dn(project_id)
return self.__remove_from_group(uid, dn)
def is_in_project(self, uid, project_id):
"""Check if user is in project"""
- dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
+ dn = self.__project_to_dn(project_id)
return self.__is_in_group(uid, dn)
def has_role(self, uid, role, project_id=None):
@@ -302,7 +301,7 @@ class LdapDriver(object):
roles.append(role)
return roles
else:
- project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
+ project_dn = self.__project_to_dn(project_id)
query = ('(&(&(objectclass=groupOfNames)(!%s))(member=%s))' %
(LdapDriver.project_pattern, self.__uid_to_dn(uid)))
roles = self.__find_objects(project_dn, query)
@@ -335,7 +334,7 @@ class LdapDriver(object):
def delete_project(self, project_id):
"""Delete a project"""
- project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
+ project_dn = self.__project_to_dn(project_id)
self.__delete_roles(project_dn)
self.__delete_group(project_dn)
@@ -367,9 +366,10 @@ class LdapDriver(object):
def __get_ldap_user(self, uid):
"""Retrieve LDAP user entry by id"""
- attr = self.__find_object(self.__uid_to_dn(uid),
- '(objectclass=novaUser)')
- return attr
+ dn = FLAGS.ldap_user_subtree
+ query = ('(&(%s=%s)(objectclass=novaUser))' %
+ (FLAGS.ldap_user_id_attribute, uid))
+ return self.__find_object(dn, query)
def __find_object(self, dn, query=None, scope=None):
"""Find an object by dn and query"""
@@ -420,15 +420,13 @@ class LdapDriver(object):
query = '(objectclass=groupOfNames)'
return self.__find_object(dn, query) is not None
- @staticmethod
- def __role_to_dn(role, project_id=None):
+ def __role_to_dn(self, role, project_id=None):
"""Convert role to corresponding dn"""
if project_id is None:
return FLAGS.__getitem__("ldap_%s" % role).value
else:
- return 'cn=%s,cn=%s,%s' % (role,
- project_id,
- FLAGS.ldap_project_subtree)
+ project_dn = self.__project_to_dn(project_id)
+ return 'cn=%s,%s' % (role, project_dn)
def __create_group(self, group_dn, name, uid,
description, member_uids=None):
@@ -534,6 +532,42 @@ class LdapDriver(object):
for role_dn in self.__find_role_dns(project_dn):
self.__delete_group(role_dn)
+ def __to_project(self, attr):
+ """Convert ldap attributes to Project object"""
+ if attr is None:
+ return None
+ member_dns = attr.get('member', [])
+ return {
+ 'id': attr['cn'][0],
+ 'name': attr['cn'][0],
+ 'project_manager_id':
+ self.__dn_to_uid(attr[LdapDriver.project_attribute][0]),
+ 'description': attr.get('description', [None])[0],
+ 'member_ids': [self.__dn_to_uid(x) for x in member_dns]}
+
+ def __uid_to_dn(self, uid, search=True):
+ """Convert uid to dn"""
+ # By default return a generated DN
+ userdn = (FLAGS.ldap_user_id_attribute + '=%s,%s'
+ % (uid, FLAGS.ldap_user_subtree))
+ if search:
+ query = ('%s=%s' % (FLAGS.ldap_user_id_attribute, uid))
+ user = self.__find_dns(FLAGS.ldap_user_subtree, query)
+ if len(user) > 0:
+ userdn = user[0]
+ return userdn
+
+ def __project_to_dn(self, pid, search=True):
+ """Convert pid to dn"""
+ # By default return a generated DN
+ projectdn = ('cn=%s,%s' % (pid, FLAGS.ldap_project_subtree))
+ if search:
+ query = ('(&(cn=%s)%s)' % (pid, LdapDriver.project_pattern))
+ project = self.__find_dns(FLAGS.ldap_project_subtree, query)
+ if len(project) > 0:
+ projectdn = project[0]
+ return projectdn
+
@staticmethod
def __to_user(attr):
"""Convert ldap attributes to User object"""
@@ -550,30 +584,11 @@ class LdapDriver(object):
else:
return None
- def __to_project(self, attr):
- """Convert ldap attributes to Project object"""
- if attr is None:
- return None
- member_dns = attr.get('member', [])
- return {
- 'id': attr['cn'][0],
- 'name': attr['cn'][0],
- 'project_manager_id':
- self.__dn_to_uid(attr[LdapDriver.project_attribute][0]),
- 'description': attr.get('description', [None])[0],
- 'member_ids': [self.__dn_to_uid(x) for x in member_dns]}
-
@staticmethod
def __dn_to_uid(dn):
"""Convert user dn to uid"""
return dn.split(',')[0].split('=')[1]
- @staticmethod
- def __uid_to_dn(uid):
- """Convert uid to dn"""
- return (FLAGS.ldap_user_id_attribute + '=%s,%s'
- % (uid, FLAGS.ldap_user_subtree))
-
class FakeLdapDriver(LdapDriver):
"""Fake Ldap Auth driver"""
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index 5685ae5e2..89f02998d 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -684,8 +684,7 @@ class AuthManager(object):
else:
regions = {'nova': FLAGS.cc_host}
for region, host in regions.iteritems():
- rc = self.__generate_rc(user.access,
- user.secret,
+ rc = self.__generate_rc(user,
pid,
use_dmz,
host)
@@ -725,7 +724,7 @@ class AuthManager(object):
return self.__generate_rc(user.access, user.secret, pid, use_dmz)
@staticmethod
- def __generate_rc(access, secret, pid, use_dmz=True, host=None):
+ def __generate_rc(user, pid, use_dmz=True, host=None):
"""Generate rc file for user"""
if use_dmz:
cc_host = FLAGS.cc_dmz
@@ -738,14 +737,19 @@ class AuthManager(object):
s3_host = host
cc_host = host
rc = open(FLAGS.credentials_template).read()
- rc = rc % {'access': access,
+ rc = rc % {'access': user.access,
'project': pid,
- 'secret': secret,
+ 'secret': user.secret,
'ec2': '%s://%s:%s%s' % (FLAGS.ec2_prefix,
cc_host,
FLAGS.cc_port,
FLAGS.ec2_suffix),
's3': 'http://%s:%s' % (s3_host, FLAGS.s3_port),
+ 'os': '%s://%s:%s%s' % (FLAGS.os_prefix,
+ cc_host,
+ FLAGS.cc_port,
+ FLAGS.os_suffix),
+ 'user': user.name,
'nova': FLAGS.ca_file,
'cert': FLAGS.credential_cert_file,
'key': FLAGS.credential_key_file}
diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template
index 1b8ecb173..c53a4acdc 100644
--- a/nova/auth/novarc.template
+++ b/nova/auth/novarc.template
@@ -10,3 +10,7 @@ export NOVA_CERT=${NOVA_KEY_DIR}/%(nova)s
export EUCALYPTUS_CERT=${NOVA_CERT} # euca-bundle-image seems to require this set
alias ec2-bundle-image="ec2-bundle-image --cert ${EC2_CERT} --privatekey ${EC2_PRIVATE_KEY} --user 42 --ec2cert ${NOVA_CERT}"
alias ec2-upload-bundle="ec2-upload-bundle -a ${EC2_ACCESS_KEY} -s ${EC2_SECRET_KEY} --url ${S3_URL} --ec2cert ${NOVA_CERT}"
+export CLOUD_SERVERS_API_KEY="%(access)s"
+export CLOUD_SERVERS_USERNAME="%(user)s"
+export CLOUD_SERVERS_URL="%(os)s"
+
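The three new variables are filled from the user's access key, the user name, and the OpenStack endpoint assembled from os_prefix, cc_host, cc_port and os_suffix. A minimal, hypothetical sketch of reading them after sourcing novarc (client tools normally pick these up themselves):

    import os

    api_key = os.environ['CLOUD_SERVERS_API_KEY']    # filled from %(access)s
    username = os.environ['CLOUD_SERVERS_USERNAME']  # filled from %(user)s
    endpoint = os.environ['CLOUD_SERVERS_URL']       # e.g. http://<cc_host>:8773/v1.0/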
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 10d7b67cf..90273da36 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -108,6 +108,8 @@ class API(base.Base):
ramdisk_id = None
LOG.debug(_("Creating a raw instance"))
# Make sure we have access to kernel and ramdisk (if not raw)
+ logging.debug("Using Kernel=%s, Ramdisk=%s" %
+ (kernel_id, ramdisk_id))
if kernel_id:
self.image_service.show(context, kernel_id)
if ramdisk_id:
@@ -171,7 +173,8 @@ class API(base.Base):
# Set sane defaults if not specified
updates = dict(hostname=generate_hostname(instance_id))
- if 'display_name' not in instance:
+ if (not hasattr(instance, 'display_name')) or \
+ instance.display_name == None:
updates['display_name'] = "Server %s" % instance_id
instance = self.update(context, instance_id, **updates)
@@ -183,7 +186,8 @@ class API(base.Base):
FLAGS.scheduler_topic,
{"method": "run_instance",
"args": {"topic": FLAGS.compute_topic,
- "instance_id": instance_id}})
+ "instance_id": instance_id,
+ "availability_zone": availability_zone}})
for group_id in security_groups:
self.trigger_security_group_members_refresh(elevated, group_id)
@@ -379,6 +383,25 @@ class API(base.Base):
"""Set the root/admin password for the given instance."""
self._cast_compute_message('set_admin_password', context, instance_id)
+ def get_ajax_console(self, context, instance_id):
+ """Get a url to an AJAX Console"""
+
+ instance = self.get(context, instance_id)
+
+ output = rpc.call(context,
+ '%s.%s' % (FLAGS.compute_topic,
+ instance['host']),
+ {'method': 'get_ajax_console',
+ 'args': {'instance_id': instance['id']}})
+
+ rpc.cast(context, '%s' % FLAGS.ajax_console_proxy_topic,
+ {'method': 'authorize_ajax_console',
+ 'args': {'token': output['token'], 'host': output['host'],
+ 'port': output['port']}})
+
+ return {'url': '%s?token=%s' % (FLAGS.ajax_console_proxy_url,
+ output['token'])}
+
def lock(self, context, instance_id):
"""lock the instance with instance_id"""
self._cast_compute_message('lock_instance', context, instance_id)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 9872cc65b..267beca45 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -504,6 +504,14 @@ class ComputeManager(manager.Manager):
return self.driver.get_console_output(instance_ref)
@exception.wrap_exception
+ def get_ajax_console(self, context, instance_id):
+ """Return connection information for an ajax console"""
+ context = context.elevated()
+ logging.debug(_("instance %s: getting ajax console"), instance_id)
+ instance_ref = self.db.instance_get(context, instance_id)
+
+ return self.driver.get_ajax_console(instance_ref)
+
@checks_instance_lock
def attach_volume(self, context, instance_id, volume_id, mountpoint):
"""Attach a volume to an instance."""
diff --git a/nova/db/api.py b/nova/db/api.py
index cf84157bc..1f81ef145 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -81,16 +81,21 @@ def service_get(context, service_id):
return IMPL.service_get(context, service_id)
-def service_get_all(context):
- """Get a list of all services on any machine on any topic of any type"""
- return IMPL.service_get_all(context)
+def service_get_all(context, disabled=False):
+ """Get all service."""
+ return IMPL.service_get_all(context, None, disabled)
def service_get_all_by_topic(context, topic):
- """Get all compute services for a given topic."""
+ """Get all services for a given topic."""
return IMPL.service_get_all_by_topic(context, topic)
+def service_get_all_by_host(context, host):
+ """Get all services for a given host."""
+ return IMPL.service_get_all_by_host(context, host)
+
+
def service_get_all_compute_sorted(context):
"""Get all compute services sorted by instance count.
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 4561fa219..2e4f8fc39 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -135,14 +135,14 @@ def service_get(context, service_id, session=None):
@require_admin_context
-def service_get_all(context, session=None):
+def service_get_all(context, session=None, disabled=False):
if not session:
session = get_session()
result = session.query(models.Service).\
- filter_by(deleted=can_read_deleted(context)).\
- all()
-
+ filter_by(deleted=can_read_deleted(context)).\
+ filter_by(disabled=disabled).\
+ all()
return result
@@ -157,6 +157,15 @@ def service_get_all_by_topic(context, topic):
@require_admin_context
+def service_get_all_by_host(context, host):
+ session = get_session()
+ return session.query(models.Service).\
+ filter_by(deleted=False).\
+ filter_by(host=host).\
+ all()
+
+
+@require_admin_context
def _service_get_all_topic_subquery(context, session, topic, subq, label):
sort_value = getattr(subq.c, label)
return session.query(models.Service, func.coalesce(sort_value, 0)).\
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 2a966448c..1dc46fe78 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -149,6 +149,7 @@ class Service(BASE, NovaBase):
topic = Column(String(255))
report_count = Column(Integer, nullable=False, default=0)
disabled = Column(Boolean, default=False)
+ availability_zone = Column(String(255), default='nova')
class Certificate(BASE, NovaBase):
diff --git a/nova/flags.py b/nova/flags.py
index ab3a2b5f8..fdcba6c72 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -234,7 +234,14 @@ DEFINE_string('scheduler_topic', 'scheduler',
'the topic scheduler nodes listen on')
DEFINE_string('volume_topic', 'volume', 'the topic volume nodes listen on')
DEFINE_string('network_topic', 'network', 'the topic network nodes listen on')
-
+DEFINE_string('ajax_console_proxy_topic', 'ajax_proxy',
+ 'the topic ajax proxy nodes listen on')
+DEFINE_string('ajax_console_proxy_url',
+ 'http://127.0.0.1:8000',
+ 'location of ajax console proxy, \
+ in the form "http://127.0.0.1:8000"')
+DEFINE_string('ajax_console_proxy_port',
+ 8000, 'port that ajax_console_proxy binds')
DEFINE_bool('verbose', False, 'show debug output')
DEFINE_boolean('fake_rabbit', False, 'use a fake rabbit')
DEFINE_bool('fake_network', False,
@@ -248,10 +255,12 @@ DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval')
DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts')
DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to')
DEFINE_string('ec2_prefix', 'http', 'prefix for ec2')
+DEFINE_string('os_prefix', 'http', 'prefix for openstack')
DEFINE_string('cc_host', '$my_ip', 'ip of api server')
DEFINE_string('cc_dmz', '$my_ip', 'internal ip of api server')
DEFINE_integer('cc_port', 8773, 'cloud controller port')
DEFINE_string('ec2_suffix', '/services/Cloud', 'suffix for ec2')
+DEFINE_string('os_suffix', '/v1.0/', 'suffix for openstack')
DEFINE_string('default_project', 'openstack', 'default project for openstack')
DEFINE_string('default_image', 'ami-11111',
@@ -299,6 +308,5 @@ DEFINE_string('image_service', 'nova.image.s3.S3ImageService',
DEFINE_string('host', socket.gethostname(),
'name of this node')
-# UNUSED
DEFINE_string('node_availability_zone', 'nova',
'availability zone of this node')
diff --git a/nova/scheduler/zone.py b/nova/scheduler/zone.py
new file mode 100644
index 000000000..49786cd32
--- /dev/null
+++ b/nova/scheduler/zone.py
@@ -0,0 +1,56 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Openstack, LLC.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Availability Zone Scheduler implementation
+"""
+
+import random
+
+from nova.scheduler import driver
+from nova import db
+
+
+class ZoneScheduler(driver.Scheduler):
+ """Implements Scheduler as a random node selector."""
+
+ def hosts_up_with_zone(self, context, topic, zone):
+ """Return the list of hosts that have a running service
+ for topic and availability zone (if defined).
+ """
+
+ if zone is None:
+ return self.hosts_up(context, topic)
+
+ services = db.service_get_all_by_topic(context, topic)
+ return [service.host
+ for service in services
+ if self.service_is_up(service)
+ and service.availability_zone == zone]
+
+ def schedule(self, context, topic, *_args, **_kwargs):
+ """Picks a host that is up at random in selected
+ availability zone (if defined).
+ """
+
+ zone = _kwargs.get('availability_zone')
+ hosts = self.hosts_up_with_zone(context, topic, zone)
+ if not hosts:
+ raise driver.NoValidHost(_("No hosts found"))
+ return hosts[int(random.random() * len(hosts))]
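To actually use this scheduler, the existing scheduler_driver flag must point at it, and each service advertises its zone through the node_availability_zone flag defined in nova/flags.py. An illustrative flag-file fragment (the zone name is hypothetical):

    --scheduler_driver=nova.scheduler.zone.ZoneScheduler
    --node_availability_zone=zone1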
diff --git a/nova/service.py b/nova/service.py
index 523c1a8d7..8b2a22ce0 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -113,11 +113,13 @@ class Service(object):
self.timers.append(periodic)
def _create_service_ref(self, context):
+ zone = FLAGS.node_availability_zone
service_ref = db.service_create(context,
{'host': self.host,
'binary': self.binary,
'topic': self.topic,
- 'report_count': 0})
+ 'report_count': 0,
+ 'availability_zone': zone})
self.service_id = service_ref['id']
def __getattr__(self, key):
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py
index f5be9c94f..00ca739a5 100644
--- a/nova/tests/api/openstack/test_images.py
+++ b/nova/tests/api/openstack/test_images.py
@@ -172,6 +172,7 @@ class ImageControllerWithGlanceServiceTest(unittest.TestCase):
IMAGE_FIXTURES = [
{'id': '23g2ogk23k4hhkk4k42l',
+ 'imageId': '23g2ogk23k4hhkk4k42l',
'name': 'public image #1',
'created_at': str(datetime.datetime.utcnow()),
'updated_at': str(datetime.datetime.utcnow()),
@@ -181,6 +182,7 @@ class ImageControllerWithGlanceServiceTest(unittest.TestCase):
'status': 'available',
'image_type': 'kernel'},
{'id': 'slkduhfas73kkaskgdas',
+ 'imageId': 'slkduhfas73kkaskgdas',
'name': 'public image #2',
'created_at': str(datetime.datetime.utcnow()),
'updated_at': str(datetime.datetime.utcnow()),
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index 6e611a55d..0396daf98 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -133,6 +133,12 @@ class ServersTest(unittest.TestCase):
def queue_get_for(context, *args):
return 'network_topic'
+ def kernel_ramdisk_mapping(*args, **kwargs):
+ return (1, 1)
+
+ def image_id_from_hash(*args, **kwargs):
+ return 2
+
self.stubs.Set(nova.db.api, 'project_get_network', project_get_network)
self.stubs.Set(nova.db.api, 'instance_create', instance_create)
self.stubs.Set(nova.rpc, 'cast', fake_method)
@@ -142,6 +148,10 @@ class ServersTest(unittest.TestCase):
self.stubs.Set(nova.db.api, 'queue_get_for', queue_get_for)
self.stubs.Set(nova.network.manager.VlanManager, 'allocate_fixed_ip',
fake_method)
+ self.stubs.Set(nova.api.openstack.servers.Controller,
+ "_get_kernel_ramdisk_from_image", kernel_ramdisk_mapping)
+ self.stubs.Set(nova.api.openstack.common,
+ "get_image_id_from_image_hash", image_id_from_hash)
body = dict(server=dict(
name='server_test', imageId=2, flavorId=2, metadata={},
diff --git a/nova/tests/api/openstack/test_sharedipgroups.py b/nova/tests/api/openstack/test_shared_ip_groups.py
index d199951d8..c2fc3a203 100644
--- a/nova/tests/api/openstack/test_sharedipgroups.py
+++ b/nova/tests/api/openstack/test_shared_ip_groups.py
@@ -19,7 +19,7 @@ import unittest
import stubout
-from nova.api.openstack import sharedipgroups
+from nova.api.openstack import shared_ip_groups
class SharedIpGroupsTest(unittest.TestCase):
diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index b8a15c7b2..fdacb04f6 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -133,10 +133,35 @@ class CloudTestCase(test.TestCase):
db.volume_destroy(self.context, vol1['id'])
db.volume_destroy(self.context, vol2['id'])
+ def test_describe_availability_zones(self):
+ """Makes sure describe_availability_zones works and filters results."""
+ service1 = db.service_create(self.context, {'host': 'host1_zones',
+ 'binary': "nova-compute",
+ 'topic': 'compute',
+ 'report_count': 0,
+ 'availability_zone': "zone1"})
+ service2 = db.service_create(self.context, {'host': 'host2_zones',
+ 'binary': "nova-compute",
+ 'topic': 'compute',
+ 'report_count': 0,
+ 'availability_zone': "zone2"})
+ result = self.cloud.describe_availability_zones(self.context)
+ self.assertEqual(len(result['availabilityZoneInfo']), 3)
+ db.service_destroy(self.context, service1['id'])
+ db.service_destroy(self.context, service2['id'])
+
def test_describe_instances(self):
"""Makes sure describe_instances works and filters results."""
- inst1 = db.instance_create(self.context, {'reservation_id': 'a'})
- inst2 = db.instance_create(self.context, {'reservation_id': 'a'})
+ inst1 = db.instance_create(self.context, {'reservation_id': 'a',
+ 'host': 'host1'})
+ inst2 = db.instance_create(self.context, {'reservation_id': 'a',
+ 'host': 'host2'})
+ comp1 = db.service_create(self.context, {'host': 'host1',
+ 'availability_zone': 'zone1',
+ 'topic': "compute"})
+ comp2 = db.service_create(self.context, {'host': 'host2',
+ 'availability_zone': 'zone2',
+ 'topic': "compute"})
result = self.cloud.describe_instances(self.context)
result = result['reservationSet'][0]
self.assertEqual(len(result['instancesSet']), 2)
@@ -147,8 +172,12 @@ class CloudTestCase(test.TestCase):
self.assertEqual(len(result['instancesSet']), 1)
self.assertEqual(result['instancesSet'][0]['instanceId'],
instance_id)
+ self.assertEqual(result['instancesSet'][0]
+ ['placement']['availabilityZone'], 'zone2')
db.instance_destroy(self.context, inst1['id'])
db.instance_destroy(self.context, inst2['id'])
+ db.service_destroy(self.context, comp1['id'])
+ db.service_destroy(self.context, comp2['id'])
def test_console_output(self):
image_id = FLAGS.default_image
@@ -167,6 +196,19 @@ class CloudTestCase(test.TestCase):
greenthread.sleep(0.3)
rv = self.cloud.terminate_instances(self.context, [instance_id])
+    def test_ajax_console(self):
+        image_id = FLAGS.default_image
+        kwargs = {'image_id': image_id}
+        rv = yield self.cloud.run_instances(self.context, **kwargs)
+        instance_id = rv['instancesSet'][0]['instanceId']
+        output = yield self.cloud.get_ajax_console(context=self.context,
+                                                   instance_id=[instance_id])
+        self.assertTrue('url' in output)
+        # TODO(soren): We need this until we can stop polling in the rpc code
+        # for unit tests.
+        greenthread.sleep(0.3)
+        rv = yield self.cloud.terminate_instances(self.context, [instance_id])
+
def test_key_generation(self):
result = self._create_key('test')
private_key = result['private_key']
@@ -228,6 +270,19 @@ class CloudTestCase(test.TestCase):
LOG.debug(_("Terminating instance %s"), instance_id)
rv = self.compute.terminate_instance(instance_id)
+    def test_describe_instances_availability_zone(self):
+        """Makes sure describe_instances reports the availability zone."""
+ instance1 = db.instance_create(self.context, {'host': 'host2'})
+ comp1 = db.service_create(self.context, {'host': 'host2',
+ 'availability_zone': 'zone1',
+ 'topic': "compute"})
+ result = self.cloud.describe_instances(self.context)
+ self.assertEqual(result['reservationSet'][0]
+ ['instancesSet'][0]
+ ['placement']['availabilityZone'], 'zone1')
+ db.instance_destroy(self.context, instance1['id'])
+ db.service_destroy(self.context, comp1['id'])
+
def test_instance_update_state(self):
def instance(num):
return {
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 7a384f1da..a7d47961c 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -176,6 +176,16 @@ class ComputeTestCase(test.TestCase):
self.assert_(console)
self.compute.terminate_instance(self.context, instance_id)
+ def test_ajax_console(self):
+ """Make sure we can get console output from instance"""
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+
+ console = self.compute.get_ajax_console(self.context,
+ instance_id)
+ self.assert_(console)
+ self.compute.terminate_instance(self.context, instance_id)
+
def test_run_instance_existing(self):
"""Ensure failure when running an instance that already exists"""
instance_id = self._create_instance()
diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py
index a9937d797..9d458244b 100644
--- a/nova/tests/test_scheduler.py
+++ b/nova/tests/test_scheduler.py
@@ -21,6 +21,7 @@ Tests For Scheduler
import datetime
+from mox import IgnoreArg
from nova import context
from nova import db
from nova import flags
@@ -76,6 +77,59 @@ class SchedulerTestCase(test.TestCase):
scheduler.named_method(ctxt, 'topic', num=7)
+class ZoneSchedulerTestCase(test.TestCase):
+ """Test case for zone scheduler"""
+ def setUp(self):
+ super(ZoneSchedulerTestCase, self).setUp()
+ self.flags(scheduler_driver='nova.scheduler.zone.ZoneScheduler')
+
+ def _create_service_model(self, **kwargs):
+ service = db.sqlalchemy.models.Service()
+ service.host = kwargs['host']
+ service.disabled = False
+ service.deleted = False
+ service.report_count = 0
+ service.binary = 'nova-compute'
+ service.topic = 'compute'
+ service.id = kwargs['id']
+ service.availability_zone = kwargs['zone']
+ service.created_at = datetime.datetime.utcnow()
+ return service
+
+ def test_with_two_zones(self):
+ scheduler = manager.SchedulerManager()
+ ctxt = context.get_admin_context()
+ service_list = [self._create_service_model(id=1,
+ host='host1',
+ zone='zone1'),
+ self._create_service_model(id=2,
+ host='host2',
+ zone='zone2'),
+ self._create_service_model(id=3,
+ host='host3',
+ zone='zone2'),
+ self._create_service_model(id=4,
+ host='host4',
+ zone='zone2'),
+ self._create_service_model(id=5,
+ host='host5',
+ zone='zone2')]
+ self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
+ arg = IgnoreArg()
+ db.service_get_all_by_topic(arg, arg).AndReturn(service_list)
+ self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
+ rpc.cast(ctxt,
+ 'compute.host1',
+ {'method': 'run_instance',
+ 'args': {'instance_id': 'i-ffffffff',
+ 'availability_zone': 'zone1'}})
+ self.mox.ReplayAll()
+ scheduler.run_instance(ctxt,
+ 'compute',
+ instance_id='i-ffffffff',
+ availability_zone='zone1')
+
+
class SimpleDriverTestCase(test.TestCase):
"""Test case for simple driver"""
def setUp(self):
diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py
index 9f1a181a0..a67c8d1e8 100644
--- a/nova/tests/test_service.py
+++ b/nova/tests/test_service.py
@@ -133,7 +133,8 @@ class ServiceTestCase(test.TestCase):
service_create = {'host': host,
'binary': binary,
'topic': topic,
- 'report_count': 0}
+ 'report_count': 0,
+ 'availability_zone': 'nova'}
service_ref = {'host': host,
'binary': binary,
'report_count': 0,
@@ -161,11 +162,13 @@ class ServiceTestCase(test.TestCase):
service_create = {'host': host,
'binary': binary,
'topic': topic,
- 'report_count': 0}
+ 'report_count': 0,
+ 'availability_zone': 'nova'}
service_ref = {'host': host,
'binary': binary,
'topic': topic,
'report_count': 0,
+ 'availability_zone': 'nova',
'id': 1}
service.db.service_get_by_args(mox.IgnoreArg(),
@@ -193,11 +196,13 @@ class ServiceTestCase(test.TestCase):
service_create = {'host': host,
'binary': binary,
'topic': topic,
- 'report_count': 0}
+ 'report_count': 0,
+ 'availability_zone': 'nova'}
service_ref = {'host': host,
'binary': binary,
'topic': topic,
'report_count': 0,
+ 'availability_zone': 'nova',
'id': 1}
service.db.service_get_by_args(mox.IgnoreArg(),
@@ -224,11 +229,13 @@ class ServiceTestCase(test.TestCase):
service_create = {'host': host,
'binary': binary,
'topic': topic,
- 'report_count': 0}
+ 'report_count': 0,
+ 'availability_zone': 'nova'}
service_ref = {'host': host,
'binary': binary,
'topic': topic,
'report_count': 0,
+ 'availability_zone': 'nova',
'id': 1}
service.db.service_get_by_args(mox.IgnoreArg(),
diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py
index 2ef9ee9c1..afdc89ba2 100644
--- a/nova/tests/test_virt.py
+++ b/nova/tests/test_virt.py
@@ -278,6 +278,20 @@ class IptablesFirewallTestCase(test.TestCase):
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
+ 'protocol': 'icmp',
+ 'from_port': -1,
+ 'to_port': -1,
+ 'cidr': '192.168.11.0/24'})
+
+ db.security_group_rule_create(admin_ctxt,
+ {'parent_group_id': secgroup['id'],
+ 'protocol': 'icmp',
+ 'from_port': 8,
+ 'to_port': -1,
+ 'cidr': '192.168.11.0/24'})
+
+ db.security_group_rule_create(admin_ctxt,
+ {'parent_group_id': secgroup['id'],
'protocol': 'tcp',
'from_port': 80,
'to_port': 81,
@@ -297,7 +311,35 @@ class IptablesFirewallTestCase(test.TestCase):
self.assertTrue(rule in out_rules,
'Rule went missing: %s' % rule)
- print '\n'.join(out_rules)
+ instance_chain = None
+ for rule in out_rules:
+ # This is pretty crude, but it'll do for now
+ if '-d 10.11.12.13 -j' in rule:
+ instance_chain = rule.split(' ')[-1]
+ break
+ self.assertTrue(instance_chain, "The instance chain wasn't added")
+
+ security_group_chain = None
+ for rule in out_rules:
+ # This is pretty crude, but it'll do for now
+ if '-A %s -j' % instance_chain in rule:
+ security_group_chain = rule.split(' ')[-1]
+ break
+ self.assertTrue(security_group_chain,
+ "The security group chain wasn't added")
+
+ self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -j ACCEPT' % \
+ security_group_chain in out_rules,
+ "ICMP acceptance rule wasn't added")
+
+ self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -m icmp --icmp-type'
+ ' 8 -j ACCEPT' % security_group_chain in out_rules,
+ "ICMP Echo Request acceptance rule wasn't added")
+
+ self.assertTrue('-A %s -p tcp -s 192.168.10.0/24 -m multiport '
+ '--dports 80:81 -j ACCEPT' % security_group_chain \
+ in out_rules,
+ "TCP port 80/81 acceptance rule wasn't added")
class NWFilterTestCase(test.TestCase):
diff --git a/nova/utils.py b/nova/utils.py
index aadbec532..45adb7b38 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -153,6 +153,11 @@ def abspath(s):
return os.path.join(os.path.dirname(__file__), s)
+def novadir():
+ import nova
+ return os.path.abspath(nova.__file__).split('nova/__init__.pyc')[0]
+
+
def default_flagfile(filename='nova.conf'):
for arg in sys.argv:
if arg.find('flagfile') != -1:
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 038857e81..a57a8f43b 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -302,6 +302,9 @@ class FakeConnection(object):
def get_console_output(self, instance):
return 'FAKE CONSOLE OUTPUT'
+    def get_ajax_console(self, instance):
+        # compute.api expects a dict with 'token', 'host' and 'port'
+        return {'token': 'FAKETOKEN', 'host': 'fakeajaxconsole.com',
+                'port': 6969}
+
def get_console_pool_info(self, console_type):
return {'address': '127.0.0.1',
'username': 'fakeuser',
diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template
index 3fb2243da..2eb7d9488 100644
--- a/nova/virt/libvirt.xml.template
+++ b/nova/virt/libvirt.xml.template
@@ -71,9 +71,22 @@
#end if
</filterref>
</interface>
+
+ <!-- The order is significant here. File must be defined first -->
<serial type="file">
<source path='${basepath}/console.log'/>
<target port='1'/>
</serial>
+
+ <console type='pty' tty='/dev/pts/2'>
+ <source path='/dev/pts/2'/>
+ <target port='0'/>
+ </console>
+
+ <serial type='pty'>
+ <source path='/dev/pts/2'/>
+ <target port='0'/>
+ </serial>
+
</devices>
</domain>
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 19f79e19f..655c55fa1 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -38,6 +38,11 @@ Supports KVM, QEMU, UML, and XEN.
import os
import shutil
+import random
+import subprocess
+import uuid
+from xml.dom import minidom
+
from eventlet import greenthread
from eventlet import event
@@ -86,6 +91,9 @@ flags.DEFINE_string('libvirt_uri',
flags.DEFINE_bool('allow_project_net_traffic',
True,
'Whether to allow in project network traffic')
+flags.DEFINE_string('ajaxterm_portrange',
+ '10000-12000',
+ 'Range of ports that ajaxterm should randomly try to bind')
flags.DEFINE_string('firewall_driver',
'nova.virt.libvirt_conn.IptablesFirewallDriver',
'Firewall driver (defaults to iptables)')
@@ -433,6 +441,43 @@ class LibvirtConnection(object):
return self._dump_file(fpath)
+ @exception.wrap_exception
+ def get_ajax_console(self, instance):
+ def get_open_port():
+ start_port, end_port = FLAGS.ajaxterm_portrange.split("-")
+ for i in xrange(0, 100): # don't loop forever
+ port = random.randint(int(start_port), int(end_port))
+ # netcat will exit with 0 only if the port is in use,
+ # so a nonzero return value implies it is unused
+ cmd = 'netcat 0.0.0.0 %s -w 1 </dev/null || echo free' % (port)
+ stdout, stderr = utils.execute(cmd)
+ if stdout.strip() == 'free':
+ return port
+ raise Exception(_('Unable to find an open port'))
+
+ def get_pty_for_instance(instance_name):
+ virt_dom = self._conn.lookupByName(instance_name)
+ xml = virt_dom.XMLDesc(0)
+ dom = minidom.parseString(xml)
+
+ for serial in dom.getElementsByTagName('serial'):
+ if serial.getAttribute('type') == 'pty':
+ source = serial.getElementsByTagName('source')[0]
+ return source.getAttribute('path')
+
+ port = get_open_port()
+ token = str(uuid.uuid4())
+ host = instance['host']
+
+ ajaxterm_cmd = 'sudo socat - %s' \
+ % get_pty_for_instance(instance['name'])
+
+ cmd = '%s/tools/ajaxterm/ajaxterm.py --command "%s" -t %s -p %s' \
+ % (utils.novadir(), ajaxterm_cmd, token, port)
+
+ subprocess.Popen(cmd, shell=True)
+ return {'token': token, 'host': host, 'port': port}
+
def _create_image(self, inst, libvirt_xml, prefix='', disk_images=None):
# syntactic nicety
basepath = lambda fname = '', prefix = prefix: os.path.join(
@@ -1113,15 +1158,15 @@ class IptablesFirewallDriver(FirewallDriver):
icmp_type = rule.from_port
icmp_code = rule.to_port
- if icmp_type == '-1':
+ if icmp_type == -1:
icmp_type_arg = None
else:
icmp_type_arg = '%s' % icmp_type
- if not icmp_code == '-1':
+ if not icmp_code == -1:
icmp_type_arg += '/%s' % icmp_code
if icmp_type_arg:
- args += ['-m', 'icmp', '--icmp_type', icmp_type_arg]
+ args += ['-m', 'icmp', '--icmp-type', icmp_type_arg]
args += ['-j ACCEPT']
our_rules += [' '.join(args)]
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 1e9448a26..a91c8ea27 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -357,7 +357,9 @@ class VMHelper(HelperBase):
if i >= 3 and i <= 11:
ref = node.childNodes
# Name and Value
- diags[ref[0].firstChild.data] = ref[6].firstChild.data
+ if len(ref) > 6:
+ diags[ref[0].firstChild.data] = \
+ ref[6].firstChild.data
return diags
except cls.XenAPI.Failure as e:
return {"Unable to retrieve diagnostics": e}
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index c10943aa1..99583bee1 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -64,13 +64,13 @@ class VMOps(object):
"""Create VM instance"""
vm = VMHelper.lookup(self._session, instance.name)
if vm is not None:
- msg = _('Attempted to create non-unique name %s') % instance.name
- raise exception.Duplicate(msg)
+ raise exception.Duplicate(_('Attempted to create'
+ ' non-unique name %s') % instance.name)
bridge = db.network_get_by_instance(context.get_admin_context(),
instance['id'])['bridge']
- network_ref = NetworkHelper.find_network_with_bridge(self._session,
- bridge)
+ network_ref = \
+ NetworkHelper.find_network_with_bridge(self._session, bridge)
user = AuthManager().get_user(instance.user_id)
project = AuthManager().get_project(instance.project_id)
@@ -145,8 +145,15 @@ class VMOps(object):
# Not a string; must be an ID or a vm instance
if isinstance(instance_or_vm, (int, long)):
ctx = context.get_admin_context()
- instance_obj = db.instance_get_by_id(ctx, instance_or_vm)
- instance_name = instance_obj.name
+ try:
+ instance_obj = db.instance_get_by_id(ctx, instance_or_vm)
+ instance_name = instance_obj.name
+ except exception.NotFound:
+ # The unit tests screw this up, as they use an integer for
+ # the vm name. I'd fix that up, but that's a matter for
+ # another bug report. So for now, just try with the passed
+ # value
+ instance_name = instance_or_vm
else:
instance_name = instance_or_vm.name
vm = VMHelper.lookup(self._session, instance_name)
@@ -328,6 +335,11 @@ class VMOps(object):
# TODO: implement this to fix pylint!
return 'FAKE CONSOLE OUTPUT of instance'
+ def get_ajax_console(self, instance):
+ """Return link to instance's ajax console"""
+ # TODO: implement this!
+ return 'http://fakeajaxconsole/fake_url'
+
def list_from_xenstore(self, vm, path):
"""Runs the xenstore-ls command to get a listing of all records
from 'path' downward. Returns a dict with the sub-paths as keys,
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index 8189e40db..689844f34 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -185,6 +185,10 @@ class XenAPIConnection(object):
"""Return snapshot of console"""
return self._vmops.get_console_output(instance)
+ def get_ajax_console(self, instance):
+ """Return link to instance's ajax console"""
+ return self._vmops.get_ajax_console(instance)
+
def attach_volume(self, instance_name, device_path, mountpoint):
"""Attach volume storage to VM instance"""
return self._volumeops.attach_volume(instance_name,
diff --git a/tools/ajaxterm/README.txt b/tools/ajaxterm/README.txt
new file mode 100644
index 000000000..4b0ae99af
--- /dev/null
+++ b/tools/ajaxterm/README.txt
@@ -0,0 +1,120 @@
+= [http://antony.lesuisse.org/qweb/trac/wiki/AjaxTerm Ajaxterm] =
+
+Ajaxterm is a web based terminal. It was totally inspired by, and works almost
+exactly like, http://anyterm.org/ except it's much easier to install (see the
+comparison with anyterm below).
+
+Ajaxterm is written in python (plus some AJAX javascript for the client side) and depends only on python2.3 or better.[[BR]]
+Ajaxterm is '''very simple to install''' on Linux, MacOS X, FreeBSD, Solaris, cygwin and any Unix that runs python2.3.[[BR]]
+Ajaxterm was written by Antony Lesuisse (email: al AT udev.org), License Public Domain.
+
+Use the [/qweb/forum/viewforum.php?id=2 Forum], if you have any question or remark.
+
+== News ==
+
+ * 2006-10-29: v0.10 allow space in login, cgi launch fix, redhat init
+ * 2006-07-12: v0.9 change uid, daemon fix (Daniel Fischer)
+ * 2006-07-04: v0.8 add login support to ssh (Sven Geggus), change max width to 256
+ * 2006-05-31: v0.7 minor fixes, daemon option
+ * 2006-05-23: v0.6 Applied debian and gentoo patches, renamed to Ajaxterm, default port 8022
+
+== Download and Install ==
+
+ * Release: [/qweb/files/Ajaxterm-0.10.tar.gz Ajaxterm-0.10.tar.gz]
+ * Browse src: [/qweb/trac/browser/trunk/ajaxterm/ ajaxterm/]
+
+To install Ajaxterm issue the following commands:
+{{{
+wget http://antony.lesuisse.org/qweb/files/Ajaxterm-0.10.tar.gz
+tar zxvf Ajaxterm-0.10.tar.gz
+cd Ajaxterm-0.10
+./ajaxterm.py
+}}}
+Then point your browser to this URL : http://localhost:8022/
+
+== Screenshot ==
+
+{{{
+#!html
+<center><img src="/qweb/trac/attachment/wiki/AjaxTerm/scr.png?format=raw" alt="ajaxterm screenshot" style=""/></center>
+}}}
+
+== Documentation and Caveats ==
+
+ * Ajaxterm only supports latin1. If you use Ubuntu or any LANG==en_US.UTF-8 distribution, don't forget to "unset LANG".
+
+ * If run as root ajaxterm will run /bin/login, otherwise it will run ssh
+   localhost. To use another command, use the -c option.
+
+ * By default Ajaxterm only listens on 127.0.0.1:8022. For remote access, it is
+   strongly recommended to use '''https SSL/TLS''', and that is simple to
+   configure if you use the apache web server with mod_proxy.[[BR]][[BR]]
+   Using ssl will also speed up ajaxterm (probably because of keepalive).[[BR]][[BR]]
+   Here is a configuration example:
+
+{{{
+ Listen 443
+ NameVirtualHost *:443
+
+ <VirtualHost *:443>
+ ServerName localhost
+ SSLEngine On
+ SSLCertificateKeyFile ssl/apache.pem
+ SSLCertificateFile ssl/apache.pem
+
+ ProxyRequests Off
+ <Proxy *>
+ Order deny,allow
+ Allow from all
+ </Proxy>
+ ProxyPass /ajaxterm/ http://localhost:8022/
+ ProxyPassReverse /ajaxterm/ http://localhost:8022/
+ </VirtualHost>
+}}}
+
+ * Using GET HTTP requests seems to speed up ajaxterm; just click on GET in the
+   interface, but be warned that your keystrokes might be logged (by apache or
+   any proxy). I usually enable it after logging in.
+
+ * Ajaxterm commandline usage:
+
+{{{
+usage: ajaxterm.py [options]
+
+options:
+ -h, --help show this help message and exit
+ -pPORT, --port=PORT Set the TCP port (default: 8022)
+ -cCMD, --command=CMD set the command (default: /bin/login or ssh localhost)
+ -l, --log log requests to stderr (default: quiet mode)
+ -d, --daemon run as daemon in the background
+ -PPIDFILE, --pidfile=PIDFILE
+ set the pidfile (default: /var/run/ajaxterm.pid)
+ -iINDEX_FILE, --index=INDEX_FILE
+ default index file (default: ajaxterm.html)
+ -uUID, --uid=UID Set the daemon's user id
+}}}
+
+ * Ajaxterm was first written as a demo for qweb (my web framework), but it
+   actually doesn't use many features of qweb.
+
+ * Compared to anyterm:
+   * There are no partial updates; ajaxterm updates either the whole screen or
+     nothing. That makes the code simpler, and I also think it's faster. HTTP
+     replies are always gzencoded. When used in 80x25 mode, almost all of
+     them are below 1500 bytes (the size of an ethernet frame) and we just
+     replace the screen with the reply (no javascript string handling).
+   * Ajaxterm polls the server for updates with an exponentially growing
+     timeout when the screen hasn't changed. The timeout is also reset as
+     soon as a key is pressed. Anyterm blocks on a pending request and uses a
+     parallel connection for keypresses. The anyterm approach is better
+     when there aren't any keypresses.
+
+ * Ajaxterm files are released in the Public Domain (except [http://sarissa.sourceforge.net/doc/ sarissa*], which are LGPL).
+
+== TODO ==
+
+ * insert mode ESC [ 4 h
+ * change size x,y from gui (sending signal)
+ * vt102 graphic codepage
+ * use innerHTML or prototype instead of sarissa
+
diff --git a/tools/ajaxterm/ajaxterm.1 b/tools/ajaxterm/ajaxterm.1
new file mode 100644
index 000000000..46f2acb33
--- /dev/null
+++ b/tools/ajaxterm/ajaxterm.1
@@ -0,0 +1,35 @@
+.TH ajaxterm "1" "May 2006" "ajaxterm 0.5" "User commands"
+.SH NAME
+ajaxterm \- Web based terminal written in python
+
+.SH DESCRIPTION
+\fBajaxterm\fR is a web-based terminal written in python and some AJAX
+javascript for the client side.
+It can be used from almost any web browser and even works through firewalls.
+
+.SH USAGE
+\fBajaxterm.py\fR [options]
+
+.SH OPTIONS
+A summary of the options supported by \fBajaxterm\fR is included below.
+ \fB-h, --help\fR show this help message and exit
+ \fB-pPORT, --port=PORT\fR Set the TCP port (default: 8022)
+ \fB-cCMD, --command=CMD\fR set the command (default: /bin/login or ssh localhost)
+ \fB-l, --log\fR log requests to stderr (default: quiet mode)
+
+.SH AUTHOR
+Antony Lesuisse <al@udev.org>
+
+This manual page was written for the Debian system by
+Julien Valroff <julien@kirya.net> (but may be used by others).
+
+.SH "REPORTING BUGS"
+Report any bugs to the author: Antony Lesuisse <al@udev.org>
+
+.SH COPYRIGHT
+Copyright Antony Lesuisse <al@udev.org>
+
+.SH SEE ALSO
+- \fBajaxterm\fR wiki page: http://antony.lesuisse.org/qweb/trac/wiki/AjaxTerm
+.br
+- \fBajaxterm\fR forum: http://antony.lesuisse.org/qweb/forum/viewforum.php?id=2
diff --git a/tools/ajaxterm/ajaxterm.css b/tools/ajaxterm/ajaxterm.css
new file mode 100644
index 000000000..b9a5f8771
--- /dev/null
+++ b/tools/ajaxterm/ajaxterm.css
@@ -0,0 +1,64 @@
+pre.stat {
+ margin: 0px;
+ padding: 4px;
+ display: block;
+ font-family: monospace;
+ white-space: pre;
+ background-color: black;
+ border-top: 1px solid black;
+ color: white;
+}
+pre.stat span {
+ padding: 0px;
+}
+pre.stat .on {
+ background-color: #080;
+ font-weight: bold;
+ color: white;
+ cursor: pointer;
+}
+pre.stat .off {
+ background-color: #888;
+ font-weight: bold;
+ color: white;
+ cursor: pointer;
+}
+pre.term {
+ margin: 0px;
+ padding: 4px;
+ display: block;
+ font-family: monospace;
+ white-space: pre;
+ background-color: black;
+ border-top: 1px solid white;
+ color: #eee;
+}
+pre.term span.f0 { color: #000; }
+pre.term span.f1 { color: #b00; }
+pre.term span.f2 { color: #0b0; }
+pre.term span.f3 { color: #bb0; }
+pre.term span.f4 { color: #00b; }
+pre.term span.f5 { color: #b0b; }
+pre.term span.f6 { color: #0bb; }
+pre.term span.f7 { color: #bbb; }
+pre.term span.f8 { color: #666; }
+pre.term span.f9 { color: #f00; }
+pre.term span.f10 { color: #0f0; }
+pre.term span.f11 { color: #ff0; }
+pre.term span.f12 { color: #00f; }
+pre.term span.f13 { color: #f0f; }
+pre.term span.f14 { color: #0ff; }
+pre.term span.f15 { color: #fff; }
+pre.term span.b0 { background-color: #000; }
+pre.term span.b1 { background-color: #b00; }
+pre.term span.b2 { background-color: #0b0; }
+pre.term span.b3 { background-color: #bb0; }
+pre.term span.b4 { background-color: #00b; }
+pre.term span.b5 { background-color: #b0b; }
+pre.term span.b6 { background-color: #0bb; }
+pre.term span.b7 { background-color: #bbb; }
+
+body { background-color: #888; }
+#term {
+ float: left;
+}
diff --git a/tools/ajaxterm/ajaxterm.html b/tools/ajaxterm/ajaxterm.html
new file mode 100644
index 000000000..7fdef5e94
--- /dev/null
+++ b/tools/ajaxterm/ajaxterm.html
@@ -0,0 +1,25 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html>
+<head>
+ <title>Ajaxterm</title>
+ <meta http-equiv="content-type" content="text/html; charset=UTF-8"/>
+ <link rel="stylesheet" type="text/css" href="ajaxterm.css"/>
+ <script type="text/javascript" src="sarissa.js"></script>
+ <script type="text/javascript" src="sarissa_dhtml.js"></script>
+ <script type="text/javascript" src="ajaxterm.js"></script>
+ <script type="text/javascript">
+ /*
+    ajaxterm.py creates a random session_id to demultiplex multiple connections
+    and to add a layer of security; in its original upstream form, ajaxterm
+    accepted any session_id and was susceptible to an easy exploit.
+ */
+ SESSION_ID = '$session_id';
+ window.onload=function() {
+ t=ajaxterm.Terminal("term",80,25);
+ };
+ </script>
+</head>
+<body>
+<div id="term"></div>
+</body>
+</html>
diff --git a/tools/ajaxterm/ajaxterm.js b/tools/ajaxterm/ajaxterm.js
new file mode 100644
index 000000000..32b401930
--- /dev/null
+++ b/tools/ajaxterm/ajaxterm.js
@@ -0,0 +1,279 @@
+ajaxterm={};
+ajaxterm.Terminal_ctor=function(id,width,height) {
+ var ie=0;
+ if(window.ActiveXObject)
+ ie=1;
+ var sid=""+SESSION_ID;
+ var query0="s="+sid+"&w="+width+"&h="+height;
+ var query1=query0+"&c=1&k=";
+ var buf="";
+ var timeout;
+ var error_timeout;
+ var keybuf=[];
+ var sending=0;
+ var rmax=1;
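+        // rmax is the polling interval in milliseconds: update() resets it to
+        // 100 when the server sent a changed screen and doubles it (capped at
+        // 2000) when the screen is unchanged, so an idle terminal polls less often.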
+
+ var div=document.getElementById(id);
+ var dstat=document.createElement('pre');
+ var sled=document.createElement('span');
+ var opt_get=document.createElement('a');
+ var opt_color=document.createElement('a');
+ var opt_paste=document.createElement('a');
+ var sdebug=document.createElement('span');
+ var dterm=document.createElement('div');
+
+ function debug(s) {
+ sdebug.innerHTML=s;
+ }
+ function error() {
+ sled.className='off';
+ debug("Connection lost timeout ts:"+((new Date).getTime()));
+ }
+ function opt_add(opt,name) {
+ opt.className='off';
+ opt.innerHTML=' '+name+' ';
+ dstat.appendChild(opt);
+ dstat.appendChild(document.createTextNode(' '));
+ }
+ function do_get(event) {
+ opt_get.className=(opt_get.className=='off')?'on':'off';
+ debug('GET '+opt_get.className);
+ }
+ function do_color(event) {
+ var o=opt_color.className=(opt_color.className=='off')?'on':'off';
+ if(o=='on')
+ query1=query0+"&c=1&k=";
+ else
+ query1=query0+"&k=";
+ debug('Color '+opt_color.className);
+ }
+ function mozilla_clipboard() {
+ // mozilla sucks
+ try {
+ netscape.security.PrivilegeManager.enablePrivilege("UniversalXPConnect");
+ } catch (err) {
+ debug('Access denied, <a href="http://kb.mozillazine.org/Granting_JavaScript_access_to_the_clipboard" target="_blank">more info</a>');
+ return undefined;
+ }
+ var clip = Components.classes["@mozilla.org/widget/clipboard;1"].createInstance(Components.interfaces.nsIClipboard);
+ var trans = Components.classes["@mozilla.org/widget/transferable;1"].createInstance(Components.interfaces.nsITransferable);
+ if (!clip || !trans) {
+ return undefined;
+ }
+ trans.addDataFlavor("text/unicode");
+ clip.getData(trans,clip.kGlobalClipboard);
+ var str=new Object();
+ var strLength=new Object();
+ try {
+ trans.getTransferData("text/unicode",str,strLength);
+ } catch(err) {
+ return "";
+ }
+ if (str) {
+ str=str.value.QueryInterface(Components.interfaces.nsISupportsString);
+ }
+ if (str) {
+ return str.data.substring(0,strLength.value / 2);
+ } else {
+ return "";
+ }
+ }
+ function do_paste(event) {
+ var p=undefined;
+ if (window.clipboardData) {
+ p=window.clipboardData.getData("Text");
+ } else if(window.netscape) {
+ p=mozilla_clipboard();
+ }
+ if (p) {
+ debug('Pasted');
+ queue(encodeURIComponent(p));
+ } else {
+ }
+ }
+ function update() {
+// debug("ts: "+((new Date).getTime())+" rmax:"+rmax);
+ if(sending==0) {
+ sending=1;
+ sled.className='on';
+ var r=new XMLHttpRequest();
+ var send="";
+ while(keybuf.length>0) {
+ send+=keybuf.pop();
+ }
+ var query=query1+send;
+ if(opt_get.className=='on') {
+ r.open("GET","u?"+query,true);
+ if(ie) {
+ r.setRequestHeader("If-Modified-Since", "Sat, 1 Jan 2000 00:00:00 GMT");
+ }
+ } else {
+ r.open("POST","u",true);
+ }
+ r.setRequestHeader('Content-Type','application/x-www-form-urlencoded');
+ r.onreadystatechange = function () {
+// debug("xhr:"+((new Date).getTime())+" state:"+r.readyState+" status:"+r.status+" statusText:"+r.statusText);
+ if (r.readyState==4) {
+ if(r.status==200) {
+ window.clearTimeout(error_timeout);
+ de=r.responseXML.documentElement;
+ if(de.tagName=="pre") {
+ if(ie) {
+ Sarissa.updateContentFromNode(de, dterm);
+ } else {
+ Sarissa.updateContentFromNode(de, dterm);
+// old=div.firstChild;
+// div.replaceChild(de,old);
+ }
+ rmax=100;
+ } else {
+ rmax*=2;
+ if(rmax>2000)
+ rmax=2000;
+ }
+ sending=0;
+ sled.className='off';
+ timeout=window.setTimeout(update,rmax);
+ } else {
+ debug("Connection error status:"+r.status);
+ }
+ }
+ }
+ error_timeout=window.setTimeout(error,5000);
+ if(opt_get.className=='on') {
+ r.send(null);
+ } else {
+ r.send(query);
+ }
+ }
+ }
+ function queue(s) {
+ keybuf.unshift(s);
+ if(sending==0) {
+ window.clearTimeout(timeout);
+ timeout=window.setTimeout(update,1);
+ }
+ }
+ function keypress(ev) {
+ if (!ev) var ev=window.event;
+// s="kp keyCode="+ev.keyCode+" which="+ev.which+" shiftKey="+ev.shiftKey+" ctrlKey="+ev.ctrlKey+" altKey="+ev.altKey;
+// debug(s);
+// return false;
+// else { if (!ev.ctrlKey || ev.keyCode==17) { return; }
+ var kc;
+ var k="";
+ if (ev.keyCode)
+ kc=ev.keyCode;
+ if (ev.which)
+ kc=ev.which;
+ if (ev.altKey) {
+ if (kc>=65 && kc<=90)
+ kc+=32;
+ if (kc>=97 && kc<=122) {
+ k=String.fromCharCode(27)+String.fromCharCode(kc);
+ }
+ } else if (ev.ctrlKey) {
+ if (kc>=65 && kc<=90) k=String.fromCharCode(kc-64); // Ctrl-A..Z
+ else if (kc>=97 && kc<=122) k=String.fromCharCode(kc-96); // Ctrl-A..Z
+ else if (kc==54) k=String.fromCharCode(30); // Ctrl-^
+ else if (kc==109) k=String.fromCharCode(31); // Ctrl-_
+ else if (kc==219) k=String.fromCharCode(27); // Ctrl-[
+ else if (kc==220) k=String.fromCharCode(28); // Ctrl-\
+ else if (kc==221) k=String.fromCharCode(29); // Ctrl-]
+ else if (kc==219) k=String.fromCharCode(29); // Ctrl-]
+ else if (kc==219) k=String.fromCharCode(0); // Ctrl-@
+ } else if (ev.which==0) {
+ if (kc==9) k=String.fromCharCode(9); // Tab
+ else if (kc==8) k=String.fromCharCode(127); // Backspace
+ else if (kc==27) k=String.fromCharCode(27); // Escape
+ else {
+ if (kc==33) k="[5~"; // PgUp
+ else if (kc==34) k="[6~"; // PgDn
+ else if (kc==35) k="[4~"; // End
+ else if (kc==36) k="[1~"; // Home
+ else if (kc==37) k="[D"; // Left
+ else if (kc==38) k="[A"; // Up
+ else if (kc==39) k="[C"; // Right
+ else if (kc==40) k="[B"; // Down
+ else if (kc==45) k="[2~"; // Ins
+ else if (kc==46) k="[3~"; // Del
+ else if (kc==112) k="[[A"; // F1
+ else if (kc==113) k="[[B"; // F2
+ else if (kc==114) k="[[C"; // F3
+ else if (kc==115) k="[[D"; // F4
+ else if (kc==116) k="[[E"; // F5
+ else if (kc==117) k="[17~"; // F6
+ else if (kc==118) k="[18~"; // F7
+ else if (kc==119) k="[19~"; // F8
+ else if (kc==120) k="[20~"; // F9
+ else if (kc==121) k="[21~"; // F10
+ else if (kc==122) k="[23~"; // F11
+ else if (kc==123) k="[24~"; // F12
+ if (k.length) {
+ k=String.fromCharCode(27)+k;
+ }
+ }
+ } else {
+ if (kc==8)
+ k=String.fromCharCode(127); // Backspace
+ else
+ k=String.fromCharCode(kc);
+ }
+ if(k.length) {
+// queue(encodeURIComponent(k));
+ if(k=="+") {
+ queue("%2B");
+ } else {
+ queue(escape(k));
+ }
+ }
+ ev.cancelBubble=true;
+ if (ev.stopPropagation) ev.stopPropagation();
+ if (ev.preventDefault) ev.preventDefault();
+ return false;
+ }
+ function keydown(ev) {
+ if (!ev) var ev=window.event;
+ if (ie) {
+// s="kd keyCode="+ev.keyCode+" which="+ev.which+" shiftKey="+ev.shiftKey+" ctrlKey="+ev.ctrlKey+" altKey="+ev.altKey;
+// debug(s);
+ o={9:1,8:1,27:1,33:1,34:1,35:1,36:1,37:1,38:1,39:1,40:1,45:1,46:1,112:1,
+ 113:1,114:1,115:1,116:1,117:1,118:1,119:1,120:1,121:1,122:1,123:1};
+ if (o[ev.keyCode] || ev.ctrlKey || ev.altKey) {
+ ev.which=0;
+ return keypress(ev);
+ }
+ }
+ }
+ function init() {
+ sled.appendChild(document.createTextNode('\xb7'));
+ sled.className='off';
+ dstat.appendChild(sled);
+ dstat.appendChild(document.createTextNode(' '));
+ opt_add(opt_color,'Colors');
+ opt_color.className='on';
+ opt_add(opt_get,'GET');
+ opt_add(opt_paste,'Paste');
+ dstat.appendChild(sdebug);
+ dstat.className='stat';
+ div.appendChild(dstat);
+ div.appendChild(dterm);
+ if(opt_color.addEventListener) {
+ opt_get.addEventListener('click',do_get,true);
+ opt_color.addEventListener('click',do_color,true);
+ opt_paste.addEventListener('click',do_paste,true);
+ } else {
+ opt_get.attachEvent("onclick", do_get);
+ opt_color.attachEvent("onclick", do_color);
+ opt_paste.attachEvent("onclick", do_paste);
+ }
+ document.onkeypress=keypress;
+ document.onkeydown=keydown;
+ timeout=window.setTimeout(update,100);
+ }
+ init();
+}
+ajaxterm.Terminal=function(id,width,height) {
+ return new this.Terminal_ctor(id,width,height);
+}
+
diff --git a/tools/ajaxterm/ajaxterm.py b/tools/ajaxterm/ajaxterm.py
new file mode 100755
index 000000000..bf27b264a
--- /dev/null
+++ b/tools/ajaxterm/ajaxterm.py
@@ -0,0 +1,586 @@
+#!/usr/bin/env python
+
+""" Ajaxterm """
+
+import array,cgi,fcntl,glob,mimetypes,optparse,os,pty,random,re,signal,select,sys,threading,time,termios,struct,pwd
+
+os.chdir(os.path.normpath(os.path.dirname(__file__)))
+# Optional: Add QWeb in sys path
+sys.path[0:0]=glob.glob('../../python')
+
+import qweb
+import string, subprocess, uuid
+
+global g_server
+TIMEOUT=300
+
+class Terminal:
+ def __init__(self,width=80,height=24):
+ self.width=width
+ self.height=height
+ self.init()
+ self.reset()
+ def init(self):
+ self.esc_seq={
+ "\x00": None,
+ "\x05": self.esc_da,
+ "\x07": None,
+ "\x08": self.esc_0x08,
+ "\x09": self.esc_0x09,
+ "\x0a": self.esc_0x0a,
+ "\x0b": self.esc_0x0a,
+ "\x0c": self.esc_0x0a,
+ "\x0d": self.esc_0x0d,
+ "\x0e": None,
+ "\x0f": None,
+ "\x1b#8": None,
+ "\x1b=": None,
+ "\x1b>": None,
+ "\x1b(0": None,
+ "\x1b(A": None,
+ "\x1b(B": None,
+ "\x1b[c": self.esc_da,
+ "\x1b[0c": self.esc_da,
+ "\x1b]R": None,
+ "\x1b7": self.esc_save,
+ "\x1b8": self.esc_restore,
+ "\x1bD": None,
+ "\x1bE": None,
+ "\x1bH": None,
+ "\x1bM": self.esc_ri,
+ "\x1bN": None,
+ "\x1bO": None,
+ "\x1bZ": self.esc_da,
+ "\x1ba": None,
+ "\x1bc": self.reset,
+ "\x1bn": None,
+ "\x1bo": None,
+ }
+ for k,v in self.esc_seq.items():
+ if v==None:
+ self.esc_seq[k]=self.esc_ignore
+ # regex
+ d={
+ r'\[\??([0-9;]*)([@ABCDEFGHJKLMPXacdefghlmnqrstu`])' : self.csi_dispatch,
+ r'\]([^\x07]+)\x07' : self.esc_ignore,
+ }
+ self.esc_re=[]
+ for k,v in d.items():
+ self.esc_re.append((re.compile('\x1b'+k),v))
+ # define csi sequences
+ self.csi_seq={
+ '@': (self.csi_at,[1]),
+ '`': (self.csi_G,[1]),
+ 'J': (self.csi_J,[0]),
+ 'K': (self.csi_K,[0]),
+ }
+ for i in [i[4] for i in dir(self) if i.startswith('csi_') and len(i)==5]:
+ if not self.csi_seq.has_key(i):
+ self.csi_seq[i]=(getattr(self,'csi_'+i),[1])
+ # Init 0-256 to latin1 and html translation table
+ self.trl1=""
+ for i in range(256):
+ if i<32:
+ self.trl1+=" "
+ elif i<127 or i>160:
+ self.trl1+=chr(i)
+ else:
+ self.trl1+="?"
+ self.trhtml=""
+ for i in range(256):
+ if i==0x0a or (i>32 and i<127) or i>160:
+ self.trhtml+=chr(i)
+ elif i<=32:
+ self.trhtml+="\xa0"
+ else:
+ self.trhtml+="?"
+ def reset(self,s=""):
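+        # Each screen cell is packed into one int as (bg<<16)|(fg<<8)|char
+        # (the fg byte also carries the bold bit), so 0x000700 is an empty
+        # cell with default attributes: fg=7, bg=0.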
+ self.scr=array.array('i',[0x000700]*(self.width*self.height))
+ self.st=0
+ self.sb=self.height-1
+ self.cx_bak=self.cx=0
+ self.cy_bak=self.cy=0
+ self.cl=0
+ self.sgr=0x000700
+ self.buf=""
+ self.outbuf=""
+ self.last_html=""
+ def peek(self,y1,x1,y2,x2):
+ return self.scr[self.width*y1+x1:self.width*y2+x2]
+ def poke(self,y,x,s):
+ pos=self.width*y+x
+ self.scr[pos:pos+len(s)]=s
+ def zero(self,y1,x1,y2,x2):
+ w=self.width*(y2-y1)+x2-x1+1
+ z=array.array('i',[0x000700]*w)
+ self.scr[self.width*y1+x1:self.width*y2+x2+1]=z
+ def scroll_up(self,y1,y2):
+ self.poke(y1,0,self.peek(y1+1,0,y2,self.width))
+ self.zero(y2,0,y2,self.width-1)
+ def scroll_down(self,y1,y2):
+ self.poke(y1+1,0,self.peek(y1,0,y2-1,self.width))
+ self.zero(y1,0,y1,self.width-1)
+ def scroll_right(self,y,x):
+ self.poke(y,x+1,self.peek(y,x,y,self.width))
+ self.zero(y,x,y,x)
+ def cursor_down(self):
+ if self.cy>=self.st and self.cy<=self.sb:
+ self.cl=0
+ q,r=divmod(self.cy+1,self.sb+1)
+ if q:
+ self.scroll_up(self.st,self.sb)
+ self.cy=self.sb
+ else:
+ self.cy=r
+ def cursor_right(self):
+ q,r=divmod(self.cx+1,self.width)
+ if q:
+ self.cl=1
+ else:
+ self.cx=r
+ def echo(self,c):
+ if self.cl:
+ self.cursor_down()
+ self.cx=0
+ self.scr[(self.cy*self.width)+self.cx]=self.sgr|ord(c)
+ self.cursor_right()
+ def esc_0x08(self,s):
+ self.cx=max(0,self.cx-1)
+ def esc_0x09(self,s):
+ x=self.cx+8
+ q,r=divmod(x,8)
+ self.cx=(q*8)%self.width
+ def esc_0x0a(self,s):
+ self.cursor_down()
+ def esc_0x0d(self,s):
+ self.cl=0
+ self.cx=0
+ def esc_save(self,s):
+ self.cx_bak=self.cx
+ self.cy_bak=self.cy
+ def esc_restore(self,s):
+ self.cx=self.cx_bak
+ self.cy=self.cy_bak
+ self.cl=0
+ def esc_da(self,s):
+ self.outbuf="\x1b[?6c"
+ def esc_ri(self,s):
+ self.cy=max(self.st,self.cy-1)
+ if self.cy==self.st:
+ self.scroll_down(self.st,self.sb)
+ def esc_ignore(self,*s):
+ pass
+# print "term:ignore: %s"%repr(s)
+ def csi_dispatch(self,seq,mo):
+ # CSI sequences
+ s=mo.group(1)
+ c=mo.group(2)
+ f=self.csi_seq.get(c,None)
+ if f:
+ try:
+ l=[min(int(i),1024) for i in s.split(';') if len(i)<4]
+ except ValueError:
+ l=[]
+ if len(l)==0:
+ l=f[1]
+ f[0](l)
+# else:
+# print 'csi ignore',c,l
+ def csi_at(self,l):
+ for i in range(l[0]):
+ self.scroll_right(self.cy,self.cx)
+ def csi_A(self,l):
+ self.cy=max(self.st,self.cy-l[0])
+ def csi_B(self,l):
+ self.cy=min(self.sb,self.cy+l[0])
+ def csi_C(self,l):
+ self.cx=min(self.width-1,self.cx+l[0])
+ self.cl=0
+ def csi_D(self,l):
+ self.cx=max(0,self.cx-l[0])
+ self.cl=0
+ def csi_E(self,l):
+ self.csi_B(l)
+ self.cx=0
+ self.cl=0
+ def csi_F(self,l):
+ self.csi_A(l)
+ self.cx=0
+ self.cl=0
+ def csi_G(self,l):
+ self.cx=min(self.width,l[0])-1
+ def csi_H(self,l):
+ if len(l)<2: l=[1,1]
+ self.cx=min(self.width,l[1])-1
+ self.cy=min(self.height,l[0])-1
+ self.cl=0
+ def csi_J(self,l):
+ if l[0]==0:
+ self.zero(self.cy,self.cx,self.height-1,self.width-1)
+ elif l[0]==1:
+ self.zero(0,0,self.cy,self.cx)
+ elif l[0]==2:
+ self.zero(0,0,self.height-1,self.width-1)
+ def csi_K(self,l):
+ if l[0]==0:
+ self.zero(self.cy,self.cx,self.cy,self.width-1)
+ elif l[0]==1:
+ self.zero(self.cy,0,self.cy,self.cx)
+ elif l[0]==2:
+ self.zero(self.cy,0,self.cy,self.width-1)
+ def csi_L(self,l):
+ for i in range(l[0]):
+ if self.cy<self.sb:
+ self.scroll_down(self.cy,self.sb)
+ def csi_M(self,l):
+ if self.cy>=self.st and self.cy<=self.sb:
+ for i in range(l[0]):
+ self.scroll_up(self.cy,self.sb)
+ def csi_P(self,l):
+ w,cx,cy=self.width,self.cx,self.cy
+ end=self.peek(cy,cx,cy,w)
+ self.csi_K([0])
+ self.poke(cy,cx,end[l[0]:])
+ def csi_X(self,l):
+ self.zero(self.cy,self.cx,self.cy,self.cx+l[0])
+ def csi_a(self,l):
+ self.csi_C(l)
+ def csi_c(self,l):
+ #'\x1b[?0c' 0-8 cursor size
+ pass
+ def csi_d(self,l):
+ self.cy=min(self.height,l[0])-1
+ def csi_e(self,l):
+ self.csi_B(l)
+ def csi_f(self,l):
+ self.csi_H(l)
+ def csi_h(self,l):
+ if l[0]==4:
+ pass
+# print "insert on"
+ def csi_l(self,l):
+ if l[0]==4:
+ pass
+# print "insert off"
+ def csi_m(self,l):
+ for i in l:
+ if i==0 or i==39 or i==49 or i==27:
+ self.sgr=0x000700
+ elif i==1:
+ self.sgr=(self.sgr|0x000800)
+ elif i==7:
+ self.sgr=0x070000
+ elif i>=30 and i<=37:
+ c=i-30
+ self.sgr=(self.sgr&0xff08ff)|(c<<8)
+ elif i>=40 and i<=47:
+ c=i-40
+ self.sgr=(self.sgr&0x00ffff)|(c<<16)
+# else:
+# print "CSI sgr ignore",l,i
+# print 'sgr: %r %x'%(l,self.sgr)
+ def csi_r(self,l):
+ if len(l)<2: l=[0,self.height]
+ self.st=min(self.height-1,l[0]-1)
+ self.sb=min(self.height-1,l[1]-1)
+ self.sb=max(self.st,self.sb)
+ def csi_s(self,l):
+ self.esc_save(0)
+ def csi_u(self,l):
+ self.esc_restore(0)
+ def escape(self):
+ e=self.buf
+ if len(e)>32:
+# print "error %r"%e
+ self.buf=""
+ elif e in self.esc_seq:
+ self.esc_seq[e](e)
+ self.buf=""
+ else:
+ for r,f in self.esc_re:
+ mo=r.match(e)
+ if mo:
+ f(e,mo)
+ self.buf=""
+ break
+# if self.buf=='': print "ESC %r\n"%e
+ def write(self,s):
+ for i in s:
+ if len(self.buf) or (i in self.esc_seq):
+ self.buf+=i
+ self.escape()
+ elif i == '\x1b':
+ self.buf+=i
+ else:
+ self.echo(i)
+ def read(self):
+ b=self.outbuf
+ self.outbuf=""
+ return b
+ def dump(self):
+ r=''
+ for i in self.scr:
+ r+=chr(i&255)
+ return r
+ def dumplatin1(self):
+ return self.dump().translate(self.trl1)
+ def dumphtml(self,color=1):
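+        # Render the whole screen as one small XML document containing a
+        # <pre class="term"> for the browser to swap in; if nothing changed
+        # since the last call, return a tiny <idem/> document instead.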
+ h=self.height
+ w=self.width
+ r=""
+ span=""
+ span_bg,span_fg=-1,-1
+ for i in range(h*w):
+ q,c=divmod(self.scr[i],256)
+ if color:
+ bg,fg=divmod(q,256)
+ else:
+ bg,fg=0,7
+ if i==self.cy*w+self.cx:
+ bg,fg=1,7
+ if (bg!=span_bg or fg!=span_fg or i==h*w-1):
+ if len(span):
+ r+='<span class="f%d b%d">%s</span>'%(span_fg,span_bg,cgi.escape(span.translate(self.trhtml)))
+ span=""
+ span_bg,span_fg=bg,fg
+ span+=chr(c)
+ if i%w==w-1:
+ span+='\n'
+ r='<?xml version="1.0" encoding="ISO-8859-1"?><pre class="term">%s</pre>'%r
+ if self.last_html==r:
+ return '<?xml version="1.0"?><idem></idem>'
+ else:
+ self.last_html=r
+# print self
+ return r
+ def __repr__(self):
+ d=self.dumplatin1()
+ r=""
+ for i in range(self.height):
+ r+="|%s|\n"%d[self.width*i:self.width*(i+1)]
+ return r
+
+class SynchronizedMethod:
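+    # Thin wrapper that serializes every call to 'orig' behind a shared lock;
+    # Multiplex wraps its own methods with it below so they can be called
+    # safely from both the select() loop thread and the WSGI request handlers.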
+ def __init__(self,lock,orig):
+ self.lock=lock
+ self.orig=orig
+ def __call__(self,*l):
+ self.lock.acquire()
+ r=self.orig(*l)
+ self.lock.release()
+ return r
+
+class Multiplex:
+ def __init__(self,cmd=None):
+ signal.signal(signal.SIGCHLD, signal.SIG_IGN)
+ self.cmd=cmd
+ self.proc={}
+ self.lock=threading.RLock()
+ self.thread=threading.Thread(target=self.loop)
+ self.alive=1
+ self.lastActivity=time.time()
+ # synchronize methods
+ for name in ['create','fds','proc_read','proc_write','dump','die','run']:
+ orig=getattr(self,name)
+ setattr(self,name,SynchronizedMethod(self.lock,orig))
+ self.thread.start()
+ def create(self,w=80,h=25):
+ pid,fd=pty.fork()
+ if pid==0:
+ try:
+ fdl=[int(i) for i in os.listdir('/proc/self/fd')]
+ except OSError:
+ fdl=range(256)
+ for i in [i for i in fdl if i>2]:
+ try:
+ os.close(i)
+ except OSError:
+ pass
+ if self.cmd:
+ cmd=['/bin/sh','-c',self.cmd]
+ elif os.getuid()==0:
+ cmd=['/bin/login']
+ else:
+ sys.stdout.write("Login: ")
+ login=sys.stdin.readline().strip()
+ if re.match('^[0-9A-Za-z-_. ]+$',login):
+ cmd=['ssh']
+ cmd+=['-oPreferredAuthentications=keyboard-interactive,password']
+ cmd+=['-oNoHostAuthenticationForLocalhost=yes']
+ cmd+=['-oLogLevel=FATAL']
+ cmd+=['-F/dev/null','-l',login,'localhost']
+ else:
+ os._exit(0)
+ env={}
+ env["COLUMNS"]=str(w)
+ env["LINES"]=str(h)
+ env["TERM"]="linux"
+ env["PATH"]=os.environ['PATH']
+ os.execvpe(cmd[0],cmd,env)
+ else:
+ fcntl.fcntl(fd, fcntl.F_SETFL, os.O_NONBLOCK)
+ # python bug http://python.org/sf/1112949 on amd64
+ fcntl.ioctl(fd, struct.unpack('i',struct.pack('I',termios.TIOCSWINSZ))[0], struct.pack("HHHH",h,w,0,0))
+ self.proc[fd]={'pid':pid,'term':Terminal(w,h),'buf':'','time':time.time()}
+ return fd
+ def die(self):
+ self.alive=0
+ def run(self):
+ return self.alive
+ def fds(self):
+ return self.proc.keys()
+ def proc_kill(self,fd):
+ if fd in self.proc:
+ self.proc[fd]['time']=0
+ t=time.time()
+ for i in self.proc.keys():
+ t0=self.proc[i]['time']
+ if (t-t0)>TIMEOUT:
+ try:
+ os.close(i)
+ os.kill(self.proc[i]['pid'],signal.SIGTERM)
+ except (IOError,OSError):
+ pass
+ del self.proc[i]
+ def proc_read(self,fd):
+ try:
+ t=self.proc[fd]['term']
+ t.write(os.read(fd,65536))
+ reply=t.read()
+ if reply:
+ os.write(fd,reply)
+ self.proc[fd]['time']=time.time()
+ except (KeyError,IOError,OSError):
+ self.proc_kill(fd)
+ def proc_write(self,fd,s):
+ try:
+ os.write(fd,s)
+ except (IOError,OSError):
+ self.proc_kill(fd)
+ def dump(self,fd,color=1):
+ try:
+ return self.proc[fd]['term'].dumphtml(color)
+ except KeyError:
+ return False
+ def loop(self):
+ while self.run():
+ fds=self.fds()
+ i,o,e=select.select(fds, [], [], 1.0)
+ if time.time() - self.lastActivity > TIMEOUT:
+ global g_server
+ g_server.shutdown()
+ for fd in i:
+ self.proc_read(fd)
+ if len(i):
+ time.sleep(0.002)
+ for i in self.proc.keys():
+ try:
+ os.close(i)
+ os.kill(self.proc[i]['pid'],signal.SIGTERM)
+ except (IOError,OSError):
+ pass
+
+class AjaxTerm:
+ def __init__(self,cmd=None,index_file='ajaxterm.html',token=None):
+ self.files={}
+ self.token=token
+ for i in ['css','html','js']:
+ for j in glob.glob('*.%s'%i):
+ self.files[j]=file(j).read()
+ self.files['index']=file(index_file).read()
+ self.mime = mimetypes.types_map.copy()
+ self.mime['.html']= 'text/html; charset=UTF-8'
+ self.multi = Multiplex(cmd)
+ self.session = {}
+ def __call__(self, environ, start_response):
+ req = qweb.QWebRequest(environ, start_response,session=None)
+ if req.PATH_INFO.endswith('/u'):
+ s=req.REQUEST["s"]
+ k=req.REQUEST["k"]
+ c=req.REQUEST["c"]
+ w=req.REQUEST.int("w")
+ h=req.REQUEST.int("h")
+ if s in self.session:
+ term=self.session[s]
+ else:
+ raise Exception('Not Authorized')
+ # The original code below was insecure, because it allowed unauthorized sessions to be created
+ # if not (w>2 and w<256 and h>2 and h<100):
+ # w,h=80,25
+ # term=self.session[s]=self.multi.create(w,h)
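+            # Sessions are now created only in the token-protected branch
+            # further down: a request carrying the configured token gets a
+            # fresh uuid4 session_id substituted into the index page.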
+ if k:
+ self.multi.proc_write(term,k)
+ time.sleep(0.002)
+ self.multi.lastActivity = time.time();
+ dump=self.multi.dump(term,c)
+ req.response_headers['Content-Type']='text/xml'
+ if isinstance(dump,str):
+ req.write(dump)
+ req.response_gzencode=1
+ else:
+ del self.session[s]
+ req.write('<?xml version="1.0"?><idem></idem>')
+# print "sessions %r"%self.session
+ else:
+ n=os.path.basename(req.PATH_INFO)
+ if n in self.files:
+ req.response_headers['Content-Type'] = self.mime.get(os.path.splitext(n)[1].lower(), 'application/octet-stream')
+ req.write(self.files[n])
+ elif req.REQUEST['token'] == self.token:
+ req.response_headers['Content-Type'] = 'text/html; charset=UTF-8'
+ session_id = str(uuid.uuid4())
+ req.write(string.Template(self.files['index']).substitute(session_id=session_id))
+ term=self.session[session_id]=self.multi.create(80,25)
+ else:
+ raise Exception("Not Authorized")
+ return req
+
+def main():
+ parser = optparse.OptionParser()
+ parser.add_option("-p", "--port", dest="port", default="8022", help="Set the TCP port (default: 8022)")
+    parser.add_option("-c", "--command", dest="cmd", default=None,help="set the command (default: /bin/login or ssh localhost)")
+ parser.add_option("-l", "--log", action="store_true", dest="log",default=0,help="log requests to stderr (default: quiet mode)")
+ parser.add_option("-d", "--daemon", action="store_true", dest="daemon", default=0, help="run as daemon in the background")
+ parser.add_option("-P", "--pidfile",dest="pidfile",default="/var/run/ajaxterm.pid",help="set the pidfile (default: /var/run/ajaxterm.pid)")
+ parser.add_option("-i", "--index", dest="index_file", default="ajaxterm.html",help="default index file (default: ajaxterm.html)")
+ parser.add_option("-u", "--uid", dest="uid", help="Set the daemon's user id")
+ parser.add_option("-t", "--token", dest="token", help="Set authorization token")
+ (o, a) = parser.parse_args()
+ if o.daemon:
+ pid=os.fork()
+ if pid == 0:
+ #os.setsid() ?
+ os.setpgrp()
+ nullin = file('/dev/null', 'r')
+ nullout = file('/dev/null', 'w')
+ os.dup2(nullin.fileno(), sys.stdin.fileno())
+ os.dup2(nullout.fileno(), sys.stdout.fileno())
+ os.dup2(nullout.fileno(), sys.stderr.fileno())
+ if os.getuid()==0 and o.uid:
+ try:
+ os.setuid(int(o.uid))
+ except:
+ os.setuid(pwd.getpwnam(o.uid).pw_uid)
+ else:
+ try:
+ file(o.pidfile,'w+').write(str(pid)+'\n')
+ except:
+ pass
+ print 'AjaxTerm at http://0.0.0.0:%s/ pid: %d' % (o.port,pid)
+ sys.exit(0)
+ else:
+ print 'AjaxTerm at http://0.0.0.0:%s/' % o.port
+ at=AjaxTerm(o.cmd,o.index_file,o.token)
+# f=lambda:os.system('firefox http://localhost:%s/&'%o.port)
+# qweb.qweb_wsgi_autorun(at,ip='localhost',port=int(o.port),threaded=0,log=o.log,callback_ready=None)
+ try:
+ global g_server
+ g_server = qweb.QWebWSGIServer(at,ip='0.0.0.0',port=int(o.port),threaded=0,log=o.log)
+ g_server.serve_forever()
+ except KeyboardInterrupt,e:
+ sys.excepthook(*sys.exc_info())
+ at.multi.die()
+
+if __name__ == '__main__':
+ main()
+
diff --git a/tools/ajaxterm/configure b/tools/ajaxterm/configure
new file mode 100755
index 000000000..45391f484
--- /dev/null
+++ b/tools/ajaxterm/configure
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+
+import optparse,os
+
+parser = optparse.OptionParser()
+parser.add_option("", "--prefix", dest="prefix",default="/usr/local",help="installation prefix (default: /usr/local)")
+parser.add_option("", "--confdir", dest="confdir", default="/etc",help="configuration files directory prefix (default: /etc)")
+parser.add_option("", "--port", dest="port", default="8022", help="set the listening TCP port (default: 8022)")
+parser.add_option("", "--command", dest="cmd", default=None,help="set the command (default: /bin/login or ssh localhost)")
+(o, a) = parser.parse_args()
+
+print "Configuring prefix=",o.prefix," port=",o.port
+
+etc=o.confdir
+port=o.port
+cmd=o.cmd
+bin=os.path.join(o.prefix,"bin")
+lib=os.path.join(o.prefix,"share/ajaxterm")
+man=os.path.join(o.prefix,"share/man/man1")
+
+file("ajaxterm.bin","w").write(file("configure.ajaxterm.bin").read()%locals())
+file("Makefile","w").write(file("configure.makefile").read()%locals())
+
+if os.path.isfile("/etc/gentoo-release"):
+ file("ajaxterm.initd","w").write(file("configure.initd.gentoo").read()%locals())
+elif os.path.isfile("/etc/fedora-release") or os.path.isfile("/etc/redhat-release"):
+ file("ajaxterm.initd","w").write(file("configure.initd.redhat").read()%locals())
+else:
+ file("ajaxterm.initd","w").write(file("configure.initd.debian").read()%locals())
+
+os.system("chmod a+x ajaxterm.bin")
+os.system("chmod a+x ajaxterm.initd")
diff --git a/tools/ajaxterm/configure.ajaxterm.bin b/tools/ajaxterm/configure.ajaxterm.bin
new file mode 100644
index 000000000..4d1f5a98f
--- /dev/null
+++ b/tools/ajaxterm/configure.ajaxterm.bin
@@ -0,0 +1,2 @@
+#!/bin/sh
+PYTHONPATH=%(lib)s exec %(lib)s/ajaxterm.py $@
diff --git a/tools/ajaxterm/configure.initd.debian b/tools/ajaxterm/configure.initd.debian
new file mode 100644
index 000000000..901082707
--- /dev/null
+++ b/tools/ajaxterm/configure.initd.debian
@@ -0,0 +1,33 @@
+#!/bin/sh
+
+PATH=/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin
+DAEMON=%(bin)s/ajaxterm
+PORT=%(port)s
+PIDFILE=/var/run/ajaxterm.pid
+
+[ -x "$DAEMON" ] || exit 0
+
+#. /lib/lsb/init-functions
+
+case "$1" in
+ start)
+ echo "Starting ajaxterm on port $PORT"
+ start-stop-daemon --start --pidfile $PIDFILE --exec $DAEMON -- --daemon --port=$PORT --uid=nobody || return 2
+ ;;
+ stop)
+ echo "Stopping ajaxterm"
+ start-stop-daemon --stop --pidfile $PIDFILE
+ rm -f $PIDFILE
+ ;;
+ restart|force-reload)
+ $0 stop
+ sleep 1
+ $0 start
+ ;;
+ *)
+ echo "Usage: $SCRIPTNAME {start|stop|restart|force-reload}" >&2
+ exit 3
+ ;;
+esac
+
+:
diff --git a/tools/ajaxterm/configure.initd.gentoo b/tools/ajaxterm/configure.initd.gentoo
new file mode 100644
index 000000000..ac28ef0b6
--- /dev/null
+++ b/tools/ajaxterm/configure.initd.gentoo
@@ -0,0 +1,27 @@
+#!/sbin/runscript
+
+# AjaxTerm Gentoo script, 08 May 2006 Mark Gillespie
+
+DAEMON=%(bin)s/ajaxterm
+PORT=%(port)s
+PIDFILE=/var/run/ajaxterm.pid
+
+depend()
+{
+ need net
+}
+
+start()
+{
+ ebegin "Starting AjaxTerm on port $PORT"
+ start-stop-daemon --start --pidfile $PIDFILE --exec $DAEMON -- --daemon --port=$PORT --uid=nobody
+ eend $?
+}
+
+stop()
+{
+ ebegin "Stopping AjaxTerm"
+ start-stop-daemon --stop --pidfile $PIDFILE
+ rm -f $PIDFILE
+ eend $?
+}
diff --git a/tools/ajaxterm/configure.initd.redhat b/tools/ajaxterm/configure.initd.redhat
new file mode 100644
index 000000000..5c9788574
--- /dev/null
+++ b/tools/ajaxterm/configure.initd.redhat
@@ -0,0 +1,75 @@
+#
+# ajaxterm Startup script for ajaxterm
+#
+# chkconfig: - 99 99
+# description: Ajaxterm is a yadda yadda yadda
+# processname: ajaxterm
+# pidfile: /var/run/ajaxterm.pid
+# version: 1.0 Kevin Reichhart - ajaxterminit at lastname dot org
+
+# Source function library.
+. /etc/rc.d/init.d/functions
+
+if [ -f /etc/sysconfig/ajaxterm ]; then
+ . /etc/sysconfig/ajaxterm
+fi
+
+ajaxterm=/usr/local/bin/ajaxterm
+prog=ajaxterm
+pidfile=${PIDFILE-/var/run/ajaxterm.pid}
+lockfile=${LOCKFILE-/var/lock/subsys/ajaxterm}
+port=${PORT-8022}
+user=${xUSER-nobody}
+RETVAL=0
+
+
+start() {
+ echo -n $"Starting $prog: "
+ daemon $ajaxterm --daemon --port=$port --uid=$user $OPTIONS
+ RETVAL=$?
+ echo
+ [ $RETVAL = 0 ] && touch ${lockfile}
+ return $RETVAL
+}
+stop() {
+ echo -n $"Stopping $prog: "
+ killproc $ajaxterm
+ RETVAL=$?
+ echo
+ [ $RETVAL = 0 ] && rm -f ${lockfile} ${pidfile}
+}
+reload() {
+ echo -n $"Reloading $prog: "
+ killproc $ajaxterm -HUP
+ RETVAL=$?
+ echo
+}
+
+# See how we were called.
+case "$1" in
+ start)
+ start
+ ;;
+ stop)
+ stop
+ ;;
+ status)
+ status python ajaxterm
+ RETVAL=$?
+ ;;
+ restart)
+ stop
+ start
+ ;;
+ condrestart)
+ if [ -f ${pidfile} ] ; then
+ stop
+ start
+ fi
+ ;;
+ *)
+ echo $"Usage: $prog {start|stop|restart|condrestart}"
+ exit 1
+esac
+
+exit $RETVAL
diff --git a/tools/ajaxterm/configure.makefile b/tools/ajaxterm/configure.makefile
new file mode 100644
index 000000000..6bd80853d
--- /dev/null
+++ b/tools/ajaxterm/configure.makefile
@@ -0,0 +1,20 @@
+build:
+ true
+
+install:
+ install -d "%(bin)s"
+ install -d "%(lib)s"
+ install ajaxterm.bin "%(bin)s/ajaxterm"
+ install ajaxterm.initd "%(etc)s/init.d/ajaxterm"
+ install -m 644 ajaxterm.css ajaxterm.html ajaxterm.js qweb.py sarissa.js sarissa_dhtml.js "%(lib)s"
+ install -m 755 ajaxterm.py "%(lib)s"
+ gzip --best -c ajaxterm.1 > ajaxterm.1.gz
+ install -d "%(man)s"
+ install ajaxterm.1.gz "%(man)s"
+
+clean:
+ rm ajaxterm.bin
+ rm ajaxterm.initd
+ rm ajaxterm.1.gz
+ rm Makefile
+
diff --git a/tools/ajaxterm/qweb.py b/tools/ajaxterm/qweb.py
new file mode 100644
index 000000000..20c509230
--- /dev/null
+++ b/tools/ajaxterm/qweb.py
@@ -0,0 +1,1356 @@
+#!/usr/bin/python2.3
+#
+# vim:set et ts=4 fdc=0 fdn=2 fdl=0:
+#
+# There are no blank lines between blocks because I use folding from:
+# http://www.vim.org/scripts/script.php?script_id=515
+#
+
+"""= QWeb Framework =
+
+== What is QWeb ? ==
+
+QWeb is a python based [http://www.python.org/doc/peps/pep-0333/ WSGI]
+compatible web framework. It provides an infrastructure to quickly build web
+applications consisting of:
+
+ * A lightweight request handler (QWebRequest)
+ * An xml templating engine (QWebXml and QWebHtml)
+ * A simple name based controller (qweb_control)
+ * A standalone WSGI Server (QWebWSGIServer)
+ * A cgi and fastcgi WSGI wrapper (taken from flup)
+ * A startup function that starts cgi, fastcgi or standalone according to the
+   environment (qweb_autorun).
+
+QWeb applications are runnable in standalone mode (from commandline), via
+FastCGI, Regular CGI or by any python WSGI compliant server.
+
+QWeb doesn't provide any database access but it integrates nicely with ORMs
+such as SQLObject, SQLAlchemy or plain DB-API.
+
+Written by Antony Lesuisse (email al AT udev.org)
+
+Homepage: http://antony.lesuisse.org/qweb/trac/
+
+Forum: [http://antony.lesuisse.org/qweb/forum/viewforum.php?id=1 Forum]
+
+== Quick Start (for Linux, MacOS X and cygwin) ==
+
+Make sure you have at least python 2.3 installed and run the following commands:
+
+{{{
+$ wget http://antony.lesuisse.org/qweb/files/QWeb-0.7.tar.gz
+$ tar zxvf QWeb-0.7.tar.gz
+$ cd QWeb-0.7/examples/blog
+$ ./blog.py
+}}}
+
+And point your browser to http://localhost:8080/
+
+You may also try AjaxTerm, which uses the qweb request handler.
+
+== Download ==
+
+ * Version 0.7:
+ * Source [/qweb/files/QWeb-0.7.tar.gz QWeb-0.7.tar.gz]
+ * Python 2.3 Egg [/qweb/files/QWeb-0.7-py2.3.egg QWeb-0.7-py2.3.egg]
+ * Python 2.4 Egg [/qweb/files/QWeb-0.7-py2.4.egg QWeb-0.7-py2.4.egg]
+
+ * [/qweb/trac/browser Browse the source repository]
+
+== Documentation ==
+
+ * [/qweb/trac/browser/trunk/README.txt?format=raw Read the included documentation]
+ * QwebTemplating
+
+== Mailing-list ==
+
+ * Forum: [http://antony.lesuisse.org/qweb/forum/viewforum.php?id=1 Forum]
+ * No mailing-list exists yet, discussion should happen on: [http://mail.python.org/mailman/listinfo/web-sig web-sig] [http://mail.python.org/pipermail/web-sig/ archives]
+
+QWeb Components:
+----------------
+
+QWeb also features a simple components api that enables developers to easily
+produce reusable components.
+
+Default qweb components:
+
+ - qweb_static:
+ A qweb component to serve static content from the filesystem or from
+ zipfiles.
+
+ - qweb_dbadmin:
+ scaffolding for sqlobject
+
+License
+-------
+qweb/fcgi.py, which is BSD-like, comes from saddi.com.
+Everything else is put in the public domain.
+
+
+TODO
+----
+ Announce QWeb to python-announce-list@python.org web-sig@python.org
+ qweb_core
+ rename request methods into
+ request_save_files
+ response_404
+ response_redirect
+ response_download
+ request callback_generator, callback_function ?
+ wsgi callback_server_local
+ xml tags explicitly call render_attributes(t_att)?
+ priority form-checkbox over t-value (for t-option)
+
+"""
+
+import BaseHTTPServer,SocketServer,Cookie
+import cgi,datetime,email,email.Message,errno,gzip,os,random,re,socket,sys,tempfile,time,types,urllib,urlparse,xml.dom
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+try:
+ import cStringIO as StringIO
+except ImportError:
+ import StringIO
+
+#----------------------------------------------------------
+# Qweb Xml t-raw t-esc t-if t-foreach t-set t-call t-trim
+#----------------------------------------------------------
+class QWebEval:
+ def __init__(self,data):
+ self.data=data
+ def __getitem__(self,expr):
+ if self.data.has_key(expr):
+ return self.data[expr]
+ r=None
+ try:
+ r=eval(expr,self.data)
+ except NameError,e:
+ pass
+ except AttributeError,e:
+ pass
+ except Exception,e:
+ print "qweb: expression error '%s' "%expr,e
+ if self.data.has_key("__builtins__"):
+ del self.data["__builtins__"]
+ return r
+ def eval_object(self,expr):
+ return self[expr]
+ def eval_str(self,expr):
+ if expr=="0":
+ return self.data[0]
+ if isinstance(self[expr],unicode):
+ return self[expr].encode("utf8")
+ return str(self[expr])
+ def eval_format(self,expr):
+ try:
+ return str(expr%self)
+ except:
+ return "qweb: format error '%s' "%expr
+# if isinstance(r,unicode):
+# return r.encode("utf8")
+ def eval_bool(self,expr):
+ if self.eval_object(expr):
+ return 1
+ else:
+ return 0
+class QWebXml:
+ """QWeb Xml templating engine
+
+    The templating engine uses a very simple syntax, "magic" xml attributes, to
+    produce any kind of textual output (even non-xml).
+
+ QWebXml:
+ the template engine core implements the basic magic attributes:
+
+ t-att t-raw t-esc t-if t-foreach t-set t-call t-trim
+
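+    A minimal illustrative template (the names "greeting", "css_class", "user"
+    and "is_admin" are made up for this example, not part of the library):
+
+        <t t-name="greeting">
+          <div t-att-class="css_class">
+            Hello <t t-esc="user"/>!
+            <span t-if="is_admin">admin</span>
+          </div>
+        </t>
+
+    Rendering it with render("greeting", {"user": "alice", "css_class": "hi",
+    "is_admin": 0}) produces a div whose class attribute comes from css_class,
+    with the user name escaped and the span omitted.
+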
+ """
+ def __init__(self,x=None,zipname=None):
+ self.node=xml.dom.Node
+ self._t={}
+ self._render_tag={}
+ prefix='render_tag_'
+ for i in [j for j in dir(self) if j.startswith(prefix)]:
+ name=i[len(prefix):].replace('_','-')
+ self._render_tag[name]=getattr(self.__class__,i)
+
+ self._render_att={}
+ prefix='render_att_'
+ for i in [j for j in dir(self) if j.startswith(prefix)]:
+ name=i[len(prefix):].replace('_','-')
+ self._render_att[name]=getattr(self.__class__,i)
+
+ if x!=None:
+ if zipname!=None:
+ import zipfile
+ zf=zipfile.ZipFile(zipname, 'r')
+ self.add_template(zf.read(x))
+ else:
+ self.add_template(x)
+ def register_tag(self,tag,func):
+ self._render_tag[tag]=func
+ def add_template(self,x):
+ if hasattr(x,'documentElement'):
+ dom=x
+ elif x.startswith("<?xml"):
+ import xml.dom.minidom
+ dom=xml.dom.minidom.parseString(x)
+ else:
+ import xml.dom.minidom
+ dom=xml.dom.minidom.parse(x)
+ for n in dom.documentElement.childNodes:
+ if n.nodeName=="t":
+ self._t[str(n.getAttribute("t-name"))]=n
+ def get_template(self,name):
+ return self._t[name]
+
+ def eval_object(self,expr,v):
+ return QWebEval(v).eval_object(expr)
+ def eval_str(self,expr,v):
+ return QWebEval(v).eval_str(expr)
+ def eval_format(self,expr,v):
+ return QWebEval(v).eval_format(expr)
+ def eval_bool(self,expr,v):
+ return QWebEval(v).eval_bool(expr)
+
+ def render(self,tname,v={},out=None):
+ if self._t.has_key(tname):
+ return self.render_node(self._t[tname],v)
+ else:
+ return 'qweb: template "%s" not found'%tname
+ def render_node(self,e,v):
+ r=""
+ if e.nodeType==self.node.TEXT_NODE or e.nodeType==self.node.CDATA_SECTION_NODE:
+ r=e.data.encode("utf8")
+ elif e.nodeType==self.node.ELEMENT_NODE:
+ pre=""
+ g_att=""
+ t_render=None
+ t_att={}
+ for (an,av) in e.attributes.items():
+ an=str(an)
+ if isinstance(av,types.UnicodeType):
+ av=av.encode("utf8")
+ else:
+ av=av.nodeValue.encode("utf8")
+ if an.startswith("t-"):
+ for i in self._render_att:
+ if an[2:].startswith(i):
+ g_att+=self._render_att[i](self,e,an,av,v)
+ break
+ else:
+ if self._render_tag.has_key(an[2:]):
+ t_render=an[2:]
+ t_att[an[2:]]=av
+ else:
+ g_att+=' %s="%s"'%(an,cgi.escape(av,1));
+ if t_render:
+ if self._render_tag.has_key(t_render):
+ r=self._render_tag[t_render](self,e,t_att,g_att,v)
+ else:
+ r=self.render_element(e,g_att,v,pre,t_att.get("trim",0))
+ return r
+ def render_element(self,e,g_att,v,pre="",trim=0):
+ g_inner=[]
+ for n in e.childNodes:
+ g_inner.append(self.render_node(n,v))
+ name=str(e.nodeName)
+ inner="".join(g_inner)
+ if trim==0:
+ pass
+ elif trim=='left':
+ inner=inner.lstrip()
+ elif trim=='right':
+ inner=inner.rstrip()
+ elif trim=='both':
+ inner=inner.strip()
+ if name=="t":
+ return inner
+ elif len(inner):
+ return "<%s%s>%s%s</%s>"%(name,g_att,pre,inner,name)
+ else:
+ return "<%s%s/>"%(name,g_att)
+
+ # Attributes
+ def render_att_att(self,e,an,av,v):
+ if an.startswith("t-attf-"):
+ att,val=an[7:],self.eval_format(av,v)
+ elif an.startswith("t-att-"):
+ att,val=(an[6:],self.eval_str(av,v))
+ else:
+ att,val=self.eval_object(av,v)
+ return ' %s="%s"'%(att,cgi.escape(val,1))
+
+ # Tags
+ def render_tag_raw(self,e,t_att,g_att,v):
+ return self.eval_str(t_att["raw"],v)
+ def render_tag_rawf(self,e,t_att,g_att,v):
+ return self.eval_format(t_att["rawf"],v)
+ def render_tag_esc(self,e,t_att,g_att,v):
+ return cgi.escape(self.eval_str(t_att["esc"],v))
+ def render_tag_escf(self,e,t_att,g_att,v):
+ return cgi.escape(self.eval_format(t_att["escf"],v))
+ def render_tag_foreach(self,e,t_att,g_att,v):
+ expr=t_att["foreach"]
+ enum=self.eval_object(expr,v)
+ if enum!=None:
+ var=t_att.get('as',expr).replace('.','_')
+ d=v.copy()
+ size=-1
+ if isinstance(enum,types.ListType):
+ size=len(enum)
+ elif isinstance(enum,types.TupleType):
+ size=len(enum)
+ elif hasattr(enum,'count'):
+ size=enum.count()
+ d["%s_size"%var]=size
+ d["%s_all"%var]=enum
+ index=0
+ ru=[]
+ for i in enum:
+ d["%s_value"%var]=i
+ d["%s_index"%var]=index
+ d["%s_first"%var]=index==0
+ d["%s_even"%var]=index%2
+ d["%s_odd"%var]=(index+1)%2
+ d["%s_last"%var]=index+1==size
+ if index%2:
+ d["%s_parity"%var]='odd'
+ else:
+ d["%s_parity"%var]='even'
+ if isinstance(i,types.DictType):
+ d.update(i)
+ else:
+ d[var]=i
+ ru.append(self.render_element(e,g_att,d))
+ index+=1
+ return "".join(ru)
+ else:
+ return "qweb: t-foreach %s not found."%expr
+ def render_tag_if(self,e,t_att,g_att,v):
+ if self.eval_bool(t_att["if"],v):
+ return self.render_element(e,g_att,v)
+ else:
+ return ""
+ def render_tag_call(self,e,t_att,g_att,v):
+ # TODO t-prefix
+ if t_att.has_key("import"):
+ d=v
+ else:
+ d=v.copy()
+ d[0]=self.render_element(e,g_att,d)
+ return self.render(t_att["call"],d)
+ def render_tag_set(self,e,t_att,g_att,v):
+ if t_att.has_key("eval"):
+ v[t_att["set"]]=self.eval_object(t_att["eval"],v)
+ else:
+ v[t_att["set"]]=self.render_element(e,g_att,v)
+ return ""
+
+#----------------------------------------------------------
+# QWeb HTML (+deprecated QWebFORM and QWebOLD)
+#----------------------------------------------------------
+class QWebURL:
+ """ URL helper
+ assert req.PATH_INFO== "/site/admin/page_edit"
+ u = QWebURL(root_path="/site/",req_path=req.PATH_INFO)
+ s=u.url2_href("user/login",{'a':'1'})
+ assert s=="../user/login?a=1"
+
+ """
+ def __init__(self, root_path="/", req_path="/",defpath="",defparam={}):
+ self.defpath=defpath
+ self.defparam=defparam
+ self.root_path=root_path
+ self.req_path=req_path
+ self.req_list=req_path.split("/")[:-1]
+ self.req_len=len(self.req_list)
+ def decode(self,s):
+ h={}
+ for k,v in cgi.parse_qsl(s,1):
+ h[k]=v
+ return h
+ def encode(self,h):
+ return urllib.urlencode(h.items())
+ def request(self,req):
+ return req.REQUEST
+ def copy(self,path=None,param=None):
+ npath=self.defpath
+ if path:
+ npath=path
+ nparam=self.defparam.copy()
+ if param:
+ nparam.update(param)
+ return QWebURL(self.root_path,self.req_path,npath,nparam)
+ def path(self,path=''):
+ if not path:
+ path=self.defpath
+ pl=(self.root_path+path).split('/')
+ i=0
+ for i in range(min(len(pl), self.req_len)):
+ if pl[i]!=self.req_list[i]:
+ break
+ else:
+ i+=1
+ dd=self.req_len-i
+ if dd<0:
+ dd=0
+ return '/'.join(['..']*dd+pl[i:])
+ def href(self,path='',arg={}):
+ p=self.path(path)
+ tmp=self.defparam.copy()
+ tmp.update(arg)
+ s=self.encode(tmp)
+ if len(s):
+ return p+"?"+s
+ else:
+ return p
+ def form(self,path='',arg={}):
+ p=self.path(path)
+ tmp=self.defparam.copy()
+ tmp.update(arg)
+ r=''.join(['<input type="hidden" name="%s" value="%s"/>'%(k,cgi.escape(str(v),1)) for k,v in tmp.items()])
+ return (p,r)
+class QWebField:
+ def __init__(self,name=None,default="",check=None):
+ self.name=name
+ self.default=default
+ self.check=check
+ # optional attributes
+ self.type=None
+ self.trim=1
+ self.required=1
+ self.cssvalid="form_valid"
+ self.cssinvalid="form_invalid"
+ # set by addfield
+ self.form=None
+ # set by processing
+ self.input=None
+ self.css=None
+ self.value=None
+ self.valid=None
+ self.invalid=None
+ self.validate(1)
+ def validate(self,val=1,update=1):
+ if val:
+ self.valid=1
+ self.invalid=0
+ self.css=self.cssvalid
+ else:
+ self.valid=0
+ self.invalid=1
+ self.css=self.cssinvalid
+ if update and self.form:
+ self.form.update()
+ def invalidate(self,update=1):
+ self.validate(0,update)
+class QWebForm:
+ class QWebFormF:
+ pass
+ def __init__(self,e=None,arg=None,default=None):
+ self.fields={}
+ # all fields have been submitted
+ self.submitted=False
+ self.missing=[]
+ # at least one field is invalid or missing
+ self.invalid=False
+ self.error=[]
+ # all fields have been submitted and are valid
+ self.valid=False
+ # fields under self.f for convenience
+ self.f=self.QWebFormF()
+ if e:
+ self.add_template(e)
+ # assume that the fields are done with the template
+ if default:
+ self.set_default(default,e==None)
+ if arg!=None:
+ self.process_input(arg)
+ def __getitem__(self,k):
+ return self.fields[k]
+ def set_default(self,default,add_missing=1):
+ for k,v in default.items():
+ if self.fields.has_key(k):
+ self.fields[k].default=str(v)
+ elif add_missing:
+ self.add_field(QWebField(k,v))
+ def add_field(self,f):
+ self.fields[f.name]=f
+ f.form=self
+ setattr(self.f,f.name,f)
+ def add_template(self,e):
+ att={}
+ for (an,av) in e.attributes.items():
+ an=str(an)
+ if an.startswith("t-"):
+ att[an[2:]]=av.encode("utf8")
+ for i in ["form-text", "form-password", "form-radio", "form-checkbox", "form-select","form-textarea"]:
+ if att.has_key(i):
+ name=att[i].split(".")[-1]
+ default=att.get("default","")
+ check=att.get("check",None)
+ f=QWebField(name,default,check)
+ if i=="form-textarea":
+ f.type="textarea"
+ f.trim=0
+ if i=="form-checkbox":
+ f.type="checkbox"
+ f.required=0
+ self.add_field(f)
+ for n in e.childNodes:
+ if n.nodeType==n.ELEMENT_NODE:
+ self.add_template(n)
+ def process_input(self,arg):
+ for f in self.fields.values():
+ if arg.has_key(f.name):
+ f.input=arg[f.name]
+ f.value=f.input
+ if f.trim:
+ f.input=f.input.strip()
+ f.validate(1,False)
+ if f.check==None:
+ continue
+ elif callable(f.check):
+ pass
+ elif isinstance(f.check,str):
+ v=f.check
+ if f.check=="email":
+ v=r"/^[^@#!& ]+@[A-Za-z0-9-][.A-Za-z0-9-]{0,64}\.[A-Za-z]{2,5}$/"
+ if f.check=="date":
+ v=r"/^(19|20)\d\d-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])$/"
+ if not re.match(v[1:-1],f.input):
+ f.validate(0,False)
+ else:
+ f.value=f.default
+ self.update()
+ def validate_all(self,val=1):
+ for f in self.fields.values():
+ f.validate(val,0)
+ self.update()
+ def invalidate_all(self):
+ self.validate_all(0)
+ def update(self):
+ self.submitted=True
+ self.valid=True
+ self.errors=[]
+ for f in self.fields.values():
+ if f.required and f.input==None:
+ self.submitted=False
+ self.valid=False
+ self.missing.append(f.name)
+ if f.invalid:
+ self.valid=False
+ self.error.append(f.name)
+ # invalid have been submitted and
+ self.invalid=self.submitted and self.valid==False
+ def collect(self):
+ d={}
+ for f in self.fields.values():
+ d[f.name]=f.value
+ return d
+class QWebURLEval(QWebEval):
+ def __init__(self,data):
+ QWebEval.__init__(self,data)
+ def __getitem__(self,expr):
+ r=QWebEval.__getitem__(self,expr)
+ if isinstance(r,str):
+ return urllib.quote_plus(r)
+ else:
+ return r
+class QWebHtml(QWebXml):
+ """QWebHtml
+ QWebURL:
+ QWebField:
+ QWebForm:
+ QWebHtml:
+ an extended template engine, with a few utility class to easily produce
+ HTML, handle URLs and process forms, it adds the following magic attributes:
+
+ t-href t-action t-form-text t-form-password t-form-textarea t-form-radio
+ t-form-checkbox t-form-select t-option t-selected t-checked t-pager
+
+ # explication URL:
+ # v['tableurl']=QWebUrl({p=afdmin,saar=,orderby=,des=,mlink;meta_active=})
+ # t-href="tableurl?desc=1"
+ #
+ # explication FORM: t-if="form.valid()"
+ # Foreach i
+ # email: <input type="text" t-esc-name="i" t-esc-value="form[i].value" t-esc-class="form[i].css"/>
+ # <input type="radio" name="spamtype" t-esc-value="i" t-selected="i==form.f.spamtype.value"/>
+ # <option t-esc-value="cc" t-selected="cc==form.f.country.value"><t t-esc="cname"></option>
+ # Simple forms:
+ # <input t-form-text="form.email" t-check="email"/>
+ # <input t-form-password="form.email" t-check="email"/>
+ # <input t-form-radio="form.email" />
+ # <input t-form-checkbox="form.email" />
+ # <textarea t-form-textarea="form.email" t-check="email"/>
+ # <select t-form-select="form.email"/>
+ # <option t-value="1">
+ # <input t-form-radio="form.spamtype" t-value="1"/> Cars
+ # <input t-form-radio="form.spamtype" t-value="2"/> Sprt
+ """
+ # QWebForm from a template
+ def form(self,tname,arg=None,default=None):
+ form=QWebForm(self._t[tname],arg,default)
+ return form
+
+ # HTML Att
+ def eval_url(self,av,v):
+ s=QWebURLEval(v).eval_format(av)
+ a=s.split('?',1)
+ arg={}
+ if len(a)>1:
+ for k,v in cgi.parse_qsl(a[1],1):
+ arg[k]=v
+ b=a[0].split('/',1)
+ path=''
+ if len(b)>1:
+ path=b[1]
+ u=b[0]
+ return u,path,arg
+ def render_att_url_(self,e,an,av,v):
+ u,path,arg=self.eval_url(av,v)
+ if not isinstance(v.get(u,0),QWebURL):
+ out='qweb: missing url %r %r %r'%(u,path,arg)
+ else:
+ out=v[u].href(path,arg)
+ return ' %s="%s"'%(an[6:],cgi.escape(out,1))
+ def render_att_href(self,e,an,av,v):
+ return self.render_att_url_(e,"t-url-href",av,v)
+ def render_att_checked(self,e,an,av,v):
+ if self.eval_bool(av,v):
+ return ' %s="%s"'%(an[2:],an[2:])
+ else:
+ return ''
+ def render_att_selected(self,e,an,av,v):
+ return self.render_att_checked(e,an,av,v)
+
+ # HTML Tags forms
+ def render_tag_rawurl(self,e,t_att,g_att,v):
+ u,path,arg=self.eval_url(t_att["rawurl"],v)
+ return v[u].href(path,arg)
+ def render_tag_escurl(self,e,t_att,g_att,v):
+ u,path,arg=self.eval_url(t_att["escurl"],v)
+ return cgi.escape(v[u].href(path,arg))
+ def render_tag_action(self,e,t_att,g_att,v):
+ u,path,arg=self.eval_url(t_att["action"],v)
+ if not isinstance(v.get(u,0),QWebURL):
+ action,input=('qweb: missing url %r %r %r'%(u,path,arg),'')
+ else:
+ action,input=v[u].form(path,arg)
+ g_att+=' action="%s"'%action
+ return self.render_element(e,g_att,v,input)
+ def render_tag_form_text(self,e,t_att,g_att,v):
+ f=self.eval_object(t_att["form-text"],v)
+ g_att+=' type="text" name="%s" value="%s" class="%s"'%(f.name,cgi.escape(f.value,1),f.css)
+ return self.render_element(e,g_att,v)
+ def render_tag_form_password(self,e,t_att,g_att,v):
+ f=self.eval_object(t_att["form-password"],v)
+ g_att+=' type="password" name="%s" value="%s" class="%s"'%(f.name,cgi.escape(f.value,1),f.css)
+ return self.render_element(e,g_att,v)
+ def render_tag_form_textarea(self,e,t_att,g_att,v):
+ type="textarea"
+ f=self.eval_object(t_att["form-textarea"],v)
+ g_att+=' name="%s" class="%s"'%(f.name,f.css)
+ r="<%s%s>%s</%s>"%(type,g_att,cgi.escape(f.value,1),type)
+ return r
+ def render_tag_form_radio(self,e,t_att,g_att,v):
+ f=self.eval_object(t_att["form-radio"],v)
+ val=t_att["value"]
+ g_att+=' type="radio" name="%s" value="%s"'%(f.name,val)
+ if f.value==val:
+ g_att+=' checked="checked"'
+ return self.render_element(e,g_att,v)
+ def render_tag_form_checkbox(self,e,t_att,g_att,v):
+ f=self.eval_object(t_att["form-checkbox"],v)
+ val=t_att["value"]
+ g_att+=' type="checkbox" name="%s" value="%s"'%(f.name,val)
+ if f.value==val:
+ g_att+=' checked="checked"'
+ return self.render_element(e,g_att,v)
+ def render_tag_form_select(self,e,t_att,g_att,v):
+ f=self.eval_object(t_att["form-select"],v)
+ g_att+=' name="%s" class="%s"'%(f.name,f.css)
+ return self.render_element(e,g_att,v)
+ def render_tag_option(self,e,t_att,g_att,v):
+ f=self.eval_object(e.parentNode.getAttribute("t-form-select"),v)
+ val=t_att["option"]
+ g_att+=' value="%s"'%(val)
+ if f.value==val:
+ g_att+=' selected="selected"'
+ return self.render_element(e,g_att,v)
+
+ # HTML Tags others
+ def render_tag_pager(self,e,t_att,g_att,v):
+ pre=t_att["pager"]
+ total=int(self.eval_str(t_att["total"],v))
+ start=int(self.eval_str(t_att["start"],v))
+ step=int(self.eval_str(t_att.get("step","100"),v))
+ scope=int(self.eval_str(t_att.get("scope","5"),v))
+ # Compute Pager
+ p=pre+"_"
+ d={}
+ d[p+"tot_size"]=total
+ d[p+"tot_page"]=tot_page=total/step
+ d[p+"win_start0"]=total and start
+ d[p+"win_start1"]=total and start+1
+ d[p+"win_end0"]=max(0,min(start+step-1,total-1))
+ d[p+"win_end1"]=min(start+step,total)
+ d[p+"win_page0"]=win_page=start/step
+ d[p+"win_page1"]=win_page+1
+ d[p+"prev"]=(win_page!=0)
+ d[p+"prev_start"]=(win_page-1)*step
+ d[p+"next"]=(tot_page>=win_page+1)
+ d[p+"next_start"]=(win_page+1)*step
+ l=[]
+ begin=win_page-scope
+ end=win_page+scope
+ if begin<0:
+ end-=begin
+ if end>tot_page:
+ begin-=(end-tot_page)
+ i=max(0,begin)
+ while i<=min(end,tot_page) and total!=step:
+ l.append( { p+"page0":i, p+"page1":i+1, p+"start":i*step, p+"sel":(win_page==i) })
+ i+=1
+ d[p+"active"]=len(l)>1
+ d[p+"list"]=l
+ # Update v
+ v.update(d)
+ return ""
+
+#----------------------------------------------------------
+# QWeb Simple Controller
+#----------------------------------------------------------
+def qweb_control(self,jump='main',p=[]):
+ """ qweb_control(self,jump='main',p=[]):
+    A simple function to handle the controller part of your application. It
+    dispatches control to the jump argument, while ensuring that prefix
+    functions have been called.
+
+    qweb_control replaces '/' with '_' and strips '_' from the jump argument.
+
+ name1
+ name1_name2
+ name1_name2_name3
+
+ """
+ jump=jump.replace('/','_').strip('_')
+ if not hasattr(self,jump):
+ return 0
+ done={}
+ todo=[]
+ while 1:
+ if jump!=None:
+ tmp=""
+ todo=[]
+ for i in jump.split("_"):
+ tmp+=i+"_";
+ if not done.has_key(tmp[:-1]):
+ todo.append(tmp[:-1])
+ jump=None
+ elif len(todo):
+ i=todo.pop(0)
+ done[i]=1
+ if hasattr(self,i):
+ f=getattr(self,i)
+ r=f(*p)
+ if isinstance(r,types.StringType):
+ jump=r
+ else:
+ break
+ return 1
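+# Illustrative sketch (class and handler names are hypothetical): dispatching
+# jump='admin/users' calls self.admin() and then self.admin_users(); a handler
+# that returns a string causes a jump to that new target.
+#
+#   class MyApp:
+#       def admin(self, req):
+#           req.write('<h1>admin</h1>')      # prefix handler, called first
+#       def admin_users(self, req):
+#           req.write('user list')
+#   # qweb_control(MyApp(), 'admin/users', [req])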
+
+#----------------------------------------------------------
+# QWeb WSGI Request handler
+#----------------------------------------------------------
+class QWebSession(dict):
+ def __init__(self,environ,**kw):
+ dict.__init__(self)
+ default={
+ "path" : tempfile.gettempdir(),
+ "cookie_name" : "QWEBSID",
+ "cookie_lifetime" : 0,
+ "cookie_path" : '/',
+ "cookie_domain" : '',
+ "limit_cache" : 1,
+ "probability" : 0.01,
+ "maxlifetime" : 3600,
+ "disable" : 0,
+ }
+ for k,v in default.items():
+ setattr(self,'session_%s'%k,kw.get(k,v))
+ # Try to find session
+ self.session_found_cookie=0
+ self.session_found_url=0
+ self.session_found=0
+ self.session_orig=""
+ # Try cookie
+ c=Cookie.SimpleCookie()
+ c.load(environ.get('HTTP_COOKIE', ''))
+ if c.has_key(self.session_cookie_name):
+ sid=c[self.session_cookie_name].value[:64]
+ if re.match('[a-f0-9]+$',sid) and self.session_load(sid):
+ self.session_id=sid
+ self.session_found_cookie=1
+ self.session_found=1
+ # Try URL
+ if not self.session_found_cookie:
+ mo=re.search('&%s=([a-f0-9]+)'%self.session_cookie_name,environ.get('QUERY_STRING',''))
+ if mo and self.session_load(mo.group(1)):
+ self.session_id=mo.group(1)
+ self.session_found_url=1
+ self.session_found=1
+ # New session
+ if not self.session_found:
+ self.session_id='%032x'%random.randint(1,2**128)
+ self.session_trans_sid="&amp;%s=%s"%(self.session_cookie_name,self.session_id)
+ # Clean old session
+ if random.random() < self.session_probability:
+ self.session_clean()
+ def session_get_headers(self):
+ h=[]
+ if (not self.session_disable) and (len(self) or len(self.session_orig)):
+ self.session_save()
+ if not self.session_found_cookie:
+ c=Cookie.SimpleCookie()
+ c[self.session_cookie_name] = self.session_id
+ c[self.session_cookie_name]['path'] = self.session_cookie_path
+ if self.session_cookie_domain:
+ c[self.session_cookie_name]['domain'] = self.session_cookie_domain
+# if self.session_cookie_lifetime:
+# c[self.session_cookie_name]['expires'] = TODO date localtime or not, datetime.datetime(1970, 1, 1)
+ h.append(("Set-Cookie", c[self.session_cookie_name].OutputString()))
+ if self.session_limit_cache:
+ h.append(('Cache-Control','no-store, no-cache, must-revalidate, post-check=0, pre-check=0'))
+ h.append(('Expires','Thu, 19 Nov 1981 08:52:00 GMT'))
+ h.append(('Pragma','no-cache'))
+ return h
+ def session_load(self,sid):
+ fname=os.path.join(self.session_path,'qweb_sess_%s'%sid)
+ try:
+ orig=file(fname).read()
+ d=pickle.loads(orig)
+ except:
+ return
+ self.session_orig=orig
+ self.update(d)
+ return 1
+ def session_save(self):
+ if not os.path.isdir(self.session_path):
+ os.makedirs(self.session_path)
+ fname=os.path.join(self.session_path,'qweb_sess_%s'%self.session_id)
+ try:
+ oldtime=os.path.getmtime(fname)
+        except (OSError, IOError):
+ oldtime=0
+ dump=pickle.dumps(self.copy())
+ if (dump != self.session_orig) or (time.time() > oldtime+self.session_maxlifetime/4):
+ tmpname=os.path.join(self.session_path,'qweb_sess_%s_%x'%(self.session_id,random.randint(1,2**32)))
+ f=file(tmpname,'wb')
+ f.write(dump)
+ f.close()
+ if sys.platform=='win32' and os.path.isfile(fname):
+ os.remove(fname)
+ os.rename(tmpname,fname)
+ def session_clean(self):
+ t=time.time()
+ try:
+ for i in [os.path.join(self.session_path,i) for i in os.listdir(self.session_path) if i.startswith('qweb_sess_')]:
+ if (t > os.path.getmtime(i)+self.session_maxlifetime):
+ os.unlink(i)
+        except (OSError, IOError):
+ pass
+class QWebSessionMem(QWebSession):
+ def session_load(self,sid):
+ global _qweb_sessions
+ if not "_qweb_sessions" in globals():
+ _qweb_sessions={}
+ if _qweb_sessions.has_key(sid):
+ self.session_orig=_qweb_sessions[sid]
+ self.update(self.session_orig)
+ return 1
+ def session_save(self):
+ global _qweb_sessions
+ if not "_qweb_sessions" in globals():
+ _qweb_sessions={}
+ _qweb_sessions[self.session_id]=self.copy()
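+# Illustrative sketch (environ stands for any WSGI environ dict): sessions are
+# dict-like and are only persisted once they hold data.
+#
+#   sess = QWebSessionMem(environ)
+#   sess['user'] = 'alice'
+#   headers = sess.session_get_headers()   # includes a Set-Cookie on first use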
+class QWebSessionService:
+ def __init__(self, wsgiapp, url_rewrite=0):
+ self.wsgiapp=wsgiapp
+ self.url_rewrite_tags="a=href,area=href,frame=src,form=,fieldset="
+ def __call__(self, environ, start_response):
+ # TODO
+ # use QWebSession to provide environ["qweb.session"]
+ return self.wsgiapp(environ,start_response)
+class QWebDict(dict):
+ def __init__(self,*p):
+ dict.__init__(self,*p)
+ def __getitem__(self,key):
+ return self.get(key,"")
+ def int(self,key):
+ try:
+ return int(self.get(key,"0"))
+ except ValueError:
+ return 0
+class QWebListDict(dict):
+ def __init__(self,*p):
+ dict.__init__(self,*p)
+ def __getitem__(self,key):
+ return self.get(key,[])
+ def appendlist(self,key,val):
+ if self.has_key(key):
+ self[key].append(val)
+ else:
+ self[key]=[val]
+ def get_qwebdict(self):
+ d=QWebDict()
+ for k,v in self.items():
+ d[k]=v[-1]
+ return d
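+# Illustrative example: QWebListDict({'a': ['1', '2']}).get_qwebdict()['a'] == '2';
+# missing keys read as '' from a QWebDict and as [] from a QWebListDict.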
+class QWebRequest:
+ """QWebRequest a WSGI request handler.
+
+    QWebRequest is a WSGI request handler that features GET, POST and POST
+    multipart methods, handles cookies and headers and provides a dict-like
+    SESSION object (either on the filesystem or in memory).
+
+ It is constructed with the environ and start_response WSGI arguments:
+
+ req=qweb.QWebRequest(environ, start_response)
+
+    req has the following attributes:
+
+ req.environ standard WSGI dict (CGI and wsgi ones)
+
+ Some CGI vars as attributes from environ for convenience:
+
+ req.SCRIPT_NAME
+ req.PATH_INFO
+ req.REQUEST_URI
+
+    Some computed values (also for convenience):
+
+      req.FULL_URL full URL reconstructed (http://host/query)
+ req.FULL_PATH (URL path before ?querystring)
+
+    Dicts constructed from the query string and POST data, PHP-like.
+
+ req.GET contains GET vars
+ req.POST contains POST vars
+ req.REQUEST contains merge of GET and POST
+ req.FILES contains uploaded files
+      req.GET_LIST req.POST_LIST req.REQUEST_LIST req.FILES_LIST multi-valued versions of the above
+ req.debug() returns an HTML dump of those vars
+
+ A dict-like session object.
+
+      req.SESSION the session starts when the dict is not empty.
+
+ Attribute for handling the response
+
+ req.response_headers dict-like to set headers
+ req.response_cookies a SimpleCookie to set cookies
+ req.response_status a string to set the status like '200 OK'
+
+ req.write() to write to the buffer
+
+    req itself is an iterable object over the buffer; it will also call
+    start_response automatically before returning anything via the iterator.
+
+ To make it short, it means that you may use
+
+ return req
+
+    at the end of your request handling to return the response to any WSGI
+ application server.
+ """
+ #
+ # This class contains part ripped from colubrid (with the permission of
+ # mitsuhiko) see http://wsgiarea.pocoo.org/colubrid/
+ #
+ # - the class HttpHeaders
+ # - the method load_post_data (tuned version)
+ #
+ class HttpHeaders(object):
+ def __init__(self):
+ self.data = [('Content-Type', 'text/html')]
+ def __setitem__(self, key, value):
+ self.set(key, value)
+ def __delitem__(self, key):
+ self.remove(key)
+ def __contains__(self, key):
+ key = key.lower()
+ for k, v in self.data:
+ if k.lower() == key:
+ return True
+ return False
+ def add(self, key, value):
+ self.data.append((key, value))
+ def remove(self, key, count=-1):
+ removed = 0
+ data = []
+ for _key, _value in self.data:
+ if _key.lower() != key.lower():
+ if count > -1:
+ if removed >= count:
+ break
+ else:
+ removed += 1
+ data.append((_key, _value))
+ self.data = data
+ def clear(self):
+ self.data = []
+ def set(self, key, value):
+ self.remove(key)
+ self.add(key, value)
+ def get(self, key=False, httpformat=False):
+ if not key:
+ result = self.data
+ else:
+ result = []
+ for _key, _value in self.data:
+ if _key.lower() == key.lower():
+ result.append((_key, _value))
+ if httpformat:
+ return '\n'.join(['%s: %s' % item for item in result])
+ return result
+ def load_post_data(self,environ,POST,FILES):
+ length = int(environ['CONTENT_LENGTH'])
+ DATA = environ['wsgi.input'].read(length)
+ if environ.get('CONTENT_TYPE', '').startswith('multipart'):
+ lines = ['Content-Type: %s' % environ.get('CONTENT_TYPE', '')]
+ for key, value in environ.items():
+ if key.startswith('HTTP_'):
+ lines.append('%s: %s' % (key, value))
+ raw = '\r\n'.join(lines) + '\r\n\r\n' + DATA
+ msg = email.message_from_string(raw)
+ for sub in msg.get_payload():
+ if not isinstance(sub, email.Message.Message):
+ continue
+ name_dict = cgi.parse_header(sub['Content-Disposition'])[1]
+ if 'filename' in name_dict:
+                    # Nested MIME Messages are not supported
+ if type([]) == type(sub.get_payload()):
+ continue
+ if not name_dict['filename'].strip():
+ continue
+ filename = name_dict['filename']
+                    # why not keep the full filename? because IE always sends 'C:\documents and settings\blub\blub.png'
+ filename = filename[filename.rfind('\\') + 1:]
+ if 'Content-Type' in sub:
+ content_type = sub['Content-Type']
+ else:
+ content_type = None
+ s = { "name":filename, "type":content_type, "data":sub.get_payload() }
+ FILES.appendlist(name_dict['name'], s)
+ else:
+ POST.appendlist(name_dict['name'], sub.get_payload())
+ else:
+ POST.update(cgi.parse_qs(DATA,keep_blank_values=1))
+ return DATA
+
+ def __init__(self,environ,start_response,session=QWebSession):
+ self.environ=environ
+ self.start_response=start_response
+ self.buffer=[]
+
+ self.SCRIPT_NAME = environ.get('SCRIPT_NAME', '')
+ self.PATH_INFO = environ.get('PATH_INFO', '')
+ # extensions:
+ self.FULL_URL = environ['FULL_URL'] = self.get_full_url(environ)
+ # REQUEST_URI is optional, fake it if absent
+ if not environ.has_key("REQUEST_URI"):
+ environ["REQUEST_URI"]=urllib.quote(self.SCRIPT_NAME+self.PATH_INFO)
+ if environ.get('QUERY_STRING'):
+ environ["REQUEST_URI"]+='?'+environ['QUERY_STRING']
+ self.REQUEST_URI = environ["REQUEST_URI"]
+ # full quote url path before the ?
+ self.FULL_PATH = environ['FULL_PATH'] = self.REQUEST_URI.split('?')[0]
+
+ self.request_cookies=Cookie.SimpleCookie()
+ self.request_cookies.load(environ.get('HTTP_COOKIE', ''))
+
+ self.response_started=False
+ self.response_gzencode=False
+ self.response_cookies=Cookie.SimpleCookie()
+ # to delete a cookie use: c[key]['expires'] = datetime.datetime(1970, 1, 1)
+ self.response_headers=self.HttpHeaders()
+ self.response_status="200 OK"
+
+ self.php=None
+ if self.environ.has_key("php"):
+ self.php=environ["php"]
+ self.SESSION=self.php._SESSION
+ self.GET=self.php._GET
+ self.POST=self.php._POST
+ self.REQUEST=self.php._ARG
+ self.FILES=self.php._FILES
+ else:
+ if isinstance(session,QWebSession):
+ self.SESSION=session
+ elif session:
+ self.SESSION=session(environ)
+ else:
+ self.SESSION=None
+ self.GET_LIST=QWebListDict(cgi.parse_qs(environ.get('QUERY_STRING', ''),keep_blank_values=1))
+ self.POST_LIST=QWebListDict()
+ self.FILES_LIST=QWebListDict()
+ self.REQUEST_LIST=QWebListDict(self.GET_LIST)
+ if environ['REQUEST_METHOD'] == 'POST':
+ self.DATA=self.load_post_data(environ,self.POST_LIST,self.FILES_LIST)
+ self.REQUEST_LIST.update(self.POST_LIST)
+ self.GET=self.GET_LIST.get_qwebdict()
+ self.POST=self.POST_LIST.get_qwebdict()
+ self.FILES=self.FILES_LIST.get_qwebdict()
+ self.REQUEST=self.REQUEST_LIST.get_qwebdict()
+ def get_full_url(environ):
+ # taken from PEP 333
+ if 'FULL_URL' in environ:
+ return environ['FULL_URL']
+ url = environ['wsgi.url_scheme']+'://'
+ if environ.get('HTTP_HOST'):
+ url += environ['HTTP_HOST']
+ else:
+ url += environ['SERVER_NAME']
+ if environ['wsgi.url_scheme'] == 'https':
+ if environ['SERVER_PORT'] != '443':
+ url += ':' + environ['SERVER_PORT']
+ else:
+ if environ['SERVER_PORT'] != '80':
+ url += ':' + environ['SERVER_PORT']
+ if environ.has_key('REQUEST_URI'):
+ url += environ['REQUEST_URI']
+ else:
+ url += urllib.quote(environ.get('SCRIPT_NAME', ''))
+ url += urllib.quote(environ.get('PATH_INFO', ''))
+ if environ.get('QUERY_STRING'):
+ url += '?' + environ['QUERY_STRING']
+ return url
+ get_full_url=staticmethod(get_full_url)
+ def save_files(self):
+ for k,v in self.FILES.items():
+ if not v.has_key("tmp_file"):
+ f=tempfile.NamedTemporaryFile()
+ f.write(v["data"])
+ f.flush()
+ v["tmp_file"]=f
+ v["tmp_name"]=f.name
+ def debug(self):
+ body=''
+ for name,d in [
+ ("GET",self.GET), ("POST",self.POST), ("REQUEST",self.REQUEST), ("FILES",self.FILES),
+ ("GET_LIST",self.GET_LIST), ("POST_LIST",self.POST_LIST), ("REQUEST_LIST",self.REQUEST_LIST), ("FILES_LIST",self.FILES_LIST),
+ ("SESSION",self.SESSION), ("environ",self.environ),
+ ]:
+ body+='<table border="1" width="100%" align="center">\n'
+ body+='<tr><th colspan="2" align="center">%s</th></tr>\n'%name
+ keys=d.keys()
+ keys.sort()
+ body+=''.join(['<tr><td>%s</td><td>%s</td></tr>\n'%(k,cgi.escape(repr(d[k]))) for k in keys])
+ body+='</table><br><br>\n\n'
+ return body
+ def write(self,s):
+ self.buffer.append(s)
+ def echo(self,*s):
+ self.buffer.extend([str(i) for i in s])
+ def response(self):
+ if not self.response_started:
+ if not self.php:
+ for k,v in self.FILES.items():
+ if v.has_key("tmp_file"):
+ try:
+ v["tmp_file"].close()
+ except OSError:
+ pass
+ if self.response_gzencode and self.environ.get('HTTP_ACCEPT_ENCODING','').find('gzip')!=-1:
+ zbuf=StringIO.StringIO()
+ zfile=gzip.GzipFile(mode='wb', fileobj=zbuf)
+ zfile.write(''.join(self.buffer))
+ zfile.close()
+ zbuf=zbuf.getvalue()
+ self.buffer=[zbuf]
+ self.response_headers['Content-Encoding']="gzip"
+ self.response_headers['Content-Length']=str(len(zbuf))
+ headers = self.response_headers.get()
+ if isinstance(self.SESSION, QWebSession):
+ headers.extend(self.SESSION.session_get_headers())
+ headers.extend([('Set-Cookie', self.response_cookies[i].OutputString()) for i in self.response_cookies])
+ self.start_response(self.response_status, headers)
+ self.response_started=True
+ return self.buffer
+ def __iter__(self):
+ return self.response().__iter__()
+ def http_redirect(self,url,permanent=1):
+ if permanent:
+ self.response_status="301 Moved Permanently"
+ else:
+ self.response_status="302 Found"
+ self.response_headers["Location"]=url
+ def http_404(self,msg="<h1>404 Not Found</h1>"):
+ self.response_status="404 Not Found"
+ if msg:
+ self.write(msg)
+ def http_download(self,fname,fstr,partial=0):
+# allow fstr to be a file-like object
+#        if partial:
+#            say accept ranges
+# parse range headers...
+# if range:
+# header("HTTP/1.1 206 Partial Content");
+# header("Content-Range: bytes $offset-".($fsize-1)."/".$fsize);
+# header("Content-Length: ".($fsize-$offset));
+# fseek($fd,$offset);
+# else:
+ self.response_headers["Content-Type"]="application/octet-stream"
+ self.response_headers["Content-Disposition"]="attachment; filename=\"%s\""%fname
+ self.response_headers["Content-Transfer-Encoding"]="binary"
+ self.response_headers["Content-Length"]="%d"%len(fstr)
+ self.write(fstr)
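+# Illustrative sketch (hello_app is a hypothetical name, not part of qweb): a
+# minimal WSGI application built on QWebRequest.
+#
+#   def hello_app(environ, start_response):
+#       req = QWebRequest(environ, start_response, session=None)
+#       req.response_headers['Content-Type'] = 'text/plain'
+#       req.write('hello %s\n' % req.REQUEST['name'])
+#       return req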
+
+#----------------------------------------------------------
+# QWeb WSGI HTTP Server to run any WSGI app
+# autorun, run an app as FCGI or CGI otherwise launch the server
+#----------------------------------------------------------
+class QWebWSGIHandler(BaseHTTPServer.BaseHTTPRequestHandler):
+ def log_message(self,*p):
+ if self.server.log:
+ return BaseHTTPServer.BaseHTTPRequestHandler.log_message(self,*p)
+ def address_string(self):
+ return self.client_address[0]
+ def start_response(self,status,headers):
+ l=status.split(' ',1)
+ self.send_response(int(l[0]),l[1])
+ ctype_sent=0
+ for i in headers:
+ if i[0].lower()=="content-type":
+ ctype_sent=1
+ self.send_header(*i)
+ if not ctype_sent:
+ self.send_header("Content-type", "text/html")
+ self.end_headers()
+ return self.write
+ def write(self,data):
+ try:
+ self.wfile.write(data)
+ except (socket.error, socket.timeout),e:
+ print e
+ def bufferon(self):
+ if not getattr(self,'wfile_buf',0):
+ self.wfile_buf=1
+ self.wfile_bak=self.wfile
+ self.wfile=StringIO.StringIO()
+ def bufferoff(self):
+ if self.wfile_buf:
+ buf=self.wfile
+ self.wfile=self.wfile_bak
+ self.write(buf.getvalue())
+ self.wfile_buf=0
+ def serve(self,type):
+ path_info, parameters, query = urlparse.urlparse(self.path)[2:5]
+ environ = {
+ 'wsgi.version': (1,0),
+ 'wsgi.url_scheme': 'http',
+ 'wsgi.input': self.rfile,
+ 'wsgi.errors': sys.stderr,
+ 'wsgi.multithread': 0,
+ 'wsgi.multiprocess': 0,
+ 'wsgi.run_once': 0,
+ 'REQUEST_METHOD': self.command,
+ 'SCRIPT_NAME': '',
+ 'QUERY_STRING': query,
+ 'CONTENT_TYPE': self.headers.get('Content-Type', ''),
+ 'CONTENT_LENGTH': self.headers.get('Content-Length', ''),
+ 'REMOTE_ADDR': self.client_address[0],
+ 'REMOTE_PORT': str(self.client_address[1]),
+ 'SERVER_NAME': self.server.server_address[0],
+ 'SERVER_PORT': str(self.server.server_address[1]),
+ 'SERVER_PROTOCOL': self.request_version,
+ # extention
+ 'FULL_PATH': self.path,
+ 'qweb.mode': 'standalone',
+ }
+ if path_info:
+ environ['PATH_INFO'] = urllib.unquote(path_info)
+ for key, value in self.headers.items():
+ environ['HTTP_' + key.upper().replace('-', '_')] = value
+        # Hack to avoid many TCP packets
+ self.bufferon()
+ appiter=self.server.wsgiapp(environ, self.start_response)
+ for data in appiter:
+ self.write(data)
+ self.bufferoff()
+ self.bufferoff()
+ def do_GET(self):
+ self.serve('GET')
+ def do_POST(self):
+        self.serve('POST')
+class QWebWSGIServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
+ """ QWebWSGIServer
+ qweb_wsgi_autorun(wsgiapp,ip='127.0.0.1',port=8080,threaded=1)
+    A WSGI HTTP server (threaded or not) and a function to automatically run your
+    app according to the environment (either standalone, CGI or FastCGI).
+
+    This feature is called QWeb autorun. To use it in your application, add the
+    following lines at the end of the main application Python file:
+
+ if __name__ == '__main__':
+ qweb.qweb_wsgi_autorun(your_wsgi_app)
+
+    This function will select the appropriate running mode according to the
+    calling environment (HTTP server, FastCGI or CGI).
+ """
+ def __init__(self, wsgiapp, ip, port, threaded=1, log=1):
+ BaseHTTPServer.HTTPServer.__init__(self, (ip, port), QWebWSGIHandler)
+ self.wsgiapp = wsgiapp
+ self.threaded = threaded
+ self.log = log
+ def process_request(self,*p):
+ if self.threaded:
+ return SocketServer.ThreadingMixIn.process_request(self,*p)
+ else:
+ return BaseHTTPServer.HTTPServer.process_request(self,*p)
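+# Illustrative only (my_wsgi_app is a placeholder for any WSGI callable): the
+# server can also be run directly instead of through qweb_wsgi_autorun below.
+#
+#   QWebWSGIServer(my_wsgi_app, ip='127.0.0.1', port=8080, threaded=1, log=1).serve_forever()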
+def qweb_wsgi_autorun(wsgiapp,ip='127.0.0.1',port=8080,threaded=1,log=1,callback_ready=None):
+ if sys.platform=='win32':
+ fcgi=0
+ else:
+ fcgi=1
+ sock = socket.fromfd(0, socket.AF_INET, socket.SOCK_STREAM)
+ try:
+ sock.getpeername()
+ except socket.error, e:
+ if e[0] == errno.ENOTSOCK:
+ fcgi=0
+ if fcgi or os.environ.has_key('REQUEST_METHOD'):
+ import fcgi
+ fcgi.WSGIServer(wsgiapp,multithreaded=False).run()
+ else:
+ if log:
+ print 'Serving on %s:%d'%(ip,port)
+ s=QWebWSGIServer(wsgiapp,ip=ip,port=port,threaded=threaded,log=log)
+ if callback_ready:
+ callback_ready()
+ try:
+ s.serve_forever()
+ except KeyboardInterrupt,e:
+ sys.excepthook(*sys.exc_info())
+
+#----------------------------------------------------------
+# Qweb Documentation
+#----------------------------------------------------------
+def qweb_doc():
+ body=__doc__
+ for i in [QWebXml ,QWebHtml ,QWebForm ,QWebURL ,qweb_control ,QWebRequest ,QWebSession ,QWebWSGIServer ,qweb_wsgi_autorun]:
+ n=i.__name__
+ d=i.__doc__
+ body+='\n\n%s\n%s\n\n%s'%(n,'-'*len(n),d)
+ return body
+
+if __name__ == '__main__':
+    print qweb_doc()
+
+#
diff --git a/tools/ajaxterm/sarissa.js b/tools/ajaxterm/sarissa.js
new file mode 100644
index 000000000..6d13aa2e2
--- /dev/null
+++ b/tools/ajaxterm/sarissa.js
@@ -0,0 +1,647 @@
+/**
+ * ====================================================================
+ * About
+ * ====================================================================
+ * Sarissa is an ECMAScript library acting as a cross-browser wrapper for native XML APIs.
+ * The library supports Gecko based browsers like Mozilla and Firefox,
+ * Internet Explorer (5.5+ with MSXML3.0+), Konqueror, Safari and a little of Opera
+ * @version 0.9.6.1
+ * @author: Manos Batsis, mailto: mbatsis at users full stop sourceforge full stop net
+ * ====================================================================
+ * Licence
+ * ====================================================================
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 or
+ * the GNU Lesser General Public License version 2.1 as published by
+ * the Free Software Foundation (your choice between the two).
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License or GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * or GNU Lesser General Public License along with this program; if not,
+ * write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * or visit http://www.gnu.org
+ *
+ */
+/**
+ * <p>Sarissa is a utility class. Provides "static" methods for DOMDocument and
+ * XMLHTTP objects, DOM Node serialization to XML strings and other goodies.</p>
+ * @constructor
+ */
+function Sarissa(){};
+/** @private */
+Sarissa.PARSED_OK = "Document contains no parsing errors";
+/**
+ * Tells you whether transformNode and transformNodeToObject are available. This functionality
+ * is contained in sarissa_ieemu_xslt.js and is deprecated. If you want to control XSLT transformations
+ * use the XSLTProcessor
+ * @deprecated
+ * @type boolean
+ */
+Sarissa.IS_ENABLED_TRANSFORM_NODE = false;
+/**
+ * tells you whether XMLHttpRequest (or equivalent) is available
+ * @type boolean
+ */
+Sarissa.IS_ENABLED_XMLHTTP = false;
+/**
+ * tells you whether selectNodes/selectSingleNode is available
+ * @type boolean
+ */
+Sarissa.IS_ENABLED_SELECT_NODES = false;
+var _sarissa_iNsCounter = 0;
+var _SARISSA_IEPREFIX4XSLPARAM = "";
+var _SARISSA_HAS_DOM_IMPLEMENTATION = document.implementation && true;
+var _SARISSA_HAS_DOM_CREATE_DOCUMENT = _SARISSA_HAS_DOM_IMPLEMENTATION && document.implementation.createDocument;
+var _SARISSA_HAS_DOM_FEATURE = _SARISSA_HAS_DOM_IMPLEMENTATION && document.implementation.hasFeature;
+var _SARISSA_IS_MOZ = _SARISSA_HAS_DOM_CREATE_DOCUMENT && _SARISSA_HAS_DOM_FEATURE;
+var _SARISSA_IS_SAFARI = (navigator.userAgent && navigator.vendor && (navigator.userAgent.toLowerCase().indexOf("applewebkit") != -1 || navigator.vendor.indexOf("Apple") != -1));
+var _SARISSA_IS_IE = document.all && window.ActiveXObject && navigator.userAgent.toLowerCase().indexOf("msie") > -1 && navigator.userAgent.toLowerCase().indexOf("opera") == -1;
+if(!window.Node || !window.Node.ELEMENT_NODE){
+ var Node = {ELEMENT_NODE: 1, ATTRIBUTE_NODE: 2, TEXT_NODE: 3, CDATA_SECTION_NODE: 4, ENTITY_REFERENCE_NODE: 5, ENTITY_NODE: 6, PROCESSING_INSTRUCTION_NODE: 7, COMMENT_NODE: 8, DOCUMENT_NODE: 9, DOCUMENT_TYPE_NODE: 10, DOCUMENT_FRAGMENT_NODE: 11, NOTATION_NODE: 12};
+};
+
+// IE initialization
+if(_SARISSA_IS_IE){
+ // for XSLT parameter names, prefix needed by IE
+ _SARISSA_IEPREFIX4XSLPARAM = "xsl:";
+ // used to store the most recent ProgID available out of the above
+ var _SARISSA_DOM_PROGID = "";
+ var _SARISSA_XMLHTTP_PROGID = "";
+ /**
+ * Called when the Sarissa_xx.js file is parsed, to pick most recent
+ * ProgIDs for IE, then gets destroyed.
+ * @param idList an array of MSXML PROGIDs from which the most recent will be picked for a given object
+     * @param enabledList an array of two-item arrays; each holds a feature name and the highest idList index for which that feature is enabled
+ */
+ pickRecentProgID = function (idList, enabledList){
+ // found progID flag
+ var bFound = false;
+ for(var i=0; i < idList.length && !bFound; i++){
+ try{
+ var oDoc = new ActiveXObject(idList[i]);
+ o2Store = idList[i];
+ bFound = true;
+ for(var j=0;j<enabledList.length;j++)
+ if(i <= enabledList[j][1])
+ Sarissa["IS_ENABLED_"+enabledList[j][0]] = true;
+ }catch (objException){
+ // trap; try next progID
+ };
+ };
+ if (!bFound)
+            throw "Could not retrieve a valid progID of Class: " + idList[idList.length-1]+". (original exception: "+e+")";
+ idList = null;
+ return o2Store;
+ };
+ // pick best available MSXML progIDs
+ _SARISSA_DOM_PROGID = pickRecentProgID(["Msxml2.DOMDocument.5.0", "Msxml2.DOMDocument.4.0", "Msxml2.DOMDocument.3.0", "MSXML2.DOMDocument", "MSXML.DOMDocument", "Microsoft.XMLDOM"], [["SELECT_NODES", 2],["TRANSFORM_NODE", 2]]);
+ _SARISSA_XMLHTTP_PROGID = pickRecentProgID(["Msxml2.XMLHTTP.5.0", "Msxml2.XMLHTTP.4.0", "MSXML2.XMLHTTP.3.0", "MSXML2.XMLHTTP", "Microsoft.XMLHTTP"], [["XMLHTTP", 4]]);
+ _SARISSA_THREADEDDOM_PROGID = pickRecentProgID(["Msxml2.FreeThreadedDOMDocument.5.0", "MSXML2.FreeThreadedDOMDocument.4.0", "MSXML2.FreeThreadedDOMDocument.3.0"]);
+ _SARISSA_XSLTEMPLATE_PROGID = pickRecentProgID(["Msxml2.XSLTemplate.5.0", "Msxml2.XSLTemplate.4.0", "MSXML2.XSLTemplate.3.0"], [["XSLTPROC", 2]]);
+    // we don't need this anymore
+ pickRecentProgID = null;
+ //============================================
+ // Factory methods (IE)
+ //============================================
+ // see non-IE version
+ Sarissa.getDomDocument = function(sUri, sName){
+ var oDoc = new ActiveXObject(_SARISSA_DOM_PROGID);
+ // if a root tag name was provided, we need to load it in the DOM
+ // object
+ if (sName){
+            // if needed, create an artificial namespace prefix the way Moz
+ // does
+ if (sUri){
+ oDoc.loadXML("<a" + _sarissa_iNsCounter + ":" + sName + " xmlns:a" + _sarissa_iNsCounter + "=\"" + sUri + "\" />");
+ // don't use the same prefix again
+ ++_sarissa_iNsCounter;
+ }
+ else
+ oDoc.loadXML("<" + sName + "/>");
+ };
+ return oDoc;
+ };
+ // see non-IE version
+ Sarissa.getParseErrorText = function (oDoc) {
+ var parseErrorText = Sarissa.PARSED_OK;
+ if(oDoc.parseError != 0){
+ parseErrorText = "XML Parsing Error: " + oDoc.parseError.reason +
+ "\nLocation: " + oDoc.parseError.url +
+ "\nLine Number " + oDoc.parseError.line + ", Column " +
+ oDoc.parseError.linepos +
+ ":\n" + oDoc.parseError.srcText +
+ "\n";
+ for(var i = 0; i < oDoc.parseError.linepos;i++){
+ parseErrorText += "-";
+ };
+ parseErrorText += "^\n";
+ };
+ return parseErrorText;
+ };
+ // see non-IE version
+ Sarissa.setXpathNamespaces = function(oDoc, sNsSet) {
+ oDoc.setProperty("SelectionLanguage", "XPath");
+ oDoc.setProperty("SelectionNamespaces", sNsSet);
+ };
+ /**
+ * Basic implementation of Mozilla's XSLTProcessor for IE.
+ * Reuses the same XSLT stylesheet for multiple transforms
+ * @constructor
+ */
+ XSLTProcessor = function(){
+ this.template = new ActiveXObject(_SARISSA_XSLTEMPLATE_PROGID);
+ this.processor = null;
+ };
+ /**
+     * Imports the given XSLT DOM and compiles it to a reusable transform
+ * @argument xslDoc The XSLT DOMDocument to import
+ */
+ XSLTProcessor.prototype.importStylesheet = function(xslDoc){
+ // convert stylesheet to free threaded
+ var converted = new ActiveXObject(_SARISSA_THREADEDDOM_PROGID);
+ converted.loadXML(xslDoc.xml);
+ this.template.stylesheet = converted;
+ this.processor = this.template.createProcessor();
+ // (re)set default param values
+ this.paramsSet = new Array();
+ };
+ /**
+ * Transform the given XML DOM
+ * @argument sourceDoc The XML DOMDocument to transform
+ * @return The transformation result as a DOM Document
+ */
+ XSLTProcessor.prototype.transformToDocument = function(sourceDoc){
+ this.processor.input = sourceDoc;
+ var outDoc = new ActiveXObject(_SARISSA_DOM_PROGID);
+ this.processor.output = outDoc;
+ this.processor.transform();
+ return outDoc;
+ };
+ /**
+ * Set global XSLT parameter of the imported stylesheet
+ * @argument nsURI The parameter namespace URI
+ * @argument name The parameter base name
+ * @argument value The new parameter value
+ */
+ XSLTProcessor.prototype.setParameter = function(nsURI, name, value){
+ /* nsURI is optional but cannot be null */
+ if(nsURI){
+ this.processor.addParameter(name, value, nsURI);
+ }else{
+ this.processor.addParameter(name, value);
+ };
+ /* update updated params for getParameter */
+ if(!this.paramsSet[""+nsURI]){
+ this.paramsSet[""+nsURI] = new Array();
+ };
+ this.paramsSet[""+nsURI][name] = value;
+ };
+ /**
+ * Gets a parameter if previously set by setParameter. Returns null
+ * otherwise
+     * @argument nsURI The parameter namespace URI
+     * @argument name The parameter base name
+     * @return The parameter value if previously set by setParameter, null otherwise
+ */
+ XSLTProcessor.prototype.getParameter = function(nsURI, name){
+ nsURI = nsURI || "";
+ if(nsURI in this.paramsSet && name in this.paramsSet[nsURI]){
+ return this.paramsSet[nsURI][name];
+ }else{
+ return null;
+ };
+ };
+}
+else{ /* end IE initialization, try to deal with real browsers now ;-) */
+ if(_SARISSA_HAS_DOM_CREATE_DOCUMENT){
+ /**
+ * <p>Ensures the document was loaded correctly, otherwise sets the
+ * parseError to -1 to indicate something went wrong. Internal use</p>
+ * @private
+ */
+ Sarissa.__handleLoad__ = function(oDoc){
+ if (!oDoc.documentElement || oDoc.documentElement.tagName == "parsererror")
+ oDoc.parseError = -1;
+ Sarissa.__setReadyState__(oDoc, 4);
+ };
+ /**
+ * <p>Attached by an event handler to the load event. Internal use.</p>
+ * @private
+ */
+ _sarissa_XMLDocument_onload = function(){
+ Sarissa.__handleLoad__(this);
+ };
+ /**
+ * <p>Sets the readyState property of the given DOM Document object.
+ * Internal use.</p>
+ * @private
+ * @argument oDoc the DOM Document object to fire the
+ * readystatechange event
+ * @argument iReadyState the number to change the readystate property to
+ */
+ Sarissa.__setReadyState__ = function(oDoc, iReadyState){
+ oDoc.readyState = iReadyState;
+ if (oDoc.onreadystatechange != null && typeof oDoc.onreadystatechange == "function")
+ oDoc.onreadystatechange();
+ };
+ Sarissa.getDomDocument = function(sUri, sName){
+ var oDoc = document.implementation.createDocument(sUri?sUri:"", sName?sName:"", null);
+ oDoc.addEventListener("load", _sarissa_XMLDocument_onload, false);
+ return oDoc;
+ };
+ if(false && window.XMLDocument){
+ /**
+ * <p>Emulate IE's onreadystatechange attribute</p>
+ */
+ XMLDocument.prototype.onreadystatechange = null;
+ /**
+ * <p>Emulates IE's readyState property, which always gives an integer from 0 to 4:</p>
+ * <ul><li>1 == LOADING,</li>
+ * <li>2 == LOADED,</li>
+ * <li>3 == INTERACTIVE,</li>
+ * <li>4 == COMPLETED</li></ul>
+ */
+ XMLDocument.prototype.readyState = 0;
+ /**
+ * <p>Emulate IE's parseError attribute</p>
+ */
+ XMLDocument.prototype.parseError = 0;
+
+ // NOTE: setting async to false will only work with documents
+ // called over HTTP (meaning a server), not the local file system,
+ // unless you are using Moz 1.4+.
+        // BTW the try/catch block is for 1.4; I haven't found a way to check if
+        // the property is implemented without
+        // causing an error and I don't want to use user agent stuff for that...
+ var _SARISSA_SYNC_NON_IMPLEMENTED = false;// ("async" in XMLDocument.prototype) ? false: true;
+ /**
+ * <p>Keeps a handle to the original load() method. Internal use and only
+ * if Mozilla version is lower than 1.4</p>
+ * @private
+ */
+ XMLDocument.prototype._sarissa_load = XMLDocument.prototype.load;
+
+ /**
+ * <p>Overrides the original load method to provide synchronous loading for
+ * Mozilla versions prior to 1.4, using an XMLHttpRequest object (if
+ * async is set to false)</p>
+ * @returns the DOM Object as it was before the load() call (may be empty)
+ */
+ XMLDocument.prototype.load = function(sURI) {
+ var oDoc = document.implementation.createDocument("", "", null);
+ Sarissa.copyChildNodes(this, oDoc);
+ this.parseError = 0;
+ Sarissa.__setReadyState__(this, 1);
+ try {
+ if(this.async == false && _SARISSA_SYNC_NON_IMPLEMENTED) {
+ var tmp = new XMLHttpRequest();
+ tmp.open("GET", sURI, false);
+ tmp.send(null);
+ Sarissa.__setReadyState__(this, 2);
+ Sarissa.copyChildNodes(tmp.responseXML, this);
+ Sarissa.__setReadyState__(this, 3);
+ }
+ else {
+ this._sarissa_load(sURI);
+ };
+ }
+ catch (objException) {
+ this.parseError = -1;
+ }
+ finally {
+ if(this.async == false){
+ Sarissa.__handleLoad__(this);
+ };
+ };
+ return oDoc;
+ };
+
+
+ }//if(window.XMLDocument)
+ else if(document.implementation && document.implementation.hasFeature && document.implementation.hasFeature('LS', '3.0')){
+ Document.prototype.async = true;
+ Document.prototype.onreadystatechange = null;
+ Document.prototype.parseError = 0;
+ Document.prototype.load = function(sURI) {
+ var parser = document.implementation.createLSParser(this.async ? document.implementation.MODE_ASYNCHRONOUS : document.implementation.MODE_SYNCHRONOUS, null);
+ if(this.async){
+ var self = this;
+ parser.addEventListener("load",
+ function(e) {
+ self.readyState = 4;
+ Sarissa.copyChildNodes(e.newDocument, self.documentElement, false);
+ self.onreadystatechange.call();
+ },
+ false);
+ };
+ try {
+ var oDoc = parser.parseURI(sURI);
+ }
+ catch(e){
+ this.parseError = -1;
+ };
+ if(!this.async)
+ Sarissa.copyChildNodes(oDoc, this.documentElement, false);
+ return oDoc;
+ };
+ /**
+ * <p>Factory method to obtain a new DOM Document object</p>
+ * @argument sUri the namespace of the root node (if any)
+         * @argument sName the local name of the root node (if any)
+ * @returns a new DOM Document
+ */
+ Sarissa.getDomDocument = function(sUri, sName){
+ return document.implementation.createDocument(sUri?sUri:"", sName?sName:"", null);
+ };
+ };
+ };//if(_SARISSA_HAS_DOM_CREATE_DOCUMENT)
+};
+//==========================================
+// Common stuff
+//==========================================
+if(!window.DOMParser){
+ /*
+ * DOMParser is a utility class, used to construct DOMDocuments from XML strings
+ * @constructor
+ */
+ DOMParser = function() {
+ };
+ if(_SARISSA_IS_SAFARI){
+ /**
+ * Construct a new DOM Document from the given XMLstring
+ * @param sXml the given XML string
+ * @param contentType the content type of the document the given string represents (one of text/xml, application/xml, application/xhtml+xml).
+ * @return a new DOM Document from the given XML string
+ */
+ DOMParser.prototype.parseFromString = function(sXml, contentType){
+ if(contentType.toLowerCase() != "application/xml"){
+ throw "Cannot handle content type: \"" + contentType + "\"";
+ };
+ var xmlhttp = new XMLHttpRequest();
+            xmlhttp.open("GET", "data:text/xml;charset=utf-8," + encodeURIComponent(sXml), false);
+ xmlhttp.send(null);
+ return xmlhttp.responseXML;
+ };
+ }else if(Sarissa.getDomDocument && Sarissa.getDomDocument() && "loadXML" in Sarissa.getDomDocument()){
+ DOMParser.prototype.parseFromString = function(sXml, contentType){
+ var doc = Sarissa.getDomDocument();
+ doc.loadXML(sXml);
+ return doc;
+ };
+ };
+};
+
+if(window.XMLHttpRequest){
+ Sarissa.IS_ENABLED_XMLHTTP = true;
+}
+else if(_SARISSA_IS_IE){
+ /**
+ * Emulate XMLHttpRequest
+ * @constructor
+ */
+ XMLHttpRequest = function() {
+ return new ActiveXObject(_SARISSA_XMLHTTP_PROGID);
+ };
+ Sarissa.IS_ENABLED_XMLHTTP = true;
+};
+
+if(!window.document.importNode && _SARISSA_IS_IE){
+ try{
+ /**
+ * Implements importNode for the current window document in IE using innerHTML.
+ * Testing showed that DOM was multiple times slower than innerHTML for this,
+ * sorry folks. If you encounter trouble (who knows what IE does behind innerHTML)
+ * please gimme a call.
+ * @param oNode the Node to import
+ * @param bChildren whether to include the children of oNode
+ * @returns the imported node for further use
+ */
+ window.document.importNode = function(oNode, bChildren){
+ var importNode = document.createElement("div");
+ if(bChildren)
+ importNode.innerHTML = Sarissa.serialize(oNode);
+ else
+ importNode.innerHTML = Sarissa.serialize(oNode.cloneNode(false));
+ return importNode.firstChild;
+ };
+ }catch(e){};
+};
+if(!Sarissa.getParseErrorText){
+ /**
+     * <p>Returns a human readable description of the parsing error. Useful
+ * for debugging. Tip: append the returned error string in a &lt;pre&gt;
+ * element if you want to render it.</p>
+ * <p>Many thanks to Christian Stocker for the initial patch.</p>
+ * @argument oDoc The target DOM document
+ * @returns The parsing error description of the target Document in
+     * human readable form (preformatted text)
+ */
+ Sarissa.getParseErrorText = function (oDoc){
+ var parseErrorText = Sarissa.PARSED_OK;
+ if(oDoc && oDoc.parseError && oDoc.parseError != 0){
+ /*moz*/
+ if(oDoc.documentElement.tagName == "parsererror"){
+ parseErrorText = oDoc.documentElement.firstChild.data;
+ parseErrorText += "\n" + oDoc.documentElement.firstChild.nextSibling.firstChild.data;
+ }/*konq*/
+ else{
+ parseErrorText = Sarissa.getText(oDoc.documentElement);/*.getElementsByTagName("h1")[0], false) + "\n";
+ parseErrorText += Sarissa.getText(oDoc.documentElement.getElementsByTagName("body")[0], false) + "\n";
+ parseErrorText += Sarissa.getText(oDoc.documentElement.getElementsByTagName("pre")[0], false);*/
+ };
+ };
+ return parseErrorText;
+ };
+};
+Sarissa.getText = function(oNode, deep){
+ var s = "";
+ var nodes = oNode.childNodes;
+ for(var i=0; i < nodes.length; i++){
+ var node = nodes[i];
+ var nodeType = node.nodeType;
+ if(nodeType == Node.TEXT_NODE || nodeType == Node.CDATA_SECTION_NODE){
+ s += node.data;
+ }else if(deep == true
+ && (nodeType == Node.ELEMENT_NODE
+ || nodeType == Node.DOCUMENT_NODE
+ || nodeType == Node.DOCUMENT_FRAGMENT_NODE)){
+ s += Sarissa.getText(node, true);
+ };
+ };
+ return s;
+};
+if(window.XMLSerializer){
+ /**
+ * <p>Factory method to obtain the serialization of a DOM Node</p>
+ * @returns the serialized Node as an XML string
+ */
+ Sarissa.serialize = function(oDoc){
+ var s = null;
+ if(oDoc){
+ s = oDoc.innerHTML?oDoc.innerHTML:(new XMLSerializer()).serializeToString(oDoc);
+ };
+ return s;
+ };
+}else{
+ if(Sarissa.getDomDocument && (Sarissa.getDomDocument("","foo", null)).xml){
+ // see non-IE version
+ Sarissa.serialize = function(oDoc) {
+ var s = null;
+ if(oDoc){
+ s = oDoc.innerHTML?oDoc.innerHTML:oDoc.xml;
+ };
+ return s;
+ };
+ /**
+ * Utility class to serialize DOM Node objects to XML strings
+ * @constructor
+ */
+ XMLSerializer = function(){};
+ /**
+ * Serialize the given DOM Node to an XML string
+ * @param oNode the DOM Node to serialize
+ */
+ XMLSerializer.prototype.serializeToString = function(oNode) {
+ return oNode.xml;
+ };
+ };
+};
+
+/**
+ * strips tags from a markup string
+ */
+Sarissa.stripTags = function (s) {
+ return s.replace(/<[^>]+>/g,"");
+};
+/**
+ * <p>Deletes all child nodes of the given node</p>
+ * @argument oNode the Node to empty
+ */
+Sarissa.clearChildNodes = function(oNode) {
+ // need to check for firstChild due to opera 8 bug with hasChildNodes
+ while(oNode.firstChild){
+ oNode.removeChild(oNode.firstChild);
+ };
+};
+/**
+ * <p> Copies the childNodes of nodeFrom to nodeTo</p>
+ * <p> <b>Note:</b> The second object's original content is deleted before
+ * the copy operation, unless you supply a true third parameter</p>
+ * @argument nodeFrom the Node to copy the childNodes from
+ * @argument nodeTo the Node to copy the childNodes to
+ * @argument bPreserveExisting whether to preserve the original content of nodeTo, default is false
+ */
+Sarissa.copyChildNodes = function(nodeFrom, nodeTo, bPreserveExisting) {
+ if((!nodeFrom) || (!nodeTo)){
+ throw "Both source and destination nodes must be provided";
+ };
+ if(!bPreserveExisting){
+ Sarissa.clearChildNodes(nodeTo);
+ };
+ var ownerDoc = nodeTo.nodeType == Node.DOCUMENT_NODE ? nodeTo : nodeTo.ownerDocument;
+ var nodes = nodeFrom.childNodes;
+ if(ownerDoc.importNode && (!_SARISSA_IS_IE)) {
+ for(var i=0;i < nodes.length;i++) {
+ nodeTo.appendChild(ownerDoc.importNode(nodes[i], true));
+ };
+ }
+ else{
+ for(var i=0;i < nodes.length;i++) {
+ nodeTo.appendChild(nodes[i].cloneNode(true));
+ };
+ };
+};
+
+/**
+ * <p> Moves the childNodes of nodeFrom to nodeTo</p>
+ * <p> <b>Note:</b> The second object's original content is deleted before
+ * the move operation, unless you supply a true third parameter</p>
+ * @argument nodeFrom the Node to copy the childNodes from
+ * @argument nodeTo the Node to copy the childNodes to
+ * @argument bPreserveExisting whether to preserve the original content of nodeTo, default is false
+ */
+Sarissa.moveChildNodes = function(nodeFrom, nodeTo, bPreserveExisting) {
+ if((!nodeFrom) || (!nodeTo)){
+ throw "Both source and destination nodes must be provided";
+ };
+ if(!bPreserveExisting){
+ Sarissa.clearChildNodes(nodeTo);
+ };
+ var nodes = nodeFrom.childNodes;
+ // if within the same doc, just move, else copy and delete
+ if(nodeFrom.ownerDocument == nodeTo.ownerDocument){
+ while(nodeFrom.firstChild){
+ nodeTo.appendChild(nodeFrom.firstChild);
+ };
+ }else{
+ var ownerDoc = nodeTo.nodeType == Node.DOCUMENT_NODE ? nodeTo : nodeTo.ownerDocument;
+ if(ownerDoc.importNode && (!_SARISSA_IS_IE)) {
+ for(var i=0;i < nodes.length;i++) {
+ nodeTo.appendChild(ownerDoc.importNode(nodes[i], true));
+ };
+ }else{
+ for(var i=0;i < nodes.length;i++) {
+ nodeTo.appendChild(nodes[i].cloneNode(true));
+ };
+ };
+ Sarissa.clearChildNodes(nodeFrom);
+ };
+};
+
+/**
+ * <p>Serialize any object to an XML string. All properties are serialized using the property name
+ * as the XML element name. Array elements are rendered as <code>array-item</code> elements,
+ * using their index/key as the value of the <code>key</code> attribute.</p>
+ * @argument anyObject the object to serialize
+ * @argument objectName a name for that object
+ * @return the XML serialization of the given object as a string
+ */
+Sarissa.xmlize = function(anyObject, objectName, indentSpace){
+ indentSpace = indentSpace?indentSpace:'';
+ var s = indentSpace + '<' + objectName + '>';
+ var isLeaf = false;
+ if(!(anyObject instanceof Object) || anyObject instanceof Number || anyObject instanceof String
+ || anyObject instanceof Boolean || anyObject instanceof Date){
+ s += Sarissa.escape(""+anyObject);
+ isLeaf = true;
+ }else{
+ s += "\n";
+ var itemKey = '';
+ var isArrayItem = anyObject instanceof Array;
+ for(var name in anyObject){
+ s += Sarissa.xmlize(anyObject[name], (isArrayItem?"array-item key=\""+name+"\"":name), indentSpace + " ");
+ };
+ s += indentSpace;
+ };
+ return s += (objectName.indexOf(' ')!=-1?"</array-item>\n":"</" + objectName + ">\n");
+};
+
+/**
+ * Escape the characters of the given string that correspond to the five predefined XML entities
+ * @param sXml the string to escape
+ */
+Sarissa.escape = function(sXml){
+ return sXml.replace(/&/g, "&amp;")
+ .replace(/</g, "&lt;")
+ .replace(/>/g, "&gt;")
+ .replace(/"/g, "&quot;")
+ .replace(/'/g, "&apos;");
+};
+
+/**
+ * Unescape the given string. This turns occurrences of the five predefined XML
+ * entities back into the characters they represent
+ * @param sXml the string to unescape
+ */
+Sarissa.unescape = function(sXml){
+ return sXml.replace(/&apos;/g,"'")
+ .replace(/&quot;/g,"\"")
+ .replace(/&gt;/g,">")
+ .replace(/&lt;/g,"<")
+ .replace(/&amp;/g,"&");
+};
+// EOF
diff --git a/tools/ajaxterm/sarissa_dhtml.js b/tools/ajaxterm/sarissa_dhtml.js
new file mode 100644
index 000000000..2d85c817e
--- /dev/null
+++ b/tools/ajaxterm/sarissa_dhtml.js
@@ -0,0 +1,105 @@
+/**
+ * ====================================================================
+ * About
+ * ====================================================================
+ * Sarissa cross browser XML library - AJAX module
+ * @version 0.9.6.1
+ * @author: Copyright Manos Batsis, mailto: mbatsis at users full stop sourceforge full stop net
+ *
+ * This module contains some convenient AJAX tricks based on Sarissa
+ *
+ * ====================================================================
+ * Licence
+ * ====================================================================
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 or
+ * the GNU Lesser General Public License version 2.1 as published by
+ * the Free Software Foundation (your choice between the two).
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License or GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * or GNU Lesser General Public License along with this program; if not,
+ * write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * or visit http://www.gnu.org
+ *
+ */
+/**
+ * Update an element with the response of a GET request to the given URL.
+ * @addon
+ * @param sFromUrl the URL to make the request to
+ * @param oTargetElement the element to update
+ * @param xsltproc (optional) the transformer to use on the returned
+ * content before updating the target element with it
+ */
+Sarissa.updateContentFromURI = function(sFromUrl, oTargetElement, xsltproc) {
+ try{
+ oTargetElement.style.cursor = "wait";
+ var xmlhttp = new XMLHttpRequest();
+ xmlhttp.open("GET", sFromUrl);
+ function sarissa_dhtml_loadHandler() {
+ if (xmlhttp.readyState == 4) {
+ oTargetElement.style.cursor = "auto";
+ Sarissa.updateContentFromNode(xmlhttp.responseXML, oTargetElement, xsltproc);
+ };
+ };
+ xmlhttp.onreadystatechange = sarissa_dhtml_loadHandler;
+ xmlhttp.send(null);
+ oTargetElement.style.cursor = "auto";
+ }
+ catch(e){
+ oTargetElement.style.cursor = "auto";
+ throw e;
+ };
+};
+
+/**
+ * Update an element's content with the given DOM node.
+ * @addon
+ * @param oNode the DOM node to update the target element with
+ * @param oTargetElement the element to update
+ * @param xsltproc (optional) the transformer to use on the given
+ * DOM node before updating the target element with it
+ */
+Sarissa.updateContentFromNode = function(oNode, oTargetElement, xsltproc) {
+ try {
+ oTargetElement.style.cursor = "wait";
+ Sarissa.clearChildNodes(oTargetElement);
+ // check for parsing errors
+ var ownerDoc = oNode.nodeType == Node.DOCUMENT_NODE?oNode:oNode.ownerDocument;
+ if(ownerDoc.parseError && ownerDoc.parseError != 0) {
+ var pre = document.createElement("pre");
+ pre.appendChild(document.createTextNode(Sarissa.getParseErrorText(ownerDoc)));
+ oTargetElement.appendChild(pre);
+ }
+ else {
+ // transform if appropriate
+ if(xsltproc) {
+ oNode = xsltproc.transformToDocument(oNode);
+ };
+ // be smart, maybe the user wants to display the source instead
+            if(oTargetElement.tagName.toLowerCase() == "textarea" || oTargetElement.tagName.toLowerCase() == "input") {
+ oTargetElement.value = Sarissa.serialize(oNode);
+ }
+ else {
+ // ok that was not smart; it was paranoid. Keep up the good work by trying to use DOM instead of innerHTML
+ if(oNode.nodeType == Node.DOCUMENT_NODE || oNode.ownerDocument.documentElement == oNode) {
+ oTargetElement.innerHTML = Sarissa.serialize(oNode);
+ }
+ else{
+ oTargetElement.appendChild(oTargetElement.ownerDocument.importNode(oNode, true));
+ };
+ };
+ };
+ }
+ catch(e) {
+ throw e;
+ }
+ finally{
+ oTargetElement.style.cursor = "auto";
+ };
+};
+
diff --git a/tools/euca-get-ajax-console b/tools/euca-get-ajax-console
new file mode 100755
index 000000000..37060e74f
--- /dev/null
+++ b/tools/euca-get-ajax-console
@@ -0,0 +1,164 @@
+#!/usr/bin/env python
+# pylint: disable-msg=C0103
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Euca add-on to use ajax console"""
+
+import getopt
+import os
+import sys
+
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+ os.pardir,
+ os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+ sys.path.insert(0, possible_topdir)
+
+import boto
+import nova
+from boto.ec2.connection import EC2Connection
+from euca2ools import Euca2ool, InstanceValidationError, Util, ConnectionFailed
+
+usage_string = """
+Retrieves a URL to an AJAX console terminal
+
+euca-get-ajax-console [-h, --help] [--version] [--debug] instance_id
+
+REQUIRED PARAMETERS
+
+instance_id: unique identifier for the instance to retrieve the AJAX console URL for.
+
+OPTIONAL PARAMETERS
+
+"""
+
+
+# This class extends boto to add AjaxConsole functionality
+class NovaEC2Connection(EC2Connection):
+
+ def get_ajax_console(self, instance_id):
+ """
+ Retrieves a console connection for the specified instance.
+
+ :type instance_id: string
+ :param instance_id: The instance ID of a running instance on the cloud.
+
+ :rtype: :class:`AjaxConsole`
+ """
+
+ class AjaxConsole:
+ def __init__(self, parent=None):
+ self.parent = parent
+ self.instance_id = None
+ self.url = None
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'instanceId':
+ self.instance_id = value
+ elif name == 'url':
+ self.url = value
+ else:
+ setattr(self, name, value)
+
+ params = {}
+ self.build_list_params(params, [instance_id], 'InstanceId')
+ return self.get_object('GetAjaxConsole', params, AjaxConsole)
+ pass
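+# Illustrative only (credentials and instance id are placeholders): once
+# boto.connect_ec2 is overridden below, a caller could do:
+#
+#   conn = boto.connect_ec2(aws_access_key_id='...', aws_secret_access_key='...')
+#   console = conn.get_ajax_console('i-00000001')
+#   print console.url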
+
+
+def override_connect_ec2(aws_access_key_id=None,
+ aws_secret_access_key=None, **kwargs):
+ return NovaEC2Connection(aws_access_key_id,
+ aws_secret_access_key, **kwargs)
+
+# override boto's connect_ec2 method, so that we can use NovaEC2Connection
+boto.connect_ec2 = override_connect_ec2
+
+
+def usage(status=1):
+ print usage_string
+ Util().usage()
+ sys.exit(status)
+
+
+def version():
+ print Util().version()
+ sys.exit()
+
+
+def display_console_output(console_output):
+ print console_output.instance_id
+ print console_output.timestamp
+ print console_output.output
+
+
+def display_ajax_console_output(console_output):
+ print console_output.url
+
+
+def main():
+ try:
+ euca = Euca2ool()
+ except Exception, e:
+ print e
+ usage()
+
+ instance_id = None
+
+ for name, value in euca.opts:
+ if name in ('-h', '--help'):
+ usage(0)
+ elif name == '--version':
+ version()
+ elif name == '--debug':
+ debug = True
+
+ for arg in euca.args:
+ instance_id = arg
+ break
+
+ if instance_id:
+ try:
+ euca.validate_instance_id(instance_id)
+ except InstanceValidationError:
+ print 'Invalid instance id'
+ sys.exit(1)
+
+ try:
+ euca_conn = euca.make_connection()
+ except ConnectionFailed, e:
+ print e.message
+ sys.exit(1)
+ try:
+ console_output = euca_conn.get_ajax_console(instance_id)
+ except Exception, ex:
+ euca.display_error_and_exit('%s' % ex)
+
+ display_ajax_console_output(console_output)
+ else:
+ print 'instance_id must be specified'
+ usage()
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/install_venv.py b/tools/install_venv.py
index 32c372352..4e3941210 100644
--- a/tools/install_venv.py
+++ b/tools/install_venv.py
@@ -66,7 +66,8 @@ def check_dependencies():
# Try installing it via easy_install...
if HAS_EASY_INSTALL:
print 'Installing virtualenv via easy_install...',
- if not run_command(['which', 'easy_install']):
+ if not (run_command(['which', 'easy_install']) and
+ run_command(['easy_install', 'virtualenv'])):
die('ERROR: virtualenv not found.\n\nNova development requires virtualenv,'
' please install it using your favorite package management tool')
print 'done.'