summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rwxr-xr-xCA/geninter.sh6
-rw-r--r--README20
-rwxr-xr-xbin/dhcpleasor.py92
-rwxr-xr-xbin/nova-api37
-rwxr-xr-xbin/nova-compute38
-rwxr-xr-xbin/nova-import-canonical-imagestore82
-rwxr-xr-xbin/nova-instancemonitor55
-rwxr-xr-xbin/nova-manage116
-rwxr-xr-xbin/nova-objectstore29
-rwxr-xr-xbin/nova-volume33
-rwxr-xr-xbuilddeb.sh22
-rw-r--r--debian/changelog230
-rw-r--r--debian/control116
-rw-r--r--debian/nova-api.conf5
-rw-r--r--debian/nova-api.init4
-rw-r--r--debian/nova-api.install1
-rw-r--r--debian/nova-common.dirs11
-rw-r--r--debian/nova-common.install10
-rw-r--r--debian/nova-compute.conf10
-rw-r--r--debian/nova-compute.init4
-rw-r--r--debian/nova-compute.install1
-rw-r--r--debian/nova-instancemonitor.init69
-rw-r--r--debian/nova-instancemonitor.install1
-rw-r--r--debian/nova-manage.conf4
-rw-r--r--debian/nova-objectstore.conf7
-rw-r--r--debian/nova-objectstore.init4
-rw-r--r--debian/nova-objectstore.install2
-rw-r--r--debian/nova-objectstore.links1
-rw-r--r--debian/nova-objectstore.nginx.conf17
-rw-r--r--debian/nova-volume.conf7
-rw-r--r--debian/nova-volume.init4
-rw-r--r--debian/nova-volume.install1
-rw-r--r--docs/architecture.rst26
-rw-r--r--docs/auth.rst26
-rw-r--r--docs/binaries.rst26
-rw-r--r--docs/compute.rst26
-rw-r--r--docs/conf.py2
-rw-r--r--docs/endpoint.rst26
-rw-r--r--docs/fakes.rst26
-rw-r--r--docs/getting.started.rst129
-rw-r--r--docs/index.rst27
-rw-r--r--docs/modules.rst26
-rw-r--r--docs/network.rst26
-rw-r--r--docs/nova.rst26
-rw-r--r--docs/objectstore.rst26
-rw-r--r--docs/packages.rst26
-rw-r--r--docs/storage.rst26
-rw-r--r--docs/volume.rst26
-rw-r--r--nova/__init__.py30
-rw-r--r--nova/adminclient.py63
-rw-r--r--nova/auth/__init__.py31
-rw-r--r--nova/auth/fakeldap.py291
-rw-r--r--nova/auth/novarc.template24
-rw-r--r--nova/auth/rbac.py55
-rw-r--r--nova/auth/signer.py32
-rwxr-xr-xnova/auth/slap.sh57
-rw-r--r--nova/auth/users.py473
-rw-r--r--nova/cloudpipe/__init__.py31
-rw-r--r--nova/cloudpipe/api.py61
-rwxr-xr-xnova/cloudpipe/bootscript.sh65
-rw-r--r--nova/cloudpipe/client.ovpn.template47
-rw-r--r--nova/cloudpipe/pipelib.py102
-rw-r--r--nova/compute/__init__.py30
-rw-r--r--nova/compute/disk.py126
-rw-r--r--nova/compute/exception.py30
-rw-r--r--nova/compute/fakevirtinstance.xml30
-rw-r--r--nova/compute/interfaces.template18
-rw-r--r--nova/compute/libvirt.xml.template30
-rw-r--r--nova/compute/linux_net.py32
-rw-r--r--nova/compute/model.py245
-rw-r--r--nova/compute/monitor.py516
-rw-r--r--nova/compute/network.py341
-rw-r--r--nova/compute/node.py453
-rw-r--r--nova/crypto.py46
-rw-r--r--nova/datastore.py461
-rw-r--r--nova/endpoint/__init__.py30
-rw-r--r--nova/endpoint/admin.py78
-rwxr-xr-xnova/endpoint/api.py30
-rw-r--r--nova/endpoint/cloud.py399
-rw-r--r--nova/endpoint/images.py23
-rw-r--r--nova/exception.py27
-rw-r--r--nova/fakerabbit.py23
-rw-r--r--nova/fakevirt.py23
-rw-r--r--nova/flags.py28
-rw-r--r--nova/objectstore/__init__.py30
-rw-r--r--nova/objectstore/bucket.py23
-rw-r--r--nova/objectstore/handler.py60
-rw-r--r--nova/objectstore/image.py91
-rw-r--r--nova/objectstore/stored.py30
-rw-r--r--nova/process.py40
-rw-r--r--nova/rpc.py94
-rw-r--r--nova/server.py29
-rw-r--r--nova/test.py55
-rw-r--r--nova/tests/__init__.py30
-rw-r--r--nova/tests/access_unittest.py166
-rw-r--r--nova/tests/api_integration.py24
-rw-r--r--nova/tests/api_unittest.py36
-rw-r--r--nova/tests/cloud_unittest.py26
-rw-r--r--nova/tests/datastore_unittest.py60
-rw-r--r--nova/tests/fake_flags.py32
-rw-r--r--nova/tests/future_unittest.py29
-rw-r--r--nova/tests/keeper_unittest.py57
-rw-r--r--nova/tests/model_unittest.py205
-rw-r--r--nova/tests/network_unittest.py167
-rw-r--r--nova/tests/node_unittest.py30
-rw-r--r--nova/tests/objectstore_unittest.py27
-rw-r--r--nova/tests/process_unittest.py23
-rw-r--r--nova/tests/real_flags.py30
-rw-r--r--nova/tests/storage_unittest.py119
-rw-r--r--nova/tests/users_unittest.py65
-rw-r--r--nova/tests/validator_unittest.py45
-rw-r--r--nova/twistd.py29
-rw-r--r--nova/utils.py52
-rw-r--r--nova/validate.py88
-rw-r--r--nova/vendor.py29
-rw-r--r--nova/volume/__init__.py30
-rw-r--r--nova/volume/storage.py234
-rw-r--r--run_tests.py37
-rw-r--r--setup.py31
-rw-r--r--smoketests/__init__.py33
-rw-r--r--smoketests/flags.py47
-rw-r--r--smoketests/novatestcase.py132
-rw-r--r--smoketests/openwrt-x86-ext2.imagebin0 -> 4612608 bytes
-rw-r--r--smoketests/openwrt-x86-vmlinuzbin0 -> 1169948 bytes
-rw-r--r--smoketests/smoketest.py568
-rwxr-xr-xtools/clean-vlans25
-rw-r--r--vendor/Twisted-10.0.0/twisted/internet/_sigchld.c101
-rw-r--r--vendor/Twisted-10.0.0/twisted/internet/_signals.py184
-rw-r--r--vendor/Twisted-10.0.0/twisted/internet/base.py34
-rw-r--r--vendor/Twisted-10.0.0/twisted/internet/gtk2reactor.py27
-rw-r--r--vendor/Twisted-10.0.0/twisted/internet/posixbase.py140
-rw-r--r--vendor/Twisted-10.0.0/twisted/internet/test/reactormixins.py15
-rw-r--r--vendor/Twisted-10.0.0/twisted/internet/test/test_process.py49
-rw-r--r--vendor/Twisted-10.0.0/twisted/internet/test/test_sigchld.py194
-rw-r--r--vendor/Twisted-10.0.0/twisted/topfiles/733.bugfix4
-rw-r--r--vendor/Twisted-10.0.0/twisted/topfiles/setup.py8
-rw-r--r--vendor/tornado/demos/appengine/blog.py18
-rwxr-xr-xvendor/tornado/demos/auth/authdemo.py18
-rwxr-xr-xvendor/tornado/demos/blog/blog.py18
-rwxr-xr-xvendor/tornado/demos/chat/chatdemo.py18
-rwxr-xr-xvendor/tornado/demos/facebook/facebook.py18
-rw-r--r--vendor/tornado/demos/facebook/uimodules.py18
-rwxr-xr-xvendor/tornado/demos/helloworld/helloworld.py18
-rw-r--r--vendor/tornado/setup.py18
-rw-r--r--vendor/tornado/tornado/__init__.py18
-rw-r--r--vendor/tornado/tornado/auth.py18
-rw-r--r--vendor/tornado/tornado/autoreload.py18
-rw-r--r--vendor/tornado/tornado/database.py18
-rw-r--r--vendor/tornado/tornado/escape.py18
-rw-r--r--vendor/tornado/tornado/httpclient.py18
-rw-r--r--vendor/tornado/tornado/httpserver.py18
-rw-r--r--vendor/tornado/tornado/ioloop.py18
-rw-r--r--vendor/tornado/tornado/iostream.py18
-rw-r--r--vendor/tornado/tornado/locale.py18
-rw-r--r--vendor/tornado/tornado/options.py18
-rw-r--r--vendor/tornado/tornado/s3server.py18
-rw-r--r--vendor/tornado/tornado/template.py18
-rw-r--r--vendor/tornado/tornado/web.py18
-rw-r--r--vendor/tornado/tornado/websocket.py18
-rw-r--r--vendor/tornado/tornado/wsgi.py18
-rw-r--r--vendor/tornado/website/website.py18
161 files changed, 7561 insertions, 2519 deletions
diff --git a/CA/geninter.sh b/CA/geninter.sh
index ad3332ad9..2aa64a842 100755
--- a/CA/geninter.sh
+++ b/CA/geninter.sh
@@ -16,7 +16,7 @@
# ARG is the id of the user
-
+export SUBJ=/C=US/ST=California/L=Mountain View/O=Anso Labs/OU=Nova Dev/CN=customer-intCA-$3
mkdir INTER/$1
cd INTER/$1
cp ../../openssl.cnf.tmpl openssl.cnf
@@ -25,6 +25,6 @@ mkdir certs crl newcerts private
echo "10" > serial
touch index.txt
openssl genrsa -out private/cakey.pem 1024 -config ./openssl.cnf -batch -nodes
-openssl req -new -sha1 -key private/cakey.pem -out ../../reqs/inter$1.csr -batch -subj "/C=US/ST=California/L=Mountain View/O=Anso Labs/OU=Nova Dev/CN=customer-intCA-$1"
+openssl req -new -sha2 -key private/cakey.pem -out ../../reqs/inter$1.csr -batch -subj "$SUBJ"
cd ../../
-openssl ca -extensions v3_ca -days 365 -out INTER/$1/cacert.pem -in reqs/inter$1.csr -config openssl.cnf -batch \ No newline at end of file
+openssl ca -extensions v3_ca -days 365 -out INTER/$1/cacert.pem -in reqs/inter$1.csr -config openssl.cnf -batch
diff --git a/README b/README
new file mode 100644
index 000000000..f7d21f400
--- /dev/null
+++ b/README
@@ -0,0 +1,20 @@
+The Choose Your Own Adventure README for Nova:
+
+ You have come across a cloud computing fabric controller. It has identified
+ itself as "Nova." It is apparent that it maintains compatability with
+ the popular Amazon EC2 and S3 APIs.
+
+To monitor it from a distance: follow @novacc on twitter
+
+To tame it for use in your own cloud: read http://docs.novacc.org/getting.started.html
+
+To study its anatomy: read http://docs.novacc.org/architecture.html
+
+To disect it in detail: visit http://github.com/nova/cc
+
+To taunt it with its weaknesses: use http://github.com/nova/cc/issues
+
+To hack at it: read HACKING
+
+To watch it: http://test.novacc.org/waterfall
+
diff --git a/bin/dhcpleasor.py b/bin/dhcpleasor.py
new file mode 100755
index 000000000..30f8fbdc3
--- /dev/null
+++ b/bin/dhcpleasor.py
@@ -0,0 +1,92 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+dhcpleasor.py
+
+Handle lease database updates from DHCP servers.
+"""
+
+import sys
+import os
+import logging
+sys.path.append(os.path.abspath(os.path.join(__file__, "../../")))
+
+logging.debug(sys.path)
+import getopt
+from os import environ
+from nova.compute import linux_net
+from nova.compute import network
+from nova import rpc
+
+from nova import flags
+FLAGS = flags.FLAGS
+
+
+def add_lease(mac, ip, hostname, interface):
+ if FLAGS.fake_rabbit:
+ network.lease_ip(ip)
+ else:
+ rpc.cast(FLAGS.cloud_topic, {"method": "lease_ip",
+ "args" : {"address": ip}})
+
+def old_lease(mac, ip, hostname, interface):
+ logging.debug("Adopted old lease or got a change of mac/hostname")
+
+def del_lease(mac, ip, hostname, interface):
+ if FLAGS.fake_rabbit:
+ network.release_ip(ip)
+ else:
+ rpc.cast(FLAGS.cloud_topic, {"method": "release_ip",
+ "args" : {"address": ip}})
+
+def init_leases(interface):
+ net = network.get_network_by_interface(interface)
+ res = ""
+ for host_name in net.hosts:
+ res += "%s\n" % linux_net.hostDHCP(net, host_name, net.hosts[host_name])
+ return res
+
+
+def main(argv=None):
+ if argv is None:
+ argv = sys.argv
+ interface = environ.get('DNSMASQ_INTERFACE', 'br0')
+ if int(environ.get('TESTING', '0')):
+ FLAGS.fake_rabbit = True
+ FLAGS.redis_db = 8
+ FLAGS.network_size = 32
+ FLAGS.fake_libvirt=True
+ FLAGS.fake_network=True
+ FLAGS.fake_users = True
+ action = argv[1]
+ if action in ['add','del','old']:
+ mac = argv[2]
+ ip = argv[3]
+ hostname = argv[4]
+ logging.debug("Called %s for mac %s with ip %s and hostname %s on interface %s" % (action, mac, ip, hostname, interface))
+ globals()[action+'_lease'](mac, ip, hostname, interface)
+ else:
+ print init_leases(interface)
+ exit(0)
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/bin/nova-api b/bin/nova-api
index 8fea1da4d..1bef778c5 100755
--- a/bin/nova-api
+++ b/bin/nova-api
@@ -1,21 +1,26 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
"""
- Tornado daemon for the main API endpoint.
+Tornado daemon for the main API endpoint.
"""
import logging
@@ -29,6 +34,7 @@ from nova import rpc
from nova import server
from nova import utils
from nova.auth import users
+from nova.compute import model
from nova.endpoint import admin
from nova.endpoint import api
from nova.endpoint import cloud
@@ -37,12 +43,11 @@ FLAGS = flags.FLAGS
def main(_argv):
- user_manager = users.UserManager()
controllers = {
'Cloud': cloud.CloudController(),
- 'Admin': admin.AdminController(user_manager)
+ 'Admin': admin.AdminController()
}
- _app = api.APIServerApplication(user_manager, controllers)
+ _app = api.APIServerApplication(controllers)
conn = rpc.Connection.instance()
consumer = rpc.AdapterConsumer(connection=conn,
diff --git a/bin/nova-compute b/bin/nova-compute
index aa90f2c3d..1b438f6a7 100755
--- a/bin/nova-compute
+++ b/bin/nova-compute
@@ -1,23 +1,28 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
"""
Twistd daemon for the nova compute nodes.
Receives messages via AMQP, manages pool of worker threads
- for async tasks.
+ for async tasks.
"""
import logging
@@ -70,15 +75,16 @@ def main():
topic='%s.%s' % (FLAGS.compute_topic, FLAGS.node_name),
proxy=n)
- # heartbeat = task.LoopingCall(n.report_state)
- # heartbeat.start(interval=FLAGS.node_report_state_interval, now=False)
+ bin_name = os.path.basename(__file__)
+ pulse = task.LoopingCall(n.report_state, FLAGS.node_name, bin_name)
+ pulse.start(interval=FLAGS.node_report_state_interval, now=False)
injected = consumer_all.attach_to_twisted()
injected = consumer_node.attach_to_twisted()
# This is the parent service that twistd will be looking for when it
# parses this file, return it so that we can get it into globals below
- application = service.Application('nova-compute')
+ application = service.Application(bin_name)
n.setServiceParent(application)
return application
diff --git a/bin/nova-import-canonical-imagestore b/bin/nova-import-canonical-imagestore
new file mode 100755
index 000000000..804b0e272
--- /dev/null
+++ b/bin/nova-import-canonical-imagestore
@@ -0,0 +1,82 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+"""
+ Download images from Canonical Image Store
+"""
+
+import json
+import os
+import tempfile
+import shutil
+import subprocess
+import sys
+import urllib2
+
+from nova.objectstore import image
+from nova import flags
+from nova import utils
+
+FLAGS = flags.FLAGS
+
+api_url = 'https://imagestore.canonical.com/api/dashboard'
+
+image_cache = None
+def images():
+ global image_cache
+ if not image_cache:
+ try:
+ images = json.load(urllib2.urlopen(api_url))['images']
+ image_cache = [i for i in images if i['title'].find('amd64') > -1]
+ except Exception:
+ print 'unable to download canonical image list'
+ sys.exit(1)
+ return image_cache
+
+# FIXME(ja): add checksum/signature checks
+def download(img):
+ tempdir = tempfile.mkdtemp(prefix='cis-')
+
+ kernel_id = None
+ ramdisk_id = None
+
+ for f in img['files']:
+ if f['kind'] == 'kernel':
+ dest = os.path.join(tempdir, 'kernel')
+ subprocess.call(['curl', f['url'], '-o', dest])
+ kernel_id = image.Image.add(dest,
+ description='kernel/' + img['title'], kernel=True)
+
+ for f in img['files']:
+ if f['kind'] == 'ramdisk':
+ dest = os.path.join(tempdir, 'ramdisk')
+ subprocess.call(['curl', f['url'], '-o', dest])
+ ramdisk_id = image.Image.add(dest,
+ description='ramdisk/' + img['title'], ramdisk=True)
+
+ for f in img['files']:
+ if f['kind'] == 'image':
+ dest = os.path.join(tempdir, 'image')
+ subprocess.call(['curl', f['url'], '-o', dest])
+ ramdisk_id = image.Image.add(dest,
+ description=img['title'], kernel=kernel_id, ramdisk=ramdisk_id)
+
+ shutil.rmtree(tempdir)
+
+def main():
+ utils.default_flagfile()
+ argv = FLAGS(sys.argv)
+
+ if len(argv) == 2:
+ for img in images():
+ if argv[1] == 'all' or argv[1] == img['title']:
+ download(img)
+ else:
+ print 'usage: %s (title|all)'
+ print 'available images:'
+ for image in images():
+ print image['title']
+
+if __name__ == '__main__':
+ main()
+
diff --git a/bin/nova-instancemonitor b/bin/nova-instancemonitor
new file mode 100755
index 000000000..1f22b424e
--- /dev/null
+++ b/bin/nova-instancemonitor
@@ -0,0 +1,55 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+ Daemon for Nova RRD based instance resource monitoring.
+"""
+
+import logging
+
+from nova import vendor
+from twisted.internet import task
+from twisted.application import service
+
+from nova import twistd
+from nova.compute import monitor
+
+logging.getLogger('boto').setLevel(logging.WARN)
+
+def main():
+ logging.warn('Starting instance monitor')
+ m = monitor.InstanceMonitor()
+
+ # This is the parent service that twistd will be looking for when it
+ # parses this file, return it so that we can get it into globals below
+ application = service.Application('nova-instancemonitor')
+ m.setServiceParent(application)
+ return application
+
+if __name__ == '__main__':
+ twistd.serve(__file__)
+
+if __name__ == '__builtin__':
+ application = main()
+
+
+
+
diff --git a/bin/nova-manage b/bin/nova-manage
index 765eb1f53..12e4c9324 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -1,19 +1,24 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Copyright 2010 Anso Labs, LLC
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
"""
CLI interface for nova management.
Connects to the running ADMIN api in the api daemon.
@@ -25,11 +30,87 @@ from nova import flags
from nova import utils
from nova.auth import users
from nova.compute import model
+from nova.compute import network
+from nova.cloudpipe import pipelib
from nova.endpoint import cloud
import time
FLAGS = flags.FLAGS
+class NetworkCommands(object):
+ def restart(self):
+ network.restart_nets()
+
+class VpnCommands(object):
+ def __init__(self):
+ self.manager = users.UserManager.instance()
+ self.instdir = model.InstanceDirectory()
+ self.pipe = pipelib.CloudPipe(cloud.CloudController())
+
+ def list(self):
+ print "%-12s\t" % 'project',
+ print "%-12s\t" % 'ip:port',
+ print "%s" % 'state'
+ for project in self.manager.get_projects():
+ print "%-12s\t" % project.name,
+ print "%s:%s\t" % (project.vpn_ip, project.vpn_port),
+
+ vpn = self.__vpn_for(project.id)
+ if vpn:
+ out, err = utils.execute("ping -c1 -w1 %s > /dev/null; echo $?" % vpn['private_dns_name'])
+ if out.strip() == '0':
+ net = 'up'
+ else:
+ net = 'down'
+ print vpn['private_dns_name'],
+ print vpn['node_name'],
+ print vpn['instance_id'],
+ print vpn['state_description'],
+ print net
+
+ else:
+ print None
+
+ def __vpn_for(self, project_id):
+ for instance in self.instdir.all:
+ if (instance.state.has_key('image_id')
+ and instance['image_id'] == FLAGS.vpn_image_id
+ and not instance['state_description'] in ['shutting_down', 'shutdown']
+ and instance['project_id'] == project_id):
+ return instance
+
+ def spawn(self):
+ for p in reversed(self.manager.get_projects()):
+ if not self.__vpn_for(p.id):
+ print 'spawning %s' % p.id
+ self.pipe.launch_vpn_instance(p.id)
+ time.sleep(10)
+
+ def run(self, project_id):
+ self.pipe.launch_vpn_instance(project_id)
+
+class RoleCommands(object):
+ def __init__(self):
+ self.manager = users.UserManager.instance()
+
+ def add(self, user, role, project=None):
+ """adds role to user
+ if project is specified, adds project specific role
+ arguments: user, role [project]"""
+ self.manager.add_role(user, role, project)
+
+ def has(self, user, role, project=None):
+ """checks to see if user has role
+ if project is specified, returns True if user has
+ the global role and the project role
+ arguments: user, role [project]"""
+ print self.manager.has_role(user, role, project)
+
+ def remove(self, user, role, project=None):
+ """removes role from user
+ if project is specified, removes project specific role
+ arguments: user, role [project]"""
+ self.manager.remove_role(user, role, project)
class UserCommands(object):
def __init__(self):
@@ -75,6 +156,11 @@ class ProjectCommands(object):
def __init__(self):
self.manager = users.UserManager.instance()
+ def add(self, project, user):
+ """adds user to project
+ arguments: project user"""
+ self.manager.add_to_project(user, project)
+
def create(self, name, project_manager, description=None):
"""creates a new project
arguments: name project_manager [description]"""
@@ -91,6 +177,11 @@ class ProjectCommands(object):
for project in self.manager.get_projects():
print project.name
+ def remove(self, project, user):
+ """removes user from project
+ arguments: project user"""
+ self.manager.remove_from_project(user, project)
+
def zip(self, project_id, user_id, filename='nova.zip'):
"""exports credentials for user to a zip file
arguments: project_id user_id [filename='nova.zip]"""
@@ -107,8 +198,11 @@ def usage(script_name):
categories = [
+ ('network', NetworkCommands),
('user', UserCommands),
('project', ProjectCommands),
+ ('role', RoleCommands),
+ ('vpn', VpnCommands),
]
@@ -126,7 +220,7 @@ def methods_of(obj):
if __name__ == '__main__':
- utils.default_flagfile()
+ utils.default_flagfile('/etc/nova/nova-manage.conf')
argv = FLAGS(sys.argv)
script_name = argv.pop(0)
if len(argv) < 1:
@@ -173,7 +267,9 @@ if __name__ == '__main__':
# call the action with the remaining arguments
try:
fn(*argv)
+ sys.exit(0)
except TypeError:
print "Wrong number of arguments supplied"
print "%s %s: %s" % (category, action, fn.__doc__)
+ sys.exit(2)
diff --git a/bin/nova-objectstore b/bin/nova-objectstore
index 38a23f1ff..f6d7b1270 100755
--- a/bin/nova-objectstore
+++ b/bin/nova-objectstore
@@ -1,19 +1,24 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
"""
Tornado daemon for nova objectstore. Supports S3 API.
"""
diff --git a/bin/nova-volume b/bin/nova-volume
index e36954cd3..2720e12cd 100755
--- a/bin/nova-volume
+++ b/bin/nova-volume
@@ -1,19 +1,24 @@
#!/usr/bin/env python
-# vim: tabstop=4 shiftwidth=4 softtabstop
-
-# Copyright [2010] [Anso Labs, LLC]
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
"""
Tornado Storage daemon manages AoE volumes via AMQP messaging.
"""
diff --git a/builddeb.sh b/builddeb.sh
new file mode 100755
index 000000000..2fb20da2a
--- /dev/null
+++ b/builddeb.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+dpkg-buildpackage -b -rfakeroot -tc -uc -D
diff --git a/debian/changelog b/debian/changelog
index 2b226e048..31dd5e91e 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,6 +1,232 @@
-nova (0.3.0-1) UNRELEASED; urgency=low
+nova (0.2.3-1) UNRELEASED; urgency=low
+
+ * Relax the Twisted dependency to python-twisted-core (rather than the
+ full stack).
+ * Move nova related configuration files into /etc/nova/.
+ * Add a dependency on nginx from nova-objectstore and install a
+ suitable configuration file.
+ * Ship the CA directory in nova-common.
+ * Add a default flag file for nova-manage to help it find the CA.
+ * If set, pass KernelId and RamdiskId from RunInstances call to the
+ target compute node.
+ * Added --network_path setting to nova-compute's flagfile.
+ * Move templates from python directories to /usr/share/nova.
+ * Add debian/nova-common.dirs to create
+ var/lib/nova/{buckets,CA,images,instances,keys,networks}
+ * Don't pass --daemonize=1 to nova-compute. It's already daemonising
+ by default.
+
+ -- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 14 Jul 2010 12:00:00 -0700
+
+nova (0.2.2-10) UNRELEASED; urgency=low
+
+ * Fixed extra space in vblade-persist
+
+ -- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 13 Jul 2010 19:00:00 -0700
+
+nova (0.2.2-9) UNRELEASED; urgency=low
+
+ * Fixed invalid dn bug in ldap for adding roles
+
+ -- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 12 Jul 2010 15:20:00 -0700
+
+nova (0.2.2-8) UNRELEASED; urgency=low
+
+ * Added a missing comma
+
+ -- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 08 Jul 2010 10:05:00 -0700
+
+nova (0.2.2-7) UNRELEASED; urgency=low
+
+ * Missing files from twisted patch
+ * License updates
+ * Reformatting/cleanup
+ * Users/ldap bugfixes
+ * Merge fixes
+ * Documentation updates
+ * Vpn key creation fix
+ * Multiple shelves for volumes
+
+ -- Vishvananda Ishaya <vishvananda@gmail.com> Wed, 07 Jul 2010 18:45:00 -0700
+
+nova (0.2.2-6) UNRELEASED; urgency=low
+
+ * Fix to make Key Injection work again
+
+ -- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 14 Jun 2010 21:35:00 -0700
+
+nova (0.2.2-5) UNRELEASED; urgency=low
+
+ * Lowered message callback frequency to stop compute and volume
+ from eating tons of cpu
+
+ -- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 14 Jun 2010 14:15:00 -0700
+
+nova (0.2.2-4) UNRELEASED; urgency=low
+
+ * Documentation fixes
+ * Uncaught exceptions now log properly
+ * Nova Manage zip exporting works again
+ * Twisted threads no longer interrupt system calls
+
+ -- Vishvananda Ishaya <vishvananda@gmail.com> Sun, 13 Jun 2010 01:40:00 -0700
+
+nova (0.2.2-3) UNRELEASED; urgency=low
+
+ * Fixes to api calls
+ * More accurate documentation
+ * Removal of buggy multiprocessing
+ * Asynchronous execution of shell commands
+ * Fix of messaging race condition
+ * Test redis database cleaned out on each run of tests
+ * Smoketest updates
+
+ -- Vishvananda Ishaya <vishvananda@gmail.com> Fri, 12 Jun 2010 20:10:00 -0700
+
+nova (0.2.2-2) UNRELEASED; urgency=low
+
+ * Bugfixes to volume code
+ * Instances no longer use keeper
+ * Sectors off by one fix
+ * State reported properly by instances
+
+ -- Vishvananda Ishaya <vishvananda@gmail.com> Wed, 03 Jun 2010 15:21:00 -0700
+
+nova (0.2.2-1) UNRELEASED; urgency=low
+
+ * First release based on nova/cc
+ * Major rewrites to volumes and instances
+ * Addition of cloudpipe and rbac
+ * Major bugfixes
+
+ -- Vishvananda Ishaya <vishvananda@gmail.com> Wed, 02 Jun 2010 17:42:00 -0700
+
+nova (0.2.1-1) UNRELEASED; urgency=low
+
+ * Support ephemeral (local) space for instances
+ * instance related fixes
+ * fix network & cloudpipe bugs
+
+ -- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 25 May 2010 12:14:00 -0700
+
+nova (0.2.0-20) UNRELEASED; urgency=low
+
+ * template files are in proper folder
+
+ -- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 25 May 2010 12:14:00 -0700
+
+nova (0.2.0-19) UNRELEASED; urgency=low
+
+ * removed mox dependency and added templates to install
+
+ -- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 25 May 2010 11:53:00 -0700
+
+nova (0.2.0-18) UNRELEASED; urgency=low
+
+ * api server properly sends instance status code
+
+ -- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 24 May 2010 17:18:00 -0700
+
+nova (0.2.0-17) UNRELEASED; urgency=low
+
+ * redis-backed datastore
+
+ -- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 24 May 2010 16:28:00 -0700
+
+nova (0.2.0-16) UNRELEASED; urgency=low
+
+ * make sure twistd.pid is really overridden
+
+ -- Manish Singh <yosh@gimp.org> Sun, 23 May 2010 22:18:47 -0700
+
+nova (0.2.0-15) UNRELEASED; urgency=low
+
+ * rpc shouldn't require tornado unless you are using attach_to_tornado
+
+ -- Jesse Andrews <anotherjesse@gmail.com> Sun, 23 May 2010 21:59:00 -0700
+
+nova (0.2.0-14) UNRELEASED; urgency=low
+
+ * quicky init scripts for the other services, based on nova-objectstore
+
+ -- Manish Singh <yosh@gimp.org> Sun, 23 May 2010 21:49:43 -0700
+
+nova (0.2.0-13) UNRELEASED; urgency=low
+
+ * init script for nova-objectstore
+
+ -- Manish Singh <yosh@gimp.org> Sun, 23 May 2010 21:33:25 -0700
+
+nova (0.2.0-12) UNRELEASED; urgency=low
+
+ * kvm, kpartx required for nova-compute
+
+ -- Jesse Andrews <anotherjesse@gmail.com> Sun, 23 May 2010 21:32:00 -0700
+
+nova (0.2.0-11) UNRELEASED; urgency=low
+
+ * Need to include the python modules in nova-common.install as well.
+
+ -- Manish Singh <yosh@gimp.org> Sun, 23 May 2010 20:04:27 -0700
+
+nova (0.2.0-10) UNRELEASED; urgency=low
+
+ * add more requirements to bin packages
+
+ -- Jesse Andrews <anotherjesse@gmail.com> Sun, 23 May 2010 19:54:00 -0700
+
+nova (0.2.0-9) UNRELEASED; urgency=low
+
+ * nova bin packages should depend on the same version of nova-common they
+ were built from.
+
+ -- Manish Singh <yosh@gimp.org> Sun, 23 May 2010 18:46:34 -0700
+
+nova (0.2.0-8) UNRELEASED; urgency=low
+
+ * Require libvirt 0.8.1 or newer for nova-compute
+
+ -- Jesse Andrews <anotherjesse@gmail.com> Sun, 23 May 2010 18:33:00 -0700
+
+nova (0.2.0-7) UNRELEASED; urgency=low
+
+ * Split bins into separate packages
+
+ -- Manish Singh <yosh@gimp.org> Sun, 23 May 2010 18:46:34 -0700
+
+nova (0.2.0-6) UNRELEASED; urgency=low
+
+ * Add python-m2crypto to deps
+
+ -- Jesse Andrews <anotherjesse@gmail.com> Sun, 23 May 2010 18:33:00 -0700
+
+nova (0.2.0-5) UNRELEASED; urgency=low
+
+ * Add python-gflags to deps
+
+ -- Manish Singh <yosh@gimp.org> Sun, 23 May 2010 18:28:50 -0700
+
+nova (0.2.0-4) UNRELEASED; urgency=low
+
+ * install scripts
+
+ -- Manish Singh <yosh@gimp.org> Sun, 23 May 2010 18:16:27 -0700
+
+nova (0.2.0-3) UNRELEASED; urgency=low
+
+ * debian build goop
+
+ -- Manish Singh <yosh@gimp.org> Sun, 23 May 2010 18:06:37 -0700
+
+nova (0.2.0-2) UNRELEASED; urgency=low
+
+ * improved requirements
+
+ -- Jesse Andrews <anotherjesse@gmail.com> Sun, 23 May 2010 17:42:00 -0700
+
+nova (0.2.0-1) UNRELEASED; urgency=low
* initial release
- -- Jesse Andrews <jesse@ansolabs.com> Thur, 27 May 2010 12:28:00 -0700
+ -- Jesse Andrews <anotherjesse@gmail.com> Fri, 21 May 2010 12:28:00 -0700
diff --git a/debian/control b/debian/control
index 81af9f4e9..342dfb185 100644
--- a/debian/control
+++ b/debian/control
@@ -3,38 +3,134 @@ Section: net
Priority: extra
Maintainer: Jesse Andrews <jesse@ansolabs.com>
Build-Depends: debhelper (>= 7)
-Build-Depends-Indep: python-support
+Build-Depends-Indep: python-support, python-setuptools
Standards-Version: 3.8.4
XS-Python-Version: 2.6
Package: nova-common
Architecture: all
-Depends: ${python:Depends}, aoetools, vlan, python-ipy, python-boto, python-m2crypto, python-pycurl, python-twisted, python-daemon, python-redis, python-carrot, python-lockfile, python-gflags, python-tornado, ${misc:Depends}
+Depends: ${python:Depends}, aoetools, vlan, python-ipy, python-boto, python-m2crypto, python-pycurl, python-twisted-core, python-daemon, python-redis, python-carrot, python-lockfile, python-gflags, python-tornado, ${misc:Depends}
Provides: ${python:Provides}
-Conflicts: nova
-Description: Nova is a cloud
+Description: Nova Cloud Computing - common files
+ Nova is a cloud computing fabric controller (the main part of an IaaS
+ system) built to match the popular AWS EC2 and S3 APIs. It is written in
+ Python, using the Tornado and Twisted frameworks, and relies on the
+ standard AMQP messaging protocol, and the Redis distributed KVS.
+ .
+ Nova is intended to be easy to extend, and adapt. For example, it
+ currently uses an LDAP server for users and groups, but also includes a
+ fake LDAP server, that stores data in Redis. It has extensive test
+ coverage, and uses the Sphinx toolkit (the same as Python itself) for code
+ and user documentation.
+ .
+ While Nova is currently in Beta use within several organizations, the
+ codebase is very much under active development.
+ .
+ This package contains things that are needed by all parts of Nova.
Package: nova-compute
Architecture: all
-Depends: nova-common (= ${binary:Version}), kpartx, kvm, python-libvirt, libvirt-bin (>= 0.8.1), ${python:Depends}, ${misc:Depends}
-Description: Nova compute
+Depends: nova-common (= ${binary:Version}), kpartx, kvm, python-libvirt, libvirt-bin (>= 0.7.5), curl, ${python:Depends}, ${misc:Depends}
+Description: Nova Cloud Computing - compute node
+ Nova is a cloud computing fabric controller (the main part of an IaaS
+ system) built to match the popular AWS EC2 and S3 APIs. It is written in
+ Python, using the Tornado and Twisted frameworks, and relies on the
+ standard AMQP messaging protocol, and the Redis distributed KVS.
+ .
+ Nova is intended to be easy to extend, and adapt. For example, it
+ currently uses an LDAP server for users and groups, but also includes a
+ fake LDAP server, that stores data in Redis. It has extensive test
+ coverage, and uses the Sphinx toolkit (the same as Python itself) for code
+ and user documentation.
+ .
+ While Nova is currently in Beta use within several organizations, the
+ codebase is very much under active development.
+ .
+ This is the package you will install on the nodes that will run your
+ virtual machines.
Package: nova-volume
Architecture: all
Depends: nova-common (= ${binary:Version}), vblade, vblade-persist, ${python:Depends}, ${misc:Depends}
-Description: Nova volume
+Description: Nova Cloud Computing - storage
+ Nova is a cloud computing fabric controller (the main part of an IaaS
+ system) built to match the popular AWS EC2 and S3 APIs. It is written in
+ Python, using the Tornado and Twisted frameworks, and relies on the
+ standard AMQP messaging protocol, and the Redis distributed KVS.
+ .
+ Nova is intended to be easy to extend, and adapt. For example, it
+ currently uses an LDAP server for users and groups, but also includes a
+ fake LDAP server, that stores data in Redis. It has extensive test
+ coverage, and uses the Sphinx toolkit (the same as Python itself) for code
+ and user documentation.
+ .
+ While Nova is currently in Beta use within several organizations, the
+ codebase is very much under active development.
+ .
+ This is the package you will install on your storage nodes.
Package: nova-api
Architecture: all
Depends: nova-common (= ${binary:Version}), ${python:Depends}, ${misc:Depends}
-Description: Nova api
+Description: Nova Cloud Computing - API frontend
+ Nova is a cloud computing fabric controller (the main part of an IaaS
+ system) built to match the popular AWS EC2 and S3 APIs. It is written in
+ Python, using the Tornado and Twisted frameworks, and relies on the
+ standard AMQP messaging protocol, and the Redis distributed KVS.
+ .
+ Nova is intended to be easy to extend, and adapt. For example, it
+ currently uses an LDAP server for users and groups, but also includes a
+ fake LDAP server, that stores data in Redis. It has extensive test
+ coverage, and uses the Sphinx toolkit (the same as Python itself) for code
+ and user documentation.
+ .
+ While Nova is currently in Beta use within several organizations, the
+ codebase is very much under active development.
+ .
+ This package provides the API frontend.
Package: nova-objectstore
Architecture: all
+Depends: nova-common (= ${binary:Version}), nginx, ${python:Depends}, ${misc:Depends}
+Description: Nova Cloud Computing - object store
+ Nova is a cloud computing fabric controller (the main part of an IaaS
+ system) built to match the popular AWS EC2 and S3 APIs. It is written in
+ Python, using the Tornado and Twisted frameworks, and relies on the
+ standard AMQP messaging protocol, and the Redis distributed KVS.
+ .
+ Nova is intended to be easy to extend, and adapt. For example, it
+ currently uses an LDAP server for users and groups, but also includes a
+ fake LDAP server, that stores data in Redis. It has extensive test
+ coverage, and uses the Sphinx toolkit (the same as Python itself) for code
+ and user documentation.
+ .
+ While Nova is currently in Beta use within several organizations, the
+ codebase is very much under active development.
+ .
+ This is the package you will install on the nodes that will contain your
+ object store.
+
+Package: nova-instancemonitor
+Architecture: all
Depends: nova-common (= ${binary:Version}), ${python:Depends}, ${misc:Depends}
-Description: Nova object store
+Description: Nova instance monitor
Package: nova-tools
Architecture: all
Depends: python-boto, ${python:Depends}, ${misc:Depends}
-Description: CLI tools to access nova
+Description: Nova Cloud Computing - management tools
+ Nova is a cloud computing fabric controller (the main part of an IaaS
+ system) built to match the popular AWS EC2 and S3 APIs. It is written in
+ Python, using the Tornado and Twisted frameworks, and relies on the
+ standard AMQP messaging protocol, and the Redis distributed KVS.
+ .
+ Nova is intended to be easy to extend, and adapt. For example, it
+ currently uses an LDAP server for users and groups, but also includes a
+ fake LDAP server, that stores data in Redis. It has extensive test
+ coverage, and uses the Sphinx toolkit (the same as Python itself) for code
+ and user documentation.
+ .
+ While Nova is currently in Beta use within several organizations, the
+ codebase is very much under active development.
+ .
+ This package contains admin tools for Nova.
diff --git a/debian/nova-api.conf b/debian/nova-api.conf
new file mode 100644
index 000000000..9cd4051b1
--- /dev/null
+++ b/debian/nova-api.conf
@@ -0,0 +1,5 @@
+--daemonize=1
+--ca_path=/var/lib/nova/CA
+--keys_path=/var/lib/nova/keys
+--fake_users=1
+--datastore_path=/var/lib/nova/keeper
diff --git a/debian/nova-api.init b/debian/nova-api.init
index 925c92c5e..597fbef95 100644
--- a/debian/nova-api.init
+++ b/debian/nova-api.init
@@ -13,10 +13,10 @@
set -e
DAEMON=/usr/bin/nova-api
-DAEMON_ARGS="--flagfile=/etc/nova.conf"
+DAEMON_ARGS="--flagfile=/etc/nova/nova-api.conf"
PIDFILE=/var/run/nova-api.pid
-ENABLED=false
+ENABLED=true
if test -f /etc/default/nova-api; then
. /etc/default/nova-api
diff --git a/debian/nova-api.install b/debian/nova-api.install
index 757235b11..02dbda02d 100644
--- a/debian/nova-api.install
+++ b/debian/nova-api.install
@@ -1 +1,2 @@
bin/nova-api usr/bin
+debian/nova-api.conf etc/nova
diff --git a/debian/nova-common.dirs b/debian/nova-common.dirs
new file mode 100644
index 000000000..b58fe8b7f
--- /dev/null
+++ b/debian/nova-common.dirs
@@ -0,0 +1,11 @@
+etc/nova
+var/lib/nova/buckets
+var/lib/nova/CA
+var/lib/nova/CA/INTER
+var/lib/nova/CA/newcerts
+var/lib/nova/CA/private
+var/lib/nova/CA/reqs
+var/lib/nova/images
+var/lib/nova/instances
+var/lib/nova/keys
+var/lib/nova/networks
diff --git a/debian/nova-common.install b/debian/nova-common.install
index c9358ac41..9b1bbf147 100644
--- a/debian/nova-common.install
+++ b/debian/nova-common.install
@@ -1,4 +1,10 @@
bin/nova-manage usr/bin
-nova/auth/novarc.template usr/lib/pymodules/python2.6/nova/auth
-nova/compute/libvirt.xml.template usr/lib/pymodules/python2.6/nova/compute
+debian/nova-manage.conf etc/nova
+nova/auth/novarc.template usr/share/nova
+nova/cloudpipe/client.ovpn.template usr/share/nova
+nova/compute/libvirt.xml.template usr/share/nova
+nova/compute/interfaces.template usr/share/nova
usr/lib/python*/*-packages/nova/*
+CA/openssl.cnf.tmpl var/lib/nova/CA
+CA/geninter.sh var/lib/nova/CA
+CA/genrootca.sh var/lib/nova/CA
diff --git a/debian/nova-compute.conf b/debian/nova-compute.conf
new file mode 100644
index 000000000..e4ca3fe95
--- /dev/null
+++ b/debian/nova-compute.conf
@@ -0,0 +1,10 @@
+--ca_path=/var/lib/nova/CA
+--keys_path=/var/lib/nova/keys
+--datastore_path=/var/lib/nova/keeper
+--instances_path=/var/lib/nova/instances
+--networks_path=/var/lib/nova/networks
+--simple_network_template=/usr/share/nova/interfaces.template
+--libvirt_xml_template=/usr/share/nova/libvirt.xml.template
+--vpn_client_template=/usr/share/nova/client.ovpn.template
+--credentials_template=/usr/share/nova/novarc.template
+--fake_users=1
diff --git a/debian/nova-compute.init b/debian/nova-compute.init
index 89d0e5fce..d0f093a7a 100644
--- a/debian/nova-compute.init
+++ b/debian/nova-compute.init
@@ -13,10 +13,10 @@
set -e
DAEMON=/usr/bin/nova-compute
-DAEMON_ARGS="--flagfile=/etc/nova.conf"
+DAEMON_ARGS="--flagfile=/etc/nova/nova-compute.conf"
PIDFILE=/var/run/nova-compute.pid
-ENABLED=false
+ENABLED=true
if test -f /etc/default/nova-compute; then
. /etc/default/nova-compute
diff --git a/debian/nova-compute.install b/debian/nova-compute.install
index 6387cef07..5f9df46a8 100644
--- a/debian/nova-compute.install
+++ b/debian/nova-compute.install
@@ -1 +1,2 @@
bin/nova-compute usr/bin
+debian/nova-compute.conf etc/nova
diff --git a/debian/nova-instancemonitor.init b/debian/nova-instancemonitor.init
new file mode 100644
index 000000000..2865fc334
--- /dev/null
+++ b/debian/nova-instancemonitor.init
@@ -0,0 +1,69 @@
+#! /bin/sh
+### BEGIN INIT INFO
+# Provides: nova-instancemonitor
+# Required-Start: $remote_fs $syslog
+# Required-Stop: $remote_fs $syslog
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: nova-instancemonitor
+# Description: nova-instancemonitor
+### END INIT INFO
+
+
+set -e
+
+DAEMON=/usr/bin/nova-instancemonitor
+DAEMON_ARGS="--flagfile=/etc/nova.conf"
+PIDFILE=/var/run/nova-instancemonitor.pid
+
+ENABLED=false
+
+if test -f /etc/default/nova-instancemonitor; then
+ . /etc/default/nova-instancemonitor
+fi
+
+. /lib/lsb/init-functions
+
+export PATH="${PATH:+$PATH:}/usr/sbin:/sbin"
+
+case "$1" in
+ start)
+ test "$ENABLED" = "true" || exit 0
+ log_daemon_msg "Starting nova compute" "nova-instancemonitor"
+ cd /var/run
+ if $DAEMON $DAEMON_ARGS start; then
+ log_end_msg 0
+ else
+ log_end_msg 1
+ fi
+ ;;
+ stop)
+ test "$ENABLED" = "true" || exit 0
+ log_daemon_msg "Stopping nova compute" "nova-instancemonitor"
+ cd /var/run
+ if $DAEMON $DAEMON_ARGS stop; then
+ log_end_msg 0
+ else
+ log_end_msg 1
+ fi
+ ;;
+ restart|force-reload)
+ test "$ENABLED" = "true" || exit 1
+ cd /var/run
+ if $DAEMON $DAEMON_ARGS restart; then
+ log_end_msg 0
+ else
+ log_end_msg 1
+ fi
+ ;;
+ status)
+ test "$ENABLED" = "true" || exit 0
+ status_of_proc -p $PIDFILE $DAEMON nova-instancemonitor && exit 0 || exit $?
+ ;;
+ *)
+ log_action_msg "Usage: /etc/init.d/nova-instancemonitor {start|stop|restart|force-reload|status}"
+ exit 1
+ ;;
+esac
+
+exit 0
diff --git a/debian/nova-instancemonitor.install b/debian/nova-instancemonitor.install
new file mode 100644
index 000000000..48e7884b4
--- /dev/null
+++ b/debian/nova-instancemonitor.install
@@ -0,0 +1 @@
+bin/nova-instancemonitor usr/bin
diff --git a/debian/nova-manage.conf b/debian/nova-manage.conf
new file mode 100644
index 000000000..5ccda7ecf
--- /dev/null
+++ b/debian/nova-manage.conf
@@ -0,0 +1,4 @@
+--ca_path=/var/lib/nova/CA
+--credentials_template=/usr/share/nova/novarc.template
+--keys_path=/var/lib/nova/keys
+--vpn_client_template=/usr/share/nova/client.ovpn.template
diff --git a/debian/nova-objectstore.conf b/debian/nova-objectstore.conf
new file mode 100644
index 000000000..af3271d3b
--- /dev/null
+++ b/debian/nova-objectstore.conf
@@ -0,0 +1,7 @@
+--daemonize=1
+--ca_path=/var/lib/nova/CA
+--keys_path=/var/lib/nova/keys
+--datastore_path=/var/lib/nova/keeper
+--fake_users=1
+--images_path=/var/lib/nova/images
+--buckets_path=/var/lib/nova/buckets
diff --git a/debian/nova-objectstore.init b/debian/nova-objectstore.init
index be7d32d8e..9676345ad 100644
--- a/debian/nova-objectstore.init
+++ b/debian/nova-objectstore.init
@@ -13,10 +13,10 @@
set -e
DAEMON=/usr/bin/nova-objectstore
-DAEMON_ARGS="--flagfile=/etc/nova.conf"
+DAEMON_ARGS="--flagfile=/etc/nova/nova-objectstore.conf"
PIDFILE=/var/run/nova-objectstore.pid
-ENABLED=false
+ENABLED=true
if test -f /etc/default/nova-objectstore; then
. /etc/default/nova-objectstore
diff --git a/debian/nova-objectstore.install b/debian/nova-objectstore.install
index ccc60fccc..3ed93ff37 100644
--- a/debian/nova-objectstore.install
+++ b/debian/nova-objectstore.install
@@ -1 +1,3 @@
bin/nova-objectstore usr/bin
+debian/nova-objectstore.conf etc/nova
+debian/nova-objectstore.nginx.conf etc/nginx/sites-available
diff --git a/debian/nova-objectstore.links b/debian/nova-objectstore.links
new file mode 100644
index 000000000..38e33948e
--- /dev/null
+++ b/debian/nova-objectstore.links
@@ -0,0 +1 @@
+/etc/nginx/sites-available/nova-objectstore.nginx.conf /etc/nginx/sites-enabled/nova-objectstore.nginx.conf
diff --git a/debian/nova-objectstore.nginx.conf b/debian/nova-objectstore.nginx.conf
new file mode 100644
index 000000000..b63424150
--- /dev/null
+++ b/debian/nova-objectstore.nginx.conf
@@ -0,0 +1,17 @@
+server {
+ listen 3333 default;
+ server_name localhost;
+ client_max_body_size 10m;
+
+ access_log /var/log/nginx/localhost.access.log;
+
+ location ~ /_images/.+ {
+ root /var/lib/nova/images;
+ rewrite ^/_images/(.*)$ /$1 break;
+ }
+
+ location / {
+ proxy_pass http://localhost:3334/;
+ }
+}
+
diff --git a/debian/nova-volume.conf b/debian/nova-volume.conf
new file mode 100644
index 000000000..af3271d3b
--- /dev/null
+++ b/debian/nova-volume.conf
@@ -0,0 +1,7 @@
+--daemonize=1
+--ca_path=/var/lib/nova/CA
+--keys_path=/var/lib/nova/keys
+--datastore_path=/var/lib/nova/keeper
+--fake_users=1
+--images_path=/var/lib/nova/images
+--buckets_path=/var/lib/nova/buckets
diff --git a/debian/nova-volume.init b/debian/nova-volume.init
index 80da3f70c..d5c2dddf8 100644
--- a/debian/nova-volume.init
+++ b/debian/nova-volume.init
@@ -13,10 +13,10 @@
set -e
DAEMON=/usr/bin/nova-volume
-DAEMON_ARGS="--flagfile=/etc/nova.conf"
+DAEMON_ARGS="--flagfile=/etc/nova/nova-volume.conf"
PIDFILE=/var/run/nova-volume.pid
-ENABLED=false
+ENABLED=true
if test -f /etc/default/nova-volume; then
. /etc/default/nova-volume
diff --git a/debian/nova-volume.install b/debian/nova-volume.install
index 37b535c03..9a840c78e 100644
--- a/debian/nova-volume.install
+++ b/debian/nova-volume.install
@@ -1 +1,2 @@
bin/nova-volume usr/bin
+debian/nova-volume.conf etc/nova
diff --git a/docs/architecture.rst b/docs/architecture.rst
index 9aab7afbf..0000a02dd 100644
--- a/docs/architecture.rst
+++ b/docs/architecture.rst
@@ -1,17 +1,21 @@
..
- Copyright [2010] [Anso Labs, LLC]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
+ Copyright 2010 United States Government as represented by the
+ Administrator of the National Aeronautics and Space Administration.
+ All Rights Reserved.
+
+ Copyright 2010 Anso Labs, LLC
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
http://www.apache.org/licenses/LICENSE-2.0
-
+
Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
nova System Architecture
========================
diff --git a/docs/auth.rst b/docs/auth.rst
index ba001cfec..c906a481f 100644
--- a/docs/auth.rst
+++ b/docs/auth.rst
@@ -1,17 +1,21 @@
..
- Copyright [2010] [Anso Labs, LLC]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
+ Copyright 2010 United States Government as represented by the
+ Administrator of the National Aeronautics and Space Administration.
+ All Rights Reserved.
+
+ Copyright 2010 Anso Labs, LLC
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
http://www.apache.org/licenses/LICENSE-2.0
-
+
Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
Auth Documentation
==================
diff --git a/docs/binaries.rst b/docs/binaries.rst
index eee089164..fa6127bb7 100644
--- a/docs/binaries.rst
+++ b/docs/binaries.rst
@@ -1,17 +1,21 @@
..
- Copyright [2010] [Anso Labs, LLC]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
+ Copyright 2010 United States Government as represented by the
+ Administrator of the National Aeronautics and Space Administration.
+ All Rights Reserved.
+
+ Copyright 2010 Anso Labs, LLC
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
http://www.apache.org/licenses/LICENSE-2.0
-
+
Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
Nova Binaries
===============
diff --git a/docs/compute.rst b/docs/compute.rst
index e2b32fae0..7b4f82a3b 100644
--- a/docs/compute.rst
+++ b/docs/compute.rst
@@ -1,17 +1,21 @@
..
- Copyright [2010] [Anso Labs, LLC]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
+ Copyright 2010 United States Government as represented by the
+ Administrator of the National Aeronautics and Space Administration.
+ All Rights Reserved.
+
+ Copyright 2010 Anso Labs, LLC
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
http://www.apache.org/licenses/LICENSE-2.0
-
+
Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
Compute Documentation
=====================
diff --git a/docs/conf.py b/docs/conf.py
index 9dfdfc8be..bc61f438c 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -16,7 +16,7 @@ import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.append(os.path.abspath('.'))
+sys.path.append(os.path.abspath('/Users/jmckenty/Projects/cc'))
sys.path.append([os.path.abspath('../nova'),os.path.abspath('../'),os.path.abspath('../vendor')])
from nova import vendor
diff --git a/docs/endpoint.rst b/docs/endpoint.rst
index 86a1a3be0..830c99665 100644
--- a/docs/endpoint.rst
+++ b/docs/endpoint.rst
@@ -1,17 +1,21 @@
..
- Copyright [2010] [Anso Labs, LLC]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
+ Copyright 2010 United States Government as represented by the
+ Administrator of the National Aeronautics and Space Administration.
+ All Rights Reserved.
+
+ Copyright 2010 Anso Labs, LLC
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
http://www.apache.org/licenses/LICENSE-2.0
-
+
Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
Endpoint Documentation
======================
diff --git a/docs/fakes.rst b/docs/fakes.rst
index f105c6b8d..1454ca7b5 100644
--- a/docs/fakes.rst
+++ b/docs/fakes.rst
@@ -1,17 +1,21 @@
..
- Copyright [2010] [Anso Labs, LLC]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
+ Copyright 2010 United States Government as represented by the
+ Administrator of the National Aeronautics and Space Administration.
+ All Rights Reserved.
+
+ Copyright 2010 Anso Labs, LLC
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
http://www.apache.org/licenses/LICENSE-2.0
-
+
Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
Nova Fakes
==========
diff --git a/docs/getting.started.rst b/docs/getting.started.rst
index 777cd32e9..55a73dd00 100644
--- a/docs/getting.started.rst
+++ b/docs/getting.started.rst
@@ -1,38 +1,39 @@
-..
- Copyright [2010] [Anso Labs, LLC]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
Getting Started with Nova
=========================
GOTTA HAVE A nova.pth file added or it WONT WORK (will write setup.py file soon)
+Create a file named nova.pth in your python libraries directory
+(usually /usr/local/lib/python2.6/dist-packages) with a single line that points
+to the directory where you checked out the source (that contains the nova/
+directory).
+
DEPENDENCIES
------------
+Related servers we rely on
+
* RabbitMQ: messaging queue, used for all communication between components
* OpenLDAP: users, groups (maybe cut)
+* ReDIS: Remote Dictionary Store (for fast, shared state data)
+* nginx: HTTP server to handle serving large files (because Tornado can't)
+
+Python libraries we don't vendor
+
+* M2Crypto: python library interface for openssl
+* curl
+
+Vendored python libraries (don't require any installation)
+
* Tornado: scalable non blocking web server for api requests
* Twisted: just for the twisted.internet.defer package
* boto: python api for aws api
-* M2Crypto: python library interface for openssl
* IPy: library for managing ip addresses
-* ReDIS: Remote Dictionary Store (for fast, shared state data)
Recommended
-----------------
+
* euca2ools: python implementation of aws ec2-tools and ami tools
* build tornado to use C module for evented section
@@ -41,30 +42,90 @@ Installation
--------------
::
- # ON ALL SYSTEMS
- apt-get install -y python-libvirt libvirt-bin python-setuptools python-dev python-pycurl python-m2crypto python-twisted
- apt-get install -y aoetools vlan
+ # system libraries and tools
+ apt-get install -y aoetools vlan curl
modprobe aoe
+ # python libraries
+ apt-get install -y python-setuptools python-dev python-pycurl python-m2crypto
+
# ON THE CLOUD CONTROLLER
- apt-get install -y rabbitmq-server dnsmasq
- # fix ec2 metadata/userdata uri - where $IP is the IP of the cloud
- iptables -t nat -A PREROUTING -s 0.0.0.0/0 -d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j DNAT --to-destination $IP:8773
- iptables --table nat --append POSTROUTING --out-interface $PUBLICIFACE -j MASQUERADE
- # setup ldap (slap.sh as root will remove ldap and reinstall it)
- auth/slap.sh
+ apt-get install -y rabbitmq-server dnsmasq nginx
+ # build redis from 2.0.0-rc1 source
+ # setup ldap (slap.sh as root will remove ldap and reinstall it)
+ NOVA_PATH/nova/auth/slap.sh
/etc/init.d/rabbitmq-server start
# ON VOLUME NODE:
- apt-get install -y vblade-persist
+ apt-get install -y vblade-persist
# ON THE COMPUTE NODE:
- apt-get install -y kpartx kvm
+ apt-get install -y python-libvirt
+ apt-get install -y kpartx kvm libvirt-bin
+ modprobe kvm
# optional packages
- apt-get install -y euca2ools
-
- # Set up flagfiles with the appropriate hostnames, etc.
- # start api_worker, s3_worker, node_worker, storage_worker
- # Add yourself to the libvirtd group, log out, and log back in
- # Make sure the user who will launch the workers has sudo privileges w/o pass (will fix later)
+ apt-get install -y euca2ools
+
+Configuration
+---------------
+
+ON CLOUD CONTROLLER
+
+* Add yourself to the libvirtd group, log out, and log back in
+* fix hardcoded ec2 metadata/userdata uri ($IP is the IP of the cloud), and masquerade all traffic from launched instances
+::
+
+ iptables -t nat -A PREROUTING -s 0.0.0.0/0 -d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j DNAT --to-destination $IP:8773
+ iptables --table nat --append POSTROUTING --out-interface $PUBLICIFACE -j MASQUERADE
+
+
+* Configure NginX proxy (/etc/nginx/sites-enabled/default)
+
+::
+
+ server {
+ listen 3333 default;
+            server_name localhost;
+ client_max_body_size 10m;
+
+ access_log /var/log/nginx/localhost.access.log;
+
+ location ~ /_images/.+ {
+ root NOVA_PATH/images;
+ rewrite ^/_images/(.*)$ /$1 break;
+ }
+
+ location / {
+ proxy_pass http://localhost:3334/;
+ }
+ }
+
+ON VOLUME NODE
+
+* create a filesystem (you can use an actual disk if you have one spare, default is /dev/sdb)
+
+::
+
+ # This creates a 1GB file to create volumes out of
+ dd if=/dev/zero of=MY_FILE_PATH bs=100M count=10
+ losetup --show -f MY_FILE_PATH
+ # replace loop0 below with whatever losetup returns
+ echo "--storage_dev=/dev/loop0" >> NOVA_PATH/bin/nova.conf
+
+Running
+---------
+
+Launch servers
+
+* rabbitmq
+* redis
+* slapd
+* nginx
+
+Launch nova components
+
+* nova-api
+* nova-compute
+* nova-objectstore
+* nova-volume
diff --git a/docs/index.rst b/docs/index.rst
index b86f14324..9a5fa32ae 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -1,18 +1,21 @@
..
- Copyright [2010] [Anso Labs, LLC]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
+ Copyright 2010 United States Government as represented by the
+ Administrator of the National Aeronautics and Space Administration.
+ All Rights Reserved.
+
+ Copyright 2010 Anso Labs, LLC
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
Welcome to nova's documentation!
================================
diff --git a/docs/modules.rst b/docs/modules.rst
index f927a52d0..11e1eef7d 100644
--- a/docs/modules.rst
+++ b/docs/modules.rst
@@ -1,17 +1,21 @@
..
- Copyright [2010] [Anso Labs, LLC]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
+ Copyright 2010 United States Government as represented by the
+ Administrator of the National Aeronautics and Space Administration.
+ All Rights Reserved.
+
+ Copyright 2010 Anso Labs, LLC
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
http://www.apache.org/licenses/LICENSE-2.0
-
+
Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
Nova Documentation
==================
diff --git a/docs/network.rst b/docs/network.rst
index 49e36170d..2787ae458 100644
--- a/docs/network.rst
+++ b/docs/network.rst
@@ -1,17 +1,21 @@
..
- Copyright [2010] [Anso Labs, LLC]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
+ Copyright 2010 United States Government as represented by the
+ Administrator of the National Aeronautics and Space Administration.
+ All Rights Reserved.
+
+ Copyright 2010 Anso Labs, LLC
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
http://www.apache.org/licenses/LICENSE-2.0
-
+
Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
nova Networking
================
diff --git a/docs/nova.rst b/docs/nova.rst
index 7f1feda10..01da34414 100644
--- a/docs/nova.rst
+++ b/docs/nova.rst
@@ -1,17 +1,21 @@
..
- Copyright [2010] [Anso Labs, LLC]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
+ Copyright 2010 United States Government as represented by the
+ Administrator of the National Aeronautics and Space Administration.
+ All Rights Reserved.
+
+ Copyright 2010 Anso Labs, LLC
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
http://www.apache.org/licenses/LICENSE-2.0
-
+
Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
NOVA Libraries
===============
diff --git a/docs/objectstore.rst b/docs/objectstore.rst
index 64122c9b7..70048354a 100644
--- a/docs/objectstore.rst
+++ b/docs/objectstore.rst
@@ -1,17 +1,21 @@
..
- Copyright [2010] [Anso Labs, LLC]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
+ Copyright 2010 United States Government as represented by the
+ Administrator of the National Aeronautics and Space Administration.
+ All Rights Reserved.
+
+ Copyright 2010 Anso Labs, LLC
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
http://www.apache.org/licenses/LICENSE-2.0
-
+
Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
Objectstore Documentation
=========================
diff --git a/docs/packages.rst b/docs/packages.rst
index ad1386f19..fb28e850b 100644
--- a/docs/packages.rst
+++ b/docs/packages.rst
@@ -1,17 +1,21 @@
..
- Copyright [2010] [Anso Labs, LLC]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
+ Copyright 2010 United States Government as represented by the
+ Administrator of the National Aeronautics and Space Administration.
+ All Rights Reserved.
+
+ Copyright 2010 Anso Labs, LLC
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
http://www.apache.org/licenses/LICENSE-2.0
-
+
Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
nova Packages & Dependencies
============================
diff --git a/docs/storage.rst b/docs/storage.rst
index 94d7bdeea..f8c98b18d 100644
--- a/docs/storage.rst
+++ b/docs/storage.rst
@@ -1,17 +1,21 @@
..
- Copyright [2010] [Anso Labs, LLC]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
+ Copyright 2010 United States Government as represented by the
+ Administrator of the National Aeronautics and Space Administration.
+ All Rights Reserved.
+
+ Copyright 2010 Anso Labs, LLC
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
http://www.apache.org/licenses/LICENSE-2.0
-
+
Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
Storage in the Nova Cloud
=========================
diff --git a/docs/volume.rst b/docs/volume.rst
index 18ce70a3a..3981daf92 100644
--- a/docs/volume.rst
+++ b/docs/volume.rst
@@ -1,17 +1,21 @@
..
- Copyright [2010] [Anso Labs, LLC]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
+ Copyright 2010 United States Government as represented by the
+ Administrator of the National Aeronautics and Space Administration.
+ All Rights Reserved.
+
+ Copyright 2010 Anso Labs, LLC
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
http://www.apache.org/licenses/LICENSE-2.0
-
+
Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
Volume Documentation
====================
diff --git a/nova/__init__.py b/nova/__init__.py
index 2b25d1628..1c886716f 100644
--- a/nova/__init__.py
+++ b/nova/__init__.py
@@ -1,16 +1,22 @@
-# Copyright [2010] [Anso Labs, LLC]
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
:mod:`nova` -- Cloud IaaS Platform
diff --git a/nova/adminclient.py b/nova/adminclient.py
index 2cc592b9f..3d239fb1d 100644
--- a/nova/adminclient.py
+++ b/nova/adminclient.py
@@ -1,27 +1,35 @@
-# Copyright [2010] [Anso Labs, LLC]
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
Nova User API client library.
"""
+import base64
+
+from nova import vendor
import boto
from boto.ec2.regioninfo import RegionInfo
-import base64
class UserInfo(object):
- """ Information about a Nova user
+ """
+ Information about a Nova user, as parsed through SAX
fields include:
username
accesskey
@@ -52,6 +60,32 @@ class UserInfo(object):
elif name == 'secretkey':
self.secretkey = str(value)
+class HostInfo(object):
+ """
+ Information about a Nova Host, as parsed through SAX:
+ Disk stats
+ Running Instances
+ Memory stats
+ CPU stats
+ Network address info
+ Firewall info
+ Bridge and devices
+ """
+
+ def __init__(self, connection=None):
+ self.connection = connection
+ self.hostname = None
+
+ def __repr__(self):
+ return 'Host:%s' % self.hostname
+
+ # this is needed by the sax parser, so ignore the ugly name
+ def startElement(self, name, attrs, connection):
+ return None
+
+ # this is needed by the sax parser, so ignore the ugly name
+ def endElement(self, name, value, connection):
+ setattr(self, name, value)
class NovaAdminClient(object):
def __init__(self, clc_ip='127.0.0.1', region='nova', access_key='admin',
@@ -86,7 +120,7 @@ class NovaAdminClient(object):
def get_users(self):
""" grabs the list of all users """
- return self.apiconn.get_list('DescribeUsers', {}, (['item', UserInfo]))
+ return self.apiconn.get_list('DescribeUsers', {}, [('item', UserInfo)])
def get_user(self, name):
""" grab a single user by name """
@@ -111,3 +145,6 @@ class NovaAdminClient(object):
""" returns the content of a zip file containing novarc and access credentials. """
return self.apiconn.get_object('GenerateX509ForUser', {'Name': username}, UserInfo).file
+ def get_hosts(self):
+ return self.apiconn.get_list('DescribeHosts', {}, [('item', HostInfo)])
+
diff --git a/nova/auth/__init__.py b/nova/auth/__init__.py
index 7cd6c618d..0d115e9d9 100644
--- a/nova/auth/__init__.py
+++ b/nova/auth/__init__.py
@@ -1,17 +1,22 @@
-# Copyright [2010] [Anso Labs, LLC]
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
:mod:`nova.auth` -- Authentication and Access Control
=====================================================
diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py
index 8a5bbdf44..116fcbb78 100644
--- a/nova/auth/fakeldap.py
+++ b/nova/auth/fakeldap.py
@@ -1,141 +1,224 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
- Fake LDAP server for test harnesses.
+Fake LDAP server for test harnesses.
+
+This class does very little error checking, and knows nothing about ldap
+class definitions. It implements the minimum emulation of the python ldap
+library to work with nova.
"""
-import logging
+import json
from nova import datastore
-SCOPE_SUBTREE = 1
+
+SCOPE_SUBTREE = 2
MOD_ADD = 0
MOD_DELETE = 1
-SUBS = {
- 'groupOfNames': ['novaProject']
-}
-
class NO_SUCH_OBJECT(Exception):
pass
+class OBJECT_CLASS_VIOLATION(Exception):
+ pass
+
+
def initialize(uri):
- return FakeLDAP(uri)
+ return FakeLDAP()
+
+
+def _match_query(query, attrs):
+ """Match an ldap query to an attribute dictionary.
+
+ &, |, and ! are supported in the query. No syntax checking is performed,
+ so malformed querys will not work correctly.
+
+ """
+ # cut off the parentheses
+ inner = query[1:-1]
+ if inner.startswith('&'):
+ # cut off the &
+ l, r = _paren_groups(inner[1:])
+ return _match_query(l, attrs) and _match_query(r, attrs)
+ if inner.startswith('|'):
+ # cut off the |
+ l, r = _paren_groups(inner[1:])
+ return _match_query(l, attrs) or _match_query(r, attrs)
+ if inner.startswith('!'):
+ # cut off the ! and the nested parentheses
+ return not _match_query(query[2:-1], attrs)
+
+ (k, sep, v) = inner.partition('=')
+ return _match(k, v, attrs)
+
+
+def _paren_groups(source):
+ """Split a string into parenthesized groups."""
+ count = 0
+ start = 0
+ result = []
+ for pos in xrange(len(source)):
+ if source[pos] == '(':
+ if count == 0:
+ start = pos
+ count += 1
+ if source[pos] == ')':
+ count -= 1
+ if count == 0:
+ result.append(source[start:pos+1])
+ return result
+
+
+def _match(k, v, attrs):
+ """Match a given key and value against an attribute list."""
+ if k not in attrs:
+ return False
+ if k != "objectclass":
+ return v in attrs[k]
+ # it is an objectclass check, so check subclasses
+ values = _subs(v)
+ for value in values:
+ if value in attrs[k]:
+ return True
+ return False
+
+
+def _subs(value):
+ """Returns a list of subclass strings.
+
+ The strings represent the ldap objectclass plus any subclasses that
+ inherit from it. Fakeldap doesn't know about the ldap object structure,
+ so subclasses need to be defined manually in the dictionary below.
+
+ """
+ subs = {'groupOfNames': ['novaProject']}
+ if value in subs:
+ return [value] + subs[value]
+ return [value]
+
+
+def _from_json(encoded):
+ """Convert attribute values from json representation.
+
+ Args:
+ encoded -- a json encoded string
+
+ Returns a list of strings
+
+ """
+ return [str(x) for x in json.loads(encoded)]
+
+
+def _to_json(unencoded):
+ """Convert attribute values into json representation.
+
+ Args:
+ unencoded -- an unencoded string or list of strings. If it
+ is a single string, it will be converted into a list.
+
+ Returns a json string
+
+ """
+ return json.dumps(list(unencoded))
class FakeLDAP(object):
- def __init__(self, _uri):
- self.keeper = datastore.Keeper('fakeldap')
- if self.keeper['objects'] is None:
- self.keeper['objects'] = {}
+ #TODO(vish): refactor this class to use a wrapper instead of accessing
+ # redis directly
def simple_bind_s(self, dn, password):
+ """This method is ignored, but provided for compatibility."""
pass
def unbind_s(self):
+ """This method is ignored, but provided for compatibility."""
pass
- def _paren_groups(self, source):
- count = 0
- start = 0
- result = []
- for pos in xrange(len(source)):
- if source[pos] == '(':
- if count == 0:
- start = pos
- count += 1
- if source[pos] == ')':
- count -= 1
- if count == 0:
- result.append(source[start:pos+1])
-
- def _match_query(self, query, attrs):
- inner = query[1:-1]
- if inner.startswith('&'):
- l, r = self._paren_groups(inner[1:])
- return self._match_query(l, attrs) and self._match_query(r, attrs)
- if inner.startswith('|'):
- l, r = self._paren_groups(inner[1:])
- return self._match_query(l, attrs) or self._match_query(r, attrs)
- if inner.startswith('!'):
- return not self._match_query(query[2:-1], attrs)
-
- (k, sep, v) = inner.partition('=')
- return self._match(k, v, attrs)
-
- def _subs(self, v):
- if v in SUBS:
- return [v] + SUBS[v]
- return [v]
-
- def _match(self, k, v, attrs):
- if attrs.has_key(k):
- for v in self._subs(v):
- if (v in attrs[k]):
- return True
- return False
+ def add_s(self, dn, attr):
+ """Add an object with the specified attributes at dn."""
+ key = "%s%s" % (self.__redis_prefix, dn)
- def search_s(self, dn, scope, query=None, fields=None):
- logging.debug("searching for %s" % dn)
- filtered = {}
- d = self.keeper['objects'] or {}
- for cn, attrs in d.iteritems():
- if cn[-len(dn):] == dn:
- filtered[cn] = attrs
- objects = filtered
- if query:
- objects = {}
- for cn, attrs in filtered.iteritems():
- if self._match_query(query, attrs):
- objects[cn] = attrs
- if objects == {}:
- raise NO_SUCH_OBJECT()
- return objects.items()
-
- def add_s(self, cn, attr):
- logging.debug("adding %s" % cn)
- stored = {}
- for k, v in attr:
- if type(v) is list:
- stored[k] = v
- else:
- stored[k] = [v]
- d = self.keeper['objects']
- d[cn] = stored
- self.keeper['objects'] = d
-
- def delete_s(self, cn):
- logging.debug("deleting %s" % cn)
- d = self.keeper['objects']
- del d[cn]
- self.keeper['objects'] = d
-
- def modify_s(self, cn, attr):
- logging.debug("modifying %s" % cn)
- d = self.keeper['objects']
- for cmd, k, v in attr:
- logging.debug("command %s" % cmd)
+ value_dict = dict([(k, _to_json(v)) for k, v in attr])
+ datastore.Redis.instance().hmset(key, value_dict)
+
+ def delete_s(self, dn):
+ """Remove the ldap object at specified dn."""
+ datastore.Redis.instance().delete("%s%s" % (self.__redis_prefix, dn))
+
+ def modify_s(self, dn, attrs):
+ """Modify the object at dn using the attribute list.
+
+ Args:
+ dn -- a dn
+ attrs -- a list of tuples in the following form:
+ ([MOD_ADD | MOD_DELETE], attribute, value)
+
+ """
+ redis = datastore.Redis.instance()
+ key = "%s%s" % (self.__redis_prefix, dn)
+
+ for cmd, k, v in attrs:
+ values = _from_json(redis.hget(key, k))
if cmd == MOD_ADD:
- d[cn][k].append(v)
+ values.append(v)
else:
- d[cn][k].remove(v)
- self.keeper['objects'] = d
+ values.remove(v)
+ values = redis.hset(key, k, _to_json(values))
+
+ def search_s(self, dn, scope, query=None, fields=None):
+ """Search for all matching objects under dn using the query.
+
+ Args:
+ dn -- dn to search under
+ scope -- only SCOPE_SUBTREE is supported
+ query -- query to filter objects by
+ fields -- fields to return. Returns all fields if not specified
+
+ """
+ if scope != SCOPE_SUBTREE:
+ raise NotImplementedError(str(scope))
+ redis = datastore.Redis.instance()
+ keys = redis.keys("%s*%s" % (self.__redis_prefix, dn))
+ objects = []
+ for key in keys:
+ # get the attributes from redis
+ attrs = redis.hgetall(key)
+ # turn the values from redis into lists
+ attrs = dict([(k, _from_json(v))
+ for k, v in attrs.iteritems()])
+ # filter the objects by query
+ if not query or _match_query(query, attrs):
+ # filter the attributes by fields
+ attrs = dict([(k, v) for k, v in attrs.iteritems()
+ if not fields or k in fields])
+ objects.append((key[len(self.__redis_prefix):], attrs))
+ if objects == []:
+ raise NO_SUCH_OBJECT()
+ return objects
+ @property
+ def __redis_prefix(self):
+ return 'ldap:'
diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template
index ee1bc75f2..b6d65297a 100644
--- a/nova/auth/novarc.template
+++ b/nova/auth/novarc.template
@@ -1,16 +1,22 @@
-# Copyright [2010] [Anso Labs, LLC]
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
NOVA_KEY_DIR=$(pushd $(dirname $BASH_SOURCE)>/dev/null; pwd; popd>/dev/null)
export EC2_ACCESS_KEY="%(access)s:%(project)s"
diff --git a/nova/auth/rbac.py b/nova/auth/rbac.py
new file mode 100644
index 000000000..f4abd1075
--- /dev/null
+++ b/nova/auth/rbac.py
@@ -0,0 +1,55 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import exception
+from nova.auth import users
+
+
+def allow(*roles):
+ def wrap(f):
+ def wrapped_f(self, context, *args, **kwargs):
+ if context.user.is_superuser():
+ return f(self, context, *args, **kwargs)
+ for role in roles:
+ if __matches_role(context, role):
+ return f(self, context, *args, **kwargs)
+ raise exception.NotAuthorized()
+ return wrapped_f
+ return wrap
+
+def deny(*roles):
+ def wrap(f):
+ def wrapped_f(self, context, *args, **kwargs):
+ if context.user.is_superuser():
+ return f(self, context, *args, **kwargs)
+ for role in roles:
+ if __matches_role(context, role):
+ raise exception.NotAuthorized()
+ return f(self, context, *args, **kwargs)
+ return wrapped_f
+ return wrap
+
+def __matches_role(context, role):
+ if role == 'all':
+ return True
+ if role == 'none':
+ return False
+ return context.project.has_role(context.user.id, role)
+
diff --git a/nova/auth/signer.py b/nova/auth/signer.py
index 4b0169652..4f7ac43bc 100644
--- a/nova/auth/signer.py
+++ b/nova/auth/signer.py
@@ -1,17 +1,23 @@
-# Copyright [2010] [Anso Labs, LLC]
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
# PORTIONS OF THIS FILE ARE FROM:
# http://code.google.com/p/boto
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
diff --git a/nova/auth/slap.sh b/nova/auth/slap.sh
index c3369e396..90dc7a9d6 100755
--- a/nova/auth/slap.sh
+++ b/nova/auth/slap.sh
@@ -1,18 +1,23 @@
#!/usr/bin/env bash
-# Copyright [2010] [Anso Labs, LLC]
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
# LDAP INSTALL SCRIPT - SHOULD BE IDEMPOTENT, but it SCRUBS all USERS
apt-get install -y slapd ldap-utils python-ldap
@@ -213,6 +218,38 @@ description: Special account for authenticating users
userPassword: {MD5}TLnIqASP0CKUR3/LGkEZGg==
objectClass: account
objectClass: simpleSecurityObject
+
+# create the role group entries
+
+dn: cn=developers,ou=Groups,dc=example,dc=com
+objectclass: groupOfNames
+cn: developers
+description: IT admin group
+member: uid=admin,ou=Users,dc=example,dc=com
+
+dn: cn=sysadmins,ou=Groups,dc=example,dc=com
+objectclass: groupOfNames
+cn: sysadmins
+description: IT admin group
+member: uid=admin,ou=Users,dc=example,dc=com
+
+dn: cn=netadmins,ou=Groups,dc=example,dc=com
+objectclass: groupOfNames
+cn: netadmins
+description: Network admin group
+member: uid=admin,ou=Users,dc=example,dc=com
+
+dn: cn=cloudadmins,ou=Groups,dc=example,dc=com
+objectclass: groupOfNames
+cn: cloudadmins
+description: Cloud admin group
+member: uid=admin,ou=Users,dc=example,dc=com
+
+dn: cn=itsec,ou=Groups,dc=example,dc=com
+objectclass: groupOfNames
+cn: itsec
+description: IT security users group
+member: uid=admin,ou=Users,dc=example,dc=com
BASE_LDIF_EOF
/etc/init.d/slapd stop
diff --git a/nova/auth/users.py b/nova/auth/users.py
index c60922feb..0ee2d4441 100644
--- a/nova/auth/users.py
+++ b/nova/auth/users.py
@@ -1,18 +1,23 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
Nova users and user management, including RBAC hooks.
@@ -22,7 +27,9 @@ import datetime
import logging
import os
import shutil
+import signer
import string
+from string import Template
import tempfile
import uuid
import zipfile
@@ -33,29 +40,61 @@ except Exception, e:
import fakeldap as ldap
import fakeldap
-from nova import datastore
# TODO(termie): clean up these imports
-import signer
+from nova import datastore
from nova import exception
from nova import flags
from nova import crypto
from nova import utils
+
from nova import objectstore # for flags
FLAGS = flags.FLAGS
-flags.DEFINE_string('ldap_url', 'ldap://localhost', 'Point this at your ldap server')
+flags.DEFINE_string('ldap_url', 'ldap://localhost',
+ 'Point this at your ldap server')
flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password')
-flags.DEFINE_string('user_dn', 'cn=Manager,dc=example,dc=com', 'DN of admin user')
+flags.DEFINE_string('user_dn', 'cn=Manager,dc=example,dc=com',
+ 'DN of admin user')
flags.DEFINE_string('user_unit', 'Users', 'OID for Users')
-flags.DEFINE_string('user_ldap_subtree', 'ou=Users,dc=example,dc=com', 'OU for Users')
-flags.DEFINE_string('project_ldap_subtree', 'ou=Groups,dc=example,dc=com', 'OU for Projects')
+flags.DEFINE_string('user_ldap_subtree', 'ou=Users,dc=example,dc=com',
+ 'OU for Users')
+flags.DEFINE_string('project_ldap_subtree', 'ou=Groups,dc=example,dc=com',
+ 'OU for Projects')
+flags.DEFINE_string('role_ldap_subtree', 'ou=Groups,dc=example,dc=com',
+ 'OU for Roles')
+
+# NOTE(vish): mapping with these flags is necessary because we're going
+# to tie in to an existing ldap schema
+flags.DEFINE_string('ldap_cloudadmin',
+ 'cn=cloudadmins,ou=Groups,dc=example,dc=com', 'cn for Cloud Admins')
+flags.DEFINE_string('ldap_itsec',
+ 'cn=itsec,ou=Groups,dc=example,dc=com', 'cn for ItSec')
+flags.DEFINE_string('ldap_sysadmin',
+ 'cn=sysadmins,ou=Groups,dc=example,dc=com', 'cn for Sysadmins')
+flags.DEFINE_string('ldap_netadmin',
+ 'cn=netadmins,ou=Groups,dc=example,dc=com', 'cn for NetAdmins')
+flags.DEFINE_string('ldap_developer',
+ 'cn=developers,ou=Groups,dc=example,dc=com', 'cn for Developers')
+
+# NOTE(vish): a user with one of these roles will be a superuser and
+# have access to all api commands
+flags.DEFINE_list('superuser_roles', ['cloudadmin'],
+ 'roles that ignore rbac checking completely')
+
+# NOTE(vish): a user with one of these roles will have it for every
+# project, even if he or she is not a member of the project
+flags.DEFINE_list('global_roles', ['cloudadmin', 'itsec'],
+ 'roles that apply to all projects')
flags.DEFINE_string('credentials_template',
utils.abspath('auth/novarc.template'),
'Template for creating users rc file')
+flags.DEFINE_string('vpn_client_template',
+ utils.abspath('cloudpipe/client.ovpn.template'),
+ 'Template for creating users vpn file')
flags.DEFINE_string('credential_key_file', 'pk.pem',
'Filename of private key in credentials zip')
flags.DEFINE_string('credential_cert_file', 'cert.pem',
@@ -63,17 +102,36 @@ flags.DEFINE_string('credential_cert_file', 'cert.pem',
flags.DEFINE_string('credential_rc_file', 'novarc',
'Filename of rc in credentials zip')
+flags.DEFINE_integer('vpn_start_port', 1000,
+ 'Start port for the cloudpipe VPN servers')
+flags.DEFINE_integer('vpn_end_port', 2000,
+ 'End port for the cloudpipe VPN servers')
+
+flags.DEFINE_string('credential_cert_subject',
+ '/C=US/ST=California/L=MountainView/O=AnsoLabs/'
+ 'OU=NovaDev/CN=%s-%s',
+ 'Subject for certificate for users')
+
+flags.DEFINE_string('vpn_ip', '127.0.0.1',
+ 'Public IP for the cloudpipe VPN servers')
+
+
class AuthBase(object):
@classmethod
def safe_id(cls, obj):
- """this method will return the id of the object if the object is of this class, otherwise
- it will return the original object. This allows methods to accept objects or
- ids as paramaters"""
+ """Safe get object id.
+
+ This method will return the id of the object if the object
+ is of this class, otherwise it will return the original object.
+ This allows methods to accept objects or ids as parameters.
+
+ """
if isinstance(obj, cls):
return obj.id
else:
return obj
+
class User(AuthBase):
"""id and name are currently the same"""
def __init__(self, id, name, access, secret, admin):
@@ -83,9 +141,30 @@ class User(AuthBase):
self.secret = secret
self.admin = admin
+ def is_superuser(self):
+ """allows user to bypass rbac completely"""
+ if self.admin:
+ return True
+ for role in FLAGS.superuser_roles:
+ if self.has_role(role):
+ return True
+
def is_admin(self):
"""allows user to see objects from all projects"""
- return self.admin
+ if self.is_superuser():
+ return True
+ for role in FLAGS.global_roles:
+ if self.has_role(role):
+ return True
+
+ def has_role(self, role):
+ return UserManager.instance().has_role(self, role)
+
+ def add_role(self, role):
+ return UserManager.instance().add_role(self, role)
+
+ def remove_role(self, role):
+ return UserManager.instance().remove_role(self, role)
def is_project_member(self, project):
return UserManager.instance().is_project_member(self, project)
@@ -127,7 +206,9 @@ class User(AuthBase):
return UserManager.instance().get_key_pairs(self.id)
def __repr__(self):
- return "User('%s', '%s', '%s', '%s', %s)" % (self.id, self.name, self.access, self.secret, self.admin)
+ return "User('%s', '%s', '%s', '%s', %s)" % (
+ self.id, self.name, self.access, self.secret, self.admin)
+
class KeyPair(AuthBase):
def __init__(self, id, owner_id, public_key, fingerprint):
@@ -141,7 +222,9 @@ class KeyPair(AuthBase):
return UserManager.instance().delete_key_pair(self.owner, self.name)
def __repr__(self):
- return "KeyPair('%s', '%s', '%s', '%s')" % (self.id, self.owner_id, self.public_key, self.fingerprint)
+ return "KeyPair('%s', '%s', '%s', '%s')" % (
+ self.id, self.owner_id, self.public_key, self.fingerprint)
+
class Group(AuthBase):
"""id and name are currently the same"""
@@ -155,13 +238,14 @@ class Group(AuthBase):
return User.safe_id(user) in self.member_ids
def __repr__(self):
- return "Group('%s', '%s', %s)" % (self.id, self.description, self.member_ids)
+ return "Group('%s', '%s', %s)" % (
+ self.id, self.description, self.member_ids)
+
class Project(Group):
def __init__(self, id, project_manager_id, description, member_ids):
self.project_manager_id = project_manager_id
super(Project, self).__init__(id, description, member_ids)
- self.keeper = datastore.Keeper(prefix="project-")
@property
def project_manager(self):
@@ -170,18 +254,44 @@ class Project(Group):
def has_manager(self, user):
return User.safe_id(user) == self.project_manager_id
+ def add_role(self, user, role):
+ return UserManager.instance().add_role(user, role, self)
+
+ def remove_role(self, user, role):
+ return UserManager.instance().remove_role(user, role, self)
+
+ def has_role(self, user, role):
+ return UserManager.instance().has_role(user, role, self)
+
+ @property
+ def vpn_ip(self):
+ return Vpn(self.id).ip
+
+ @property
+ def vpn_port(self):
+ return Vpn(self.id).port
+
def get_credentials(self, user):
if not isinstance(user, User):
user = UserManager.instance().get_user(user)
rc = user.generate_rc(self.id)
private_key, signed_cert = self.generate_x509_cert(user)
+ configfile = open(FLAGS.vpn_client_template,"r")
+ s = string.Template(configfile.read())
+ configfile.close()
+ config = s.substitute(keyfile=FLAGS.credential_key_file,
+ certfile=FLAGS.credential_cert_file,
+ ip=self.vpn_ip,
+ port=self.vpn_port)
+
tmpdir = tempfile.mkdtemp()
zf = os.path.join(tmpdir, "temp.zip")
zippy = zipfile.ZipFile(zf, 'w')
zippy.writestr(FLAGS.credential_rc_file, rc)
zippy.writestr(FLAGS.credential_key_file, private_key)
zippy.writestr(FLAGS.credential_cert_file, signed_cert)
+ zippy.writestr("nebula-client.conf", config)
zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(self.id))
zippy.close()
with open(zf, 'rb') as f:
@@ -194,7 +304,78 @@ class Project(Group):
return UserManager.instance().generate_x509_cert(user, self)
def __repr__(self):
- return "Project('%s', '%s', '%s', %s)" % (self.id, self.project_manager_id, self.description, self.member_ids)
+ return "Project('%s', '%s', '%s', %s)" % (
+ self.id, self.project_manager_id,
+ self.description, self.member_ids)
+
+
+class NoMorePorts(exception.Error):
+ pass
+
+
+class Vpn(datastore.BasicModel):
+ def __init__(self, project_id):
+ self.project_id = project_id
+ super(Vpn, self).__init__()
+
+ @property
+ def identifier(self):
+ return self.project_id
+
+ @classmethod
+ def create(cls, project_id):
+ # TODO(vish): get list of vpn ips from redis
+ port = cls.find_free_port_for_ip(FLAGS.vpn_ip)
+ vpn = cls(project_id)
+ # save ip for project
+ vpn['project'] = project_id
+ vpn['ip'] = FLAGS.vpn_ip
+ vpn['port'] = port
+ vpn.save()
+ return vpn
+
+ @classmethod
+ def find_free_port_for_ip(cls, ip):
+ # TODO(vish): these redis commands should be generalized and
+ # placed into a base class. Conceptually, it is
+ # similar to an association, but we are just
+ # storing a set of values instead of keys that
+ # should be turned into objects.
+ redis = datastore.Redis.instance()
+ key = 'ip:%s:ports' % ip
+ # TODO(vish): these ports should be allocated through an admin
+ # command instead of a flag
+ if (not redis.exists(key) and
+ not redis.exists(cls._redis_association_name('ip', ip))):
+ for i in range(FLAGS.vpn_start_port, FLAGS.vpn_end_port + 1):
+ redis.sadd(key, i)
+
+ port = redis.spop(key)
+ if not port:
+ raise NoMorePorts()
+ return port
+
+ @classmethod
+ def num_ports_for_ip(cls, ip):
+ return datastore.Redis.instance().scard('ip:%s:ports' % ip)
+
+ @property
+ def ip(self):
+ return self['ip']
+
+ @property
+ def port(self):
+ return int(self['port'])
+
+ def save(self):
+ self.associate_with('ip', self.ip)
+ super(Vpn, self).save()
+
+ def destroy(self):
+ self.unassociate_with('ip', self.ip)
+ datastore.Redis.instance().sadd('ip:%s:ports' % self.ip, self.port)
+ super(Vpn, self).destroy()
+
class UserManager(object):
def __init__(self):
@@ -218,24 +399,31 @@ class UserManager(object):
except: pass
return cls._instance
- def authenticate(self, access, signature, params, verb='GET', server_string='127.0.0.1:8773', path='/', verify_signature=True):
+ def authenticate(self, access, signature, params, verb='GET',
+ server_string='127.0.0.1:8773', path='/',
+ verify_signature=True):
# TODO: Check for valid timestamp
(access_key, sep, project_name) = access.partition(':')
user = self.get_user_from_access_key(access_key)
if user == None:
- raise exception.NotFound('No user found for access key')
+ raise exception.NotFound('No user found for access key %s' %
+ access_key)
if project_name is '':
project_name = user.name
project = self.get_project(project_name)
if project == None:
- raise exception.NotFound('No project called %s could be found' % project_name)
+ raise exception.NotFound('No project called %s could be found' %
+ project_name)
if not user.is_admin() and not project.has_member(user):
- raise exception.NotFound('User %s is not a member of project %s' % (user.id, project.id))
+ raise exception.NotFound('User %s is not a member of project %s' %
+ (user.id, project.id))
if verify_signature:
- # hmac can't handle unicode, so encode ensures that secret isn't unicode
- expected_signature = signer.Signer(user.secret.encode()).generate(params, verb, server_string, path)
+ # NOTE(vish): hmac can't handle unicode, so encode ensures that
+ # secret isn't unicode
+ expected_signature = signer.Signer(user.secret.encode()).generate(
+ params, verb, server_string, path)
logging.debug('user.secret: %s', user.secret)
logging.debug('expected_signature: %s', expected_signature)
logging.debug('signature: %s', signature)
@@ -243,11 +431,50 @@ class UserManager(object):
raise exception.NotAuthorized('Signature does not match')
return (user, project)
- def create_project(self, name, manager_user, description=None, member_users=None):
+ def has_role(self, user, role, project=None):
+ with LDAPWrapper() as conn:
+ if role == 'projectmanager':
+ if not project:
+ raise exception.Error("Must specify project")
+ return self.is_project_manager(user, project)
+
+ global_role = conn.has_role(User.safe_id(user),
+ role,
+ None)
+ if not global_role:
+ return global_role
+
+ if not project or role in FLAGS.global_roles:
+ return global_role
+
+ return conn.has_role(User.safe_id(user),
+ role,
+ Project.safe_id(project))
+
+ def add_role(self, user, role, project=None):
+ with LDAPWrapper() as conn:
+ return conn.add_role(User.safe_id(user), role,
+ Project.safe_id(project))
+
+ def remove_role(self, user, role, project=None):
+ with LDAPWrapper() as conn:
+ return conn.remove_role(User.safe_id(user), role,
+ Project.safe_id(project))
+
+ def create_project(self, name, manager_user,
+ description=None, member_users=None):
if member_users:
member_users = [User.safe_id(u) for u in member_users]
with LDAPWrapper() as conn:
- return conn.create_project(name, User.safe_id(manager_user), description, member_users)
+ # NOTE(vish): try to associate a vpn ip and port first because
+ # if it throws an exception, we save having to
+ # create and destroy a project
+ Vpn.create(name)
+ return conn.create_project(name,
+ User.safe_id(manager_user),
+ description,
+ member_users)
+
def get_projects(self):
with LDAPWrapper() as conn:
@@ -260,7 +487,8 @@ class UserManager(object):
def add_to_project(self, user, project):
with LDAPWrapper() as conn:
- return conn.add_to_project(User.safe_id(user), Project.safe_id(project))
+ return conn.add_to_project(User.safe_id(user),
+ Project.safe_id(project))
def is_project_manager(self, user, project):
if not isinstance(project, Project):
@@ -276,7 +504,8 @@ class UserManager(object):
def remove_from_project(self, user, project):
with LDAPWrapper() as conn:
- return conn.remove_from_project(User.safe_id(user), Project.safe_id(project))
+ return conn.remove_from_project(User.safe_id(user),
+ Project.safe_id(project))
def delete_project(self, project):
with LDAPWrapper() as conn:
@@ -294,14 +523,21 @@ class UserManager(object):
with LDAPWrapper() as conn:
return conn.find_users()
- def create_user(self, user, access=None, secret=None, admin=False, create_project=True):
+ def create_user(self, user, access=None, secret=None,
+ admin=False, create_project=True):
if access == None: access = str(uuid.uuid4())
if secret == None: secret = str(uuid.uuid4())
with LDAPWrapper() as conn:
user = User.safe_id(user)
result = conn.create_user(user, access, secret, admin)
if create_project:
- conn.create_project(user, user, user)
+ # NOTE(vish): if the project creation fails, we delete
+ # the user and return an exception
+ try:
+ conn.create_project(user, user, user)
+ except Exception:
+ conn.delete_user(user)
+ raise
return result
def delete_user(self, user, delete_project=True):
@@ -322,14 +558,17 @@ class UserManager(object):
if not conn.user_exists(user):
raise exception.NotFound("User %s doesn't exist" % user)
if conn.key_pair_exists(user, key_name):
- raise exception.Duplicate("The keypair %s already exists" % key_name)
+ raise exception.Duplicate("The keypair %s already exists"
+ % key_name)
private_key, public_key, fingerprint = crypto.generate_key_pair()
- self.create_key_pair(User.safe_id(user), key_name, public_key, fingerprint)
+ self.create_key_pair(User.safe_id(user), key_name,
+ public_key, fingerprint)
return private_key, fingerprint
def create_key_pair(self, user, key_name, public_key, fingerprint):
with LDAPWrapper() as conn:
- return conn.create_key_pair(User.safe_id(user), key_name, public_key, fingerprint)
+ return conn.create_key_pair(User.safe_id(user), key_name,
+ public_key, fingerprint)
def get_key_pair(self, user, key_name):
with LDAPWrapper() as conn:
@@ -344,16 +583,15 @@ class UserManager(object):
conn.delete_key_pair(User.safe_id(user), key_name)
def generate_x509_cert(self, user, project):
- (private_key, csr) = crypto.generate_x509_cert(self.__cert_subject(User.safe_id(user)))
+ (private_key, csr) = crypto.generate_x509_cert(
+ self.__cert_subject(User.safe_id(user)))
# TODO - This should be async call back to the cloud controller
signed_cert = crypto.sign_csr(csr, Project.safe_id(project))
return (private_key, signed_cert)
- def sign_cert(self, csr, uid):
- return crypto.sign_csr(csr, uid)
-
def __cert_subject(self, uid):
- return "/C=US/ST=California/L=The_Mission/O=AnsoLabs/OU=Nova/CN=%s-%s" % (uid, str(datetime.datetime.utcnow().isoformat()))
+ # FIXME(ja) - this should be pulled from a global configuration
+ return FLAGS.credential_cert_subject % (uid, utils.isotime())
class LDAPWrapper(object):
@@ -366,16 +604,18 @@ class LDAPWrapper(object):
return self
def __exit__(self, type, value, traceback):
- #logging.info('type, value, traceback: %s, %s, %s', type, value, traceback)
self.conn.unbind_s()
return False
def connect(self):
""" connect to ldap as admin user """
if FLAGS.fake_users:
+ self.NO_SUCH_OBJECT = fakeldap.NO_SUCH_OBJECT
+ self.OBJECT_CLASS_VIOLATION = fakeldap.OBJECT_CLASS_VIOLATION
self.conn = fakeldap.initialize(FLAGS.ldap_url)
else:
- assert(ldap.__name__ != 'fakeldap')
+ self.NO_SUCH_OBJECT = ldap.NO_SUCH_OBJECT
+ self.OBJECT_CLASS_VIOLATION = ldap.OBJECT_CLASS_VIOLATION
self.conn = ldap.initialize(FLAGS.ldap_url)
self.conn.simple_bind_s(self.user, self.passwd)
@@ -385,32 +625,51 @@ class LDAPWrapper(object):
return None
return objects[0]
+ def find_dns(self, dn, query=None):
+ try:
+ res = self.conn.search_s(dn, ldap.SCOPE_SUBTREE, query)
+ except self.NO_SUCH_OBJECT:
+ return []
+ # just return the DNs
+ return [dn for dn, attributes in res]
+
def find_objects(self, dn, query = None):
try:
res = self.conn.search_s(dn, ldap.SCOPE_SUBTREE, query)
- except Exception:
+ except self.NO_SUCH_OBJECT:
return []
# just return the attributes
- return [x[1] for x in res]
+ return [attributes for dn, attributes in res]
def find_users(self):
- attrs = self.find_objects(FLAGS.user_ldap_subtree, '(objectclass=novaUser)')
+ attrs = self.find_objects(FLAGS.user_ldap_subtree,
+ '(objectclass=novaUser)')
return [self.__to_user(attr) for attr in attrs]
def find_key_pairs(self, uid):
- attrs = self.find_objects(self.__uid_to_dn(uid), '(objectclass=novaKeyPair)')
+ attrs = self.find_objects(self.__uid_to_dn(uid),
+ '(objectclass=novaKeyPair)')
return [self.__to_key_pair(uid, attr) for attr in attrs]
def find_projects(self):
- attrs = self.find_objects(FLAGS.project_ldap_subtree, '(objectclass=novaProject)')
+ attrs = self.find_objects(FLAGS.project_ldap_subtree,
+ '(objectclass=novaProject)')
return [self.__to_project(attr) for attr in attrs]
- def find_groups_with_member(self, tree, dn):
- attrs = self.find_objects(tree, '(&(objectclass=groupOfNames)(member=%s))' % dn )
+ def find_roles(self, tree):
+ attrs = self.find_objects(tree,
+ '(&(objectclass=groupOfNames)(!(objectclass=novaProject)))')
return [self.__to_group(attr) for attr in attrs]
+ def find_group_dns_with_member(self, tree, uid):
+ dns = self.find_dns(tree,
+ '(&(objectclass=groupOfNames)(member=%s))' %
+ self.__uid_to_dn(uid))
+ return dns
+
def find_user(self, uid):
- attr = self.find_object(self.__uid_to_dn(uid), '(objectclass=novaUser)')
+ attr = self.find_object(self.__uid_to_dn(uid),
+ '(objectclass=novaUser)')
return self.__to_user(attr)
def find_key_pair(self, uid, key_name):
@@ -467,11 +726,14 @@ class LDAPWrapper(object):
self.conn.add_s(self.__uid_to_dn(name), attr)
return self.__to_user(dict(attr))
- def create_project(self, name, manager_uid, description=None, member_uids=None):
+ def create_project(self, name, manager_uid,
+ description=None, member_uids=None):
if self.project_exists(name):
- raise exception.Duplicate("Project can't be created because project %s already exists" % name)
+ raise exception.Duplicate("Project can't be created because "
+ "project %s already exists" % name)
if not self.user_exists(manager_uid):
- raise exception.NotFound("Project can't be created because manager %s doesn't exist" % manager_uid)
+ raise exception.NotFound("Project can't be created because "
+ "manager %s doesn't exist" % manager_uid)
manager_dn = self.__uid_to_dn(manager_uid)
# description is a required attribute
if description is None:
@@ -480,7 +742,8 @@ class LDAPWrapper(object):
if member_uids != None:
for member_uid in member_uids:
if not self.user_exists(member_uid):
- raise exception.NotFound("Project can't be created because user %s doesn't exist" % member_uid)
+ raise exception.NotFound("Project can't be created "
+ "because user %s doesn't exist" % member_uid)
members.append(self.__uid_to_dn(member_uid))
# always add the manager as a member because members is required
if not manager_dn in members:
@@ -507,14 +770,25 @@ class LDAPWrapper(object):
dn = 'cn=%s,%s' % (project_id, FLAGS.project_ldap_subtree)
return self.is_in_group(uid, dn)
- def __create_group(self, group_dn, name, uid, description, member_uids = None):
- if self.group_exists(name):
- raise exception.Duplicate("Group can't be created because group %s already exists" % name)
+ def __role_to_dn(self, role, project_id=None):
+ if project_id == None:
+ return FLAGS.__getitem__("ldap_%s" % role).value
+ else:
+ return 'cn=%s,cn=%s,%s' % (role,
+ project_id,
+ FLAGS.project_ldap_subtree)
+
+ def __create_group(self, group_dn, name, uid,
+ description, member_uids = None):
+ if self.group_exists(group_dn):
+ raise exception.Duplicate("Group can't be created because "
+ "group %s already exists" % name)
members = []
if member_uids != None:
for member_uid in member_uids:
if not self.user_exists(member_uid):
- raise exception.NotFound("Group can't be created because user %s doesn't exist" % member_uid)
+ raise exception.NotFound("Group can't be created "
+ "because user %s doesn't exist" % member_uid)
members.append(self.__uid_to_dn(member_uid))
dn = self.__uid_to_dn(uid)
if not dn in members:
@@ -528,9 +802,27 @@ class LDAPWrapper(object):
self.conn.add_s(group_dn, attr)
return self.__to_group(dict(attr))
+ def has_role(self, uid, role, project_id=None):
+ role_dn = self.__role_to_dn(role, project_id)
+ return self.is_in_group(uid, role_dn)
+
+ def add_role(self, uid, role, project_id=None):
+ role_dn = self.__role_to_dn(role, project_id)
+ if not self.group_exists(role_dn):
+ # create the role if it doesn't exist
+ description = '%s role for %s' % (role, project_id)
+ self.__create_group(role_dn, role, uid, description)
+ else:
+ return self.add_to_group(uid, role_dn)
+
+ def remove_role(self, uid, role, project_id=None):
+ role_dn = self.__role_to_dn(role, project_id)
+ return self.remove_from_group(uid, role_dn)
+
def is_in_group(self, uid, group_dn):
if not self.user_exists(uid):
- raise exception.NotFound("User %s can't be searched in group becuase the user doesn't exist" % (uid,))
+ raise exception.NotFound("User %s can't be searched in group "
+ "becuase the user doesn't exist" % (uid,))
if not self.group_exists(group_dn):
return False
res = self.find_object(group_dn,
@@ -539,11 +831,14 @@ class LDAPWrapper(object):
def add_to_group(self, uid, group_dn):
if not self.user_exists(uid):
- raise exception.NotFound("User %s can't be added to the group becuase the user doesn't exist" % (uid,))
+ raise exception.NotFound("User %s can't be added to the group "
+ "becuase the user doesn't exist" % (uid,))
if not self.group_exists(group_dn):
- raise exception.NotFound("The group at dn %s doesn't exist" % (group_dn,))
+ raise exception.NotFound("The group at dn %s doesn't exist" %
+ (group_dn,))
if self.is_in_group(uid, group_dn):
- raise exception.Duplicate("User %s is already a member of the group %s" % (uid, group_dn))
+ raise exception.Duplicate("User %s is already a member of "
+ "the group %s" % (uid, group_dn))
attr = [
(ldap.MOD_ADD, 'member', self.__uid_to_dn(uid))
]
@@ -551,31 +846,39 @@ class LDAPWrapper(object):
def remove_from_group(self, uid, group_dn):
if not self.group_exists(group_dn):
- raise exception.NotFound("The group at dn %s doesn't exist" % (group_dn,))
+ raise exception.NotFound("The group at dn %s doesn't exist" %
+ (group_dn,))
if not self.user_exists(uid):
- raise exception.NotFound("User %s can't be removed from the group because the user doesn't exist" % (uid,))
+ raise exception.NotFound("User %s can't be removed from the "
+ "group because the user doesn't exist" % (uid,))
if not self.is_in_group(uid, group_dn):
- raise exception.NotFound("User %s is not a member of the group" % (uid,))
- attr = [
- (ldap.MOD_DELETE, 'member', self.__uid_to_dn(uid))
- ]
+ raise exception.NotFound("User %s is not a member of the group" %
+ (uid,))
+ self._safe_remove_from_group(group_dn, uid)
+
+ def _safe_remove_from_group(self, group_dn, uid):
+ # FIXME(vish): what if deleted user is a project manager?
+ attr = [(ldap.MOD_DELETE, 'member', self.__uid_to_dn(uid))]
try:
self.conn.modify_s(group_dn, attr)
- except ldap.OBJECT_CLASS_VIOLATION:
- logging.debug("Attempted to remove the last member of a group. Deleting the group instead.")
+        except ldap.OBJECT_CLASS_VIOLATION:
+ logging.debug("Attempted to remove the last member of a group. "
+ "Deleting the group at %s instead." % group_dn )
self.delete_group(group_dn)
def remove_from_all(self, uid):
- # FIXME(vish): what if deleted user is a project manager?
if not self.user_exists(uid):
- raise exception.NotFound("User %s can't be removed from all because the user doesn't exist" % (uid,))
+ raise exception.NotFound("User %s can't be removed from all "
+ "because the user doesn't exist" % (uid,))
dn = self.__uid_to_dn(uid)
- attr = [
- (ldap.MOD_DELETE, 'member', dn)
- ]
- projects = self.find_groups_with_member(FLAGS.project_ldap_subtree, dn)
- for project in projects:
- self.conn.modify_s('cn=%s,%s' % (project.id, FLAGS.project_ldap_subtree), attr)
+ role_dns = self.find_group_dns_with_member(
+ FLAGS.role_ldap_subtree, uid)
+ for role_dn in role_dns:
+ self._safe_remove_from_group(role_dn, uid)
+ project_dns = self.find_group_dns_with_member(
+ FLAGS.project_ldap_subtree, uid)
+ for project_dn in project_dns:
+ self._safe_remove_from_group(project_dn, uid)
def create_key_pair(self, uid, key_name, public_key, fingerprint):
"""create's a public key in the directory underneath the user"""
@@ -617,8 +920,14 @@ class LDAPWrapper(object):
raise exception.NotFound("Group at dn %s doesn't exist" % group_dn)
self.conn.delete_s(group_dn)
+ def delete_roles(self, project_dn):
+ roles = self.find_roles(project_dn)
+ for role in roles:
+ self.delete_group('cn=%s,%s' % (role.id, project_dn))
+
def delete_project(self, name):
project_dn = 'cn=%s,%s' % (name, FLAGS.project_ldap_subtree)
+ self.delete_roles(project_dn)
self.delete_group(project_dn)
def __to_user(self, attr):
diff --git a/nova/cloudpipe/__init__.py b/nova/cloudpipe/__init__.py
new file mode 100644
index 000000000..57ef14651
--- /dev/null
+++ b/nova/cloudpipe/__init__.py
@@ -0,0 +1,31 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+:mod:`nova.cloudpipe` -- VPN Server Management
+=====================================================
+
+.. automodule:: nova.cloudpipe
+ :platform: Unix
+ :synopsis: An OpenVPN server for every nova user.
+.. moduleauthor:: Devin Carlen <devin.carlen@gmail.com>
+.. moduleauthor:: Vishvananda Ishaya <vishvananda@yahoo.com>
+.. moduleauthor:: Joshua McKenty <jmckenty@gmail.com>
+"""
diff --git a/nova/cloudpipe/api.py b/nova/cloudpipe/api.py
new file mode 100644
index 000000000..610239c2e
--- /dev/null
+++ b/nova/cloudpipe/api.py
@@ -0,0 +1,61 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Tornado REST API Request Handlers for CloudPipe
+"""
+
+import logging
+import urllib
+
+from nova import vendor
+import tornado.web
+
+from nova import crypto
+from nova.auth import users
+
+_log = logging.getLogger("api")
+_log.setLevel(logging.DEBUG)
+
+
+class CloudPipeRequestHandler(tornado.web.RequestHandler):
+ def get(self, path):
+ path = self.request.path
+ _log.debug( "Cloudpipe path is %s" % path)
+ if path.endswith("/getca/"):
+ self.send_root_ca()
+ self.finish()
+
+ def get_project_id_from_ip(self, ip):
+ cc = self.application.controllers['Cloud']
+ instance = cc.get_instance_by_ip(ip)
+        return instance['project_id']
+
+ def send_root_ca(self):
+ _log.debug( "Getting root ca")
+ project_id = self.get_project_id_from_ip(self.request.remote_ip)
+ self.set_header("Content-Type", "text/plain")
+ self.write(crypto.fetch_ca(project_id))
+
+ def post(self, *args, **kwargs):
+ project_id = self.get_project_id_from_ip(self.request.remote_ip)
+ cert = self.get_argument('cert', '')
+ self.write(crypto.sign_csr(urllib.unquote(cert), project_id))
+ self.finish()
diff --git a/nova/cloudpipe/bootscript.sh b/nova/cloudpipe/bootscript.sh
new file mode 100755
index 000000000..639aad66f
--- /dev/null
+++ b/nova/cloudpipe/bootscript.sh
@@ -0,0 +1,65 @@
+#!/bin/bash
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This gets zipped and run on the cloudpipe-managed OpenVPN server
+
+export SUPERVISOR="http://10.255.255.1:8773/cloudpipe"
+export VPN_IP=`ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f2 | awk '{print $1}'`
+export BROADCAST=`ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f3 | awk '{print $1}'`
+export DHCP_MASK=`ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f4 | awk '{print $1}'`
+export GATEWAY=`netstat -r | grep default | cut -d' ' -f10`
+export SUBJ=/C=US/ST=California/L=Mountain View/O=Anso Labs/OU=Nova Dev/CN=customer-vpn-$VPN_IP
+
+DHCP_LOWER=`echo $BROADCAST | awk -F. '{print $1"."$2"."$3"." $4 - 10 }'`
+DHCP_UPPER=`echo $BROADCAST | awk -F. '{print $1"."$2"."$3"." $4 - 1 }'`
+
+# generate a server DH
+openssl dhparam -out /etc/openvpn/dh1024.pem 1024
+
+# generate a server priv key
+openssl genrsa -out /etc/openvpn/server.key 2048
+
+# generate a server CSR
+openssl req -new -key /etc/openvpn/server.key -out /etc/openvpn/server.csr -batch -subj "$SUBJ"
+
+# URLEncode the CSR
+CSRTEXT=`cat /etc/openvpn/server.csr`
+CSRTEXT=$(python -c "import urllib; print urllib.quote('''$CSRTEXT''')")
+
+# SIGN the csr and save as server.crt
+# CURL fetch to the supervisor, POSTing the CSR text, saving the result as the CRT file
+curl $SUPERVISOR -d "cert=$CSRTEXT" > /etc/openvpn/server.crt
+curl $SUPERVISOR/getca/ > /etc/openvpn/ca.crt
+
+# Customize the server.conf.template
+cd /etc/openvpn
+
+sed -e s/VPN_IP/$VPN_IP/g server.conf.template > server.conf
+sed -i -e s/DHCP_SUBNET/$DHCP_MASK/g server.conf
+sed -i -e s/DHCP_LOWER/$DHCP_LOWER/g server.conf
+sed -i -e s/DHCP_UPPER/$DHCP_UPPER/g server.conf
+sed -i -e s/max-clients\ 1/max-clients\ 10/g server.conf
+
+echo "\npush \"route 10.255.255.1 255.255.255.255 $GATEWAY\"\n" >> server.conf
+echo "\npush \"route 10.255.255.253 255.255.255.255 $GATEWAY\"\n" >> server.conf
+echo "\nduplicate-cn\n" >> server.conf
+
+/etc/init.d/openvpn start
diff --git a/nova/cloudpipe/client.ovpn.template b/nova/cloudpipe/client.ovpn.template
new file mode 100644
index 000000000..a8ec5dc6e
--- /dev/null
+++ b/nova/cloudpipe/client.ovpn.template
@@ -0,0 +1,47 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# NOVA user connection
+# Edit the following lines to point to your cert files:
+cert $certfile
+key $keyfile
+
+ca cacert.pem
+
+client
+dev tap
+proto udp
+
+remote $ip $port
+resolv-retry infinite
+nobind
+
+# Downgrade privileges after initialization (non-Windows only)
+user nobody
+group nogroup
+comp-lzo
+
+# Set log file verbosity.
+verb 2
+
+keepalive 10 120
+ping-timer-rem
+persist-tun
+persist-key
diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py
new file mode 100644
index 000000000..5f6ccf82e
--- /dev/null
+++ b/nova/cloudpipe/pipelib.py
@@ -0,0 +1,102 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+CloudPipe - Build a user-data payload zip file, and launch
+an instance with it.
+
+"""
+
+import logging
+import os
+import tempfile
+import base64
+from zipfile import ZipFile, ZIP_DEFLATED
+
+from nova import exception
+from nova import flags
+from nova.auth import users
+from nova import utils
+from nova.endpoint import api
+
+FLAGS = flags.FLAGS
+
+flags.DEFINE_string('boot_script_template',
+ utils.abspath('cloudpipe/bootscript.sh'),
+ 'Template for script to run on cloudpipe instance boot')
+
+class CloudPipe(object):
+ def __init__(self, cloud_controller):
+ self.controller = cloud_controller
+ self.manager = users.UserManager.instance()
+
+ def launch_vpn_instance(self, project_id):
+ logging.debug( "Launching VPN for %s" % (project_id))
+ project = self.manager.get_project(project_id)
+ # Make a payload.zip
+ tmpfolder = tempfile.mkdtemp()
+ filename = "payload.zip"
+ zippath = os.path.join(tmpfolder, filename)
+ z = ZipFile(zippath, "w", ZIP_DEFLATED)
+
+ z.write(FLAGS.boot_script_template,'autorun.sh')
+ z.close()
+
+ key_name = self.setup_keypair(project.project_manager_id, project_id)
+ zippy = open(zippath, "r")
+ context = api.APIRequestContext(handler=None, user=project.project_manager, project=project)
+
+ reservation = self.controller.run_instances(context,
+ # run instances expects encoded userdata, it is decoded in the get_metadata_call
+ # autorun.sh also decodes the zip file, hence the double encoding
+ user_data=zippy.read().encode("base64").encode("base64"),
+ max_count=1,
+ min_count=1,
+ instance_type='m1.tiny',
+ image_id=FLAGS.vpn_image_id,
+ key_name=key_name,
+ security_groups=["vpn-secgroup"])
+ zippy.close()
+
+ def setup_keypair(self, user_id, project_id):
+ key_name = '%s%s' % (project_id, FLAGS.vpn_key_suffix)
+ try:
+ private_key, fingerprint = self.manager.generate_key_pair(user_id, key_name)
+ try:
+ key_dir = os.path.join(FLAGS.keys_path, user_id)
+ if not os.path.exists(key_dir):
+ os.makedirs(key_dir)
+ with open(os.path.join(key_dir, '%s.pem' % key_name),'w') as f:
+ f.write(private_key)
+ except:
+ pass
+ except exception.Duplicate:
+ pass
+ return key_name
+
+ # def setup_secgroups(self, username):
+ # conn = self.euca.connection_for(username)
+ # try:
+ # secgroup = conn.create_security_group("vpn-secgroup", "vpn-secgroup")
+ # secgroup.authorize(ip_protocol = "udp", from_port = "1194", to_port = "1194", cidr_ip = "0.0.0.0/0")
+ # secgroup.authorize(ip_protocol = "tcp", from_port = "80", to_port = "80", cidr_ip = "0.0.0.0/0")
+ # secgroup.authorize(ip_protocol = "tcp", from_port = "22", to_port = "22", cidr_ip = "0.0.0.0/0")
+ # except:
+ # pass
diff --git a/nova/compute/__init__.py b/nova/compute/__init__.py
index e8a6921e7..1c688c100 100644
--- a/nova/compute/__init__.py
+++ b/nova/compute/__init__.py
@@ -1,16 +1,22 @@
-# Copyright [2010] [Anso Labs, LLC]
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
:mod:`nova.compute` -- Compute Nodes using LibVirt
diff --git a/nova/compute/disk.py b/nova/compute/disk.py
index d3eeb951f..b6398f41e 100644
--- a/nova/compute/disk.py
+++ b/nova/compute/disk.py
@@ -1,16 +1,22 @@
-# Copyright [2010] [Anso Labs, LLC]
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
Utility methods to resize, repartition, and modify disk images.
@@ -21,14 +27,21 @@ import logging
import os
import tempfile
-from nova.exception import Error
-from nova.utils import execute
+from nova import vendor
+from twisted.internet import defer
+
+from nova import exception
+
+@defer.inlineCallbacks
+def partition(infile, outfile, local_bytes=0, local_type='ext2', execute=None):
+ """Takes a single partition represented by infile and writes a bootable
+ drive image into outfile.
-def partition(infile, outfile, local_bytes=0, local_type='ext2'):
- """Takes a single partition represented by infile and writes a bootable drive image into outfile.
The first 63 sectors (0-62) of the resulting image is a master boot record.
Infile becomes the first primary partition.
- If local bytes is specified, a second primary partition is created and formatted as ext2.
+ If local bytes is specified, a second primary partition is created and
+ formatted as ext2.
+
In the diagram below, dashes represent drive sectors.
0 a b c d e
+-----+------. . .-------+------. . .------+
@@ -38,85 +51,106 @@ def partition(infile, outfile, local_bytes=0, local_type='ext2'):
sector_size = 512
file_size = os.path.getsize(infile)
if file_size % sector_size != 0:
- logging.warn("Input partition size not evenly divisible by sector size: %d / %d" (file_size, sector_size))
+ logging.warn("Input partition size not evenly divisible by"
+ " sector size: %d / %d", file_size, sector_size)
primary_sectors = file_size / sector_size
if local_bytes % sector_size != 0:
- logging.warn("Bytes for local storage not evenly divisible by sector size: %d / %d" (local_bytes, sector_size))
+ logging.warn("Bytes for local storage not evenly divisible"
+ " by sector size: %d / %d", local_bytes, sector_size)
local_sectors = local_bytes / sector_size
mbr_last = 62 # a
primary_first = mbr_last + 1 # b
- primary_last = primary_first + primary_sectors # c
+ primary_last = primary_first + primary_sectors - 1 # c
local_first = primary_last + 1 # d
- local_last = local_first + local_sectors # e
+ local_last = local_first + local_sectors - 1 # e
last_sector = local_last # e
# create an empty file
- execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d' % (outfile, last_sector, sector_size))
+ execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
+ % (outfile, last_sector, sector_size))
# make mbr partition
- execute('parted --script %s mklabel msdos' % outfile)
+ yield execute('parted --script %s mklabel msdos' % outfile)
# make primary partition
- execute('parted --script %s mkpart primary %ds %ds' % (outfile, primary_first, primary_last))
+ yield execute('parted --script %s mkpart primary %ds %ds'
+ % (outfile, primary_first, primary_last))
# make local partition
if local_bytes > 0:
- execute('parted --script %s mkpartfs primary %s %ds %ds' % (outfile, local_type, local_first, local_last))
+ yield execute('parted --script %s mkpartfs primary %s %ds %ds'
+ % (outfile, local_type, local_first, local_last))
# copy file into partition
- execute('dd if=%s of=%s bs=%d seek=%d conv=notrunc,fsync' % (infile, outfile, sector_size, primary_first))
+ yield execute('dd if=%s of=%s bs=%d seek=%d conv=notrunc,fsync'
+ % (infile, outfile, sector_size, primary_first))
+
+@defer.inlineCallbacks
+def inject_data(image, key=None, net=None, partition=None, execute=None):
+ """Injects a ssh key and optionally net data into a disk image.
+ it will mount the image as a fully partitioned disk and attempt to inject
+ into the specified partition number.
-def inject_key(key, image, partition=None):
- """Injects a ssh key into a disk image.
- It adds the specified key to /root/.ssh/authorized_keys
- it will mount the image as a fully partitioned disk and attempt to inject into the specified partition number.
If partition is not specified it mounts the image as a single partition.
+
"""
- out, err = execute('sudo losetup -f --show %s' % image)
+ out, err = yield execute('sudo losetup -f --show %s' % image)
if err:
- raise Error('Could not attach image to loopback: %s' % err)
+ raise exception.Error('Could not attach image to loopback: %s' % err)
device = out.strip()
try:
if not partition is None:
# create partition
- out, err = execute('sudo kpartx -a %s' % device)
+ out, err = yield execute('sudo kpartx -a %s' % device)
if err:
- raise Error('Failed to load partition: %s' % err)
- mapped_device = '/dev/mapper/%sp%s' % ( device.split('/')[-1] , partition )
+ raise exception.Error('Failed to load partition: %s' % err)
+ mapped_device = '/dev/mapper/%sp%s' % (device.split('/')[-1],
+ partition)
else:
mapped_device = device
- out, err = execute('sudo tune2fs -c 0 -i 0 %s' % mapped_device)
+ out, err = yield execute('sudo tune2fs -c 0 -i 0 %s' % mapped_device)
tmpdir = tempfile.mkdtemp()
try:
# mount loopback to dir
- out, err = execute('sudo mount %s %s' % (mapped_device, tmpdir))
+ out, err = yield execute(
+ 'sudo mount %s %s' % (mapped_device, tmpdir))
if err:
- raise Error('Failed to mount filesystem: %s' % err)
+ raise exception.Error('Failed to mount filesystem: %s' % err)
try:
- # inject key file
- _inject_into_fs(key, tmpdir)
+ if key:
+ # inject key file
+ yield _inject_key_into_fs(key, tmpdir, execute=execute)
+ if net:
+ yield _inject_net_into_fs(net, tmpdir, execute=execute)
finally:
# unmount device
- execute('sudo umount %s' % mapped_device)
+ yield execute('sudo umount %s' % mapped_device)
finally:
# remove temporary directory
- os.rmdir(tmpdir)
+ yield execute('rmdir %s' % tmpdir)
if not partition is None:
# remove partitions
- execute('sudo kpartx -d %s' % device)
+ yield execute('sudo kpartx -d %s' % device)
finally:
# remove loopback
- execute('sudo losetup -d %s' % device)
+ yield execute('sudo losetup -d %s' % device)
-def _inject_into_fs(key, fs):
+@defer.inlineCallbacks
+def _inject_key_into_fs(key, fs, execute=None):
sshdir = os.path.join(os.path.join(fs, 'root'), '.ssh')
- execute('sudo mkdir %s' % sshdir) #error on existing dir doesn't matter
- execute('sudo chown root %s' % sshdir)
- execute('sudo chmod 700 %s' % sshdir)
+ yield execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter
+ yield execute('sudo chown root %s' % sshdir)
+ yield execute('sudo chmod 700 %s' % sshdir)
keyfile = os.path.join(sshdir, 'authorized_keys')
- execute('sudo bash -c "cat >> %s"' % keyfile, '\n' + key + '\n')
+ yield execute('sudo tee -a %s' % keyfile, '\n' + key.strip() + '\n')
+
+@defer.inlineCallbacks
+def _inject_net_into_fs(net, fs, execute=None):
+ netfile = os.path.join(os.path.join(os.path.join(
+ fs, 'etc'), 'network'), 'interfaces')
+ yield execute('sudo tee %s' % netfile, net)
diff --git a/nova/compute/exception.py b/nova/compute/exception.py
index 6fe8e381f..b2bfc39e6 100644
--- a/nova/compute/exception.py
+++ b/nova/compute/exception.py
@@ -1,16 +1,22 @@
-# Copyright [2010] [Anso Labs, LLC]
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
Exceptions for Compute Node errors, mostly network addressing.
diff --git a/nova/compute/fakevirtinstance.xml b/nova/compute/fakevirtinstance.xml
index 6036516bb..0df76f5ef 100644
--- a/nova/compute/fakevirtinstance.xml
+++ b/nova/compute/fakevirtinstance.xml
@@ -1,18 +1,22 @@
<!--
-# Copyright [2010] [Anso Labs, LLC]
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
- -->
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+-->
<domain type='kvm' id='100'>
<name>i-A9B8C7D6</name>
<uuid>12a345bc-67c8-901d-2e34-56f7g89012h3</uuid>
diff --git a/nova/compute/interfaces.template b/nova/compute/interfaces.template
new file mode 100644
index 000000000..11df301f6
--- /dev/null
+++ b/nova/compute/interfaces.template
@@ -0,0 +1,18 @@
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+# The primary network interface
+auto eth0
+iface eth0 inet static
+ address %(address)s
+ netmask %(netmask)s
+ network %(network)s
+ broadcast %(broadcast)s
+ gateway %(gateway)s
+ dns-nameservers %(dns)s
+
+
diff --git a/nova/compute/libvirt.xml.template b/nova/compute/libvirt.xml.template
index 4cf6e8b10..11dab7eb8 100644
--- a/nova/compute/libvirt.xml.template
+++ b/nova/compute/libvirt.xml.template
@@ -1,18 +1,22 @@
<!--
-# Copyright [2010] [Anso Labs, LLC]
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
- -->
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+-->
<domain type='kvm'>
<name>%(name)s</name>
<os>
diff --git a/nova/compute/linux_net.py b/nova/compute/linux_net.py
index b9f775aa3..c9e5bb1a7 100644
--- a/nova/compute/linux_net.py
+++ b/nova/compute/linux_net.py
@@ -1,5 +1,23 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
import logging
import signal
import os
@@ -44,6 +62,9 @@ def remove_rule(cmd):
def bind_public_ip(ip, interface):
runthis("Binding IP to interface: %s", "sudo ip addr add %s dev %s" % (ip, interface))
+
+def unbind_public_ip(ip, interface):
+ runthis("Binding IP to interface: %s", "sudo ip addr del %s dev %s" % (ip, interface))
def vlan_create(net):
""" create a vlan on on a bridge device unless vlan already exists """
@@ -58,7 +79,7 @@ def bridge_create(net):
if not device_exists(net['bridge_name']):
logging.debug("Starting Bridge inteface for %s network", (net['vlan']))
execute("sudo brctl addbr %s" % (net['bridge_name']))
- # execute("sudo brctl setfd %s 0" % (net.bridge_name))
+ execute("sudo brctl setfd %s 0" % (net.bridge_name))
# execute("sudo brctl setageing %s 10" % (net.bridge_name))
execute("sudo brctl stp %s off" % (net['bridge_name']))
execute("sudo brctl addif %s vlan%s" % (net['bridge_name'], net['vlan']))
@@ -77,10 +98,10 @@ def dnsmasq_cmd(net):
' --pid-file=%s' % dhcp_file(net['vlan'], 'pid'),
' --listen-address=%s' % net.dhcp_listen_address,
' --except-interface=lo',
- ' --dhcp-range=%s,static,120s' % (net.dhcp_range_start),
- ' --dhcp-lease-max=61',
+ ' --dhcp-range=%s,static,600s' % (net.dhcp_range_start),
' --dhcp-hostsfile=%s' % dhcp_file(net['vlan'], 'conf'),
- ' --dhcp-leasefile=%s' % dhcp_file(net['vlan'], 'leases')]
+ ' --dhcp-script=%s' % bin_file('dhcpleasor.py'),
+ ' --leasefile-ro']
return ''.join(cmd)
def hostDHCP(network, host, mac):
@@ -136,6 +157,9 @@ def dhcp_file(vlan, kind):
return os.path.abspath("%s/nova-%s.%s" % (FLAGS.networks_path, vlan, kind))
+def bin_file(script):
+ return os.path.abspath(os.path.join(__file__, "../../../bin", script))
+
def dnsmasq_pid_for(network):
""" the pid for prior dnsmasq instance for a vlan,
returns None if no pid file exists
diff --git a/nova/compute/model.py b/nova/compute/model.py
index 2754e9e6d..cc5f74b3d 100644
--- a/nova/compute/model.py
+++ b/nova/compute/model.py
@@ -1,17 +1,22 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
Datastore Model objects for Compute Instances, with
@@ -37,132 +42,93 @@ True
True
"""
+import logging
+import time
+
from nova import vendor
+import redis
from nova import datastore
+from nova import exception
from nova import flags
from nova import utils
FLAGS = flags.FLAGS
-flags.DEFINE_string('instances_prefix', 'compute-',
- 'prefix for keepers for instances')
-# TODO(ja): singleton instance of the directory
+# TODO(todd): Implement this at the class level for Instance
class InstanceDirectory(object):
- """an api for interacting with the global state of instances """
- def __init__(self):
- self.keeper = datastore.Keeper(FLAGS.instances_prefix)
+ """an api for interacting with the global state of instances"""
def get(self, instance_id):
- """ returns an instance object for a given id """
+ """returns an instance object for a given id"""
return Instance(instance_id)
def __getitem__(self, item):
return self.get(item)
+ @datastore.absorb_connection_error
def by_project(self, project):
- """ returns a list of instance objects for a project """
- for instance_id in self.keeper.smembers('project:%s:instances' % project):
+ """returns a list of instance objects for a project"""
+ for instance_id in datastore.Redis.instance().smembers('project:%s:instances' % project):
yield Instance(instance_id)
def by_node(self, node_id):
- """ returns a list of instances for a node """
+ """returns a list of instances for a node"""
for instance in self.all:
if instance['node_name'] == node_id:
yield instance
def by_ip(self, ip_address):
- """ returns an instance object that is using the IP """
+ """returns an instance object that is using the IP"""
for instance in self.all:
if instance['private_dns_name'] == ip_address:
return instance
return None
def by_volume(self, volume_id):
- """ returns the instance a volume is attached to """
+ """returns the instance a volume is attached to"""
pass
+ @datastore.absorb_connection_error
def exists(self, instance_id):
- return self.keeper.set_is_member('instances', instance_id)
+ return datastore.Redis.instance().sismember('instances', instance_id)
@property
+ @datastore.absorb_connection_error
def all(self):
- """ returns a list of all instances """
- for instance_id in self.keeper.set_members('instances'):
+ """returns a list of all instances"""
+ for instance_id in datastore.Redis.instance().smembers('instances'):
yield Instance(instance_id)
def new(self):
- """ returns an empty Instance object, with ID """
+ """returns an empty Instance object, with ID"""
instance_id = utils.generate_uid('i')
return self.get(instance_id)
-
-class Instance(object):
- """ Wrapper around stored properties of an instance """
+class Instance(datastore.BasicModel):
+ """Wrapper around stored properties of an instance"""
def __init__(self, instance_id):
- """ loads an instance from the datastore if exists """
- self.keeper = datastore.Keeper(FLAGS.instances_prefix)
+ """loads an instance from the datastore if exists"""
+ # set instance data before super call since it uses default_state
self.instance_id = instance_id
- self.initial_state = {}
- self.state = self.keeper[self.__redis_key]
- if self.state:
- self.initial_state = self.state
- else:
- self.state = {'state': 'pending',
- 'instance_id': instance_id,
- 'node_name': 'unassigned',
- 'project_id': 'unassigned',
- 'user_id': 'unassigned'
- }
-
- @property
- def __redis_key(self):
- """ Magic string for instance keys """
- return 'instance:%s' % self.instance_id
-
- def __repr__(self):
- return "<Instance:%s>" % self.instance_id
-
- def get(self, item, default):
- return self.state.get(item, default)
-
- def __getitem__(self, item):
- return self.state[item]
-
- def __setitem__(self, item, val):
- self.state[item] = val
- return self.state[item]
+ super(Instance, self).__init__()
- def __delitem__(self, item):
- """ We don't support this """
- raise Exception("Silly monkey, Instances NEED all their properties.")
+ def default_state(self):
+ return {'state': 0,
+ 'state_description': 'pending',
+ 'instance_id': self.instance_id,
+ 'node_name': 'unassigned',
+ 'project_id': 'unassigned',
+ 'user_id': 'unassigned'}
- def save(self):
- """ update the directory with the state from this instance
- make sure you've set the project_id and user_id before you call save
- for the first time.
- """
- # TODO(ja): implement hmset in redis-py and use it
- # instead of multiple calls to hset
- state = self.keeper[self.__redis_key]
- if not state:
- state = {}
- for key, val in self.state.iteritems():
- # if (not self.initial_state.has_key(key)
- # or self.initial_state[key] != val):
- state[key] = val
- self.keeper[self.__redis_key] = state
- if self.initial_state == {}:
- self.keeper.set_add('project:%s:instances' % self.project,
- self.instance_id)
- self.keeper.set_add('instances', self.instance_id)
- self.initial_state = self.state
- return True
+ @property
+ def identifier(self):
+ return self.instance_id
@property
def project(self):
@@ -170,40 +136,101 @@ class Instance(object):
return self.state['project_id']
return self.state.get('owner_id', 'unassigned')
- def destroy(self):
- """ deletes all related records from datastore.
- does NOT do anything to running libvirt state.
- """
- self.keeper.set_remove('project:%s:instances' % self.project,
- self.instance_id)
- del self.keeper[self.__redis_key]
- self.keeper.set_remove('instances', self.instance_id)
- return True
-
@property
def volumes(self):
- """ returns a list of attached volumes """
+ """returns a list of attached volumes"""
pass
@property
def reservation(self):
- """ Returns a reservation object """
+ """Returns a reservation object"""
pass
-# class Reservation(object):
-# """ ORM wrapper for a batch of launched instances """
-# def __init__(self):
-# pass
-#
-# def userdata(self):
-# """ """
-# pass
-#
-#
-# class NodeDirectory(object):
-# def __init__(self):
-# pass
-#
+ def save(self):
+ """Call into superclass to save object, then save associations"""
+ # NOTE(todd): doesn't track migration between projects/nodes,
+ # it just adds the first one
+ should_update_project = self.is_new_record()
+ should_update_node = self.is_new_record()
+ success = super(Instance, self).save()
+ if success and should_update_project:
+ self.associate_with("project", self.project)
+ if success and should_update_node:
+ self.associate_with("node", self['node_name'])
+ return True
+
+ def destroy(self):
+ """Destroy associations, then destroy the object"""
+ self.unassociate_with("project", self.project)
+ self.unassociate_with("node", self['node_name'])
+ return super(Instance, self).destroy()
+
+class Host(datastore.BasicModel):
+ """A Host is the machine where a Daemon is running."""
+
+ def __init__(self, hostname):
+ """loads an instance from the datastore if exists"""
+ # set instance data before super call since it uses default_state
+ self.hostname = hostname
+ super(Host, self).__init__()
+
+ def default_state(self):
+ return {"hostname": self.hostname}
+
+ @property
+ def identifier(self):
+ return self.hostname
+
+
+class Daemon(datastore.BasicModel):
+ """A Daemon is a job (compute, api, network, ...) that runs on a host."""
+
+ def __init__(self, host_or_combined, binpath=None):
+ """loads an instance from the datastore if exists"""
+ # set instance data before super call since it uses default_state
+ # since loading from datastore expects a combined key that
+        # is equivalent to identifier, we need to expect that, while
+ # maintaining meaningful semantics (2 arguments) when creating
+ # from within other code like the bin/nova-* scripts
+ if binpath:
+ self.hostname = host_or_combined
+ self.binary = binpath
+ else:
+ self.hostname, self.binary = host_or_combined.split(":")
+ super(Daemon, self).__init__()
+
+ def default_state(self):
+ return {"hostname": self.hostname,
+ "binary": self.binary,
+ "updated_at": utils.isotime()
+ }
+
+ @property
+ def identifier(self):
+ return "%s:%s" % (self.hostname, self.binary)
+
+ def save(self):
+ """Call into superclass to save object, then save associations"""
+        # NOTE(todd): this makes no attempt to destroy itself,
+        #             so after termination a record w/ old timestamp remains
+ success = super(Daemon, self).save()
+ if success:
+ self.associate_with("host", self.hostname)
+ return True
+
+ def destroy(self):
+ """Destroy associations, then destroy the object"""
+ self.unassociate_with("host", self.hostname)
+ return super(Daemon, self).destroy()
+
+ def heartbeat(self):
+ self['updated_at'] = utils.isotime()
+ return self.save()
+
+ @classmethod
+ def by_host(cls, hostname):
+ for x in cls.associated_to("host", hostname):
+ yield x
if __name__ == "__main__":
import doctest
diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py
new file mode 100644
index 000000000..c17cd0be4
--- /dev/null
+++ b/nova/compute/monitor.py
@@ -0,0 +1,516 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Instance Monitoring:
+
+ Optionally may be run on each compute node. Provides RRD
+ based statistics and graphs and makes them internally available
+ in the object store.
+"""
+
+import datetime
+import logging
+import os
+import sys
+import time
+
+try:
+ import libvirt
+except Exception, err:
+ logging.warning('no libvirt found')
+
+from nova import flags
+from nova import vendor
+import boto
+import boto.s3
+import libxml2
+import rrdtool
+from twisted.internet import defer
+from twisted.internet import task
+from twisted.application import service
+
+FLAGS = flags.FLAGS
+flags.DEFINE_integer(
+ 'monitoring_instances_delay', 5, 'Sleep time between updates')
+flags.DEFINE_integer(
+ 'monitoring_instances_step', 300, 'Interval of RRD updates')
+flags.DEFINE_string(
+ 'monitoring_rrd_path', '/var/nova/monitor/instances',
+ 'Location of RRD files')
+
+
+RRD_VALUES = {
+ 'cpu': [
+ 'DS:cpu:GAUGE:600:0:100',
+ 'RRA:AVERAGE:0.5:1:800',
+ 'RRA:AVERAGE:0.5:6:800',
+ 'RRA:AVERAGE:0.5:24:800',
+ 'RRA:AVERAGE:0.5:288:800',
+ 'RRA:MAX:0.5:1:800',
+ 'RRA:MAX:0.5:6:800',
+ 'RRA:MAX:0.5:24:800',
+ 'RRA:MAX:0.5:288:800',
+ ],
+ 'net': [
+ 'DS:rx:COUNTER:600:0:1250000',
+ 'DS:tx:COUNTER:600:0:1250000',
+ 'RRA:AVERAGE:0.5:1:800',
+ 'RRA:AVERAGE:0.5:6:800',
+ 'RRA:AVERAGE:0.5:24:800',
+ 'RRA:AVERAGE:0.5:288:800',
+ 'RRA:MAX:0.5:1:800',
+ 'RRA:MAX:0.5:6:800',
+ 'RRA:MAX:0.5:24:800',
+ 'RRA:MAX:0.5:288:800',
+ ],
+ 'disk': [
+ 'DS:rd:COUNTER:600:U:U',
+ 'DS:wr:COUNTER:600:U:U',
+ 'RRA:AVERAGE:0.5:1:800',
+ 'RRA:AVERAGE:0.5:6:800',
+ 'RRA:AVERAGE:0.5:24:800',
+ 'RRA:AVERAGE:0.5:288:800',
+ 'RRA:MAX:0.5:1:800',
+ 'RRA:MAX:0.5:6:800',
+ 'RRA:MAX:0.5:24:800',
+        'RRA:MAX:0.5:288:800',
+ ]
+}
+
+
+utcnow = datetime.datetime.utcnow
+
+def update_rrd(instance, name, data):
+ """
+ Updates the specified RRD file.
+ """
+ filename = os.path.join(instance.get_rrd_path(), '%s.rrd' % name)
+
+ if not os.path.exists(filename):
+ init_rrd(instance, name)
+
+ timestamp = int(time.mktime(utcnow().timetuple()))
+ rrdtool.update (
+ filename,
+ '%d:%s' % (timestamp, data)
+ )
+
+def init_rrd(instance, name):
+ """
+ Initializes the specified RRD file.
+ """
+ path = os.path.join(FLAGS.monitoring_rrd_path, instance.instance_id)
+
+ if not os.path.exists(path):
+ os.makedirs(path)
+
+ filename = os.path.join(path, '%s.rrd' % name)
+
+ if not os.path.exists(filename):
+ rrdtool.create (
+ filename,
+ '--step', '%d' % FLAGS.monitoring_instances_step,
+ '--start', '0',
+ *RRD_VALUES[name]
+ )
+
+def get_disks(domain):
+ """
+ Returns a list of all block devices for this domain.
+ """
+ # TODO(devcamcar): Replace libxml2 with etree.
+ xml = domain.XMLDesc(0)
+ doc = None
+
+ try:
+ doc = libxml2.parseDoc(xml)
+ except:
+ return []
+
+ ctx = doc.xpathNewContext()
+ disks = []
+
+ try:
+ ret = ctx.xpathEval('/domain/devices/disk')
+
+ for node in ret:
+ devdst = None
+
+ for child in node.children:
+ if child.name == 'target':
+ devdst = child.prop('dev')
+
+ if devdst == None:
+ continue
+
+ disks.append(devdst)
+ finally:
+ if ctx != None:
+ ctx.xpathFreeContext()
+ if doc != None:
+ doc.freeDoc()
+
+ return disks
+
+def get_interfaces(domain):
+ """
+ Returns a list of all network interfaces for this instance.
+ """
+ # TODO(devcamcar): Replace libxml2 with etree.
+ xml = domain.XMLDesc(0)
+ doc = None
+
+ try:
+ doc = libxml2.parseDoc(xml)
+ except:
+ return []
+
+ ctx = doc.xpathNewContext()
+ interfaces = []
+
+ try:
+ ret = ctx.xpathEval('/domain/devices/interface')
+
+ for node in ret:
+ devdst = None
+
+ for child in node.children:
+ if child.name == 'target':
+ devdst = child.prop('dev')
+
+ if devdst == None:
+ continue
+
+ interfaces.append(devdst)
+ finally:
+ if ctx != None:
+ ctx.xpathFreeContext()
+ if doc != None:
+ doc.freeDoc()
+
+ return interfaces
+
+
+def graph_cpu(instance, duration):
+ """
+ Creates a graph of cpu usage for the specified instance and duration.
+ """
+ path = instance.get_rrd_path()
+ filename = os.path.join(path, 'cpu-%s.png' % duration)
+
+ rrdtool.graph (
+ filename,
+ '--disable-rrdtool-tag',
+ '--imgformat', 'PNG',
+ '--width', '400',
+ '--height', '120',
+ '--start', 'now-%s' % duration,
+ '--vertical-label', '% cpu used',
+ '-l', '0',
+ '-u', '100',
+ 'DEF:cpu=%s:cpu:AVERAGE' % os.path.join(path, 'cpu.rrd'),
+ 'AREA:cpu#eacc00:% CPU',
+ )
+
+ store_graph(instance.instance_id, filename)
+
+def graph_net(instance, duration):
+ """
+ Creates a graph of network usage for the specified instance and duration.
+ """
+ path = instance.get_rrd_path()
+ filename = os.path.join(path, 'net-%s.png' % duration)
+
+ rrdtool.graph (
+ filename,
+ '--disable-rrdtool-tag',
+ '--imgformat', 'PNG',
+ '--width', '400',
+ '--height', '120',
+ '--start', 'now-%s' % duration,
+ '--vertical-label', 'bytes/s',
+ '--logarithmic',
+ '--units', 'si',
+ '--lower-limit', '1000',
+ '--rigid',
+ 'DEF:rx=%s:rx:AVERAGE' % os.path.join(path, 'net.rrd'),
+ 'DEF:tx=%s:tx:AVERAGE' % os.path.join(path, 'net.rrd'),
+ 'AREA:rx#00FF00:In traffic',
+ 'LINE1:tx#0000FF:Out traffic',
+ )
+
+ store_graph(instance.instance_id, filename)
+
+def graph_disk(instance, duration):
+ """
+ Creates a graph of disk usage for the specified duration.
+ """
+ path = instance.get_rrd_path()
+ filename = os.path.join(path, 'disk-%s.png' % duration)
+
+ rrdtool.graph (
+ filename,
+ '--disable-rrdtool-tag',
+ '--imgformat', 'PNG',
+ '--width', '400',
+ '--height', '120',
+ '--start', 'now-%s' % duration,
+ '--vertical-label', 'bytes/s',
+ '--logarithmic',
+ '--units', 'si',
+ '--lower-limit', '1000',
+ '--rigid',
+ 'DEF:rd=%s:rd:AVERAGE' % os.path.join(path, 'disk.rrd'),
+ 'DEF:wr=%s:wr:AVERAGE' % os.path.join(path, 'disk.rrd'),
+ 'AREA:rd#00FF00:Read',
+ 'LINE1:wr#0000FF:Write',
+ )
+
+ store_graph(instance.instance_id, filename)
+
+def store_graph(instance_id, filename):
+ """
+ Transmits the specified graph file to internal object store on cloud
+ controller.
+ """
+ # TODO(devcamcar): Need to use an asynchronous method to make this
+ # connection. If boto has some separate method that generates
+ # the request it would like to make and another method to parse
+ # the response we can make our own client that does the actual
+ # request and hands it off to the response parser.
+ s3 = boto.s3.connection.S3Connection(
+ aws_access_key_id='admin',
+ aws_secret_access_key='admin',
+ is_secure=False,
+ calling_format=boto.s3.connection.OrdinaryCallingFormat(),
+ port=FLAGS.s3_port,
+ host=FLAGS.s3_host
+ )
+ bucket_name = '_%s.monitor' % instance_id
+
+ # Object store isn't creating the bucket like it should currently
+ # when it is first requested, so have to catch and create manually.
+ try:
+ bucket = s3.get_bucket(bucket_name)
+ except Exception:
+ bucket = s3.create_bucket(bucket_name)
+
+ key = boto.s3.Key(bucket)
+ key.key = os.path.basename(filename)
+ key.set_contents_from_filename(filename)
+
+
+class Instance(object):
+ def __init__(self, conn, domain):
+ self.conn = conn
+ self.domain = domain
+ self.instance_id = domain.name()
+ self.last_updated = datetime.datetime.min
+ self.cputime = 0
+ self.cputime_last_updated = None
+
+ init_rrd(self, 'cpu')
+ init_rrd(self, 'net')
+ init_rrd(self, 'disk')
+
+ def needs_update(self):
+ """
+ Indicates whether this instance is due to have its statistics updated.
+ """
+ delta = utcnow() - self.last_updated
+ return delta.seconds >= FLAGS.monitoring_instances_step
+
+ def update(self):
+ """
+ Updates the instances statistics and stores the resulting graphs
+ in the internal object store on the cloud controller.
+ """
+ logging.debug('updating %s...', self.instance_id)
+
+ try:
+ data = self.fetch_cpu_stats()
+ if data != None:
+ logging.debug('CPU: %s', data)
+ update_rrd(self, 'cpu', data)
+
+ data = self.fetch_net_stats()
+ logging.debug('NET: %s', data)
+ update_rrd(self, 'net', data)
+
+ data = self.fetch_disk_stats()
+ logging.debug('DISK: %s', data)
+ update_rrd(self, 'disk', data)
+
+ # TODO(devcamcar): Turn these into pool.ProcessPool.execute() calls
+ # and make the methods @defer.inlineCallbacks.
+ graph_cpu(self, '1d')
+ graph_cpu(self, '1w')
+ graph_cpu(self, '1m')
+
+ graph_net(self, '1d')
+ graph_net(self, '1w')
+ graph_net(self, '1m')
+
+ graph_disk(self, '1d')
+ graph_disk(self, '1w')
+ graph_disk(self, '1m')
+ except Exception:
+ logging.exception('unexpected error during update')
+
+ self.last_updated = utcnow()
+
+ def get_rrd_path(self):
+ """
+ Returns the path to where RRD files are stored.
+ """
+ return os.path.join(FLAGS.monitoring_rrd_path, self.instance_id)
+
+ def fetch_cpu_stats(self):
+ """
+ Returns cpu usage statistics for this instance.
+ """
+ info = self.domain.info()
+
+ # Get the previous values.
+ cputime_last = self.cputime
+ cputime_last_updated = self.cputime_last_updated
+
+ # Get the raw CPU time used in nanoseconds.
+ self.cputime = float(info[4])
+ self.cputime_last_updated = utcnow()
+
+ logging.debug('CPU: %d', self.cputime)
+
+ # Skip calculation on first pass. Need delta to get a meaningful value.
+ if cputime_last_updated == None:
+ return None
+
+ # Calculate the number of seconds between samples.
+ d = self.cputime_last_updated - cputime_last_updated
+ t = d.days * 86400 + d.seconds
+
+ logging.debug('t = %d', t)
+
+ # Calculate change over time in number of nanoseconds of CPU time used.
+ cputime_delta = self.cputime - cputime_last
+
+ logging.debug('cputime_delta = %s', cputime_delta)
+
+ # Get the number of virtual cpus in this domain.
+ vcpus = int(info[3])
+
+ logging.debug('vcpus = %d', vcpus)
+
+ # Calculate CPU % used and cap at 100.
+ return min(cputime_delta / (t * vcpus * 1.0e9) * 100, 100)
+
+ def fetch_disk_stats(self):
+ """
+ Returns disk usage statistics for this instance.
+ """
+ rd = 0
+ wr = 0
+
+ # Get a list of block devices for this instance.
+ disks = get_disks(self.domain)
+
+ # Aggregate the read and write totals.
+ for disk in disks:
+ try:
+ rd_req, rd_bytes, wr_req, wr_bytes, errs = \
+ self.domain.blockStats(disk)
+ rd += rd_bytes
+ wr += wr_bytes
+ except TypeError:
+ logging.error('Cannot get blockstats for "%s" on "%s"',
+ disk, self.instance_id)
+ raise
+
+ return '%d:%d' % (rd, wr)
+
+ def fetch_net_stats(self):
+ """
+ Returns network usage statistics for this instance.
+ """
+ rx = 0
+ tx = 0
+
+ # Get a list of all network interfaces for this instance.
+ interfaces = get_interfaces(self.domain)
+
+ # Aggregate the in and out totals.
+ for interface in interfaces:
+ try:
+ stats = self.domain.interfaceStats(interface)
+ rx += stats[0]
+ tx += stats[4]
+ except TypeError:
+ logging.error('Cannot get ifstats for "%s" on "%s"',
+ interface, self.instance_id)
+ raise
+
+ return '%d:%d' % (rx, tx)
+
+
+class InstanceMonitor(object, service.Service):
+ """
+ Monitors the running instances of the current machine.
+ """
+ def __init__(self):
+ """
+ Initialize the monitoring loop.
+ """
+ self._instances = {}
+ self._loop = task.LoopingCall(self.updateInstances)
+
+ def startService(self):
+ self._instances = {}
+ self._loop.start(interval=FLAGS.monitoring_instances_delay)
+ service.Service.startService(self)
+
+ def stopService(self):
+ self._loop.stop()
+ service.Service.stopService(self)
+
+ def updateInstances(self):
+ """
+ Update resource usage for all running instances.
+ """
+ try:
+ conn = libvirt.openReadOnly(None)
+ except libvirt.libvirtError:
+ logging.exception('unexpected libvirt error')
+ time.sleep(FLAGS.monitoring_instances_delay)
+ return
+
+ domain_ids = conn.listDomainsID()
+
+ for domain_id in domain_ids:
+ if not domain_id in self._instances:
+ domain = conn.lookupByID(domain_id)
+ instance = Instance(conn, domain)
+ self._instances[domain_id] = instance
+ logging.debug('Found instance: %s', instance.instance_id)
+
+ for key in self._instances.keys():
+ instance = self._instances[key]
+ if instance.needs_update():
+ instance.update()
diff --git a/nova/compute/network.py b/nova/compute/network.py
index 4d5f4bf5f..e5d3d18df 100644
--- a/nova/compute/network.py
+++ b/nova/compute/network.py
@@ -1,17 +1,22 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
Classes for network control, including VLANs, DHCP, and IP allocation.
@@ -26,10 +31,10 @@ from nova import vendor
import IPy
from nova import datastore
-import nova.exception
-from nova.compute import exception
from nova import flags
from nova import utils
+from nova import exception
+from nova.compute import exception as compute_exception
from nova.auth import users
import linux_net
@@ -48,21 +53,126 @@ flags.DEFINE_integer('network_size', 256,
'Number of addresses in each private subnet')
flags.DEFINE_string('public_range', '4.4.4.0/24', 'Public IP address block')
flags.DEFINE_string('private_range', '10.0.0.0/8', 'Private IP address block')
+flags.DEFINE_integer('cnt_vpn_clients', 5,
+ 'Number of addresses reserved for vpn clients')
+flags.DEFINE_integer('cloudpipe_start_port', 12000,
+ 'Starting port for mapped CloudPipe external ports')
+
+flags.DEFINE_boolean('simple_network', False,
+ 'Use simple networking instead of vlans')
+flags.DEFINE_string('simple_network_bridge', 'br100',
+ 'Bridge for simple network instances')
+flags.DEFINE_list('simple_network_ips', ['192.168.0.2'],
+ 'Available ips for simple network')
+flags.DEFINE_string('simple_network_template',
+ utils.abspath('compute/interfaces.template'),
+ 'Template file for simple network')
+flags.DEFINE_string('simple_network_netmask', '255.255.255.0',
+ 'Netmask for simple network')
+flags.DEFINE_string('simple_network_network', '192.168.0.0',
+ 'Network for simple network')
+flags.DEFINE_string('simple_network_gateway', '192.168.0.1',
+                    'Gateway for simple network')
+flags.DEFINE_string('simple_network_broadcast', '192.168.0.255',
+ 'Broadcast for simple network')
+flags.DEFINE_string('simple_network_dns', '8.8.4.4',
+ 'Dns for simple network')
logging.getLogger().setLevel(logging.DEBUG)
-class BaseNetwork(datastore.RedisModel):
- bridge_gets_ip = False
- object_type = 'network'
+class Vlan(datastore.BasicModel):
+ def __init__(self, project, vlan):
+ """
+ Since we don't want to try and find a vlan by its identifier,
+ but by a project id, we don't call super-init.
+ """
+ self.project_id = project
+ self.vlan_id = vlan
+
+ @property
+ def identifier(self):
+ return "%s:%s" % (self.project_id, self.vlan_id)
+
+ @classmethod
+ def create(cls, project, vlan):
+ instance = cls(project, vlan)
+ instance.save()
+ return instance
+
+ @classmethod
+ @datastore.absorb_connection_error
+ def lookup(cls, project):
+ set_name = cls._redis_set_name(cls.__name__)
+ vlan = datastore.Redis.instance().hget(set_name, project)
+ if vlan:
+ return cls(project, vlan)
+ else:
+ return None
+
+ @classmethod
+ @datastore.absorb_connection_error
+ def dict_by_project(cls):
+ """a hash of project:vlan"""
+ set_name = cls._redis_set_name(cls.__name__)
+ return datastore.Redis.instance().hgetall(set_name)
+
+ @classmethod
+ @datastore.absorb_connection_error
+ def dict_by_vlan(cls):
+ """a hash of vlan:project"""
+ set_name = cls._redis_set_name(cls.__name__)
+ rv = {}
+ h = datastore.Redis.instance().hgetall(set_name)
+ for v in h.keys():
+ rv[h[v]] = v
+ return rv
@classmethod
- def get_all_hosts(cls):
- for vlan in get_assigned_vlans().values():
- network_str = get_subnet_from_vlan(vlan)
- for addr in datastore.Redis.instance().hgetall(
- "network:%s:hosts" % (network_str)):
- yield addr
+ @datastore.absorb_connection_error
+ def all(cls):
+ set_name = cls._redis_set_name(cls.__name__)
+        for project, vlan in datastore.Redis.instance().hgetall(set_name).iteritems():
+ yield cls(project, vlan)
+
+ @datastore.absorb_connection_error
+ def save(self):
+ """
+ Vlan saves state into a giant hash named "vlans", with keys of
+ project_id and value of vlan number. Therefore, we skip the
+ default way of saving into "vlan:ID" and adding to a set of "vlans".
+ """
+ set_name = self._redis_set_name(self.__class__.__name__)
+ datastore.Redis.instance().hset(set_name, self.project_id, self.vlan_id)
+
+ @datastore.absorb_connection_error
+ def destroy(self):
+ set_name = self._redis_set_name(self.__class__.__name__)
+        datastore.Redis.instance().hdel(set_name, self.project_id)
+
+ def subnet(self):
+ vlan = int(self.vlan_id)
+ network = IPy.IP(FLAGS.private_range)
+ start = (vlan-FLAGS.vlan_start) * FLAGS.network_size
+ # minus one for the gateway.
+ return "%s-%s" % (network[start],
+ network[start + FLAGS.network_size - 1])
+
+# CLEANUP:
+# TODO(ja): Save the IPs at the top of each subnet for cloudpipe vpn clients
+# TODO(ja): use singleton for usermanager instead of self.manager in vlanpool et al
+# TODO(ja): does vlanpool "keeper" need to know the min/max - shouldn't FLAGS always win?
+# TODO(joshua): Save the IPs at the top of each subnet for cloudpipe vpn clients
+
+class BaseNetwork(datastore.BasicModel):
+ override_type = 'network'
+
+ @property
+ def identifier(self):
+ return self.network_id
+
+ def default_state(self):
+ return {'network_id': self.network_id, 'network_str': self.network_str}
@classmethod
def create(cls, user_id, project_id, security_group, vlan, network_str):
@@ -76,20 +186,27 @@ class BaseNetwork(datastore.RedisModel):
return net
def __init__(self, network_id, network_str=None):
- super(BaseNetwork, self).__init__(object_id=network_id)
- self['network_id'] = network_id
- self['network_str'] = network_str
+ self.network_id = network_id
+ self.network_str = network_str
+ super(BaseNetwork, self).__init__()
self.save()
@property
def network(self):
return IPy.IP(self['network_str'])
+
@property
def netmask(self):
return self.network.netmask()
+
+ @property
+ def gateway(self):
+ return self.network[1]
+
@property
def broadcast(self):
return self.network.broadcast()
+
@property
def bridge_name(self):
return "br%s" % (self["vlan"])
@@ -108,7 +225,7 @@ class BaseNetwork(datastore.RedisModel):
@property
def hosts(self):
- return datastore.Redis.instance().hgetall(self._hosts_key)
+ return datastore.Redis.instance().hgetall(self._hosts_key) or {}
def _add_host(self, _user_id, _project_id, host, target):
datastore.Redis.instance().hset(self._hosts_key, host, target)
@@ -122,7 +239,9 @@ class BaseNetwork(datastore.RedisModel):
@property
def available(self):
- for idx in range(3, len(self.network) - 1):
+ # the .2 address is always CloudPipe
+ # and the top <n> are for vpn clients
+ for idx in range(3, len(self.network)-(1 + FLAGS.cnt_vpn_clients)):
address = str(self.network[idx])
if not address in self.hosts.keys():
yield str(address)
@@ -133,13 +252,21 @@ class BaseNetwork(datastore.RedisModel):
self._add_host(user_id, project_id, address, mac)
self.express(address=address)
return address
- raise exception.NoMoreAddresses()
+ raise compute_exception.NoMoreAddresses("Project %s with network %s" %
+ (project_id, str(self.network)))
- def deallocate_ip(self, ip_str):
+ def lease_ip(self, ip_str):
+ logging.debug("Leasing allocated IP %s" % (ip_str))
+
+ def release_ip(self, ip_str):
if not ip_str in self.assigned:
- raise exception.AddressNotAllocated()
- self._rem_host(ip_str)
+ raise compute_exception.AddressNotAllocated()
self.deexpress(address=ip_str)
+ self._rem_host(ip_str)
+
+ def deallocate_ip(self, ip_str):
+ # Do nothing for now, cleanup on ip release
+ pass
def list_addresses(self):
for address in self.hosts:
@@ -166,12 +293,14 @@ class BridgedNetwork(BaseNetwork):
netmask
"""
+ override_type = 'network'
+
@classmethod
def get_network_for_project(cls, user_id, project_id, security_group):
vlan = get_vlan_for_project(project_id)
- network_str = get_subnet_from_vlan(vlan)
- logging.debug("creating network on vlan %s with network string %s" % (vlan, network_str))
- return cls.create(user_id, project_id, security_group, vlan, network_str)
+ network_str = vlan.subnet()
+ return cls.create(user_id, project_id, security_group, vlan.vlan_id,
+ network_str)
def __init__(self, *args, **kwargs):
super(BridgedNetwork, self).__init__(*args, **kwargs)
@@ -191,15 +320,20 @@ class DHCPNetwork(BridgedNetwork):
dhcp_range_end: the last ip to give out
"""
bridge_gets_ip = True
+ override_type = 'network'
def __init__(self, *args, **kwargs):
super(DHCPNetwork, self).__init__(*args, **kwargs)
- logging.debug("Initing DHCPNetwork object...")
+ # logging.debug("Initing DHCPNetwork object...")
self.dhcp_listen_address = self.network[1]
self.dhcp_range_start = self.network[3]
- self.dhcp_range_end = self.network[-1]
+ self.dhcp_range_end = self.network[-(1 + FLAGS.cnt_vpn_clients)]
try:
os.makedirs(FLAGS.networks_path)
+ # NOTE(todd): I guess this is a lazy way to not have to check if the
+ # directory exists, but shouldn't we be smarter about
+ # telling the difference between existing directory and
+ # permission denied? (Errno 17 vs 13, OSError)
except Exception, err:
pass
@@ -211,6 +345,20 @@ class DHCPNetwork(BridgedNetwork):
linux_net.start_dnsmasq(self)
else:
logging.debug("Not launching dnsmasq: no hosts.")
+ self.express_cloudpipe()
+
+ def allocate_vpn_ip(self, mac):
+ address = str(self.network[2])
+ self._add_host(self['user_id'], self['project_id'], address, mac)
+ self.express(address=address)
+ return address
+
+ def express_cloudpipe(self):
+ private_ip = self.network[2]
+ linux_net.confirm_rule("FORWARD -d %s -p udp --dport 1194 -j ACCEPT"
+ % (private_ip, ))
+ linux_net.confirm_rule("PREROUTING -t nat -d %s -p udp --dport %s -j DNAT --to %s:1194"
+ % (self.project.vpn_ip, self.project.vpn_port, private_ip))
def deexpress(self, address=None):
# if this is the last address, stop dns
@@ -220,26 +368,34 @@ class DHCPNetwork(BridgedNetwork):
else:
linux_net.start_dnsmasq(self)
-class PublicAddress(datastore.RedisModel):
- object_type="address"
+class PublicAddress(datastore.BasicModel):
+ override_type = "address"
def __init__(self, address):
- super(PublicAddress, self).__init__(address)
+ self.address = address
+ super(PublicAddress, self).__init__()
+
+ @property
+ def identifier(self):
+ return self.address
+
+ def default_state(self):
+ return {'address': self.address}
@classmethod
def create(cls, user_id, project_id, address):
- addr = cls(address=address)
- addr['address'] = address
+ addr = cls(address)
addr['user_id'] = user_id
addr['project_id'] = project_id
addr['instance_id'] = 'available'
addr['private_ip'] = 'available'
- addr["create_time"] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
addr.save()
return addr
DEFAULT_PORTS = [("tcp",80), ("tcp",22), ("udp",1194), ("tcp",443)]
class PublicNetworkController(BaseNetwork):
+ override_type = 'network'
+
def __init__(self, *args, **kwargs):
network_id = "public:default"
super(PublicNetworkController, self).__init__(network_id, FLAGS.public_range)
@@ -283,14 +439,14 @@ class PublicNetworkController(BaseNetwork):
def associate_address(self, public_ip, private_ip, instance_id):
if not public_ip in self.assigned:
- raise exception.AddressNotAllocated()
+ raise compute_exception.AddressNotAllocated()
# TODO(joshua): Keep an index going both ways
for addr in self.host_objs:
if addr.get('private_ip', None) == private_ip:
- raise exception.AddressAlreadyAssociated()
+ raise compute_exception.AddressAlreadyAssociated()
addr = self.get_host(public_ip)
if addr.get('private_ip', 'available') != 'available':
- raise exception.AddressAlreadyAssociated()
+ raise compute_exception.AddressAlreadyAssociated()
addr['private_ip'] = private_ip
addr['instance_id'] = instance_id
addr.save()
@@ -298,10 +454,10 @@ class PublicNetworkController(BaseNetwork):
def disassociate_address(self, public_ip):
if not public_ip in self.assigned:
- raise exception.AddressNotAllocated()
+ raise compute_exception.AddressNotAllocated()
addr = self.get_host(public_ip)
if addr.get('private_ip', 'available') == 'available':
- raise exception.AddressNotAssociated()
+ raise compute_exception.AddressNotAssociated()
self.deexpress(address=public_ip)
addr['private_ip'] = 'available'
addr['instance_id'] = 'available'
@@ -331,6 +487,7 @@ class PublicNetworkController(BaseNetwork):
def deexpress(self, address=None):
addr = self.get_host(address)
private_ip = addr['private_ip']
+ linux_net.unbind_public_ip(address, FLAGS.public_interface)
linux_net.remove_rule("PREROUTING -t nat -d %s -j DNAT --to %s"
% (address, private_ip))
linux_net.remove_rule("POSTROUTING -t nat -s %s -j SNAT --to %s"
@@ -342,64 +499,90 @@ class PublicNetworkController(BaseNetwork):
% (private_ip, protocol, port))
-VLANS_KEY = "vlans"
-def _add_vlan(project_id, vlan):
- datastore.Redis.instance().hset(VLANS_KEY, project_id, vlan)
-
-def _rem_vlan(project_id):
- datastore.Redis.instance().hdel(VLANS_KEY, project_id)
-
-def get_assigned_vlans():
- """ Returns a dictionary, with keys of project_id and values of vlan_id """
- return datastore.Redis.instance().hgetall(VLANS_KEY)
-
+# FIXME(todd): does this present a race condition, or is there some piece of
+# architecture that mitigates it (only one queue listener per net)?
def get_vlan_for_project(project_id):
"""
Allocate vlan IDs to individual users.
"""
- vlan = datastore.Redis.instance().hget(VLANS_KEY, project_id)
+ vlan = Vlan.lookup(project_id)
if vlan:
return vlan
- assigned_vlans = get_assigned_vlans()
- # TODO(joshua) I can do this in one loop, I think
- for old_project_id, vlan in assigned_vlans.iteritems():
+ known_vlans = Vlan.dict_by_vlan()
+ for vnum in range(FLAGS.vlan_start, FLAGS.vlan_end):
+ vstr = str(vnum)
+ if not known_vlans.has_key(vstr):
+ return Vlan.create(project_id, vnum)
+ old_project_id = known_vlans[vstr]
if not users.UserManager.instance().get_project(old_project_id):
- _rem_vlan(old_project_id)
- _add_vlan(project_id, vlan)
- return vlan
- for vlan in range(FLAGS.vlan_start, FLAGS.vlan_end):
- if not str(vlan) in assigned_vlans.values():
- _add_vlan(project_id, vlan)
- return vlan
- raise exception.AddressNotAllocated("Out of VLANs")
-
+ vlan = Vlan.lookup(old_project_id)
+ if vlan:
+ # NOTE(todd): This doesn't check for vlan id match, because
+ # it seems to be assumed that vlan<=>project is
+ # always a 1:1 mapping. It could be made way
+ # sexier if it didn't fight against the way
+ # BasicModel worked and used associate_with
+ # to build connections to projects.
+ # NOTE(josh): This is here because we want to make sure we
+ # don't orphan any VLANs. It is basically
+ # garbage collection for after projects abandoned
+ # their reference.
+ vlan.project_id = project_id
+ vlan.save()
+ return vlan
+ else:
+ return Vlan.create(project_id, vnum)
+ raise compute_exception.AddressNotAllocated("Out of VLANs")
def get_network_by_address(address):
+ logging.debug("Get Network By Address: %s" % address)
for project in users.UserManager.instance().get_projects():
net = get_project_network(project.id)
if address in net.assigned:
+ logging.debug("Found %s in %s" % (address, project.id))
return net
- raise exception.AddressNotAllocated()
+ raise compute_exception.AddressNotAllocated()
+
+def allocate_simple_ip():
+ redis = datastore.Redis.instance()
+ if not redis.exists('ips') and not len(redis.keys('instances:*')):
+ for address in FLAGS.simple_network_ips:
+ redis.sadd('ips', address)
+ address = redis.spop('ips')
+ if not address:
+ raise exception.NoMoreAddresses()
+ return address
+
+def deallocate_simple_ip(address):
+ datastore.Redis.instance().sadd('ips', address)
+
+
+def allocate_vpn_ip(user_id, project_id, mac):
+ return get_project_network(project_id).allocate_vpn_ip(mac)
def allocate_ip(user_id, project_id, mac):
return get_project_network(project_id).allocate_ip(user_id, project_id, mac)
def deallocate_ip(address):
return get_network_by_address(address).deallocate_ip(address)
+
+def release_ip(address):
+ return get_network_by_address(address).release_ip(address)
+
+def lease_ip(address):
+ return get_network_by_address(address).lease_ip(address)
def get_project_network(project_id, security_group='default'):
""" get a project's private network, allocating one if needed """
+ # TODO(todd): It looks goofy to get a project from a UserManager.
+ # Refactor to still use the LDAP backend, but not User specific.
project = users.UserManager.instance().get_project(project_id)
if not project:
- raise nova.exception.Error("Project %s doesn't exist, uhoh." % project_id)
- return DHCPNetwork.get_network_for_project(project.project_manager_id, project.id, security_group)
-
-def get_subnet_from_vlan(vlan):
- """Assign one subnet to each VLAN, for now."""
- vlan = int(vlan)
- network = IPy.IP(FLAGS.private_range)
- start = (vlan-FLAGS.vlan_start) * FLAGS.network_size
- return "%s-%s" % (network[start], network[start + FLAGS.network_size - 1])
+ raise exception.Error("Project %s doesn't exist, uhoh." %
+ project_id)
+ return DHCPNetwork.get_network_for_project(project.project_manager_id,
+ project.id, security_group)
+
def restart_nets():
""" Ensure the network for each user is enabled"""
diff --git a/nova/compute/node.py b/nova/compute/node.py
index 7c1636f34..f41bc34ea 100644
--- a/nova/compute/node.py
+++ b/nova/compute/node.py
@@ -1,17 +1,22 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
Compute Node:
@@ -25,7 +30,6 @@ import base64
import json
import logging
import os
-import random
import shutil
import sys
@@ -47,12 +51,13 @@ from nova import utils
from nova.compute import disk
from nova.compute import model
from nova.compute import network
+from nova.volume import storage
from nova.objectstore import image # for image_path flag
FLAGS = flags.FLAGS
flags.DEFINE_string('libvirt_xml_template',
utils.abspath('compute/libvirt.xml.template'),
- 'Network XML Template')
+ 'Libvirt XML Template')
flags.DEFINE_bool('use_s3', True,
'whether to get images from s3 or use local copy')
flags.DEFINE_string('instances_path', utils.abspath('../instances'),
@@ -66,10 +71,14 @@ INSTANCE_TYPES['m1.large'] = {'memory_mb': 4096, 'vcpus': 4, 'local_gb': 10}
INSTANCE_TYPES['m1.xlarge'] = {'memory_mb': 8192, 'vcpus': 4, 'local_gb': 10}
INSTANCE_TYPES['c1.medium'] = {'memory_mb': 2048, 'vcpus': 4, 'local_gb': 10}
-# The number of processes to start in our process pool
-# TODO(termie): this should probably be a flag and the pool should probably
-# be a singleton
-PROCESS_POOL_SIZE = 4
+
+def _image_path(path=''):
+ return os.path.join(FLAGS.images_path, path)
+
+
+def _image_url(path):
+ return "%s:%s/_images/%s" % (FLAGS.s3_host, FLAGS.s3_port, path)
+
class Node(object, service.Service):
"""
@@ -80,7 +89,7 @@ class Node(object, service.Service):
super(Node, self).__init__()
self._instances = {}
self._conn = self._get_connection()
- self._pool = process.Pool(PROCESS_POOL_SIZE)
+ self._pool = process.ProcessPool()
self.instdir = model.InstanceDirectory()
# TODO(joshua): This needs to ensure system state, specifically: modprobe aoe
@@ -133,24 +142,38 @@ class Node(object, service.Service):
return retval
@defer.inlineCallbacks
- def report_state(self):
- logging.debug("Reporting State")
- return
+ def report_state(self, nodename, daemon):
+ # TODO(termie): make this pattern be more elegant. -todd
+ try:
+ record = model.Daemon(nodename, daemon)
+ record.heartbeat()
+ if getattr(self, "model_disconnected", False):
+ self.model_disconnected = False
+ logging.error("Recovered model server connection!")
+
+ except model.ConnectionError, ex:
+ if not getattr(self, "model_disconnected", False):
+ self.model_disconnected = True
+ logging.exception("model server went away")
+ yield
# @exception.wrap_exception
def run_instance(self, instance_id, **_kwargs):
""" launch a new instance with specified options """
logging.debug("Starting instance %s..." % (instance_id))
inst = self.instdir.get(instance_id)
- # TODO: Get the real security group of launch in here
- security_group = "default"
- net = network.BridgedNetwork.get_network_for_project(inst['user_id'], inst['project_id'],
+ if not FLAGS.simple_network:
+ # TODO: Get the real security group of launch in here
+ security_group = "default"
+ net = network.BridgedNetwork.get_network_for_project(inst['user_id'],
+ inst['project_id'],
security_group).express()
inst['node_name'] = FLAGS.node_name
inst.save()
# TODO(vish) check to make sure the availability zone matches
new_inst = Instance(self._conn, name=instance_id,
pool=self._pool, data=inst)
+ logging.info("Instances current state is %s", new_inst.state)
if new_inst.is_running():
raise exception.Error("Instance is already running")
d = new_inst.spawn()
@@ -200,24 +223,29 @@ class Node(object, service.Service):
@defer.inlineCallbacks
@exception.wrap_exception
def attach_volume(self, instance_id = None,
- aoe_device = None, mountpoint = None):
- utils.runthis("Attached Volume: %s",
+ volume_id = None, mountpoint = None):
+ volume = storage.get_volume(volume_id)
+ yield self._init_aoe()
+ yield utils.runthis("Attached Volume: %s",
"sudo virsh attach-disk %s /dev/etherd/%s %s"
- % (instance_id, aoe_device, mountpoint.split("/")[-1]))
- return defer.succeed(True)
+ % (instance_id, volume['aoe_device'], mountpoint.split("/")[-1]))
+ volume.finish_attach()
+ defer.returnValue(True)
def _init_aoe(self):
utils.runthis("Doin an AoE discover, returns %s", "sudo aoe-discover")
utils.runthis("Doin an AoE stat, returns %s", "sudo aoe-stat")
@exception.wrap_exception
- def detach_volume(self, instance_id, mountpoint):
+ def detach_volume(self, instance_id, volume_id):
""" detach a volume from an instance """
# despite the documentation, virsh detach-disk just wants the device
# name without the leading /dev/
- target = mountpoint.rpartition('/dev/')[2]
+ volume = storage.get_volume(volume_id)
+ target = volume['mountpoint'].rpartition('/dev/')[2]
utils.runthis("Detached Volume: %s", "sudo virsh detach-disk %s %s "
% (instance_id, target))
+ volume.finish_detach()
return defer.succeed(True)
@@ -231,63 +259,6 @@ class ProductCode(object):
self.product_code = product_code
-def _create_image(data, libvirt_xml):
- """ create libvirt.xml and copy files into instance path """
- def basepath(path=''):
- return os.path.abspath(os.path.join(data['basepath'], path))
-
- def imagepath(path=''):
- return os.path.join(FLAGS.images_path, path)
-
- def image_url(path):
- return "%s:%s/_images/%s" % (FLAGS.s3_host, FLAGS.s3_port, path)
- logging.info(basepath('disk'))
- try:
- os.makedirs(data['basepath'])
- os.chmod(data['basepath'], 0777)
- except OSError:
- # TODO: there is already an instance with this name, do something
- pass
- try:
- logging.info('Creating image for: %s', data['instance_id'])
- f = open(basepath('libvirt.xml'), 'w')
- f.write(libvirt_xml)
- f.close()
- if not FLAGS.fake_libvirt:
- if FLAGS.use_s3:
- if not os.path.exists(basepath('disk')):
- utils.fetchfile(image_url("%s/image" % data['image_id']),
- basepath('disk-raw'))
- if not os.path.exists(basepath('kernel')):
- utils.fetchfile(image_url("%s/image" % data['kernel_id']),
- basepath('kernel'))
- if not os.path.exists(basepath('ramdisk')):
- utils.fetchfile(image_url("%s/image" % data['ramdisk_id']),
- basepath('ramdisk'))
- else:
- if not os.path.exists(basepath('disk')):
- shutil.copyfile(imagepath("%s/image" % data['image_id']),
- basepath('disk-raw'))
- if not os.path.exists(basepath('kernel')):
- shutil.copyfile(imagepath("%s/image" % data['kernel_id']),
- basepath('kernel'))
- if not os.path.exists(basepath('ramdisk')):
- shutil.copyfile(imagepath("%s/image" %
- data['ramdisk_id']),
- basepath('ramdisk'))
- if data['key_data']:
- logging.info('Injecting key data into image %s' %
- data['image_id'])
- disk.inject_key(data['key_data'], basepath('disk-raw'))
- if os.path.exists(basepath('disk')):
- os.remove(basepath('disk'))
- bytes = INSTANCE_TYPES[data['instance_type']]['local_gb'] * 1024 * 1024 * 1024
- disk.partition(basepath('disk-raw'), basepath('disk'), bytes)
- logging.info('Done create image for: %s', data['instance_id'])
- except Exception as ex:
- return {'exception': ex}
-
-
class Instance(object):
NOSTATE = 0x00
@@ -298,16 +269,6 @@ class Instance(object):
SHUTOFF = 0x05
CRASHED = 0x06
- def is_pending(self):
- return (self.state == Instance.NOSTATE or self.state == 'pending')
-
- def is_destroyed(self):
- return self.state == Instance.SHUTOFF
-
- def is_running(self):
- logging.debug("Instance state is: %s" % self.state)
- return (self.state == Instance.RUNNING or self.state == 'running')
-
def __init__(self, conn, pool, name, data):
""" spawn an instance with a given name """
# TODO(termie): pool should probably be a singleton instead of being passed
@@ -316,68 +277,49 @@ class Instance(object):
self._conn = conn
# TODO(vish): this can be removed after data has been updated
# data doesn't seem to have a working iterator so in doesn't work
- if not data.get('owner_id', None) is None:
+ if data.get('owner_id', None) is not None:
data['user_id'] = data['owner_id']
data['project_id'] = data['owner_id']
self.datamodel = data
- # NOTE(termie): to be passed to multiprocess self._s must be
- # pickle-able by cPickle
- self._s = {}
-
- # TODO(termie): is instance_type that actual name for this?
size = data.get('instance_type', FLAGS.default_instance_type)
if size not in INSTANCE_TYPES:
raise exception.Error('invalid instance type: %s' % size)
- self._s.update(INSTANCE_TYPES[size])
+ self.datamodel.update(INSTANCE_TYPES[size])
- self._s['name'] = name
- self._s['instance_id'] = name
- self._s['instance_type'] = size
- self._s['mac_address'] = data.get(
- 'mac_address', 'df:df:df:df:df:df')
- self._s['basepath'] = data.get(
+ self.datamodel['name'] = name
+ self.datamodel['instance_id'] = name
+ self.datamodel['basepath'] = data.get(
'basepath', os.path.abspath(
os.path.join(FLAGS.instances_path, self.name)))
- self._s['memory_kb'] = int(self._s['memory_mb']) * 1024
- self._s['image_id'] = data.get('image_id', FLAGS.default_image)
- self._s['kernel_id'] = data.get('kernel_id', FLAGS.default_kernel)
- self._s['ramdisk_id'] = data.get('ramdisk_id', FLAGS.default_ramdisk)
- self._s['user_id'] = data.get('user_id', None)
- self._s['project_id'] = data.get('project_id', self._s['user_id'])
- self._s['node_name'] = data.get('node_name', '')
- self._s['user_data'] = data.get('user_data', '')
- self._s['ami_launch_index'] = data.get('ami_launch_index', None)
- self._s['launch_time'] = data.get('launch_time', None)
- self._s['reservation_id'] = data.get('reservation_id', None)
- # self._s['state'] = Instance.NOSTATE
- self._s['state'] = data.get('state', Instance.NOSTATE)
- self._s['key_data'] = data.get('key_data', None)
-
- # TODO: we may not need to save the next few
- self._s['groups'] = data.get('security_group', ['default'])
- self._s['product_codes'] = data.get('product_code', [])
- self._s['key_name'] = data.get('key_name', None)
- self._s['addressing_type'] = data.get('addressing_type', None)
- self._s['availability_zone'] = data.get('availability_zone', 'fixme')
-
- self._s['bridge_name'] = data.get('bridge_name', None)
- #TODO: put real dns items here
- self._s['private_dns_name'] = data.get('private_dns_name', 'fixme')
- self._s['dns_name'] = data.get('dns_name',
- self._s['private_dns_name'])
+ self.datamodel['memory_kb'] = int(self.datamodel['memory_mb']) * 1024
+ self.datamodel.setdefault('image_id', FLAGS.default_image)
+ self.datamodel.setdefault('kernel_id', FLAGS.default_kernel)
+ self.datamodel.setdefault('ramdisk_id', FLAGS.default_ramdisk)
+ self.datamodel.setdefault('project_id', self.datamodel['user_id'])
+ self.datamodel.setdefault('bridge_name', None)
+ #self.datamodel.setdefault('key_data', None)
+ #self.datamodel.setdefault('key_name', None)
+ #self.datamodel.setdefault('addressing_type', None)
+
+ # TODO(joshua) - The ugly non-flat ones
+ self.datamodel['groups'] = data.get('security_group', 'default')
+ # TODO(joshua): Support product codes somehow
+ self.datamodel.setdefault('product_codes', None)
+
+ self.datamodel.save()
logging.debug("Finished init of Instance with id of %s" % name)
def toXml(self):
# TODO(termie): cache?
logging.debug("Starting the toXML method")
libvirt_xml = open(FLAGS.libvirt_xml_template).read()
- xml_info = self._s.copy()
- #xml_info.update(self._s)
+ xml_info = self.datamodel.copy()
+ # TODO(joshua): Make this xml express the attached disks as well
# TODO(termie): lazy lazy hack because xml is annoying
- xml_info['nova'] = json.dumps(self._s)
+ xml_info['nova'] = json.dumps(self.datamodel.copy())
libvirt_xml = libvirt_xml % xml_info
logging.debug("Finished the toXML method")
@@ -386,23 +328,38 @@ class Instance(object):
@classmethod
def fromName(cls, conn, pool, name):
""" use the saved data for reloading the instance """
- # if FLAGS.fake_libvirt:
- # raise Exception('this is a bit useless, eh?')
-
instdir = model.InstanceDirectory()
instance = instdir.get(name)
return cls(conn=conn, pool=pool, name=name, data=instance)
+ def set_state(self, state_code, state_description=None):
+ self.datamodel['state'] = state_code
+ if not state_description:
+ state_description = STATE_NAMES[state_code]
+ self.datamodel['state_description'] = state_description
+ self.datamodel.save()
+
@property
def state(self):
- return self._s['state']
+ # it is a string in datamodel
+ return int(self.datamodel['state'])
@property
def name(self):
- return self._s['name']
+ return self.datamodel['name']
+
+ def is_pending(self):
+ return (self.state == Instance.NOSTATE or self.state == 'pending')
+
+ def is_destroyed(self):
+ return self.state == Instance.SHUTOFF
+
+ def is_running(self):
+ logging.debug("Instance state is: %s" % self.state)
+ return (self.state == Instance.RUNNING or self.state == 'running')
def describe(self):
- return self._s
+ return self.datamodel
def info(self):
logging.debug("Getting info for dom %s" % self.name)
@@ -412,13 +369,16 @@ class Instance(object):
'max_mem': max_mem,
'mem': mem,
'num_cpu': num_cpu,
- 'cpu_time': cpu_time}
+ 'cpu_time': cpu_time,
+ 'node_name': FLAGS.node_name}
+
+ def basepath(self, path=''):
+ return os.path.abspath(os.path.join(self.datamodel['basepath'], path))
def update_state(self):
- info = self.info()
- self.datamodel['state'] = info['state']
- self.datamodel['node_name'] = FLAGS.node_name
- self.datamodel.save()
+ self.datamodel.update(self.info())
+ self.set_state(self.state)
+ self.datamodel.save() # Extra, but harmless
@exception.wrap_exception
def destroy(self):
@@ -427,8 +387,7 @@ class Instance(object):
raise exception.Error('trying to destroy already destroyed'
' instance: %s' % self.name)
- self.datamodel['state'] = 'shutting_down'
- self.datamodel.save()
+ self.set_state(Instance.NOSTATE, 'shutting_down')
try:
virt_dom = self._conn.lookupByName(self.name)
virt_dom.destroy()
@@ -436,6 +395,7 @@ class Instance(object):
pass
# If the instance is already terminated, we're still happy
d = defer.Deferred()
+ d.addCallback(lambda x: self._cleanup())
d.addCallback(lambda x: self.datamodel.destroy())
# TODO(termie): short-circuit me for tests
# WE'LL save this for when we do shutdown,
@@ -443,79 +403,176 @@ class Instance(object):
timer = task.LoopingCall(f=None)
def _wait_for_shutdown():
try:
- info = self.info()
- if info['state'] == Instance.SHUTDOWN:
- self._s['state'] = Instance.SHUTDOWN
- #self.datamodel['state'] = 'shutdown'
- #self.datamodel.save()
+ self.update_state()
+ if self.state == Instance.SHUTDOWN:
timer.stop()
d.callback(None)
except Exception:
- self._s['state'] = Instance.SHUTDOWN
+ self.set_state(Instance.SHUTDOWN)
timer.stop()
d.callback(None)
timer.f = _wait_for_shutdown
timer.start(interval=0.5, now=True)
return d
+ def _cleanup(self):
+ target = os.path.abspath(self.datamodel['basepath'])
+ logging.info("Deleting instance files at %s", target)
+ shutil.rmtree(target)
+
@defer.inlineCallbacks
@exception.wrap_exception
def reboot(self):
- # if not self.is_running():
- # raise exception.Error(
- # 'trying to reboot a non-running'
- # 'instance: %s (state: %s)' % (self.name, self.state))
+ if not self.is_running():
+ raise exception.Error(
+ 'trying to reboot a non-running'
+ 'instance: %s (state: %s)' % (self.name, self.state))
+ logging.debug('rebooting instance %s' % self.name)
+ self.set_state(Instance.NOSTATE, 'rebooting')
yield self._conn.lookupByName(self.name).destroy()
- self.datamodel['state'] = 'rebooting'
- self.datamodel.save()
- self._s['state'] = Instance.NOSTATE
self._conn.createXML(self.toXml(), 0)
- # TODO(termie): this should actually register a callback to check
- # for successful boot
- self.datamodel['state'] = 'running'
- self.datamodel.save()
- self._s['state'] = Instance.RUNNING
- logging.debug('rebooted instance %s' % self.name)
- defer.returnValue(None)
- # @exception.wrap_exception
+ d = defer.Deferred()
+ timer = task.LoopingCall(f=None)
+ def _wait_for_reboot():
+ try:
+ self.update_state()
+ if self.is_running():
+ logging.debug('rebooted instance %s' % self.name)
+ timer.stop()
+ d.callback(None)
+ except Exception:
+ self.set_state(Instance.SHUTDOWN)
+ timer.stop()
+ d.callback(None)
+ timer.f = _wait_for_reboot
+ timer.start(interval=0.5, now=True)
+ yield d
+
+ def _fetch_s3_image(self, image, path):
+ url = _image_url('%s/image' % image)
+ d = self._pool.simpleExecute('curl --silent %s -o %s' % (url, path))
+ return d
+
+ def _fetch_local_image(self, image, path):
+ source = _image_path('%s/image' % image)
+ d = self._pool.simpleExecute('cp %s %s' % (source, path))
+ return d
+
+ @defer.inlineCallbacks
+ def _create_image(self, libvirt_xml):
+ # syntactic nicety
+ data = self.datamodel
+ basepath = self.basepath
+
+ # ensure directories exist and are writable
+ yield self._pool.simpleExecute('mkdir -p %s' % basepath())
+ yield self._pool.simpleExecute('chmod 0777 %s' % basepath())
+
+
+ # TODO(termie): these are blocking calls, it would be great
+ # if they weren't.
+ logging.info('Creating image for: %s', data['instance_id'])
+ f = open(basepath('libvirt.xml'), 'w')
+ f.write(libvirt_xml)
+ f.close()
+
+ if FLAGS.fake_libvirt:
+ logging.info('fake_libvirt, nothing to do for create_image')
+ raise defer.returnValue(None);
+
+ if FLAGS.use_s3:
+ _fetch_file = self._fetch_s3_image
+ else:
+ _fetch_file = self._fetch_local_image
+
+ if not os.path.exists(basepath('disk')):
+ yield _fetch_file(data['image_id'], basepath('disk-raw'))
+ if not os.path.exists(basepath('kernel')):
+ yield _fetch_file(data['kernel_id'], basepath('kernel'))
+ if not os.path.exists(basepath('ramdisk')):
+ yield _fetch_file(data['ramdisk_id'], basepath('ramdisk'))
+
+ execute = lambda cmd, input=None: self._pool.simpleExecute(cmd=cmd,
+ input=input,
+ error_ok=1)
+
+ key = data['key_data']
+ net = None
+ if FLAGS.simple_network:
+ with open(FLAGS.simple_network_template) as f:
+ net = f.read() % {'address': data['private_dns_name'],
+ 'network': FLAGS.simple_network_network,
+ 'netmask': FLAGS.simple_network_netmask,
+ 'gateway': FLAGS.simple_network_gateway,
+ 'broadcast': FLAGS.simple_network_broadcast,
+ 'dns': FLAGS.simple_network_dns}
+ if key or net:
+ logging.info('Injecting data into image %s', data['image_id'])
+ yield disk.inject_data(basepath('disk-raw'), key, net, execute=execute)
+
+ if os.path.exists(basepath('disk')):
+ yield self._pool.simpleExecute('rm -f %s' % basepath('disk'))
+
+ bytes = (INSTANCE_TYPES[data['instance_type']]['local_gb']
+ * 1024 * 1024 * 1024)
+ yield disk.partition(
+ basepath('disk-raw'), basepath('disk'), bytes, execute=execute)
+
+ @defer.inlineCallbacks
+ @exception.wrap_exception
def spawn(self):
- self.datamodel['state'] = "spawning"
- self.datamodel.save()
+ self.set_state(Instance.NOSTATE, 'spawning')
logging.debug("Starting spawn in Instance")
+
xml = self.toXml()
- def _launch(retvals):
- self.datamodel['state'] = 'launching'
- self.datamodel.save()
- try:
- logging.debug("Arrived in _launch")
- if retvals and 'exception' in retvals:
- raise retvals['exception']
- self._conn.createXML(self.toXml(), 0)
- # TODO(termie): this should actually register
- # a callback to check for successful boot
- self._s['state'] = Instance.RUNNING
- self.datamodel['state'] = 'running'
- self.datamodel.save()
- logging.debug("Instance is running")
- except Exception as ex:
- logging.debug(ex)
- self.datamodel['state'] = 'shutdown'
- self.datamodel.save()
- #return self
-
- d = self._pool.apply(_create_image, self._s, xml)
- d.addCallback(_launch)
- return d
+ self.set_state(Instance.NOSTATE, 'launching')
+ logging.info('self %s', self)
+ try:
+ yield self._create_image(xml)
+ self._conn.createXML(xml, 0)
+ # TODO(termie): this should actually register
+ # a callback to check for successful boot
+ logging.debug("Instance is running")
+
+ local_d = defer.Deferred()
+ timer = task.LoopingCall(f=None)
+ def _wait_for_boot():
+ try:
+ self.update_state()
+ if self.is_running():
+ logging.debug('booted instance %s' % self.name)
+ timer.stop()
+ local_d.callback(None)
+ except Exception:
+ self.set_state(Instance.SHUTDOWN)
+ logging.error('Failed to boot instance %s' % self.name)
+ timer.stop()
+ local_d.callback(None)
+ timer.f = _wait_for_boot
+ timer.start(interval=0.5, now=True)
+ except Exception, ex:
+ logging.debug(ex)
+ self.set_state(Instance.SHUTDOWN)
@exception.wrap_exception
def console_output(self):
if not FLAGS.fake_libvirt:
fname = os.path.abspath(
- os.path.join(self._s['basepath'], 'console.log'))
+ os.path.join(self.datamodel['basepath'], 'console.log'))
with open(fname, 'r') as f:
console = f.read()
else:
console = 'FAKE CONSOLE OUTPUT'
return defer.succeed(console)
+
+STATE_NAMES = {
+ Instance.NOSTATE : 'pending',
+ Instance.RUNNING : 'running',
+ Instance.BLOCKED : 'blocked',
+ Instance.PAUSED : 'paused',
+ Instance.SHUTDOWN : 'shutdown',
+ Instance.SHUTOFF : 'shutdown',
+ Instance.CRASHED : 'crashed',
+}
diff --git a/nova/crypto.py b/nova/crypto.py
index 1f35ffa39..fc6ed714f 100644
--- a/nova/crypto.py
+++ b/nova/crypto.py
@@ -1,27 +1,34 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
Wrappers around standard crypto, including root and intermediate CAs,
SSH keypairs and x509 certificates.
"""
+import base64
import hashlib
import logging
import os
import shutil
+import struct
import tempfile
import time
import utils
@@ -81,17 +88,20 @@ def generate_key_pair(bits=1024):
def ssl_pub_to_ssh_pub(ssl_public_key, name='root', suffix='nova'):
- """requires lsh-utils"""
- convert="sed -e'1d' -e'$d' | pkcs1-conv --public-key-info --base-64 |" \
- + " sexp-conv | sed -e'1s/(rsa-pkcs1/(rsa-pkcs1-sha1/' | sexp-conv -s" \
- + " transport | lsh-export-key --openssh"
- (out, err) = utils.execute(convert, ssl_public_key)
- if err:
- raise exception.Error("Failed to generate key: %s", err)
- return '%s %s@%s\n' %(out.strip(), name, suffix)
+ rsa_key = M2Crypto.RSA.load_pub_key_bio(M2Crypto.BIO.MemoryBuffer(ssl_public_key))
+ e, n = rsa_key.pub()
+
+ key_type = 'ssh-rsa'
+
+ key_data = struct.pack('>I', len(key_type))
+ key_data += key_type
+ key_data += '%s%s' % (e,n)
+
+ b64_blob = base64.b64encode(key_data)
+ return '%s %s %s@%s\n' %(key_type, b64_blob, name, suffix)
-def generate_x509_cert(subject="/C=US/ST=California/L=The Mission/O=CloudFed/OU=NOVA/CN=foo", bits=1024):
+def generate_x509_cert(subject, bits=1024):
tmpdir = tempfile.mkdtemp()
keyfile = os.path.abspath(os.path.join(tmpdir, 'temp.key'))
csrfile = os.path.join(tmpdir, 'temp.csr')
diff --git a/nova/datastore.py b/nova/datastore.py
index 0da89d594..da3e01c25 100644
--- a/nova/datastore.py
+++ b/nova/datastore.py
@@ -1,52 +1,46 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Copyright 2010 Anso Labs, LLC
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
Datastore:
-Providers the Keeper class, a simple pseudo-dictionary that
-persists on disk.
-
MAKE Sure that ReDIS is running, and your flags are set properly,
before trying to run this.
"""
-import json
import logging
-import os
-import sqlite3
-import time
from nova import vendor
import redis
+from nova import exception
from nova import flags
from nova import utils
FLAGS = flags.FLAGS
-flags.DEFINE_string('datastore_path', utils.abspath('../keeper'),
- 'where keys are stored on disk')
flags.DEFINE_string('redis_host', '127.0.0.1',
'Host that redis is running on.')
flags.DEFINE_integer('redis_port', 6379,
'Port that redis is running on.')
flags.DEFINE_integer('redis_db', 0, 'Multiple DB keeps tests away')
-flags.DEFINE_string('keeper_backend', 'redis',
- 'which backend to use for keeper')
class Redis(object):
@@ -57,39 +51,108 @@ class Redis(object):
@classmethod
def instance(cls):
if not hasattr(cls, '_instance'):
- inst = redis.Redis(host=FLAGS.redis_host, port=FLAGS.redis_port, db=FLAGS.redis_db)
+ inst = redis.Redis(host=FLAGS.redis_host,
+ port=FLAGS.redis_port,
+ db=FLAGS.redis_db)
cls._instance = inst
return cls._instance
-class RedisModel(object):
- """ Wrapper around redis-backed properties """
- object_type = 'generic'
- def __init__(self, object_id):
- """ loads an object from the datastore if exists """
- self.object_id = object_id
+class ConnectionError(exception.Error):
+ pass
+
+
+def absorb_connection_error(fn):
+ def _wrapper(*args, **kwargs):
+ try:
+ return fn(*args, **kwargs)
+ except redis.exceptions.ConnectionError, ce:
+ raise ConnectionError(str(ce))
+ return _wrapper
+
+
+class BasicModel(object):
+ """
+ All Redis-backed data derives from this class.
+
+ You MUST specify an identifier() property that returns a unique string
+ per instance.
+
+ You MUST have an initializer that takes a single argument that is a value
+ returned by identifier() to load a new class with.
+
+ You may want to specify a dictionary for default_state().
+
+    You may also specify override_type at the class level to use a key other
+ than __class__.__name__.
+
+ You override save and destroy calls to automatically build and destroy
+ associations.
+ """
+
+ override_type = None
+
+ @absorb_connection_error
+ def __init__(self):
self.initial_state = {}
self.state = Redis.instance().hgetall(self.__redis_key)
if self.state:
self.initial_state = self.state
else:
- self.set_default_state()
+ self.state = self.default_state()
- def set_default_state(self):
- self.state = {'state' : 'pending'}
- self.state[self.object_type+"_id"] = self.object_id
- self.state["create_time"] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
+ def default_state(self):
+ """You probably want to define this in your subclass"""
+ return {}
+
+ @classmethod
+ def _redis_name(cls):
+ return self.override_type or cls.__name__
+
+ @classmethod
+ def lookup(cls, identifier):
+ rv = cls(identifier)
+ if rv.is_new_record():
+ return None
+ else:
+ return rv
+
+ @classmethod
+ @absorb_connection_error
+ def all(cls):
+ """yields all objects in the store"""
+ redis_set = cls._redis_set_name(cls.__name__)
+ for identifier in Redis.instance().smembers(redis_set):
+ yield cls(identifier)
+
+ @classmethod
+ @absorb_connection_error
+ def associated_to(cls, foreign_type, foreign_id):
+ redis_set = cls._redis_association_name(foreign_type, foreign_id)
+ for identifier in Redis.instance().smembers(redis_set):
+ yield cls(identifier)
+
+ @classmethod
+ def _redis_set_name(cls, kls_name):
+        # stupidly pluralize (for compatibility with previous codebase)
+ return kls_name.lower() + "s"
+
+ @classmethod
+ def _redis_association_name(cls, foreign_type, foreign_id):
+ return cls._redis_set_name("%s:%s:%s" %
+ (foreign_type, foreign_id, cls.__name__))
+
+ @property
+ def identifier(self):
+ """You DEFINITELY want to define this in your subclass"""
+ raise NotImplementedError("Your subclass should define identifier")
@property
def __redis_key(self):
- """ Magic string for keys """
- return '%s:%s' % (self.object_type, self.object_id)
+ return '%s:%s' % (self.__class__.__name__.lower(), self.identifier)
def __repr__(self):
- return "<%s:%s>" % (self.object_type, self.object_id)
-
- def __str__(self):
- return str(self.state)
+ return "<%s:%s>" % (self.__class__.__name__, self.identifier)
def keys(self):
return self.state.keys()
@@ -103,6 +166,12 @@ class RedisModel(object):
def get(self, item, default):
return self.state.get(item, default)
+ def update(self, update_dict):
+ return self.state.update(update_dict)
+
+ def setdefault(self, item, default):
+ return self.state.setdefault(item, default)
+
def __getitem__(self, item):
return self.state[item]
@@ -111,269 +180,77 @@ class RedisModel(object):
return self.state[item]
def __delitem__(self, item):
- """ We don't support this """
- raise Exception("Silly monkey, we NEED all our properties.")
+ """We don't support this"""
+ raise Exception("Silly monkey, models NEED all their properties.")
+
+ def is_new_record(self):
+ return self.initial_state == {}
+
+ @absorb_connection_error
+ def add_to_index(self):
+ set_name = self.__class__._redis_set_name(self.__class__.__name__)
+ Redis.instance().sadd(set_name, self.identifier)
+
+ @absorb_connection_error
+ def remove_from_index(self):
+ set_name = self.__class__._redis_set_name(self.__class__.__name__)
+ Redis.instance().srem(set_name, self.identifier)
+
+ @absorb_connection_error
+ def remove_from_index(self):
+ set_name = self.__class__._redis_set_name(self.__class__.__name__)
+ Redis.instance().srem(set_name, self.identifier)
+
+ @absorb_connection_error
+ def associate_with(self, foreign_type, foreign_id):
+ # note the extra 's' on the end is for plurality
+ # to match the old data without requiring a migration of any sort
+ self.add_associated_model_to_its_set(foreign_type, foreign_id)
+ redis_set = self.__class__._redis_association_name(foreign_type,
+ foreign_id)
+ Redis.instance().sadd(redis_set, self.identifier)
+
+ @absorb_connection_error
+ def unassociate_with(self, foreign_type, foreign_id):
+ redis_set = self.__class__._redis_association_name(foreign_type,
+ foreign_id)
+ Redis.instance().srem(redis_set, self.identifier)
+
+ def add_associated_model_to_its_set(self, my_type, my_id):
+ table = globals()
+ klsname = my_type.capitalize()
+ if table.has_key(klsname):
+ my_class = table[klsname]
+ my_inst = my_class(my_id)
+ my_inst.save()
+ else:
+ logging.warning("no model class for %s when building"
+ " association from %s",
+ klsname, self)
+ @absorb_connection_error
def save(self):
- """ update the directory with the state from this instance """
+ """
+ update the directory with the state from this model
+ also add it to the index of items of the same type
+ then set the initial_state = state so new changes are tracked
+ """
# TODO(ja): implement hmset in redis-py and use it
# instead of multiple calls to hset
+ if self.is_new_record():
+ self["create_time"] = utils.isotime()
for key, val in self.state.iteritems():
- # if (not self.initial_state.has_key(key)
- # or self.initial_state[key] != val):
- Redis.instance().hset(self.__redis_key, key, val)
- if self.initial_state == {}:
- self.first_save()
+ Redis.instance().hset(self.__redis_key, key, val)
+ self.add_to_index()
self.initial_state = self.state
return True
- def first_save(self):
- pass
-
+ @absorb_connection_error
def destroy(self):
- """ deletes all related records from datastore.
- does NOT do anything to running state.
- """
+ """deletes all related records from datastore."""
+ logging.info("Destroying datamodel for %s %s",
+ self.__class__.__name__, self.identifier)
Redis.instance().delete(self.__redis_key)
+ self.remove_from_index()
return True
-
-def slugify(key, prefix=None):
- """
- Key has to be a valid filename. Slugify solves that.
- """
- return "%s%s" % (prefix, key)
-
-
-class SqliteKeeper(object):
- """ Keeper implementation in SQLite, mostly for in-memory testing """
- _conn = {} # class variable
-
- def __init__(self, prefix):
- self.prefix = prefix
-
- @property
- def conn(self):
- if self.prefix not in self.__class__._conn:
- logging.debug('no sqlite connection (%s), making new', self.prefix)
- if FLAGS.datastore_path != ':memory:':
- try:
- os.mkdir(FLAGS.datastore_path)
- except Exception:
- pass
- conn = sqlite3.connect(os.path.join(
- FLAGS.datastore_path, '%s.sqlite' % self.prefix))
- else:
- conn = sqlite3.connect(':memory:')
-
- c = conn.cursor()
- try:
- c.execute('''CREATE TABLE data (item text, value text)''')
- conn.commit()
- except Exception:
- logging.exception('create table failed')
- finally:
- c.close()
-
- self.__class__._conn[self.prefix] = conn
-
- return self.__class__._conn[self.prefix]
-
- def __delitem__(self, item):
- #logging.debug('sqlite deleting %s', item)
- c = self.conn.cursor()
- try:
- c.execute('DELETE FROM data WHERE item = ?', (item, ))
- self.conn.commit()
- except Exception:
- logging.exception('delete failed: %s', item)
- finally:
- c.close()
-
- def __getitem__(self, item):
- #logging.debug('sqlite getting %s', item)
- result = None
- c = self.conn.cursor()
- try:
- c.execute('SELECT value FROM data WHERE item = ?', (item, ))
- row = c.fetchone()
- if row:
- result = json.loads(row[0])
- else:
- result = None
- except Exception:
- logging.exception('select failed: %s', item)
- finally:
- c.close()
- #logging.debug('sqlite got %s: %s', item, result)
- return result
-
- def __setitem__(self, item, value):
- serialized_value = json.dumps(value)
- insert = True
- if self[item] is not None:
- insert = False
- #logging.debug('sqlite insert %s: %s', item, value)
- c = self.conn.cursor()
- try:
- if insert:
- c.execute('INSERT INTO data VALUES (?, ?)',
- (item, serialized_value))
- else:
- c.execute('UPDATE data SET item=?, value=? WHERE item = ?',
- (item, serialized_value, item))
-
- self.conn.commit()
- except Exception:
- logging.exception('select failed: %s', item)
- finally:
- c.close()
-
- def clear(self):
- if self.prefix not in self.__class__._conn:
- return
- self.conn.close()
- if FLAGS.datastore_path != ':memory:':
- os.unlink(os.path.join(FLAGS.datastore_path, '%s.sqlite' % self.prefix))
- del self.__class__._conn[self.prefix]
-
- def clear_all(self):
- for k, conn in self.__class__._conn.iteritems():
- conn.close()
- if FLAGS.datastore_path != ':memory:':
- os.unlink(os.path.join(FLAGS.datastore_path,
- '%s.sqlite' % self.prefix))
- self.__class__._conn = {}
-
-
- def set_add(self, item, value):
- group = self[item]
- if not group:
- group = []
- group.append(value)
- self[item] = group
-
- def set_is_member(self, item, value):
- group = self[item]
- if not group:
- return False
- return value in group
-
- def set_remove(self, item, value):
- group = self[item]
- if not group:
- group = []
- group.remove(value)
- self[item] = group
-
- def set_members(self, item):
- group = self[item]
- if not group:
- group = []
- return group
-
- def set_fetch(self, item):
- # TODO(termie): I don't really know what set_fetch is supposed to do
- group = self[item]
- if not group:
- group = []
- return iter(group)
-
-class JsonKeeper(object):
- """
- Simple dictionary class that persists using
- JSON in files saved to disk.
- """
- def __init__(self, prefix):
- self.prefix = prefix
-
- def __delitem__(self, item):
- """
- Removing a key means deleting a file from disk.
- """
- item = slugify(item, self.prefix)
- path = "%s/%s" % (FLAGS.datastore_path, item)
- if os.path.isfile(path):
- os.remove(path)
-
- def __getitem__(self, item):
- """
- Fetch file contents and dejsonify them.
- """
- item = slugify(item, self.prefix)
- path = "%s/%s" % (FLAGS.datastore_path, item)
- if os.path.isfile(path):
- return json.load(open(path, 'r'))
- return None
-
- def __setitem__(self, item, value):
- """
- JSON encode value and save to file.
- """
- item = slugify(item, self.prefix)
- path = "%s/%s" % (FLAGS.datastore_path, item)
- with open(path, "w") as blobfile:
- blobfile.write(json.dumps(value))
- return value
-
-
-class RedisKeeper(object):
- """
- Simple dictionary class that persists using
- ReDIS.
- """
- def __init__(self, prefix="redis-"):
- self.prefix = prefix
- Redis.instance().ping()
-
- def __setitem__(self, item, value):
- """
- JSON encode value and save to file.
- """
- item = slugify(item, self.prefix)
- Redis.instance().set(item, json.dumps(value))
- return value
-
- def __getitem__(self, item):
- item = slugify(item, self.prefix)
- value = Redis.instance().get(item)
- if value:
- return json.loads(value)
-
- def __delitem__(self, item):
- item = slugify(item, self.prefix)
- return Redis.instance().delete(item)
-
- def clear(self):
- raise NotImplementedError()
-
- def clear_all(self):
- raise NotImplementedError()
-
- def set_add(self, item, value):
- item = slugify(item, self.prefix)
- return Redis.instance().sadd(item, json.dumps(value))
-
- def set_is_member(self, item, value):
- item = slugify(item, self.prefix)
- return Redis.instance().sismember(item, json.dumps(value))
-
- def set_remove(self, item, value):
- item = slugify(item, self.prefix)
- return Redis.instance().srem(item, json.dumps(value))
-
- def set_members(self, item):
- item = slugify(item, self.prefix)
- return [json.loads(v) for v in Redis.instance().smembers(item)]
-
- def set_fetch(self, item):
- item = slugify(item, self.prefix)
- for obj in Redis.instance().sinter([item]):
- yield json.loads(obj)
-
-
-def Keeper(prefix=''):
- KEEPERS = {'redis': RedisKeeper,
- 'sqlite': SqliteKeeper}
- return KEEPERS[FLAGS.keeper_backend](prefix)
-
diff --git a/nova/endpoint/__init__.py b/nova/endpoint/__init__.py
index dbf15d259..00ebd398c 100644
--- a/nova/endpoint/__init__.py
+++ b/nova/endpoint/__init__.py
@@ -1,16 +1,22 @@
-# Copyright [2010] [Anso Labs, LLC]
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
:mod:`nova.endpoint` -- Main NOVA Api endpoints
diff --git a/nova/endpoint/admin.py b/nova/endpoint/admin.py
index b51929a83..ceab7d1f9 100644
--- a/nova/endpoint/admin.py
+++ b/nova/endpoint/admin.py
@@ -1,17 +1,22 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
Admin API controller, exposed through http via the api worker.
@@ -19,6 +24,9 @@ Admin API controller, exposed through http via the api worker.
import base64
+from nova.auth import users
+from nova.compute import model
+
def user_dict(user, base64_file=None):
"""Convert the user object to a result dict"""
if user:
@@ -31,16 +39,10 @@ def user_dict(user, base64_file=None):
else:
return {}
-def node_dict(node):
- """Convert a node object to a result dict"""
- if node:
- return {
- 'node_id': node.id,
- 'workers': ", ".join(node.workers),
- 'disks': ", ".join(node.disks),
- 'ram': node.memory,
- 'load_average' : node.load_average,
- }
+def host_dict(host):
+ """Convert a host model object to a result dict"""
+ if host:
+ return host.state
else:
return {}
@@ -58,35 +60,29 @@ def admin_only(target):
class AdminController(object):
"""
- API Controller for users, node status, and worker mgmt.
+ API Controller for users, hosts, nodes, and workers.
Trivial admin_only wrapper will be replaced with RBAC,
allowing project managers to administer project users.
"""
- def __init__(self, user_manager, node_manager=None):
- self.user_manager = user_manager
- self.node_manager = node_manager
def __str__(self):
return 'AdminController'
@admin_only
def describe_user(self, _context, name, **_kwargs):
- """Returns user data, including access and secret keys.
- """
- return user_dict(self.user_manager.get_user(name))
+ """Returns user data, including access and secret keys."""
+ return user_dict(users.UserManager.instance().get_user(name))
@admin_only
def describe_users(self, _context, **_kwargs):
- """Returns all users - should be changed to deal with a list.
- """
+ """Returns all users - should be changed to deal with a list."""
return {'userSet':
- [user_dict(u) for u in self.user_manager.get_users()] }
+ [user_dict(u) for u in users.UserManager.instance().get_users()] }
@admin_only
def register_user(self, _context, name, **_kwargs):
- """ Creates a new user, and returns generated credentials.
- """
- return user_dict(self.user_manager.create_user(name))
+ """Creates a new user, and returns generated credentials."""
+ return user_dict(users.UserManager.instance().create_user(name))
@admin_only
def deregister_user(self, _context, name, **_kwargs):
@@ -94,7 +90,7 @@ class AdminController(object):
Should throw an exception if the user has instances,
volumes, or buckets remaining.
"""
- self.user_manager.delete_user(name)
+ users.UserManager.instance().delete_user(name)
return True
@@ -106,12 +102,12 @@ class AdminController(object):
"""
if project is None:
project = name
- project = self.user_manager.get_project(project)
- user = self.user_manager.get_user(name)
+ project = users.UserManager.instance().get_project(project)
+ user = users.UserManager.instance().get_user(name)
return user_dict(user, base64.b64encode(project.get_credentials(user)))
@admin_only
- def describe_nodes(self, _context, **_kwargs):
+ def describe_hosts(self, _context, **_kwargs):
"""Returns status info for all nodes. Includes:
* Disk Space
* Instance List
@@ -120,11 +116,9 @@ class AdminController(object):
* DHCP servers running
* Iptables / bridges
"""
- return {'nodeSet':
- [node_dict(n) for n in self.node_manager.get_nodes()] }
+ return {'hostSet': [host_dict(h) for h in model.Host.all()]}
@admin_only
- def describe_node(self, _context, name, **_kwargs):
- """Returns status info for single node.
- """
- return node_dict(self.node_manager.get_node(name))
+ def describe_host(self, _context, name, **_kwargs):
+ """Returns status info for single node."""
+ return host_dict(model.Host.lookup(name))
diff --git a/nova/endpoint/api.py b/nova/endpoint/api.py
index e70694210..86a4551ad 100755
--- a/nova/endpoint/api.py
+++ b/nova/endpoint/api.py
@@ -1,18 +1,22 @@
-#!/usr/bin/python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
Tornado REST API Request Handlers for Nova functions
@@ -37,6 +41,7 @@ from nova import flags
from nova import utils
from nova.endpoint import cloud
from nova.auth import users
+import nova.cloudpipe.api
FLAGS = flags.FLAGS
flags.DEFINE_integer('cc_port', 8773, 'cloud controller port')
@@ -319,9 +324,11 @@ class APIRequestHandler(tornado.web.RequestHandler):
class APIServerApplication(tornado.web.Application):
- def __init__(self, user_manager, controllers):
+ def __init__(self, controllers):
tornado.web.Application.__init__(self, [
(r'/', RootRequestHandler),
+ (r'/cloudpipe/(.*)', nova.cloudpipe.api.CloudPipeRequestHandler),
+ (r'/cloudpipe', nova.cloudpipe.api.CloudPipeRequestHandler),
(r'/services/([A-Za-z0-9]+)/', APIRequestHandler),
(r'/latest/([-A-Za-z0-9/]*)', MetadataRequestHandler),
(r'/2009-04-04/([-A-Za-z0-9/]*)', MetadataRequestHandler),
@@ -334,5 +341,4 @@ class APIServerApplication(tornado.web.Application):
(r'/2007-01-19/([-A-Za-z0-9/]*)', MetadataRequestHandler),
(r'/1.0/([-A-Za-z0-9/]*)', MetadataRequestHandler),
], pool=multiprocessing.Pool(4))
- self.user_manager = user_manager
self.controllers = controllers
diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py
index 768a6b3e0..32c7cbce0 100644
--- a/nova/endpoint/cloud.py
+++ b/nova/endpoint/cloud.py
@@ -1,17 +1,22 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
Cloud Controller: Implementation of EC2 REST API calls, which are
@@ -33,9 +38,11 @@ from nova import flags
from nova import rpc
from nova import utils
from nova import exception
+from nova.auth import rbac
from nova.auth import users
from nova.compute import model
from nova.compute import network
+from nova.compute import node
from nova.endpoint import images
from nova.volume import storage
@@ -66,14 +73,13 @@ class CloudController(object):
@property
def instances(self):
""" All instances in the system, as dicts """
- for instance in self.instdir.all:
- yield {instance['instance_id']: instance}
+ return self.instdir.all
@property
def volumes(self):
""" returns a list of all volumes """
for volume_id in datastore.Redis.instance().smembers("volumes"):
- volume = storage.Volume(volume_id=volume_id)
+ volume = storage.get_volume(volume_id)
yield volume
def __str__(self):
@@ -96,10 +102,22 @@ class CloudController(object):
def get_instance_by_ip(self, ip):
return self.instdir.by_ip(ip)
+ def _get_mpi_data(self, project_id):
+ result = {}
+ for instance in self.instdir.all:
+ if instance['project_id'] == project_id:
+ line = '%s slots=%d' % (instance['private_dns_name'], node.INSTANCE_TYPES[instance['instance_type']]['vcpus'])
+ if instance['key_name'] in result:
+ result[instance['key_name']].append(line)
+ else:
+ result[instance['key_name']] = [line]
+ return result
+
def get_metadata(self, ip):
i = self.get_instance_by_ip(ip)
if i is None:
return None
+ mpi = self._get_mpi_data(i['project_id'])
if i['key_name']:
keys = {
'0': {
@@ -136,7 +154,8 @@ class CloudController(object):
'public-keys' : keys,
'ramdisk-id': i.get('ramdisk_id', ''),
'reservation-id': i['reservation_id'],
- 'security-groups': i.get('groups', '')
+ 'security-groups': i.get('groups', ''),
+ 'mpi': mpi
}
}
if False: # TODO: store ancestor ids
@@ -145,30 +164,52 @@ class CloudController(object):
data['product-codes'] = i['product_codes']
return data
+ @rbac.allow('all')
def describe_availability_zones(self, context, **kwargs):
return {'availabilityZoneInfo': [{'zoneName': 'nova',
'zoneState': 'available'}]}
+ @rbac.allow('all')
+ def describe_regions(self, context, region_name=None, **kwargs):
+ # TODO(vish): region_name is an array. Support filtering
+ return {'regionInfo': [{'regionName': 'nova',
+ 'regionUrl': FLAGS.ec2_url}]}
+
+ @rbac.allow('all')
+ def describe_snapshots(self,
+ context,
+ snapshot_id=None,
+ owner=None,
+ restorable_by=None,
+ **kwargs):
+ return {'snapshotSet': [{'snapshotId': 'fixme',
+ 'volumeId': 'fixme',
+ 'status': 'fixme',
+ 'startTime': 'fixme',
+ 'progress': 'fixme',
+ 'ownerId': 'fixme',
+ 'volumeSize': 0,
+ 'description': 'fixme'}]}
+
+ @rbac.allow('all')
def describe_key_pairs(self, context, key_name=None, **kwargs):
- key_pairs = []
- key_names = key_name and key_name or []
- if len(key_names) > 0:
- for key_name in key_names:
- key_pair = context.user.get_key_pair(key_name)
- if key_pair != None:
- key_pairs.append({
- 'keyName': key_pair.name,
- 'keyFingerprint': key_pair.fingerprint,
- })
- else:
- for key_pair in context.user.get_key_pairs():
- key_pairs.append({
+ key_pairs = context.user.get_key_pairs()
+ if not key_name is None:
+ key_pairs = [x for x in key_pairs if x.name in key_name]
+
+ result = []
+ for key_pair in key_pairs:
+ # filter out the vpn keys
+ suffix = FLAGS.vpn_key_suffix
+ if context.user.is_admin() or not key_pair.name.endswith(suffix):
+ result.append({
'keyName': key_pair.name,
'keyFingerprint': key_pair.fingerprint,
})
- return { 'keypairsSet': key_pairs }
+ return { 'keypairsSet': result }
+ @rbac.allow('all')
def create_key_pair(self, context, key_name, **kwargs):
try:
d = defer.Deferred()
@@ -187,28 +228,31 @@ class CloudController(object):
except users.UserError, e:
raise
+ @rbac.allow('all')
def delete_key_pair(self, context, key_name, **kwargs):
context.user.delete_key_pair(key_name)
        # aws returns true even if the key doesn't exist
return True
+ @rbac.allow('all')
def describe_security_groups(self, context, group_names, **kwargs):
groups = { 'securityGroupSet': [] }
# Stubbed for now to unblock other things.
return groups
+ @rbac.allow('netadmin')
def create_security_group(self, context, group_name, **kwargs):
return True
+ @rbac.allow('netadmin')
def delete_security_group(self, context, group_name, **kwargs):
return True
+ @rbac.allow('projectmanager', 'sysadmin')
def get_console_output(self, context, instance_id, **kwargs):
# instance_id is passed in as a list of instances
instance = self._get_instance(context, instance_id[0])
- if instance['state'] == 'pending':
- raise exception.ApiError('Cannot get output for pending instance')
return rpc.call('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
{"method": "get_console_output",
"args" : {"instance_id": instance_id[0]}})
@@ -219,6 +263,7 @@ class CloudController(object):
else:
return None
+ @rbac.allow('projectmanager', 'sysadmin')
def describe_volumes(self, context, **kwargs):
volumes = []
for volume in self.volumes:
@@ -241,13 +286,24 @@ class CloudController(object):
volume.get('node_name', None),
volume.get('instance_id', ''),
volume.get('mountpoint', ''))
+ if volume['attach_status'] == 'attached':
+ v['attachmentSet'] = [{'attachTime': volume['attach_time'],
+ 'deleteOnTermination': volume['delete_on_termination'],
+ 'device' : volume['mountpoint'],
+ 'instanceId' : volume['instance_id'],
+ 'status' : 'attached',
+ 'volume_id' : volume['volume_id']}]
+ else:
+ v['attachmentSet'] = [{}]
return v
+ @rbac.allow('projectmanager', 'sysadmin')
def create_volume(self, context, size, **kwargs):
# TODO(vish): refactor this to create the volume object here and tell storage to create it
res = rpc.call(FLAGS.storage_topic, {"method": "create_volume",
"args" : {"size": size,
- "user_id": context.user.id}})
+ "user_id": context.user.id,
+ "project_id": context.project.id}})
def _format_result(result):
volume = self._get_volume(context, result['result'])
return {'volumeSet': [self.format_volume(context, volume)]}
@@ -256,10 +312,9 @@ class CloudController(object):
def _get_address(self, context, public_ip):
# FIXME(vish) this should move into network.py
- for address in self.network.hosts:
- if address['address'] == public_ip:
- if context.user.is_admin() or address['project_id'] == context.project.id:
- return address
+ address = self.network.get_host(public_ip)
+ if address and (context.user.is_admin() or address['project_id'] == context.project.id):
+ return address
raise exception.NotFound("Address at ip %s not found" % public_ip)
def _get_image(self, context, image_id):
@@ -272,122 +327,137 @@ class CloudController(object):
return image
def _get_instance(self, context, instance_id):
- for instance in self.instances:
+ for instance in self.instdir.all:
if instance['instance_id'] == instance_id:
if context.user.is_admin() or instance['project_id'] == context.project.id:
return instance
raise exception.NotFound('Instance %s could not be found' % instance_id)
def _get_volume(self, context, volume_id):
- for volume in self.volumes:
- if volume['volume_id'] == volume_id:
- if context.user.is_admin() or volume['project_id'] == context.project.id:
- return volume
+ volume = storage.get_volume(volume_id)
+ if context.user.is_admin() or volume['project_id'] == context.project.id:
+ return volume
raise exception.NotFound('Volume %s could not be found' % volume_id)
+ @rbac.allow('projectmanager', 'sysadmin')
def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
volume = self._get_volume(context, volume_id)
- storage_node = volume['node_name']
- # TODO: (joshua) Fix volumes to store creator id
+ if volume['status'] == "attached":
+ raise exception.ApiError("Volume is already attached")
+ # TODO(vish): looping through all volumes is slow. We should probably maintain an index
+ for vol in self.volumes:
+ if vol['instance_id'] == instance_id and vol['mountpoint'] == device:
+ raise exception.ApiError("Volume %s is already attached to %s" % (vol['volume_id'], vol['mountpoint']))
+ volume.start_attach(instance_id, device)
instance = self._get_instance(context, instance_id)
compute_node = instance['node_name']
- aoe_device = volume['aoe_device']
- # Needs to get right node controller for attaching to
- # TODO: Maybe have another exchange that goes to everyone?
rpc.cast('%s.%s' % (FLAGS.compute_topic, compute_node),
{"method": "attach_volume",
- "args" : {"aoe_device": aoe_device,
- "instance_id" : instance_id,
- "mountpoint" : device}})
- rpc.cast('%s.%s' % (FLAGS.storage_topic, storage_node),
- {"method": "attach_volume",
"args" : {"volume_id": volume_id,
"instance_id" : instance_id,
"mountpoint" : device}})
- return defer.succeed(True)
+ return defer.succeed({'attachTime' : volume['attach_time'],
+ 'device' : volume['mountpoint'],
+ 'instanceId' : instance_id,
+ 'requestId' : context.request_id,
+ 'status' : volume['attach_status'],
+ 'volumeId' : volume_id})
+ @rbac.allow('projectmanager', 'sysadmin')
def detach_volume(self, context, volume_id, **kwargs):
- # TODO(joshua): Make sure the updated state has been received first
volume = self._get_volume(context, volume_id)
- storage_node = volume['node_name']
- if 'instance_id' in volume.keys():
- instance_id = volume['instance_id']
- try:
- instance = self._get_instance(context, instance_id)
- compute_node = instance['node_name']
- mountpoint = volume['mountpoint']
- rpc.cast('%s.%s' % (FLAGS.compute_topic, compute_node),
+ instance_id = volume.get('instance_id', None)
+ if not instance_id:
+ raise exception.Error("Volume isn't attached to anything!")
+ if volume['status'] == "available":
+ raise exception.Error("Volume is already detached")
+ try:
+ volume.start_detach()
+ instance = self._get_instance(context, instance_id)
+ rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
{"method": "detach_volume",
"args" : {"instance_id": instance_id,
- "mountpoint": mountpoint}})
- except exception.NotFound:
- pass
- rpc.cast('%s.%s' % (FLAGS.storage_topic, storage_node),
- {"method": "detach_volume",
- "args" : {"volume_id": volume_id}})
- return defer.succeed(True)
-
- def _convert_to_set(self, lst, str):
+ "volume_id": volume_id}})
+ except exception.NotFound:
+ # If the instance doesn't exist anymore,
+ # then we need to call detach blind
+ volume.finish_detach()
+ return defer.succeed({'attachTime' : volume['attach_time'],
+ 'device' : volume['mountpoint'],
+ 'instanceId' : instance_id,
+ 'requestId' : context.request_id,
+ 'status' : volume['attach_status'],
+ 'volumeId' : volume_id})
+
+ def _convert_to_set(self, lst, label):
if lst == None or lst == []:
return None
- return [{str: x} for x in lst]
+ if not isinstance(lst, list):
+ lst = [lst]
+ return [{label: x} for x in lst]
+ @rbac.allow('all')
def describe_instances(self, context, **kwargs):
return defer.succeed(self._format_instances(context))
def _format_instances(self, context, reservation_id = None):
- if self.instances == {}:
- return {'reservationSet': []}
reservations = {}
- for inst in self.instances:
- instance = inst.values()[0]
+ if context.user.is_admin():
+ instgenerator = self.instdir.all
+ else:
+ instgenerator = self.instdir.by_project(context.project.id)
+ for instance in instgenerator:
res_id = instance.get('reservation_id', 'Unknown')
- if ((context.user.is_admin() or context.project.id == instance['project_id'])
- and (reservation_id == None or reservation_id == res_id)):
- i = {}
- i['instance_id'] = instance.get('instance_id', None)
- i['image_id'] = instance.get('image_id', None)
- i['instance_state'] = {
- 'code': 42,
- 'name': instance.get('state', 'pending')
- }
- i['public_dns_name'] = self.network.get_public_ip_for_instance(
- i['instance_id'])
- i['private_dns_name'] = instance.get('private_dns_name', None)
- if not i['public_dns_name']:
- i['public_dns_name'] = i['private_dns_name']
- i['dns_name'] = instance.get('dns_name', None)
- i['key_name'] = instance.get('key_name', None)
- if context.user.is_admin():
- i['key_name'] = '%s (%s, %s)' % (i['key_name'],
- instance.get('owner_id', None), instance.get('node_name',''))
- i['product_codes_set'] = self._convert_to_set(
- instance.get('product_codes', None), 'product_code')
- i['instance_type'] = instance.get('instance_type', None)
- i['launch_time'] = instance.get('launch_time', None)
- i['ami_launch_index'] = instance.get('ami_launch_index',
- None)
- if not reservations.has_key(res_id):
- r = {}
- r['reservation_id'] = res_id
- r['owner_id'] = instance.get('project_id', None)
- r['group_set'] = self._convert_to_set(
- instance.get('groups', None), 'group_id')
- r['instances_set'] = []
- reservations[res_id] = r
- reservations[res_id]['instances_set'].append(i)
+ if reservation_id != None and reservation_id != res_id:
+ continue
+ if not context.user.is_admin():
+ if instance['image_id'] == FLAGS.vpn_image_id:
+ continue
+ i = {}
+ i['instance_id'] = instance.get('instance_id', None)
+ i['image_id'] = instance.get('image_id', None)
+ i['instance_state'] = {
+ 'code': instance.get('state', 0),
+ 'name': instance.get('state_description', 'pending')
+ }
+ i['public_dns_name'] = self.network.get_public_ip_for_instance(
+ i['instance_id'])
+ i['private_dns_name'] = instance.get('private_dns_name', None)
+ if not i['public_dns_name']:
+ i['public_dns_name'] = i['private_dns_name']
+ i['dns_name'] = instance.get('dns_name', None)
+ i['key_name'] = instance.get('key_name', None)
+ if context.user.is_admin():
+ i['key_name'] = '%s (%s, %s)' % (i['key_name'],
+ instance.get('project_id', None), instance.get('node_name',''))
+ i['product_codes_set'] = self._convert_to_set(
+ instance.get('product_codes', None), 'product_code')
+ i['instance_type'] = instance.get('instance_type', None)
+ i['launch_time'] = instance.get('launch_time', None)
+ i['ami_launch_index'] = instance.get('ami_launch_index',
+ None)
+ if not reservations.has_key(res_id):
+ r = {}
+ r['reservation_id'] = res_id
+ r['owner_id'] = instance.get('project_id', None)
+ r['group_set'] = self._convert_to_set(
+ instance.get('groups', None), 'group_id')
+ r['instances_set'] = []
+ reservations[res_id] = r
+ reservations[res_id]['instances_set'].append(i)
instance_response = {'reservationSet' : list(reservations.values()) }
return instance_response
+ @rbac.allow('all')
def describe_addresses(self, context, **kwargs):
- return self.format_addresses(context.user)
+ return self.format_addresses(context)
def format_addresses(self, context):
addresses = []
# TODO(vish): move authorization checking into network.py
- for address in self.network.hosts:
+ for address in self.network.host_objs:
#logging.debug(address_record)
address_rv = {
'public_ip': address['address'],
@@ -403,15 +473,18 @@ class CloudController(object):
# logging.debug(addresses)
return {'addressesSet': addresses}
+ @rbac.allow('netadmin')
def allocate_address(self, context, **kwargs):
address = self.network.allocate_ip(
context.user.id, context.project.id, 'public')
return defer.succeed({'addressSet': [{'publicIp' : address}]})
+ @rbac.allow('netadmin')
def release_address(self, context, public_ip, **kwargs):
self.network.deallocate_ip(public_ip)
return defer.succeed({'releaseResponse': ["Address released."]})
+ @rbac.allow('netadmin')
def associate_address(self, context, instance_id, **kwargs):
instance = self._get_instance(context, instance_id)
self.network.associate_address(
@@ -420,14 +493,39 @@ class CloudController(object):
instance_id)
return defer.succeed({'associateResponse': ["Address associated."]})
+ @rbac.allow('netadmin')
def disassociate_address(self, context, public_ip, **kwargs):
- address = self._get_address(public_ip)
+ address = self._get_address(context, public_ip)
self.network.disassociate_address(public_ip)
# TODO - Strip the IP from the instance
return defer.succeed({'disassociateResponse': ["Address disassociated."]})
+ def release_ip(self, context, private_ip, **kwargs):
+ self.network.release_ip(private_ip)
+ return defer.succeed({'releaseResponse': ["Address released."]})
+
+ def lease_ip(self, context, private_ip, **kwargs):
+ self.network.lease_ip(private_ip)
+ return defer.succeed({'leaseResponse': ["Address leased."]})
+
+ @rbac.allow('projectmanager', 'sysadmin')
def run_instances(self, context, **kwargs):
- image = self._get_image(context, kwargs['image_id'])
+ # make sure user can access the image
+ # vpn image is private so it doesn't show up on lists
+ if kwargs['image_id'] != FLAGS.vpn_image_id:
+ image = self._get_image(context, kwargs['image_id'])
+
+ # FIXME(ja): if image is cloudpipe, this breaks
+
+ # get defaults from imagestore
+ image_id = image['imageId']
+ kernel_id = image.get('kernelId', None)
+ ramdisk_id = image.get('ramdiskId', None)
+
+ # API parameters overrides of defaults
+ kernel_id = kwargs.get('kernel_id', kernel_id)
+ ramdisk_id = kwargs.get('ramdisk_id', ramdisk_id)
+
logging.debug("Going to run instances...")
reservation_id = utils.generate_uid('r')
launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
@@ -438,13 +536,21 @@ class CloudController(object):
raise exception.ApiError('Key Pair %s not found' %
kwargs['key_name'])
key_data = key_pair.public_key
-
+        # TODO: Get the real security group for the launch in here
+ security_group = "default"
+ if FLAGS.simple_network:
+ bridge_name = FLAGS.simple_network_bridge
+ else:
+ net = network.BridgedNetwork.get_network_for_project(
+ context.user.id, context.project.id, security_group)
+ bridge_name = net['bridge_name']
for num in range(int(kwargs['max_count'])):
inst = self.instdir.new()
- # TODO(ja): add ari, aki
- inst['image_id'] = kwargs['image_id']
+ inst['image_id'] = image_id
+ inst['kernel_id'] = kernel_id
+ inst['ramdisk_id'] = ramdisk_id
inst['user_data'] = kwargs.get('user_data', '')
- inst['instance_type'] = kwargs.get('instance_type', '')
+ inst['instance_type'] = kwargs.get('instance_type', 'm1.small')
inst['reservation_id'] = reservation_id
inst['launch_time'] = launch_time
inst['key_data'] = key_data or ''
@@ -453,10 +559,21 @@ class CloudController(object):
inst['project_id'] = context.project.id
inst['mac_address'] = utils.generate_mac()
inst['ami_launch_index'] = num
- address = network.allocate_ip(
- inst['user_id'], inst['project_id'], mac=inst['mac_address'])
+ inst['bridge_name'] = bridge_name
+ if FLAGS.simple_network:
+ address = network.allocate_simple_ip()
+ else:
+ if inst['image_id'] == FLAGS.vpn_image_id:
+ address = network.allocate_vpn_ip(
+ inst['user_id'],
+ inst['project_id'],
+ mac=inst['mac_address'])
+ else:
+ address = network.allocate_ip(
+ inst['user_id'],
+ inst['project_id'],
+ mac=inst['mac_address'])
inst['private_dns_name'] = str(address)
- inst['bridge_name'] = network.BridgedNetwork.get_network_for_project(inst['user_id'], inst['project_id'])['bridge_name']
# TODO: allocate expresses on the router node
inst.save()
rpc.cast(FLAGS.compute_topic,
@@ -466,8 +583,9 @@ class CloudController(object):
(context.user.name, inst['private_dns_name']))
# TODO: Make the NetworkComputeNode figure out the network name from ip.
return defer.succeed(self._format_instances(
- context.user, reservation_id))
+ context, reservation_id))
+ @rbac.allow('projectmanager', 'sysadmin')
def terminate_instances(self, context, instance_id, **kwargs):
logging.debug("Going to start terminating instances")
for i in instance_id:
@@ -484,10 +602,13 @@ class CloudController(object):
pass
if instance.get('private_dns_name', None):
logging.debug("Deallocating address %s" % instance.get('private_dns_name', None))
- try:
- self.network.deallocate_ip(instance.get('private_dns_name', None))
- except Exception, _err:
- pass
+ if FLAGS.simple_network:
+ network.deallocate_simple_ip(instance.get('private_dns_name', None))
+ else:
+ try:
+ self.network.deallocate_ip(instance.get('private_dns_name', None))
+ except Exception, _err:
+ pass
if instance.get('node_name', 'unassigned') != 'unassigned': #It's also internal default
rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
{"method": "terminate_instance",
@@ -496,17 +617,17 @@ class CloudController(object):
instance.destroy()
return defer.succeed(True)
+ @rbac.allow('projectmanager', 'sysadmin')
def reboot_instances(self, context, instance_id, **kwargs):
"""instance_id is a list of instance ids"""
for i in instance_id:
instance = self._get_instance(context, i)
- if instance['state'] == 'pending':
- raise exception.ApiError('Cannot reboot pending instance')
- rpc.cast('%s.%s' % (FLAGS.node_topic, instance['node_name']),
+ rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
{"method": "reboot_instance",
"args" : {"instance_id": i}})
return defer.succeed(True)
+ @rbac.allow('projectmanager', 'sysadmin')
def delete_volume(self, context, volume_id, **kwargs):
# TODO: return error if not authorized
volume = self._get_volume(context, volume_id)
@@ -516,16 +637,19 @@ class CloudController(object):
"args" : {"volume_id": volume_id}})
return defer.succeed(True)
+ @rbac.allow('all')
def describe_images(self, context, image_id=None, **kwargs):
# The objectstore does its own authorization for describe
imageSet = images.list(context, image_id)
return defer.succeed({'imagesSet': imageSet})
+ @rbac.allow('projectmanager', 'sysadmin')
def deregister_image(self, context, image_id, **kwargs):
# FIXME: should the objectstore be doing these authorization checks?
images.deregister(context, image_id)
return defer.succeed({'imageId': image_id})
+ @rbac.allow('projectmanager', 'sysadmin')
def register_image(self, context, image_location=None, **kwargs):
# FIXME: should the objectstore be doing these authorization checks?
if image_location is None and kwargs.has_key('name'):
@@ -535,14 +659,29 @@ class CloudController(object):
return defer.succeed({'imageId': image_id})
+ @rbac.allow('all')
+ def describe_image_attribute(self, context, image_id, attribute, **kwargs):
+ if attribute != 'launchPermission':
+ raise exception.ApiError('attribute not supported: %s' % attribute)
+ try:
+ image = images.list(context, image_id)[0]
+ except IndexError:
+ raise exception.ApiError('invalid id: %s' % image_id)
+ result = { 'image_id': image_id, 'launchPermission': [] }
+ if image['isPublic']:
+ result['launchPermission'].append({ 'group': 'all' })
+ return defer.succeed(result)
+
+ @rbac.allow('projectmanager', 'sysadmin')
def modify_image_attribute(self, context, image_id, attribute, operation_type, **kwargs):
+ # TODO(devcamcar): Support users and groups other than 'all'.
if attribute != 'launchPermission':
- raise exception.ApiError('only launchPermission is supported')
+ raise exception.ApiError('attribute not supported: %s' % attribute)
if len(kwargs['user_group']) != 1 and kwargs['user_group'][0] != 'all':
raise exception.ApiError('only group "all" is supported')
- if not operation_type in ['add', 'delete']:
- raise exception.ApiError('operation_type must be add or delete')
- result = images.modify(context.user, image_id, operation_type)
+ if not operation_type in ['add', 'remove']:
+ raise exception.ApiError('operation_type must be add or remove')
+ result = images.modify(context, image_id, operation_type)
return defer.succeed(result)
def update_state(self, topic, value):
diff --git a/nova/endpoint/images.py b/nova/endpoint/images.py
index 673a108e9..420b491f9 100644
--- a/nova/endpoint/images.py
+++ b/nova/endpoint/images.py
@@ -1,17 +1,22 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
Proxy AMI-related calls from the cloud controller, to the running
diff --git a/nova/exception.py b/nova/exception.py
index 82d08e840..ebd85a5a7 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -1,17 +1,22 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
Nova base exception handling, including decorator for re-raising
@@ -47,9 +52,9 @@ def wrap_exception(f):
return f(*args, **kw)
except Exception, e:
if not isinstance(e, Error):
- # exc_type, exc_value, exc_traceback = sys.exc_info()
+ #exc_type, exc_value, exc_traceback = sys.exc_info()
logging.exception('Uncaught exception')
- # logging.debug(traceback.extract_stack(exc_traceback))
+ #logging.error(traceback.extract_stack(exc_traceback))
raise Error(str(e))
raise
_wrap.func_name = f.func_name
diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py
index 13d432b45..acb4d276c 100644
--- a/nova/fakerabbit.py
+++ b/nova/fakerabbit.py
@@ -1,17 +1,22 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
""" Based a bit on the carrot.backeds.queue backend... but a lot better """
diff --git a/nova/fakevirt.py b/nova/fakevirt.py
index 2b918d388..aec2fe0ca 100644
--- a/nova/fakevirt.py
+++ b/nova/fakevirt.py
@@ -1,17 +1,22 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
A fake (in-memory) hypervisor+api. Allows nova testing w/o KVM and libvirt.
diff --git a/nova/flags.py b/nova/flags.py
index 7818e1b14..985f9ba04 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -1,17 +1,22 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
Package-level global flags are defined here, the rest are defined
@@ -68,6 +73,11 @@ DEFINE_string('default_instance_type',
'm1.small',
'default instance type to use, testing only')
+DEFINE_string('vpn_image_id', 'ami-CLOUDPIPE', 'AMI for cloudpipe vpn server')
+DEFINE_string('vpn_key_suffix',
+ '-key',
+ 'Suffix to add to project name for vpn key')
+
# UNUSED
DEFINE_string('node_availability_zone',
'nova',
diff --git a/nova/objectstore/__init__.py b/nova/objectstore/__init__.py
index c6c09e53e..25e9bf74a 100644
--- a/nova/objectstore/__init__.py
+++ b/nova/objectstore/__init__.py
@@ -1,16 +1,22 @@
-# Copyright [2010] [Anso Labs, LLC]
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
:mod:`nova.objectstore` -- S3-type object store
diff --git a/nova/objectstore/bucket.py b/nova/objectstore/bucket.py
index 0bf102867..a9dec5afb 100644
--- a/nova/objectstore/bucket.py
+++ b/nova/objectstore/bucket.py
@@ -1,17 +1,22 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
Simple object store using Blobs and JSON files on disk.
diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py
index a7fff12fc..2482f6fea 100644
--- a/nova/objectstore/handler.py
+++ b/nova/objectstore/handler.py
@@ -1,18 +1,24 @@
-#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
#
# Copyright 2009 Facebook
#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
Implementation of an S3-like storage server based on local files.
@@ -71,6 +77,7 @@ class Application(web.Application):
def __init__(self, user_manager):
web.Application.__init__(self, [
(r"/", RootHandler),
+ (r"/_images/(.+)", ImageDownloadHandler),
(r"/_images/", ImageHandler),
(r"/([^/]+)/(.+)", ObjectHandler),
(r"/([^/]+)/", BucketHandler),
@@ -224,6 +231,31 @@ class ObjectHandler(BaseRequestHandler):
self.finish()
+class ImageDownloadHandler(BaseRequestHandler):
+ SUPPORTED_METHODS = ("GET", )
+
+ @catch_nova_exceptions
+ def get(self, image_id):
+ """ send the decrypted image file
+
+ streaming content through python is slow and should only be used
+ in development mode. You should serve files via a web server
+ in production.
+ """
+
+ self.set_header("Content-Type", "application/octet-stream")
+
+ READ_SIZE = 64*1024
+
+ img = image.Image(image_id)
+ with open(img.image_path, 'rb') as fp:
+ s = fp.read(READ_SIZE)
+ while s:
+ self.write(s)
+ s = fp.read(READ_SIZE)
+
+ self.finish()
+
class ImageHandler(BaseRequestHandler):
SUPPORTED_METHODS = ("POST", "PUT", "GET", "DELETE")
@@ -254,8 +286,8 @@ class ImageHandler(BaseRequestHandler):
if not bucket_object.is_authorized(self.context):
raise web.HTTPError(403)
- p = multiprocessing.Process(target=image.Image.create,args=
- (image_id, image_location, self.context))
+ p = multiprocessing.Process(target=image.Image.register_aws_image,
+ args=(image_id, image_location, self.context))
p.start()
self.finish()
@@ -268,7 +300,7 @@ class ImageHandler(BaseRequestHandler):
image_object = image.Image(image_id)
- if not image.is_authorized(self.context):
+ if not image_object.is_authorized(self.context):
raise web.HTTPError(403)
image_object.set_public(operation=='add')
@@ -281,7 +313,7 @@ class ImageHandler(BaseRequestHandler):
image_id = self.get_argument("image_id", u"")
image_object = image.Image(image_id)
- if not image.is_authorized(self.context):
+ if not image_object.is_authorized(self.context):
raise web.HTTPError(403)
image_object.delete()
diff --git a/nova/objectstore/image.py b/nova/objectstore/image.py
index 892ada00c..4de41ea96 100644
--- a/nova/objectstore/image.py
+++ b/nova/objectstore/image.py
@@ -1,17 +1,22 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
Take uploaded bucket contents and register them as disk images (AMIs).
@@ -47,6 +52,10 @@ class Image(object):
not os.path.isdir(self.path):
raise exception.NotFound
+ @property
+ def image_path(self):
+ return os.path.join(self.path, 'image')
+
def delete(self):
for fn in ['info.json', 'image']:
try:
@@ -91,7 +100,69 @@ class Image(object):
return json.load(f)
@staticmethod
- def create(image_id, image_location, context):
+ def add(src, description, kernel=None, ramdisk=None, public=True):
+ """adds an image to imagestore
+
+ @type src: str
+ @param src: location of the partition image on disk
+
+ @type description: str
+ @param description: string describing the image contents
+
+ @type kernel: bool or str
+    @param kernel: either True meaning this partition is a kernel image or
+    a string of the image id for the kernel
+
+ @type ramdisk: bool or str
+    @param ramdisk: either True meaning this partition is a ramdisk image or
+    a string of the image id for the ramdisk
+
+
+ @type public: bool
+ @param public: determine if this is a public image or private
+
+ @rtype: str
+ @return: a string with the image id
+ """
+
+ image_type = 'machine'
+ image_id = utils.generate_uid('ami')
+
+ if kernel is True:
+ image_type = 'kernel'
+ image_id = utils.generate_uid('aki')
+ if ramdisk is True:
+ image_type = 'ramdisk'
+ image_id = utils.generate_uid('ari')
+
+ image_path = os.path.join(FLAGS.images_path, image_id)
+ os.makedirs(image_path)
+
+ shutil.copyfile(src, os.path.join(image_path, 'image'))
+
+ info = {
+ 'imageId': image_id,
+ 'imageLocation': description,
+ 'imageOwnerId': 'system',
+ 'isPublic': public,
+ 'architecture': 'x86_64',
+ 'type': image_type,
+ 'state': 'available'
+ }
+
+ if type(kernel) is str and len(kernel) > 0:
+ info['kernelId'] = kernel
+
+ if type(ramdisk) is str and len(ramdisk) > 0:
+ info['ramdiskId'] = ramdisk
+
+ with open(os.path.join(image_path, 'info.json'), "w") as f:
+ json.dump(info, f)
+
+ return image_id
+
+ @staticmethod
+ def register_aws_image(image_id, image_location, context):
image_path = os.path.join(FLAGS.images_path, image_id)
os.makedirs(image_path)
diff --git a/nova/objectstore/stored.py b/nova/objectstore/stored.py
index 05a7a1102..fc656a040 100644
--- a/nova/objectstore/stored.py
+++ b/nova/objectstore/stored.py
@@ -1,16 +1,22 @@
-# Copyright [2010] [Anso Labs, LLC]
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
Properties of an object stored within a bucket.
diff --git a/nova/process.py b/nova/process.py
index b114146ce..bc2d4474b 100644
--- a/nova/process.py
+++ b/nova/process.py
@@ -1,17 +1,22 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
Process pool, still buggy right now.
@@ -118,7 +123,7 @@ class BackRelayWithInput(_BackRelay):
def errReceivedIsGood(self, text):
self.stderr.write(text)
-
+
def connectionMade(self):
if self.startedDeferred:
self.startedDeferred.callback(self)
@@ -151,6 +156,11 @@ def getProcessOutput(executable, args=None, env=None, path=None, reactor=None,
d = defer.Deferred()
p = BackRelayWithInput(
d, startedDeferred=startedDeferred, error_ok=error_ok, input=input)
+ # VISH: commands come in as unicode, but self.executes needs
+ # strings or process.spawn raises a deprecation warning
+ executable = str(executable)
+ if not args is None:
+ args = [str(x) for x in args]
reactor.spawnProcess(p, executable, (executable,)+tuple(args), env, path)
return d
@@ -167,7 +177,7 @@ class ProcessPool(object):
def simpleExecute(self, cmd, **kw):
""" Weak emulation of the old utils.execute() function.
-
+
This only exists as a way to quickly move old execute methods to
this new style of code.
@@ -190,7 +200,7 @@ class ProcessPool(object):
d.process = None
d.started = started
-
+
d.addCallback(lambda _: getProcessOutput(*args, **kw))
d.addBoth(self._release)
return d
diff --git a/nova/rpc.py b/nova/rpc.py
index 62c6afff3..b0f6ef7f3 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -1,35 +1,41 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
AMQP-based RPC. Queues have consumers and publishers.
No fan-out support yet.
"""
+import json
import logging
import sys
import uuid
from nova import vendor
-import anyjson
from carrot import connection
from carrot import messaging
from twisted.internet import defer
from twisted.internet import reactor
from twisted.internet import task
+from nova import exception
from nova import fakerabbit
from nova import flags
@@ -57,6 +63,10 @@ class Connection(connection.BrokerConnection):
cls._instance = cls(**params)
return cls._instance
+ @classmethod
+ def recreate(cls):
+ del cls._instance
+ return cls.instance()
class Consumer(messaging.Consumer):
# TODO(termie): it would be nice to give these some way of automatically
@@ -67,22 +77,39 @@ class Consumer(messaging.Consumer):
io_inst = ioloop.IOLoop.instance()
injected = ioloop.PeriodicCallback(
- lambda: self.fetch(enable_callbacks=True), 1, io_loop=io_inst)
+ lambda: self.fetch(enable_callbacks=True), 100, io_loop=io_inst)
injected.start()
return injected
attachToTornado = attach_to_tornado
+ def fetch(self, *args, **kwargs):
+ # TODO(vish): the logic for failed connections and logging should be
+ # refactored into some sort of connection manager object
+ try:
+ if getattr(self, 'failed_connection', False):
+ # attempt to reconnect
+ self.conn = Connection.recreate()
+ self.backend = self.conn.create_backend()
+ super(Consumer, self).fetch(*args, **kwargs)
+ if getattr(self, 'failed_connection', False):
+ logging.error("Reconnected to queue")
+ self.failed_connection = False
+ except Exception, ex:
+ if not getattr(self, 'failed_connection', False):
+ logging.exception("Failed to fetch message from queue")
+ self.failed_connection = True
+
def attach_to_twisted(self):
loop = task.LoopingCall(self.fetch, enable_callbacks=True)
- loop.start(interval=0.001)
+ loop.start(interval=0.1)
class Publisher(messaging.Publisher):
pass
class TopicConsumer(Consumer):
- exchange_type = "topic"
+ exchange_type = "topic"
def __init__(self, connection=None, topic="broadcast"):
self.queue = topic
self.routing_key = topic
@@ -95,36 +122,43 @@ class AdapterConsumer(TopicConsumer):
_log.debug('Initing the Adapter Consumer for %s' % (topic))
self.proxy = proxy
super(AdapterConsumer, self).__init__(connection=connection, topic=topic)
-
+
+ @exception.wrap_exception
def receive(self, message_data, message):
_log.debug('received %s' % (message_data))
msg_id = message_data.pop('_msg_id', None)
method = message_data.get('method')
args = message_data.get('args', {})
+ message.ack()
if not method:
+ # NOTE(vish): we may not want to ack here, but that means that bad
+ # messages stay in the queue indefinitely, so for now
+ # we just log the message and send an error string
+ # back to the caller
+ _log.warn('no method for message: %s' % (message_data))
+ msg_reply(msg_id, 'No method for message: %s' % message_data)
return
- node_func = getattr(self.proxy, str(method))
+ node_func = getattr(self.proxy, str(method))
node_args = dict((str(k), v) for k, v in args.iteritems())
d = defer.maybeDeferred(node_func, **node_args)
if msg_id:
d.addCallback(lambda rval: msg_reply(msg_id, rval))
d.addErrback(lambda e: msg_reply(msg_id, str(e)))
- message.ack()
return
class TopicPublisher(Publisher):
- exchange_type = "topic"
+ exchange_type = "topic"
def __init__(self, connection=None, topic="broadcast"):
self.routing_key = topic
self.exchange = FLAGS.control_exchange
super(TopicPublisher, self).__init__(connection=connection)
-
+
class DirectConsumer(Consumer):
- exchange_type = "direct"
+ exchange_type = "direct"
def __init__(self, connection=None, msg_id=None):
self.queue = msg_id
self.routing_key = msg_id
@@ -145,12 +179,12 @@ class DirectPublisher(Publisher):
def msg_reply(msg_id, reply):
conn = Connection.instance()
publisher = DirectPublisher(connection=conn, msg_id=msg_id)
-
+
try:
publisher.send({'result': reply})
except TypeError:
publisher.send(
- {'result': dict((k, repr(v))
+ {'result': dict((k, repr(v))
for k, v in reply.__dict__.iteritems())
})
publisher.close()
@@ -161,7 +195,7 @@ def call(topic, msg):
msg_id = uuid.uuid4().hex
msg.update({'_msg_id': msg_id})
_log.debug("MSG_ID is %s" % (msg_id))
-
+
conn = Connection.instance()
d = defer.Deferred()
consumer = DirectConsumer(connection=conn, msg_id=msg_id)
@@ -198,7 +232,7 @@ def send_message(topic, message, wait=True):
_log.debug('message %s', message)
if wait:
- consumer = messaging.Consumer(connection=rpc.Connection.instance(),
+ consumer = messaging.Consumer(connection=Connection.instance(),
queue=msg_id,
exchange=msg_id,
auto_delete=True,
@@ -206,7 +240,7 @@ def send_message(topic, message, wait=True):
routing_key=msg_id)
consumer.register_callback(generic_response)
- publisher = messaging.Publisher(connection=rpc.Connection.instance(),
+ publisher = messaging.Publisher(connection=Connection.instance(),
exchange="nova",
exchange_type="topic",
routing_key=topic)
@@ -215,8 +249,8 @@ def send_message(topic, message, wait=True):
if wait:
consumer.wait()
-
-# TODO: Replace with a docstring test
+
+# TODO: Replace with a docstring test
if __name__ == "__main__":
- send_message(sys.argv[1], anyjson.deserialize(sys.argv[2]))
+ send_message(sys.argv[1], json.loads(sys.argv[2]))
diff --git a/nova/server.py b/nova/server.py
index 227f7fddc..49d5a1e89 100644
--- a/nova/server.py
+++ b/nova/server.py
@@ -1,17 +1,22 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
Base functionality for nova daemons - gradually being replaced with twistd.py.
diff --git a/nova/test.py b/nova/test.py
index 610ad89aa..fa05a02af 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -1,21 +1,26 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
Base classes for our unit tests.
-Allows overriding of flags for use of fakes,
+Allows overriding of flags for use of fakes,
and some black magic for inline callbacks.
"""
@@ -31,7 +36,6 @@ from twisted.python import failure
from twisted.trial import unittest as trial_unittest
import stubout
-from nova import datastore
from nova import fakerabbit
from nova import flags
@@ -47,12 +51,13 @@ def skip_if_fake(f):
raise trial_unittest.SkipTest('Test cannot be run in fake mode')
else:
return f(*args, **kw)
-
+
_skipper.func_name = f.func_name
return _skipper
class TrialTestCase(trial_unittest.TestCase):
+
def setUp(self):
super(TrialTestCase, self).setUp()
@@ -72,10 +77,6 @@ class TrialTestCase(trial_unittest.TestCase):
if FLAGS.fake_rabbit:
fakerabbit.reset_all()
-
- # attempt to wipe all keepers
- #keeper = datastore.Keeper()
- #keeper.clear_all()
def flags(self, **kw):
for k, v in kw.iteritems():
@@ -90,7 +91,7 @@ class TrialTestCase(trial_unittest.TestCase):
for k, v in self.flag_overrides.iteritems():
setattr(FLAGS, k, v)
-
+
class BaseTestCase(TrialTestCase):
def setUp(self):
@@ -99,7 +100,7 @@ class BaseTestCase(TrialTestCase):
# the injected listeners... this is fine for now though
self.injected = []
self.ioloop = ioloop.IOLoop.instance()
-
+
self._waiting = None
self._doneWaiting = False
self._timedOut = False
@@ -144,10 +145,10 @@ class BaseTestCase(TrialTestCase):
pass
self._waiting = None
self._doneWaiting = True
-
+
def _maybeInlineCallbacks(self, f):
""" If we're doing async calls in our tests, wait on them.
-
+
This is probably the most complicated hunk of code we have so far.
First up, if the function is normal (not async) we just act normal
@@ -158,7 +159,7 @@ class BaseTestCase(TrialTestCase):
of making epic callback chains.
Example (callback chain, ugly):
-
+
d = self.node.terminate_instance(instance_id) # a Deferred instance
def _describe(_):
d_desc = self.node.describe_instances() # another Deferred instance
@@ -169,7 +170,7 @@ class BaseTestCase(TrialTestCase):
d.addCallback(_checkDescribe)
d.addCallback(lambda x: self._done())
self._waitForTest()
-
+
Example (inline callbacks! yay!):
yield self.node.terminate_instance(instance_id)
@@ -186,11 +187,11 @@ class BaseTestCase(TrialTestCase):
if not hasattr(g, 'send'):
self._done()
return defer.succeed(g)
-
+
inlined = defer.inlineCallbacks(f)
d = inlined()
return d
-
+
def _catchExceptions(self, result, failure):
exc = (failure.type, failure.value, failure.getTracebackObject())
if isinstance(failure.value, self.failureException):
diff --git a/nova/tests/__init__.py b/nova/tests/__init__.py
index a4ccbbaeb..de4a3531a 100644
--- a/nova/tests/__init__.py
+++ b/nova/tests/__init__.py
@@ -1,16 +1,22 @@
-# Copyright [2010] [Anso Labs, LLC]
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
:mod:`nova.tests` -- Nova Unittests
diff --git a/nova/tests/access_unittest.py b/nova/tests/access_unittest.py
new file mode 100644
index 000000000..fc2c19227
--- /dev/null
+++ b/nova/tests/access_unittest.py
@@ -0,0 +1,166 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import unittest
+import logging
+
+from nova.auth.users import UserManager
+from nova.auth import rbac
+from nova import exception
+from nova import flags
+from nova import test
+
+FLAGS = flags.FLAGS
+class Context(object):
+ pass
+
+class AccessTestCase(test.BaseTestCase):
+ def setUp(self):
+ super(AccessTestCase, self).setUp()
+ FLAGS.fake_libvirt = True
+ FLAGS.fake_storage = True
+ um = UserManager.instance()
+ # Make test users
+ try:
+ self.testadmin = um.create_user('testadmin')
+ except Exception, err:
+ logging.error(str(err))
+ try:
+ self.testpmsys = um.create_user('testpmsys')
+ except: pass
+ try:
+ self.testnet = um.create_user('testnet')
+ except: pass
+ try:
+ self.testsys = um.create_user('testsys')
+ except: pass
+ # Assign some rules
+ try:
+ um.add_role('testadmin', 'cloudadmin')
+ except: pass
+ try:
+ um.add_role('testpmsys', 'sysadmin')
+ except: pass
+ try:
+ um.add_role('testnet', 'netadmin')
+ except: pass
+ try:
+ um.add_role('testsys', 'sysadmin')
+ except: pass
+
+ # Make a test project
+ try:
+ self.project = um.create_project('testproj', 'testpmsys', 'a test project', ['testpmsys', 'testnet', 'testsys'])
+ except: pass
+ try:
+ self.project.add_role(self.testnet, 'netadmin')
+ except: pass
+ try:
+ self.project.add_role(self.testsys, 'sysadmin')
+ except: pass
+ self.context = Context()
+ self.context.project = self.project
+ #user is set in each test
+
+ def tearDown(self):
+ um = UserManager.instance()
+ # Delete the test project
+ um.delete_project('testproj')
+ # Delete the test user
+ um.delete_user('testadmin')
+ um.delete_user('testpmsys')
+ um.delete_user('testnet')
+ um.delete_user('testsys')
+ super(AccessTestCase, self).tearDown()
+
+ def test_001_allow_all(self):
+ self.context.user = self.testadmin
+ self.assertTrue(self._allow_all(self.context))
+ self.context.user = self.testpmsys
+ self.assertTrue(self._allow_all(self.context))
+ self.context.user = self.testnet
+ self.assertTrue(self._allow_all(self.context))
+ self.context.user = self.testsys
+ self.assertTrue(self._allow_all(self.context))
+
+ def test_002_allow_none(self):
+ self.context.user = self.testadmin
+ self.assertTrue(self._allow_none(self.context))
+ self.context.user = self.testpmsys
+ self.assertRaises(exception.NotAuthorized, self._allow_none, self.context)
+ self.context.user = self.testnet
+ self.assertRaises(exception.NotAuthorized, self._allow_none, self.context)
+ self.context.user = self.testsys
+ self.assertRaises(exception.NotAuthorized, self._allow_none, self.context)
+
+ def test_003_allow_project_manager(self):
+ self.context.user = self.testadmin
+ self.assertTrue(self._allow_project_manager(self.context))
+ self.context.user = self.testpmsys
+ self.assertTrue(self._allow_project_manager(self.context))
+ self.context.user = self.testnet
+ self.assertRaises(exception.NotAuthorized, self._allow_project_manager, self.context)
+ self.context.user = self.testsys
+ self.assertRaises(exception.NotAuthorized, self._allow_project_manager, self.context)
+
+ def test_004_allow_sys_and_net(self):
+ self.context.user = self.testadmin
+ self.assertTrue(self._allow_sys_and_net(self.context))
+ self.context.user = self.testpmsys # doesn't have the per project sysadmin
+ self.assertRaises(exception.NotAuthorized, self._allow_sys_and_net, self.context)
+ self.context.user = self.testnet
+ self.assertTrue(self._allow_sys_and_net(self.context))
+ self.context.user = self.testsys
+ self.assertTrue(self._allow_sys_and_net(self.context))
+
+ def test_005_allow_sys_no_pm(self):
+ self.context.user = self.testadmin
+ self.assertTrue(self._allow_sys_no_pm(self.context))
+ self.context.user = self.testpmsys
+ self.assertRaises(exception.NotAuthorized, self._allow_sys_no_pm, self.context)
+ self.context.user = self.testnet
+ self.assertRaises(exception.NotAuthorized, self._allow_sys_no_pm, self.context)
+ self.context.user = self.testsys
+ self.assertTrue(self._allow_sys_no_pm(self.context))
+
+ @rbac.allow('all')
+ def _allow_all(self, context):
+ return True
+
+ @rbac.allow('none')
+ def _allow_none(self, context):
+ return True
+
+ @rbac.allow('projectmanager')
+ def _allow_project_manager(self, context):
+ return True
+
+ @rbac.allow('sysadmin', 'netadmin')
+ def _allow_sys_and_net(self, context):
+ return True
+
+ @rbac.allow('sysadmin')
+ @rbac.deny('projectmanager')
+ def _allow_sys_no_pm(self, context):
+ return True
+
+if __name__ == "__main__":
+ # TODO: Implement use_fake as an option
+ unittest.main()
diff --git a/nova/tests/api_integration.py b/nova/tests/api_integration.py
index cf84b9907..0216b16ec 100644
--- a/nova/tests/api_integration.py
+++ b/nova/tests/api_integration.py
@@ -1,16 +1,22 @@
-# Copyright [2010] [Anso Labs, LLC]
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
import unittest
diff --git a/nova/tests/api_unittest.py b/nova/tests/api_unittest.py
index fdbf088f9..d82089e6f 100644
--- a/nova/tests/api_unittest.py
+++ b/nova/tests/api_unittest.py
@@ -1,17 +1,22 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
import httplib
import random
@@ -154,7 +159,7 @@ class ApiEc2TestCase(test.BaseTestCase):
self.host = '127.0.0.1'
- self.app = api.APIServerApplication(self.users, {'Cloud': self.cloud})
+ self.app = api.APIServerApplication({'Cloud': self.cloud})
self.ec2 = boto.connect_ec2(
aws_access_key_id='fake',
aws_secret_access_key='fake',
@@ -174,16 +179,25 @@ class ApiEc2TestCase(test.BaseTestCase):
def test_describe_instances(self):
self.expect_http()
self.mox.ReplayAll()
-
+ try:
+ self.users.create_user('fake', 'fake', 'fake')
+ except Exception, _err:
+ pass # User may already exist
self.assertEqual(self.ec2.get_all_instances(), [])
+ self.users.delete_user('fake')
def test_get_all_key_pairs(self):
self.expect_http()
self.mox.ReplayAll()
keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd") for x in range(random.randint(4, 8)))
+ try:
+ self.users.create_user('fake', 'fake', 'fake')
+ except Exception, _err:
+ pass # User may already exist
self.users.generate_key_pair('fake', keyname)
rv = self.ec2.get_all_key_pairs()
self.assertTrue(filter(lambda k: k.name == keyname, rv))
+ self.users.delete_user('fake')
diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py
index 40eeb8a23..992fc8c90 100644
--- a/nova/tests/cloud_unittest.py
+++ b/nova/tests/cloud_unittest.py
@@ -1,17 +1,22 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
import logging
import StringIO
@@ -40,8 +45,7 @@ class CloudTestCase(test.BaseTestCase):
super(CloudTestCase, self).setUp()
self.flags(fake_libvirt=True,
fake_storage=True,
- fake_users=True,
- redis_db=8)
+ fake_users=True)
self.conn = rpc.Connection.instance()
logging.getLogger().setLevel(logging.DEBUG)
diff --git a/nova/tests/datastore_unittest.py b/nova/tests/datastore_unittest.py
deleted file mode 100644
index 4e4d8586a..000000000
--- a/nova/tests/datastore_unittest.py
+++ /dev/null
@@ -1,60 +0,0 @@
-from nova import test
-from nova import datastore
-import random
-
-class KeeperTestCase(test.BaseTestCase):
- """
- Basic persistence tests for Keeper datastore.
- Generalize, then use these to support
- migration to redis / cassandra / multiple stores.
- """
-
- def __init__(self, *args, **kwargs):
- """
- Create a new keeper instance for test keys.
- """
- super(KeeperTestCase, self).__init__(*args, **kwargs)
- self.keeper = datastore.Keeper('test-')
-
- def tear_down(self):
- """
- Scrub out test keeper data.
- """
- pass
-
- def test_store_strings(self):
- """
- Confirm that simple strings go in and come out safely.
- Should also test unicode strings.
- """
- randomstring = ''.join(
- [random.choice('ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890-')
- for _x in xrange(20)]
- )
- self.keeper['test_string'] = randomstring
- self.assertEqual(randomstring, self.keeper['test_string'])
-
- def test_store_dicts(self):
- """
- Arbitrary dictionaries should be storable.
- """
- test_dict = {'key_one': 'value_one'}
- self.keeper['test_dict'] = test_dict
- self.assertEqual(test_dict['key_one'],
- self.keeper['test_dict']['key_one'])
-
- def test_sets(self):
- """
- A keeper dict should be self-serializing.
- """
- self.keeper.set_add('test_set', 'foo')
- test_dict = {'arbitrary': 'dict of stuff'}
- self.keeper.set_add('test_set', test_dict)
- self.assertTrue(self.keeper.set_is_member('test_set', 'foo'))
- self.assertFalse(self.keeper.set_is_member('test_set', 'bar'))
- self.keeper.set_remove('test_set', 'foo')
- self.assertFalse(self.keeper.set_is_member('test_set', 'foo'))
- rv = self.keeper.set_fetch('test_set')
- self.assertEqual(test_dict, rv.next())
- self.keeper.set_remove('test_set', test_dict)
-
diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py
index 3c7b0be52..cbc0042da 100644
--- a/nova/tests/fake_flags.py
+++ b/nova/tests/fake_flags.py
@@ -1,16 +1,22 @@
-# Copyright [2010] [Anso Labs, LLC]
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
from nova import flags
@@ -21,6 +27,4 @@ FLAGS.fake_storage = True
FLAGS.fake_rabbit = True
FLAGS.fake_network = True
FLAGS.fake_users = True
-FLAGS.keeper_backend = 'sqlite'
-FLAGS.datastore_path = ':memory:'
FLAGS.verbose = True
diff --git a/nova/tests/future_unittest.py b/nova/tests/future_unittest.py
index 81d69dfff..af86f2788 100644
--- a/nova/tests/future_unittest.py
+++ b/nova/tests/future_unittest.py
@@ -1,17 +1,22 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
import logging
import StringIO
diff --git a/nova/tests/keeper_unittest.py b/nova/tests/keeper_unittest.py
deleted file mode 100644
index 3896c9e57..000000000
--- a/nova/tests/keeper_unittest.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-import random
-
-from nova import datastore
-from nova import test
-
-class KeeperTestCase(test.TrialTestCase):
- """
- Basic persistence tests for Keeper datastore.
- Generalize, then use these to support
- migration to redis / cassandra / multiple stores.
- """
-
- def setUp(self):
- super(KeeperTestCase, self).setUp()
- self.keeper = datastore.Keeper('test')
-
- def tearDown(self):
- super(KeeperTestCase, self).tearDown()
- self.keeper.clear()
-
- def test_store_strings(self):
- """
- Confirm that simple strings go in and come out safely.
- Should also test unicode strings.
- """
- randomstring = ''.join(
- [random.choice('ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890-')
- for _x in xrange(20)]
- )
- self.keeper['test_string'] = randomstring
- self.assertEqual(randomstring, self.keeper['test_string'])
-
- def test_store_dicts(self):
- """
- Arbitrary dictionaries should be storable.
- """
- test_dict = {'key_one': 'value_one'}
- self.keeper['test_dict'] = test_dict
- self.assertEqual(test_dict['key_one'],
- self.keeper['test_dict']['key_one'])
-
- def test_sets(self):
- """
- A keeper dict should be self-serializing.
- """
- self.keeper.set_add('test_set', 'foo')
- test_dict = {'arbitrary': 'dict of stuff'}
- self.keeper.set_add('test_set', test_dict)
- self.assertTrue(self.keeper.set_is_member('test_set', 'foo'))
- self.assertFalse(self.keeper.set_is_member('test_set', 'bar'))
- self.keeper.set_remove('test_set', 'foo')
- self.assertFalse(self.keeper.set_is_member('test_set', 'foo'))
- rv = self.keeper.set_fetch('test_set')
- self.assertEqual(test_dict, rv.next())
- self.keeper.set_remove('test_set', test_dict)
-
diff --git a/nova/tests/model_unittest.py b/nova/tests/model_unittest.py
new file mode 100644
index 000000000..23e2f9e73
--- /dev/null
+++ b/nova/tests/model_unittest.py
@@ -0,0 +1,205 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# Copyright [2010] [Anso Labs, LLC]
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import time
+
+from nova import vendor
+from twisted.internet import defer
+
+from nova import exception
+from nova import flags
+from nova import test
+from nova import utils
+from nova.compute import model
+from nova.compute import node
+
+
+FLAGS = flags.FLAGS
+
+
+class ModelTestCase(test.TrialTestCase):
+ def setUp(self):
+ super(ModelTestCase, self).setUp()
+ self.flags(fake_libvirt=True,
+ fake_storage=True,
+ fake_users=True)
+
+ def tearDown(self):
+ model.Instance('i-test').destroy()
+ model.Host('testhost').destroy()
+ model.Daemon('testhost', 'nova-testdaemon').destroy()
+
+ def create_instance(self):
+ inst = model.Instance('i-test')
+ inst['reservation_id'] = 'r-test'
+ inst['launch_time'] = '10'
+ inst['user_id'] = 'fake'
+ inst['project_id'] = 'fake'
+ inst['instance_type'] = 'm1.tiny'
+ inst['node_name'] = FLAGS.node_name
+ inst['mac_address'] = utils.generate_mac()
+ inst['ami_launch_index'] = 0
+ inst.save()
+ return inst
+
+ def create_host(self):
+ host = model.Host('testhost')
+ host.save()
+ return host
+
+ def create_daemon(self):
+ daemon = model.Daemon('testhost', 'nova-testdaemon')
+ daemon.save()
+ return daemon
+
+ @defer.inlineCallbacks
+ def test_create_instance(self):
+        """store with create_instance, then test that a load finds it"""
+ instance = yield self.create_instance()
+ old = yield model.Instance(instance.identifier)
+ self.assertFalse(old.is_new_record())
+
+ @defer.inlineCallbacks
+ def test_delete_instance(self):
+ """create, then destroy, then make sure loads a new record"""
+ instance = yield self.create_instance()
+ yield instance.destroy()
+ newinst = yield model.Instance('i-test')
+ self.assertTrue(newinst.is_new_record())
+
+ @defer.inlineCallbacks
+ def test_instance_added_to_set(self):
+ """create, then check that it is listed for the project"""
+ instance = yield self.create_instance()
+ found = False
+ for x in model.InstanceDirectory().all:
+ if x.identifier == 'i-test':
+ found = True
+ self.assert_(found)
+
+ @defer.inlineCallbacks
+ def test_instance_associates_project(self):
+ """create, then check that it is listed for the project"""
+ instance = yield self.create_instance()
+ found = False
+ for x in model.InstanceDirectory().by_project(instance.project):
+ if x.identifier == 'i-test':
+ found = True
+ self.assert_(found)
+
+ @defer.inlineCallbacks
+ def test_host_class_finds_hosts(self):
+ host = yield self.create_host()
+ self.assertEqual('testhost', model.Host.lookup('testhost').identifier)
+
+ @defer.inlineCallbacks
+ def test_host_class_doesnt_find_missing_hosts(self):
+ rv = yield model.Host.lookup('woahnelly')
+ self.assertEqual(None, rv)
+
+ @defer.inlineCallbacks
+ def test_create_host(self):
+ """store with create_host, then test that a load finds it"""
+ host = yield self.create_host()
+ old = yield model.Host(host.identifier)
+ self.assertFalse(old.is_new_record())
+
+ @defer.inlineCallbacks
+ def test_delete_host(self):
+ """create, then destroy, then make sure loads a new record"""
+ instance = yield self.create_host()
+ yield instance.destroy()
+ newinst = yield model.Host('testhost')
+ self.assertTrue(newinst.is_new_record())
+
+ @defer.inlineCallbacks
+ def test_host_added_to_set(self):
+ """create, then check that it is included in list"""
+ instance = yield self.create_host()
+ found = False
+ for x in model.Host.all():
+ if x.identifier == 'testhost':
+ found = True
+ self.assert_(found)
+
+ @defer.inlineCallbacks
+ def test_create_daemon_two_args(self):
+ """create a daemon with two arguments"""
+ d = yield self.create_daemon()
+ d = model.Daemon('testhost', 'nova-testdaemon')
+ self.assertFalse(d.is_new_record())
+
+ @defer.inlineCallbacks
+ def test_create_daemon_single_arg(self):
+ """Create a daemon using the combined host:bin format"""
+ d = yield model.Daemon("testhost:nova-testdaemon")
+ d.save()
+ d = model.Daemon('testhost:nova-testdaemon')
+ self.assertFalse(d.is_new_record())
+
+ @defer.inlineCallbacks
+ def test_equality_of_daemon_single_and_double_args(self):
+ """Create a daemon using the combined host:bin arg, find with 2"""
+ d = yield model.Daemon("testhost:nova-testdaemon")
+ d.save()
+ d = model.Daemon('testhost', 'nova-testdaemon')
+ self.assertFalse(d.is_new_record())
+
+ @defer.inlineCallbacks
+ def test_equality_daemon_of_double_and_single_args(self):
+ """Create a daemon using the combined host:bin arg, find with 2"""
+ d = yield self.create_daemon()
+ d = model.Daemon('testhost:nova-testdaemon')
+ self.assertFalse(d.is_new_record())
+
+ @defer.inlineCallbacks
+ def test_delete_daemon(self):
+ """create, then destroy, then make sure loads a new record"""
+ instance = yield self.create_daemon()
+ yield instance.destroy()
+ newinst = yield model.Daemon('testhost', 'nova-testdaemon')
+ self.assertTrue(newinst.is_new_record())
+
+ @defer.inlineCallbacks
+ def test_daemon_heartbeat(self):
+ """Create a daemon, sleep, heartbeat, check for update"""
+ d = yield self.create_daemon()
+ ts = d['updated_at']
+ time.sleep(2)
+ d.heartbeat()
+ d2 = model.Daemon('testhost', 'nova-testdaemon')
+ ts2 = d2['updated_at']
+ self.assert_(ts2 > ts)
+
+ @defer.inlineCallbacks
+ def test_daemon_added_to_set(self):
+ """create, then check that it is included in list"""
+ instance = yield self.create_daemon()
+ found = False
+ for x in model.Daemon.all():
+ if x.identifier == 'testhost:nova-testdaemon':
+ found = True
+ self.assert_(found)
+
+ @defer.inlineCallbacks
+ def test_daemon_associates_host(self):
+ """create, then check that it is listed for the host"""
+ instance = yield self.create_daemon()
+ found = False
+ for x in model.Daemon.by_host('testhost'):
+ if x.identifier == 'testhost:nova-testdaemon':
+ found = True
+ self.assertTrue(found)
diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py
index a2ad89a32..bccaacfa7 100644
--- a/nova/tests/network_unittest.py
+++ b/nova/tests/network_unittest.py
@@ -1,18 +1,24 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import os
import logging
import unittest
@@ -21,6 +27,8 @@ import IPy
from nova import flags
from nova import test
+from nova import exception
+from nova.compute.exception import NoMoreAddresses
from nova.compute import network
from nova.auth import users
from nova import utils
@@ -32,10 +40,10 @@ class NetworkTestCase(test.TrialTestCase):
self.flags(fake_libvirt=True,
fake_storage=True,
fake_network=True,
- network_size=32,
- redis_db=8)
+ network_size=32)
logging.getLogger().setLevel(logging.DEBUG)
self.manager = users.UserManager.instance()
+ self.dnsmasq = FakeDNSMasq()
try:
self.manager.create_user('netuser', 'netuser', 'netuser')
except: pass
@@ -62,59 +70,128 @@ class NetworkTestCase(test.TrialTestCase):
address = network.allocate_ip(
"netuser", "project0", utils.generate_mac())
logging.debug("Was allocated %s" % (address))
- self.assertEqual(True, address in self._get_project_addresses("project0"))
+ net = network.get_project_network("project0", "default")
+ self.assertEqual(True, is_in_project(address, "project0"))
+ mac = utils.generate_mac()
+ hostname = "test-host"
+ self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name)
rv = network.deallocate_ip(address)
- self.assertEqual(False, address in self._get_project_addresses("project0"))
+
+ # Doesn't go away until it's dhcp released
+ self.assertEqual(True, is_in_project(address, "project0"))
+
+ self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
+ self.assertEqual(False, is_in_project(address, "project0"))
def test_range_allocation(self):
+ mac = utils.generate_mac()
+ secondmac = utils.generate_mac()
+ hostname = "test-host"
address = network.allocate_ip(
- "netuser", "project0", utils.generate_mac())
+ "netuser", "project0", mac)
secondaddress = network.allocate_ip(
- "netuser", "project1", utils.generate_mac())
- self.assertEqual(True,
- address in self._get_project_addresses("project0"))
- self.assertEqual(True,
- secondaddress in self._get_project_addresses("project1"))
- self.assertEqual(False, address in self._get_project_addresses("project1"))
+ "netuser", "project1", secondmac)
+ net = network.get_project_network("project0", "default")
+ secondnet = network.get_project_network("project1", "default")
+
+ self.assertEqual(True, is_in_project(address, "project0"))
+ self.assertEqual(True, is_in_project(secondaddress, "project1"))
+ self.assertEqual(False, is_in_project(address, "project1"))
+
+ # Addresses are allocated before they're issued
+ self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name)
+ self.dnsmasq.issue_ip(secondmac, secondaddress,
+ hostname, secondnet.bridge_name)
+
rv = network.deallocate_ip(address)
- self.assertEqual(False, address in self._get_project_addresses("project0"))
+ self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
+ self.assertEqual(False, is_in_project(address, "project0"))
+
+ # First address release shouldn't affect the second
+ self.assertEqual(True, is_in_project(secondaddress, "project1"))
+
rv = network.deallocate_ip(secondaddress)
- self.assertEqual(False,
- secondaddress in self._get_project_addresses("project1"))
+ self.dnsmasq.release_ip(secondmac, secondaddress,
+ hostname, secondnet.bridge_name)
+ self.assertEqual(False, is_in_project(secondaddress, "project1"))
def test_subnet_edge(self):
secondaddress = network.allocate_ip("netuser", "project0",
utils.generate_mac())
+ hostname = "toomany-hosts"
for project in range(1,5):
project_id = "project%s" % (project)
+ mac = utils.generate_mac()
+ mac2 = utils.generate_mac()
+ mac3 = utils.generate_mac()
address = network.allocate_ip(
- "netuser", project_id, utils.generate_mac())
+ "netuser", project_id, mac)
address2 = network.allocate_ip(
- "netuser", project_id, utils.generate_mac())
+ "netuser", project_id, mac2)
address3 = network.allocate_ip(
- "netuser", project_id, utils.generate_mac())
- self.assertEqual(False,
- address in self._get_project_addresses("project0"))
- self.assertEqual(False,
- address2 in self._get_project_addresses("project0"))
- self.assertEqual(False,
- address3 in self._get_project_addresses("project0"))
+ "netuser", project_id, mac3)
+ self.assertEqual(False, is_in_project(address, "project0"))
+ self.assertEqual(False, is_in_project(address2, "project0"))
+ self.assertEqual(False, is_in_project(address3, "project0"))
rv = network.deallocate_ip(address)
rv = network.deallocate_ip(address2)
rv = network.deallocate_ip(address3)
+ net = network.get_project_network(project_id, "default")
+ self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
+ self.dnsmasq.release_ip(mac2, address2, hostname, net.bridge_name)
+ self.dnsmasq.release_ip(mac3, address3, hostname, net.bridge_name)
+ net = network.get_project_network("project0", "default")
rv = network.deallocate_ip(secondaddress)
+ self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
- def test_too_many_projects(self):
- for i in range(0, 30):
- name = 'toomany-project%s' % i
- self.manager.create_project(name, 'netuser', name)
- address = network.allocate_ip(
- "netuser", name, utils.generate_mac())
- rv = network.deallocate_ip(address)
- self.manager.delete_project(name)
+ def test_release_before_deallocate(self):
+ pass
+
+ def test_deallocate_before_issued(self):
+ pass
+
+ def test_too_many_addresses(self):
+        """
+        Network size is 32, there are 5 addresses reserved for VPN.
+        So we should get 22 usable addresses; the 23rd allocation
+        should raise NoMoreAddresses.
+        """
+ net = network.get_project_network("project0", "default")
+ hostname = "toomany-hosts"
+ macs = {}
+ addresses = {}
+ for i in range(0, 22):
+ macs[i] = utils.generate_mac()
+ addresses[i] = network.allocate_ip("netuser", "project0", macs[i])
+ self.dnsmasq.issue_ip(macs[i], addresses[i], hostname, net.bridge_name)
+
+ self.assertRaises(NoMoreAddresses, network.allocate_ip, "netuser", "project0", utils.generate_mac())
+
+ for i in range(0, 22):
+ rv = network.deallocate_ip(addresses[i])
+ self.dnsmasq.release_ip(macs[i], addresses[i], hostname, net.bridge_name)
+
+def is_in_project(address, project_id):
+ return address in network.get_project_network(project_id).list_addresses()
+
+def _get_project_addresses(project_id):
+ project_addresses = []
+ for addr in network.get_project_network(project_id).list_addresses():
+ project_addresses.append(addr)
+ return project_addresses
+
+def binpath(script):
+ return os.path.abspath(os.path.join(__file__, "../../../bin", script))
+
+class FakeDNSMasq(object):
+ def issue_ip(self, mac, ip, hostname, interface):
+ cmd = "%s add %s %s %s" % (binpath('dhcpleasor.py'), mac, ip, hostname)
+ env = {'DNSMASQ_INTERFACE': interface, 'TESTING' : '1'}
+ (out, err) = utils.execute(cmd, addl_env=env)
+ logging.debug("ISSUE_IP: %s, %s " % (out, err))
+
+ def release_ip(self, mac, ip, hostname, interface):
+ cmd = "%s del %s %s %s" % (binpath('dhcpleasor.py'), mac, ip, hostname)
+ env = {'DNSMASQ_INTERFACE': interface, 'TESTING' : '1'}
+ (out, err) = utils.execute(cmd, addl_env=env)
+ logging.debug("RELEASE_IP: %s, %s " % (out, err))
- def _get_project_addresses(self, project_id):
- project_addresses = []
- for addr in network.get_project_network(project_id).list_addresses():
- project_addresses.append(addr)
- return project_addresses
diff --git a/nova/tests/node_unittest.py b/nova/tests/node_unittest.py
index 5ecd56d52..44c8d0421 100644
--- a/nova/tests/node_unittest.py
+++ b/nova/tests/node_unittest.py
@@ -1,19 +1,26 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
import logging
+import time
+
from xml.etree import ElementTree
from nova import vendor
@@ -56,8 +63,7 @@ class NodeConnectionTestCase(test.TrialTestCase):
super(NodeConnectionTestCase, self).setUp()
self.flags(fake_libvirt=True,
fake_storage=True,
- fake_users=True,
- redis_db=8)
+ fake_users=True)
self.node = node.Node()
def create_instance(self):
@@ -82,11 +88,13 @@ class NodeConnectionTestCase(test.TrialTestCase):
rv = yield self.node.run_instance(instance_id)
rv = yield self.node.describe_instances()
+ logging.info("Running instances: %s", rv)
self.assertEqual(rv[instance_id].name, instance_id)
rv = yield self.node.terminate_instance(instance_id)
rv = yield self.node.describe_instances()
+ logging.info("After terminating instances: %s", rv)
self.assertEqual(rv, {})
@defer.inlineCallbacks
diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py
index 812f5418b..ddd455a73 100644
--- a/nova/tests/objectstore_unittest.py
+++ b/nova/tests/objectstore_unittest.py
@@ -1,17 +1,22 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
import glob
import hashlib
@@ -23,7 +28,6 @@ import tempfile
from nova import vendor
from nova import flags
-from nova import rpc
from nova import objectstore
from nova import test
from nova.auth import users
@@ -52,7 +56,6 @@ class ObjectStoreTestCase(test.BaseTestCase):
buckets_path=os.path.join(oss_tempdir, 'buckets'),
images_path=os.path.join(oss_tempdir, 'images'),
ca_path=os.path.join(os.path.dirname(__file__), 'CA'))
- self.conn = rpc.Connection.instance()
logging.getLogger().setLevel(logging.DEBUG)
self.um = users.UserManager.instance()
@@ -152,7 +155,7 @@ class ObjectStoreTestCase(test.BaseTestCase):
bucket[os.path.basename(path)] = open(path, 'rb').read()
# register an image
- objectstore.image.Image.create('i-testing', 'image_bucket/1mb.manifest.xml', self.context)
+ objectstore.image.Image.register_aws_image('i-testing', 'image_bucket/1mb.manifest.xml', self.context)
# verify image
my_img = objectstore.image.Image('i-testing')
diff --git a/nova/tests/process_unittest.py b/nova/tests/process_unittest.py
index 50368dd3f..4e891fca7 100644
--- a/nova/tests/process_unittest.py
+++ b/nova/tests/process_unittest.py
@@ -1,17 +1,22 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
import logging
from xml.etree import ElementTree
diff --git a/nova/tests/real_flags.py b/nova/tests/real_flags.py
index 68fe8dc5b..b30987f1e 100644
--- a/nova/tests/real_flags.py
+++ b/nova/tests/real_flags.py
@@ -1,16 +1,22 @@
-# Copyright [2010] [Anso Labs, LLC]
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
from nova import flags
diff --git a/nova/tests/storage_unittest.py b/nova/tests/storage_unittest.py
index 31966d2d5..36fcc6f19 100644
--- a/nova/tests/storage_unittest.py
+++ b/nova/tests/storage_unittest.py
@@ -1,28 +1,24 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
import logging
-import StringIO
-import time
-import unittest
-from xml.etree import ElementTree
-
-from nova import vendor
-import mox
-from tornado import ioloop
-from twisted.internet import defer
from nova import exception
from nova import flags
@@ -41,46 +37,77 @@ class StorageTestCase(test.TrialTestCase):
self.mynode = node.Node()
self.mystorage = None
self.flags(fake_libvirt=True,
- fake_storage=True,
- redis_db=8)
- if FLAGS.fake_storage:
- self.mystorage = storage.FakeBlockStore()
- else:
- self.mystorage = storage.BlockStore()
-
- @test.skip_if_fake
+ fake_storage=True)
+ self.mystorage = storage.BlockStore()
+
def test_run_create_volume(self):
vol_size = '0'
user_id = 'fake'
- volume_id = self.mystorage.create_volume(vol_size, user_id)
- # rv = self.mystorage.describe_volumes()
-
- # Volumes have to be sorted by timestamp in order to work here...
+ project_id = 'fake'
+ volume_id = self.mystorage.create_volume(vol_size, user_id, project_id)
# TODO(termie): get_volume returns differently than create_volume
self.assertEqual(volume_id,
- self.mystorage.get_volume(volume_id)['volume_id'])
+ storage.get_volume(volume_id)['volume_id'])
rv = self.mystorage.delete_volume(volume_id)
self.assertRaises(exception.Error,
- self.mystorage.get_volume,
+ storage.get_volume,
volume_id)
- @test.skip_if_fake
+ def test_too_big_volume(self):
+ vol_size = '1001'
+ user_id = 'fake'
+ project_id = 'fake'
+ self.assertRaises(TypeError,
+ self.mystorage.create_volume,
+ vol_size, user_id, project_id)
+
+ def test_too_many_volumes(self):
+ vol_size = '1'
+ user_id = 'fake'
+ project_id = 'fake'
+ num_shelves = FLAGS.last_shelf_id - FLAGS.first_shelf_id + 1
+ total_slots = FLAGS.slots_per_shelf * num_shelves
+ for i in xrange(total_slots):
+ self.mystorage.create_volume(vol_size, user_id, project_id)
+ self.assertRaises(storage.NoMoreVolumes,
+ self.mystorage.create_volume,
+ vol_size, user_id, project_id)
+
def test_run_attach_detach_volume(self):
# Create one volume and one node to test with
instance_id = "storage-test"
- # TODO(joshua) - Redo this test, can't make fake instances this way any more
- # rv = self.mynode.run_instance(instance_id)
vol_size = "5"
user_id = "fake"
- volume_id = self.mystorage.create_volume(vol_size, user_id)
- rv = self.mystorage.attach_volume(volume_id,
+ project_id = 'fake'
+ mountpoint = "/dev/sdf"
+ volume_id = self.mystorage.create_volume(vol_size, user_id, project_id)
+
+ volume_obj = storage.get_volume(volume_id)
+ volume_obj.start_attach(instance_id, mountpoint)
+ rv = yield self.mynode.attach_volume(volume_id,
instance_id,
- "/dev/sdf")
- volume_obj = self.mystorage.get_volume(volume_id)
- self.assertEqual(volume_obj['status'], "attached")
- # TODO(???): assert that it's attached to the right instance
+ mountpoint)
+ self.assertEqual(volume_obj['status'], "in-use")
+ self.assertEqual(volume_obj['attachStatus'], "attached")
+ self.assertEqual(volume_obj['instance_id'], instance_id)
+ self.assertEqual(volume_obj['mountpoint'], mountpoint)
- rv = self.mystorage.detach_volume(volume_id)
- volume_obj = self.mystorage.get_volume(volume_id)
+ self.assertRaises(exception.Error,
+ self.mystorage.delete_volume,
+ volume_id)
+
+ rv = yield self.mystorage.detach_volume(volume_id)
+ volume_obj = storage.get_volume(volume_id)
self.assertEqual(volume_obj['status'], "available")
+
+ rv = self.mystorage.delete_volume(volume_id)
+ self.assertRaises(exception.Error,
+ storage.get_volume,
+ volume_id)
+
+ def test_multi_node(self):
+ # TODO(termie): Figure out how to test with two nodes,
+ # each of them having a different FLAG for storage_node
+ # This will allow us to test cross-node interactions
+ pass
diff --git a/nova/tests/users_unittest.py b/nova/tests/users_unittest.py
index ff34b8957..a31ad4d7a 100644
--- a/nova/tests/users_unittest.py
+++ b/nova/tests/users_unittest.py
@@ -1,17 +1,22 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
import logging
import unittest
@@ -31,11 +36,11 @@ FLAGS = flags.FLAGS
class UserTestCase(test.BaseTestCase):
+ flush_db = False
def setUp(self):
super(UserTestCase, self).setUp()
self.flags(fake_libvirt=True,
- fake_storage=True,
- redis_db=8)
+ fake_storage=True)
self.users = users.UserManager.instance()
def test_001_can_create_users(self):
@@ -97,8 +102,19 @@ class UserTestCase(test.BaseTestCase):
def test_010_can_list_users(self):
users = self.users.get_users()
+ logging.warn(users)
self.assertTrue(filter(lambda u: u.id == 'test1', users))
+ def test_101_can_add_user_role(self):
+ self.assertFalse(self.users.has_role('test1', 'itsec'))
+ self.users.add_role('test1', 'itsec')
+ self.assertTrue(self.users.has_role('test1', 'itsec'))
+
+ def test_199_can_remove_user_role(self):
+ self.assertTrue(self.users.has_role('test1', 'itsec'))
+ self.users.remove_role('test1', 'itsec')
+ self.assertFalse(self.users.has_role('test1', 'itsec'))
+
def test_201_can_create_project(self):
project = self.users.create_project('testproj', 'test1', 'A test project', ['test1'])
self.assertTrue(filter(lambda p: p.name == 'testproj', self.users.get_projects()))
@@ -151,6 +167,33 @@ class UserTestCase(test.BaseTestCase):
else:
self.assertFalse(signed_cert.verify(cloud_cert.get_pubkey()))
+ def test_210_can_add_project_role(self):
+ project = self.users.get_project('testproj')
+ self.assertFalse(project.has_role('test1', 'sysadmin'))
+ self.users.add_role('test1', 'sysadmin')
+ self.assertFalse(project.has_role('test1', 'sysadmin'))
+ project.add_role('test1', 'sysadmin')
+ self.assertTrue(project.has_role('test1', 'sysadmin'))
+
+ def test_211_can_remove_project_role(self):
+ project = self.users.get_project('testproj')
+ self.assertTrue(project.has_role('test1', 'sysadmin'))
+ project.remove_role('test1', 'sysadmin')
+ self.assertFalse(project.has_role('test1', 'sysadmin'))
+ self.users.remove_role('test1', 'sysadmin')
+ self.assertFalse(project.has_role('test1', 'sysadmin'))
+
+ def test_212_vpn_ip_and_port_looks_valid(self):
+ project = self.users.get_project('testproj')
+ self.assert_(project.vpn_ip)
+ self.assert_(project.vpn_port >= FLAGS.vpn_start_port)
+ self.assert_(project.vpn_port <= FLAGS.vpn_end_port)
+
+ def test_213_too_many_vpns(self):
+ for i in xrange(users.Vpn.num_ports_for_ip(FLAGS.vpn_ip)):
+ users.Vpn.create("vpnuser%s" % i)
+ self.assertRaises(users.NoMorePorts, users.Vpn.create, "boom")
+
def test_299_can_delete_project(self):
self.users.delete_project('testproj')
self.assertFalse(filter(lambda p: p.name == 'testproj', self.users.get_projects()))
diff --git a/nova/tests/validator_unittest.py b/nova/tests/validator_unittest.py
new file mode 100644
index 000000000..eea1beccb
--- /dev/null
+++ b/nova/tests/validator_unittest.py
@@ -0,0 +1,45 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+import unittest
+
+from nova import vendor
+
+from nova import flags
+from nova import test
+from nova import validate
+
+
+class ValidationTestCase(test.TrialTestCase):
+ def setUp(self):
+ super(ValidationTestCase, self).setUp()
+
+ def tearDown(self):
+ super(ValidationTestCase, self).tearDown()
+
+ def test_type_validation(self):
+ self.assertTrue(type_case("foo", 5, 1))
+ self.assertRaises(TypeError, type_case, "bar", "5", 1)
+ self.assertRaises(TypeError, type_case, None, 5, 1)
+
+@validate.typetest(instanceid=str, size=int, number_of_instances=int)
+def type_case(instanceid, size, number_of_instances):
+ return True
diff --git a/nova/twistd.py b/nova/twistd.py
index ea3c9c168..44a19e9dc 100644
--- a/nova/twistd.py
+++ b/nova/twistd.py
@@ -1,17 +1,22 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
Twisted daemon helpers, specifically to parse out gFlags from twisted flags,
diff --git a/nova/utils.py b/nova/utils.py
index 4acd23101..2982b5480 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -1,29 +1,35 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
System-level utilities and helper functions.
"""
+import inspect
import logging
+import os
+import random
+import subprocess
import socket
import sys
-import os.path
-import inspect
-import subprocess
-import random
+from datetime import datetime
from nova import flags
@@ -41,11 +47,12 @@ def fetchfile(url, target):
# fp.close()
execute("curl %s -o %s" % (url, target))
-
-def execute(cmd, input=None):
- #logging.debug("Running %s" % (cmd))
+def execute(cmd, input=None, addl_env=None):
+ env = os.environ.copy()
+ if addl_env:
+ env.update(addl_env)
obj = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
result = None
if input != None:
result = obj.communicate(input)
@@ -109,3 +116,8 @@ def get_my_ip():
(addr, port) = csock.getsockname()
csock.close()
return addr
+
+def isotime(at=None):
+ if not at:
+ at = datetime.utcnow()
+ return at.strftime("%Y-%m-%dT%H:%M:%SZ")
diff --git a/nova/validate.py b/nova/validate.py
new file mode 100644
index 000000000..820546b73
--- /dev/null
+++ b/nova/validate.py
@@ -0,0 +1,88 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+ Decorators for argument validation, courtesy of
+ http://rmi.net/~lutz/rangetest.html
+"""
+
+def rangetest(**argchecks): # validate ranges for both+defaults
+ def onDecorator(func): # onCall remembers func and argchecks
+ import sys
+ code = func.__code__ if sys.version_info[0] == 3 else func.func_code
+ allargs = code.co_varnames[:code.co_argcount]
+ funcname = func.__name__
+
+ def onCall(*pargs, **kargs):
+ # all pargs match first N args by position
+ # the rest must be in kargs or omitted defaults
+ positionals = list(allargs)
+ positionals = positionals[:len(pargs)]
+
+ for (argname, (low, high)) in argchecks.items():
+ # for all args to be checked
+ if argname in kargs:
+ # was passed by name
+ if float(kargs[argname]) < low or float(kargs[argname]) > high:
+ errmsg = '{0} argument "{1}" not in {2}..{3}'
+ errmsg = errmsg.format(funcname, argname, low, high)
+ raise TypeError(errmsg)
+
+ elif argname in positionals:
+ # was passed by position
+ position = positionals.index(argname)
+ if float(pargs[position]) < low or float(pargs[position]) > high:
+ errmsg = '{0} argument "{1}" with value of {4} not in {2}..{3}'
+ errmsg = errmsg.format(funcname, argname, low, high, pargs[position])
+ raise TypeError(errmsg)
+ else:
+ pass
+
+ return func(*pargs, **kargs) # okay: run original call
+ return onCall
+ return onDecorator
+
+def typetest(**argchecks):
+ def onDecorator(func):
+ import sys
+ code = func.__code__ if sys.version_info[0] == 3 else func.func_code
+ allargs = code.co_varnames[:code.co_argcount]
+ funcname = func.__name__
+
+ def onCall(*pargs, **kargs):
+ positionals = list(allargs)[:len(pargs)]
+ for (argname, typeof) in argchecks.items():
+ if argname in kargs:
+ if not isinstance(kargs[argname], typeof):
+ errmsg = '{0} argument "{1}" not of type {2}'
+ errmsg = errmsg.format(funcname, argname, typeof)
+ raise TypeError(errmsg)
+ elif argname in positionals:
+ position = positionals.index(argname)
+ if not isinstance(pargs[position], typeof):
+ errmsg = '{0} argument "{1}" with value of {2} not of type {3}'
+ errmsg = errmsg.format(funcname, argname, pargs[position], typeof)
+ raise TypeError(errmsg)
+ else:
+ pass
+ return func(*pargs, **kargs)
+ return onCall
+ return onDecorator
+
diff --git a/nova/vendor.py b/nova/vendor.py
index 758adeb3c..77bfe4be5 100644
--- a/nova/vendor.py
+++ b/nova/vendor.py
@@ -1,17 +1,22 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
Get our vendor folders into the system path.
diff --git a/nova/volume/__init__.py b/nova/volume/__init__.py
index 1c569f383..ad2181525 100644
--- a/nova/volume/__init__.py
+++ b/nova/volume/__init__.py
@@ -1,16 +1,22 @@
-# Copyright [2010] [Anso Labs, LLC]
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
:mod:`nova.volume` -- Nova Block Storage
diff --git a/nova/volume/storage.py b/nova/volume/storage.py
index cf64b995f..288ab76ba 100644
--- a/nova/volume/storage.py
+++ b/nova/volume/storage.py
@@ -1,17 +1,22 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
Nova Storage manages creating, attaching, detaching, and
@@ -21,9 +26,10 @@ Currently uses Ata-over-Ethernet.
import glob
import logging
-import random
+import os
import socket
-import subprocess
+import shutil
+import tempfile
import time
from nova import vendor
@@ -33,8 +39,8 @@ from twisted.internet import defer
from nova import datastore
from nova import exception
from nova import flags
-from nova import rpc
from nova import utils
+from nova import validate
FLAGS = flags.FLAGS
@@ -47,92 +53,101 @@ flags.DEFINE_string('aoe_eth_dev', 'eth0',
flags.DEFINE_string('storage_name',
socket.gethostname(),
'name of this node')
-flags.DEFINE_integer('shelf_id',
- utils.last_octet(utils.get_my_ip()),
- 'AoE shelf_id for this node')
+flags.DEFINE_integer('first_shelf_id',
+ utils.last_octet(utils.get_my_ip()) * 10,
+ 'AoE starting shelf_id for this node')
+flags.DEFINE_integer('last_shelf_id',
+ utils.last_octet(utils.get_my_ip()) * 10 + 9,
+                     'AoE last shelf_id for this node')
+flags.DEFINE_string('aoe_export_dir',
+ '/var/lib/vblade-persist/vblades',
+ 'AoE directory where exports are created')
+flags.DEFINE_integer('slots_per_shelf',
+ 16,
+ 'Number of AoE slots per shelf')
flags.DEFINE_string('storage_availability_zone',
'nova',
'availability zone of this node')
flags.DEFINE_boolean('fake_storage', False,
'Should we make real storage volumes to attach?')
+
+class NoMoreVolumes(exception.Error):
+ pass
+
+def get_volume(volume_id):
+ """ Returns a redis-backed volume object """
+ volume_class = Volume
+ if FLAGS.fake_storage:
+ volume_class = FakeVolume
+ if datastore.Redis.instance().sismember('volumes', volume_id):
+ return volume_class(volume_id=volume_id)
+ raise exception.Error("Volume does not exist")
+
class BlockStore(object):
+ """
+ There is one BlockStore running on each volume node.
+ However, each BlockStore can report on the state of
+ *all* volumes in the cluster.
+ """
def __init__(self):
super(BlockStore, self).__init__()
self.volume_class = Volume
if FLAGS.fake_storage:
+ FLAGS.aoe_export_dir = tempfile.mkdtemp()
self.volume_class = FakeVolume
self._init_volume_group()
- self.keeper = datastore.Keeper('storage-')
+
+ def __del__(self):
+ if FLAGS.fake_storage:
+ shutil.rmtree(FLAGS.aoe_export_dir)
def report_state(self):
#TODO: aggregate the state of the system
pass
- def create_volume(self, size, user_id):
+ @validate.rangetest(size=(0, 1000))
+ def create_volume(self, size, user_id, project_id):
"""
Creates an exported volume (fake or real),
restarts exports to make it available.
Volume at this point has size, owner, and zone.
"""
logging.debug("Creating volume of size: %s" % (size))
- vol = self.volume_class.create(size, user_id)
- self.keeper.set_add('volumes', vol['volume_id'])
+ vol = self.volume_class.create(size, user_id, project_id)
+ datastore.Redis.instance().sadd('volumes', vol['volume_id'])
+ datastore.Redis.instance().sadd('volumes:%s' % (FLAGS.storage_name), vol['volume_id'])
self._restart_exports()
return vol['volume_id']
- def get_volume(self, volume_id):
- """ Returns a redis-backed volume object """
- if self.keeper.set_is_member('volumes', volume_id):
- return self.volume_class(volume_id=volume_id)
- raise exception.Error("Volume does not exist")
-
- def by_project(self, project):
- """ returns a list of volume objects for a project """
- # TODO(termie): I don't understand why this is doing a range
- #for volume_id in datastore.Redis.instance().lrange("project:%s:volumes" %
- #project, 0, -1):
- for volume_id in datastore['project:%s:volumes' % project]:
- yield self.volume_class(volume_id=volume_id)
-
def by_node(self, node_id):
""" returns a list of volumes for a node """
- for volume in self.all:
- if volume['node_name'] == node_id:
- yield volume
+ for volume_id in datastore.Redis.instance().smembers('volumes:%s' % (node_id)):
+ yield self.volume_class(volume_id=volume_id)
@property
def all(self):
""" returns a list of all volumes """
- for volume_id in self.keeper['volumes']:
+ for volume_id in datastore.Redis.instance().smembers('volumes'):
yield self.volume_class(volume_id=volume_id)
-
def delete_volume(self, volume_id):
logging.debug("Deleting volume with id of: %s" % (volume_id))
- vol = self.get_volume(volume_id)
+ vol = get_volume(volume_id)
+ if vol['status'] == "attached":
+ raise exception.Error("Volume is still attached")
+ if vol['node_name'] != FLAGS.storage_name:
+ raise exception.Error("Volume is not local to this node")
vol.destroy()
- self.keeper.set_remove('volumes', vol['volume_id'])
+ datastore.Redis.instance().srem('volumes', vol['volume_id'])
+ datastore.Redis.instance().srem('volumes:%s' % (FLAGS.storage_name), vol['volume_id'])
return True
- def attach_volume(self, volume_id, instance_id, mountpoint):
- self.volume_class(volume_id).attach(instance_id, mountpoint)
-
- def detach_volume(self, volume_id):
- self.volume_class(volume_id).detach()
-
- def loop_volumes(self):
- volumes = subprocess.Popen(["sudo", "lvs", "--noheadings"], stdout=subprocess.PIPE).communicate()[0].split("\n")
- for lv in volumes:
- if len(lv.split(" ")) > 1:
- yield lv.split(" ")[2]
-
def _restart_exports(self):
if FLAGS.fake_storage:
return
utils.runthis("Setting exports to auto: %s", "sudo vblade-persist auto all")
utils.runthis("Starting all exports: %s", "sudo vblade-persist start all")
- utils.runthis("Discovering AOE devices: %s", "sudo aoe-discover")
def _init_volume_group(self):
if FLAGS.fake_storage:
@@ -140,59 +155,68 @@ class BlockStore(object):
utils.runthis("PVCreate returned: %s", "sudo pvcreate %s" % (FLAGS.storage_dev))
utils.runthis("VGCreate returned: %s", "sudo vgcreate %s %s" % (FLAGS.volume_group, FLAGS.storage_dev))
-
-class FakeBlockStore(BlockStore):
- def __init__(self):
- super(FakeBlockStore, self).__init__()
-
- def loop_volumes(self):
- return self.volumes
-
- def _init_volume_group(self):
- pass
-
- def _restart_exports(self):
- pass
-
-
-class Volume(datastore.RedisModel):
-
- object_type = 'volume'
+class Volume(datastore.BasicModel):
def __init__(self, volume_id=None):
self.volume_id = volume_id
- super(Volume, self).__init__(object_id=volume_id)
+ super(Volume, self).__init__()
+
+    @property
+    def identifier(self):
+        # Bug fix: property must return the id, otherwise it is always None.
+        return self.volume_id
+
+ def default_state(self):
+ return {"volume_id": self.volume_id}
@classmethod
- def create(cls, size, user_id):
+ def create(cls, size, user_id, project_id):
volume_id = utils.generate_uid('vol')
- vol = cls(volume_id=volume_id)
- #TODO(vish): do we really need to store the volume id as .object_id .volume_id and ['volume_id']?
- vol['volume_id'] = volume_id
+ vol = cls(volume_id)
vol['node_name'] = FLAGS.storage_name
vol['size'] = size
vol['user_id'] = user_id
+ vol['project_id'] = project_id
vol['availability_zone'] = FLAGS.storage_availability_zone
vol["instance_id"] = 'none'
vol["mountpoint"] = 'none'
- vol["create_time"] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
- vol["attachment_set"] = ''
+ vol['attach_time'] = 'none'
+ vol['status'] = "creating" # creating | available | in-use
+ vol['attach_status'] = "detached" # attaching | attached | detaching | detached
+ vol['delete_on_termination'] = 'False'
+ vol.save()
vol.create_lv()
- vol.setup_export()
+ vol._setup_export()
+ # TODO(joshua) - We need to trigger a fanout message for aoe-discover on all the nodes
+ # TODO(joshua
vol['status'] = "available"
vol.save()
return vol
- def attach(self, instance_id, mountpoint):
+ def start_attach(self, instance_id, mountpoint):
+ """ """
self['instance_id'] = instance_id
self['mountpoint'] = mountpoint
- self['status'] = "attached"
+ self['status'] = "in-use"
+ self['attach_status'] = "attaching"
+ self['attach_time'] = utils.isotime()
+ self['delete_on_termination'] = 'False'
self.save()
- def detach(self):
+ def finish_attach(self):
+ """ """
+ self['attach_status'] = "attached"
+ self.save()
+
+ def start_detach(self):
+ """ """
+ self['attach_status'] = "detaching"
+ self.save()
+
+ def finish_detach(self):
self['instance_id'] = None
self['mountpoint'] = None
self['status'] = "available"
+ self['attach_status'] = "detached"
self.save()
def destroy(self):
@@ -208,33 +232,41 @@ class Volume(datastore.RedisModel):
sizestr = '100M'
else:
sizestr = '%sG' % self['size']
- utils.runthis("Creating LV: %s", "sudo lvcreate -L %s -n %s %s" % (sizestr, self.volume_id, FLAGS.volume_group))
+ utils.runthis("Creating LV: %s", "sudo lvcreate -L %s -n %s %s" % (sizestr, self['volume_id'], FLAGS.volume_group))
def _delete_lv(self):
- utils.runthis("Removing LV: %s", "sudo lvremove -f %s/%s" % (FLAGS.volume_group, self.volume_id))
+ utils.runthis("Removing LV: %s", "sudo lvremove -f %s/%s" % (FLAGS.volume_group, self['volume_id']))
- def setup_export(self):
+ def _setup_export(self):
(shelf_id, blade_id) = get_next_aoe_numbers()
self['aoe_device'] = "e%s.%s" % (shelf_id, blade_id)
+ self['shelf_id'] = shelf_id
+ self['blade_id'] = blade_id
self.save()
+ self._exec_export()
+
+ def _exec_export(self):
utils.runthis("Creating AOE export: %s",
"sudo vblade-persist setup %s %s %s /dev/%s/%s" %
- (shelf_id, blade_id, FLAGS.aoe_eth_dev, FLAGS.volume_group, self.volume_id))
+ (self['shelf_id'],
+ self['blade_id'],
+ FLAGS.aoe_eth_dev,
+ FLAGS.volume_group,
+ self['volume_id']))
def _remove_export(self):
- utils.runthis("Destroyed AOE export: %s", "sudo vblade-persist stop %s %s" % (self.aoe_device[1], self.aoe_device[3]))
- utils.runthis("Destroyed AOE export: %s", "sudo vblade-persist destroy %s %s" % (self.aoe_device[1], self.aoe_device[3]))
+ utils.runthis("Stopped AOE export: %s", "sudo vblade-persist stop %s %s" % (self['shelf_id'], self['blade_id']))
+ utils.runthis("Destroyed AOE export: %s", "sudo vblade-persist destroy %s %s" % (self['shelf_id'], self['blade_id']))
class FakeVolume(Volume):
def create_lv(self):
pass
- def setup_export(self):
- # TODO(???): This may not be good enough?
- self['aoe_device'] = 'e%s.%s' % (FLAGS.shelf_id,
- ''.join([random.choice('0123456789') for x in xrange(3)]))
- self.save()
+ def _exec_export(self):
+ fname = os.path.join(FLAGS.aoe_export_dir, self['aoe_device'])
+ f = file(fname, "w")
+ f.close()
def _remove_export(self):
pass
@@ -243,9 +275,13 @@ class FakeVolume(Volume):
pass
def get_next_aoe_numbers():
- aoes = glob.glob("/var/lib/vblade-persist/vblades/e*")
- aoes.extend(['e0.0'])
- blade_id = int(max([int(a.split('.')[1]) for a in aoes])) + 1
- logging.debug("Next blade_id is %s" % (blade_id))
- shelf_id = FLAGS.shelf_id
- return (shelf_id, blade_id)
+ for shelf_id in xrange(FLAGS.first_shelf_id, FLAGS.last_shelf_id + 1):
+ aoes = glob.glob("%s/e%s.*" % (FLAGS.aoe_export_dir, shelf_id))
+ if not aoes:
+ blade_id = 0
+ else:
+ blade_id = int(max([int(a.rpartition('.')[2]) for a in aoes])) + 1
+ if blade_id < FLAGS.slots_per_shelf:
+ logging.debug("Next shelf.blade is %s.%s", shelf_id, blade_id)
+ return (shelf_id, blade_id)
+ raise NoMoreVolumes()
diff --git a/run_tests.py b/run_tests.py
index f80f0af16..d6f68f830 100644
--- a/run_tests.py
+++ b/run_tests.py
@@ -1,17 +1,22 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright [2010] [Anso Labs, LLC]
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""
This is our basic test running framework based on Twisted's Trial.
@@ -41,23 +46,27 @@ import sys
from nova import vendor
from twisted.scripts import trial as trial_script
+from nova import datastore
from nova import flags
from nova import twistd
+from nova.tests.access_unittest import *
from nova.tests.api_unittest import *
from nova.tests.cloud_unittest import *
-from nova.tests.keeper_unittest import *
+from nova.tests.model_unittest import *
from nova.tests.network_unittest import *
from nova.tests.node_unittest import *
from nova.tests.objectstore_unittest import *
from nova.tests.process_unittest import *
from nova.tests.storage_unittest import *
from nova.tests.users_unittest import *
-from nova.tests.datastore_unittest import *
+from nova.tests.validator_unittest import *
FLAGS = flags.FLAGS
+flags.DEFINE_bool('flush_db', True,
+ 'Flush the database before running fake tests')
if __name__ == '__main__':
OptionsClass = twistd.WrapTwistedOptions(trial_script.Options)
@@ -69,6 +78,12 @@ if __name__ == '__main__':
# TODO(termie): these should make a call instead of doing work on import
if FLAGS.fake_tests:
from nova.tests.fake_flags import *
+ # use db 8 for fake tests
+ FLAGS.redis_db = 8
+ if FLAGS.flush_db:
+ logging.info("Flushing redis datastore")
+ r = datastore.Redis.instance()
+ r.flushdb()
else:
from nova.tests.real_flags import *
diff --git a/setup.py b/setup.py
index a25ae0c8c..95d0ee7d1 100644
--- a/setup.py
+++ b/setup.py
@@ -1,17 +1,22 @@
-#!/usr/bin/env python
-# Copyright [2010] [Anso Labs, LLC]
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
import glob
import os
diff --git a/smoketests/__init__.py b/smoketests/__init__.py
new file mode 100644
index 000000000..ddd6919a0
--- /dev/null
+++ b/smoketests/__init__.py
@@ -0,0 +1,33 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+:mod:`smoketests` -- Nova Integration "Smoke" Tests
+=====================================================
+
+.. automodule:: nova.volume
+ :platform: Unix
+.. moduleauthor:: Jesse Andrews <jesse@ansolabs.com>
+.. moduleauthor:: Devin Carlen <devin.carlen@gmail.com>
+.. moduleauthor:: Vishvananda Ishaya <vishvananda@yahoo.com>
+.. moduleauthor:: Joshua McKenty <joshua@cognition.ca>
+.. moduleauthor:: Manish Singh <yosh@gimp.org>
+.. moduleauthor:: Andy Smith <andy@anarkystic.com>
+""" \ No newline at end of file
diff --git a/smoketests/flags.py b/smoketests/flags.py
new file mode 100644
index 000000000..f239c5f40
--- /dev/null
+++ b/smoketests/flags.py
@@ -0,0 +1,47 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Package-level global flags are defined here, the rest are defined
+where they're used.
+"""
+
+from nova import vendor
+from gflags import *
+
+# This keeps pylint from barfing on the imports
+FLAGS = FLAGS
+DEFINE_string = DEFINE_string
+DEFINE_integer = DEFINE_integer
+DEFINE_bool = DEFINE_bool
+
+# __GLOBAL FLAGS ONLY__
+# Define any app-specific flags in their own files, docs at:
+# http://code.google.com/p/python-gflags/source/browse/trunk/gflags.py#39
+DEFINE_string('admin_access_key', 'admin', 'Access key for admin user')
+DEFINE_string('admin_secret_key', 'admin', 'Secret key for admin user')
+DEFINE_string('clc_ip', '127.0.0.1', 'IP of cloud controller API')
+DEFINE_string('bundle_kernel', 'openwrt-x86-vmlinuz',
+ 'Local kernel file to use for bundling tests')
+DEFINE_string('bundle_image', 'openwrt-x86-ext2.image',
+ 'Local image file to use for bundling tests')
+#DEFINE_string('vpn_image_id', 'ami-CLOUDPIPE',
+# 'AMI for cloudpipe vpn server')
+
diff --git a/smoketests/novatestcase.py b/smoketests/novatestcase.py
new file mode 100644
index 000000000..c19ef781b
--- /dev/null
+++ b/smoketests/novatestcase.py
@@ -0,0 +1,132 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import commands
+import os
+import random
+import sys
+import unittest
+
+from nova import vendor
+import paramiko
+
+from nova import adminclient
+from smoketests import flags
+
+FLAGS = flags.FLAGS
+
+
+class NovaTestCase(unittest.TestCase):
+ def setUp(self):
+ self.nova_admin = adminclient.NovaAdminClient(
+ access_key=FLAGS.admin_access_key,
+ secret_key=FLAGS.admin_secret_key,
+ clc_ip=FLAGS.clc_ip)
+
+ def tearDown(self):
+ pass
+
+ def connect_ssh(self, ip, key_name):
+ # TODO(devcamcar): set a more reasonable connection timeout time
+ key = paramiko.RSAKey.from_private_key_file('/tmp/%s.pem' % key_name)
+ client = paramiko.SSHClient()
+ client.load_system_host_keys()
+ client.set_missing_host_key_policy(paramiko.WarningPolicy())
+ client.connect(ip, username='root', pkey=key)
+ stdin, stdout, stderr = client.exec_command('uptime')
+ print 'uptime: ', stdout.read()
+ return client
+
+ def can_ping(self, ip):
+ return commands.getstatusoutput('ping -c 1 %s' % ip)[0] == 0
+
+ @property
+ def admin(self):
+ return self.nova_admin.connection_for('admin')
+
+ def connection_for(self, username):
+ return self.nova_admin.connection_for(username)
+
+ def create_user(self, username):
+ return self.nova_admin.create_user(username)
+
+ def get_user(self, username):
+ return self.nova_admin.get_user(username)
+
+ def delete_user(self, username):
+ return self.nova_admin.delete_user(username)
+
+ def get_signed_zip(self, username):
+ return self.nova_admin.get_zip(username)
+
+ def create_key_pair(self, conn, key_name):
+ try:
+ os.remove('/tmp/%s.pem' % key_name)
+ except:
+ pass
+ key = conn.create_key_pair(key_name)
+ key.save('/tmp/')
+ return key
+
+ def delete_key_pair(self, conn, key_name):
+ conn.delete_key_pair(key_name)
+ try:
+ os.remove('/tmp/%s.pem' % key_name)
+ except:
+ pass
+
+ def bundle_image(self, image, kernel=False):
+ cmd = 'euca-bundle-image -i %s' % image
+ if kernel:
+ cmd += ' --kernel true'
+ status, output = commands.getstatusoutput(cmd)
+ if status != 0:
+ print '%s -> \n %s' % (cmd, output)
+ raise Exception(output)
+ return True
+
+ def upload_image(self, bucket_name, image):
+ cmd = 'euca-upload-bundle -b %s -m /tmp/%s.manifest.xml' % (bucket_name, image)
+ status, output = commands.getstatusoutput(cmd)
+ if status != 0:
+ print '%s -> \n %s' % (cmd, output)
+ raise Exception(output)
+ return True
+
+ def delete_bundle_bucket(self, bucket_name):
+ cmd = 'euca-delete-bundle --clear -b %s' % (bucket_name)
+ status, output = commands.getstatusoutput(cmd)
+ if status != 0:
+ print '%s -> \n%s' % (cmd, output)
+ raise Exception(output)
+ return True
+
+ def register_image(self, bucket_name, manifest):
+ conn = self.nova_admin.connection_for('admin')
+ return conn.register_image("%s/%s.manifest.xml" % (bucket_name, manifest))
+
+ def setUp_test_image(self, image, kernel=False):
+ self.bundle_image(image, kernel=kernel)
+ bucket = "auto_test_%s" % int(random.random() * 1000000)
+ self.upload_image(bucket, image)
+ return self.register_image(bucket, image)
+
+ def tearDown_test_image(self, conn, image_id):
+ conn.deregister_image(image_id)
diff --git a/smoketests/openwrt-x86-ext2.image b/smoketests/openwrt-x86-ext2.image
new file mode 100644
index 000000000..cd2dfa426
--- /dev/null
+++ b/smoketests/openwrt-x86-ext2.image
Binary files differ
diff --git a/smoketests/openwrt-x86-vmlinuz b/smoketests/openwrt-x86-vmlinuz
new file mode 100644
index 000000000..59cc9bb1f
--- /dev/null
+++ b/smoketests/openwrt-x86-vmlinuz
Binary files differ
diff --git a/smoketests/smoketest.py b/smoketests/smoketest.py
new file mode 100644
index 000000000..b752d814a
--- /dev/null
+++ b/smoketests/smoketest.py
@@ -0,0 +1,568 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import commands
+import os
+import random
+import re
+import sys
+import time
+import unittest
+import zipfile
+
+from nova import vendor
+import paramiko
+
+from smoketests import flags
+from smoketests import novatestcase
+
+SUITE_NAMES = '[user, image, security, public_network, volume]'
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('suite', None, 'Specific test suite to run ' + SUITE_NAMES)
+
+# TODO(devamcar): Use random tempfile
+ZIP_FILENAME = '/tmp/nova-me-x509.zip'
+
+data = {}
+
+test_prefix = 'test%s' % int(random.random()*1000000)
+test_username = '%suser' % test_prefix
+test_bucket = '%s_bucket' % test_prefix
+test_key = '%s_key' % test_prefix
+
+# Test admin credentials and user creation
+class UserTests(novatestcase.NovaTestCase):
+ def test_001_admin_can_connect(self):
+ conn = self.connection_for('admin')
+ self.assert_(conn)
+
+ def test_002_admin_can_create_user(self):
+ userinfo = self.create_user(test_username)
+ self.assertEqual(userinfo.username, test_username)
+
+ def test_003_user_can_download_credentials(self):
+ buf = self.get_signed_zip(test_username)
+ output = open(ZIP_FILENAME, 'w')
+ output.write(buf)
+ output.close()
+
+ zip = zipfile.ZipFile(ZIP_FILENAME, 'a', zipfile.ZIP_DEFLATED)
+ bad = zip.testzip()
+ zip.close()
+
+ self.failIf(bad)
+
+ def test_999_tearDown(self):
+ self.delete_user(test_username)
+ user = self.get_user(test_username)
+ self.assert_(user is None)
+ try:
+ os.remove(ZIP_FILENAME)
+ except:
+ pass
+
+# Test image bundling, registration, and launching
+class ImageTests(novatestcase.NovaTestCase):
+ def test_000_setUp(self):
+ self.create_user(test_username)
+
+ def test_001_admin_can_bundle_image(self):
+ self.assertTrue(self.bundle_image(FLAGS.bundle_image))
+
+ def test_002_admin_can_upload_image(self):
+ self.assertTrue(self.upload_image(test_bucket, FLAGS.bundle_image))
+
+ def test_003_admin_can_register_image(self):
+ image_id = self.register_image(test_bucket, FLAGS.bundle_image)
+ self.assert_(image_id is not None)
+ data['image_id'] = image_id
+
+ def test_004_admin_can_bundle_kernel(self):
+ self.assertTrue(self.bundle_image(FLAGS.bundle_kernel, kernel=True))
+
+ def test_005_admin_can_upload_kernel(self):
+ self.assertTrue(self.upload_image(test_bucket, FLAGS.bundle_kernel))
+
+ def test_006_admin_can_register_kernel(self):
+ # FIXME(devcamcar): registration should verify that bucket/manifest
+ # exists before returning successfully.
+ kernel_id = self.register_image(test_bucket, FLAGS.bundle_kernel)
+ self.assert_(kernel_id is not None)
+ data['kernel_id'] = kernel_id
+
+ def test_007_admin_images_are_available_within_10_seconds(self):
+ for i in xrange(10):
+ image = self.admin.get_image(data['image_id'])
+ if image and image.state == 'available':
+ break
+ time.sleep(1)
+ else:
+ print image.state
+ self.assert_(False) # wasn't available within 10 seconds
+ self.assert_(image.type == 'machine')
+
+ for i in xrange(10):
+ kernel = self.admin.get_image(data['kernel_id'])
+ if kernel and kernel.state == 'available':
+ break
+ time.sleep(1)
+ else:
+ self.assert_(False) # wasn't available within 10 seconds
+ self.assert_(kernel.type == 'kernel')
+
+ def test_008_admin_can_describe_image_attribute(self):
+ attrs = self.admin.get_image_attribute(data['image_id'],
+ 'launchPermission')
+ self.assert_(attrs.name, 'launch_permission')
+
+ def test_009_me_cannot_see_non_public_images(self):
+ conn = self.connection_for(test_username)
+ images = conn.get_all_images(image_ids=[data['image_id']])
+ self.assertEqual(len(images), 0)
+
+ def test_010_admin_can_modify_image_launch_permission(self):
+ conn = self.connection_for(test_username)
+
+ self.admin.modify_image_attribute(image_id=data['image_id'],
+ operation='add',
+ attribute='launchPermission',
+ groups='all')
+
+ image = conn.get_image(data['image_id'])
+ self.assertEqual(image.id, data['image_id'])
+
+ def test_011_me_can_list_public_images(self):
+ conn = self.connection_for(test_username)
+ images = conn.get_all_images(image_ids=[data['image_id']])
+ self.assertEqual(len(images), 1)
+ pass
+
+ def test_012_me_can_see_launch_permission(self):
+ attrs = self.admin.get_image_attribute(data['image_id'],
+ 'launchPermission')
+ self.assert_(attrs.name, 'launch_permission')
+ self.assert_(attrs.groups[0], 'all')
+
+ # FIXME: add tests that user can launch image
+
+# def test_013_user_can_launch_admin_public_image(self):
+# # TODO: Use openwrt kernel instead of default kernel
+# conn = self.connection_for(test_username)
+# reservation = conn.run_instances(data['image_id'])
+# self.assertEqual(len(reservation.instances), 1)
+# data['my_instance_id'] = reservation.instances[0].id
+
+# def test_014_instances_launch_within_30_seconds(self):
+# pass
+
+# def test_015_user_can_terminate(self):
+# conn = self.connection_for(test_username)
+# terminated = conn.terminate_instances(
+# instance_ids=[data['my_instance_id']])
+# self.assertEqual(len(terminated), 1)
+
+ def test_016_admin_can_deregister_kernel(self):
+ self.assertTrue(self.admin.deregister_image(data['kernel_id']))
+
+ def test_017_admin_can_deregister_image(self):
+ self.assertTrue(self.admin.deregister_image(data['image_id']))
+
+ def test_018_admin_can_delete_bundle(self):
+ self.assertTrue(self.delete_bundle_bucket(test_bucket))
+
+ def test_999_tearDown(self):
+ data = {}
+ self.delete_user(test_username)
+
+
+# Test key pairs and security groups
+class SecurityTests(novatestcase.NovaTestCase):
+ def test_000_setUp(self):
+ self.create_user(test_username + '_me')
+ self.create_user(test_username + '_you')
+ data['image_id'] = 'ami-tiny'
+
+ def test_001_me_can_create_keypair(self):
+ conn = self.connection_for(test_username + '_me')
+ key = self.create_key_pair(conn, test_key)
+ self.assertEqual(key.name, test_key)
+
+ def test_002_you_can_create_keypair(self):
+ conn = self.connection_for(test_username + '_you')
+ key = self.create_key_pair(conn, test_key+ 'yourkey')
+ self.assertEqual(key.name, test_key+'yourkey')
+
+ def test_003_me_can_create_instance_with_keypair(self):
+ conn = self.connection_for(test_username + '_me')
+ reservation = conn.run_instances(data['image_id'], key_name=test_key)
+ self.assertEqual(len(reservation.instances), 1)
+ data['my_instance_id'] = reservation.instances[0].id
+
+ def test_004_me_can_obtain_private_ip_within_60_seconds(self):
+ conn = self.connection_for(test_username + '_me')
+ reservations = conn.get_all_instances([data['my_instance_id']])
+ instance = reservations[0].instances[0]
+ # allow 60 seconds to exit pending with IP
+ for x in xrange(60):
+ instance.update()
+ if instance.state != u'pending':
+ break
+ time.sleep(1)
+ else:
+ self.assert_(False)
+ # self.assertEqual(instance.state, u'running')
+ ip = reservations[0].instances[0].private_dns_name
+ self.failIf(ip == '0.0.0.0')
+ data['my_private_ip'] = ip
+ print data['my_private_ip'],
+
+ def test_005_can_ping_private_ip(self):
+ for x in xrange(120):
+ # ping waits for 1 second
+ status, output = commands.getstatusoutput(
+ 'ping -c1 -w1 %s' % data['my_private_ip'])
+ if status == 0:
+ break
+ else:
+ self.fail('could not ping instance')
+ #def test_005_me_cannot_ssh_when_unauthorized(self):
+ # self.assertRaises(paramiko.SSHException, self.connect_ssh,
+ # data['my_private_ip'], 'mykey')
+
+ #def test_006_me_can_authorize_ssh(self):
+ # conn = self.connection_for(test_username + '_me')
+ # self.assertTrue(
+ # conn.authorize_security_group(
+ # 'default',
+ # ip_protocol='tcp',
+ # from_port=22,
+ # to_port=22,
+ # cidr_ip='0.0.0.0/0'
+ # )
+ # )
+
+ def test_007_me_can_ssh_when_authorized(self):
+ conn = self.connect_ssh(data['my_private_ip'], test_key)
+ conn.close()
+
+ #def test_008_me_can_revoke_ssh_authorization(self):
+ # conn = self.connection_for('me')
+ # self.assertTrue(
+ # conn.revoke_security_group(
+ # 'default',
+ # ip_protocol='tcp',
+ # from_port=22,
+ # to_port=22,
+ # cidr_ip='0.0.0.0/0'
+ # )
+ # )
+
+ #def test_009_you_cannot_ping_my_instance(self):
+ # TODO: should ping my_private_ip from with an instance started by you.
+ #self.assertFalse(self.can_ping(data['my_private_ip']))
+
+ def test_010_you_cannot_ssh_to_my_instance(self):
+ try:
+ conn = self.connect_ssh(data['my_private_ip'],
+ test_key + 'yourkey')
+ conn.close()
+ except paramiko.SSHException:
+ pass
+ else:
+ self.fail("expected SSHException")
+
+ def test_999_tearDown(self):
+ conn = self.connection_for(test_username + '_me')
+ self.delete_key_pair(conn, test_key)
+ if data.has_key('my_instance_id'):
+ conn.terminate_instances([data['my_instance_id']])
+
+ conn = self.connection_for(test_username + '_you')
+ self.delete_key_pair(conn, test_key + 'yourkey')
+
+ conn = self.connection_for('admin')
+ self.delete_user(test_username + '_me')
+ self.delete_user(test_username + '_you')
+ #self.tearDown_test_image(conn, data['image_id'])
+
+# TODO: verify wrt image boots
+# build python into wrt image
+# build boto/m2crypto into wrt image
+# build euca2ools into wrt image
+# build a script to download and unpack credentials
+# - return "ok" to stdout for comparison in self.assertEqual()
+# build a script to bundle the instance
+# build a script to upload the bundle
+
+# status, output = commands.getstatusoutput('cmd')
+# if status == 0:
+# print 'ok'
+# else:
+# print output
+
+# Testing rebundling
+class RebundlingTests(novatestcase.NovaTestCase):
+ def test_000_setUp(self):
+ self.create_user('me')
+ self.create_user('you')
+ # TODO: create keypair for me
+ # upload smoketest img
+ # run instance
+
+ def test_001_me_can_download_credentials_within_instance(self):
+ conn = self.connect_ssh(data['my_private_ip'], 'mykey')
+ stdin, stdout, stderr = conn.exec_command(
+ 'python ~/smoketests/install-credentials.py')
+ output = stdout.read().strip(); conn.close()
+ self.assertEqual(output, 'ok')
+
+ def test_002_me_can_rebundle_within_instance(self):
+ conn = self.connect_ssh(data['my_private_ip'], 'mykey')
+ stdin, stdout, stderr = conn.exec_command(
+ 'python ~/smoketests/rebundle-instance.py')
+ output = stdout.read().strip(); conn.close()
+ self.assertEqual(output, 'ok')
+
+ def test_003_me_can_upload_image_within_instance(self):
+ conn = self.connect_ssh(data['my_private_ip'], 'mykey')
+ stdin, stdout, stderr = conn.exec_command(
+ 'python ~/smoketests/upload-bundle.py')
+ output = stdout.read().strip(); conn.close()
+ self.assertEqual(output, 'ok')
+
+ def test_004_me_can_register_image_within_instance(self):
+ conn = self.connect_ssh(data['my_private_ip'], 'mykey')
+ stdin, stdout, stderr = conn.exec_command(
+ 'python ~/smoketests/register-image.py')
+ output = stdout.read().strip(); conn.close()
+ if re.match(r'ami-\w+', output):
+ data['my_image_id'] = output
+ else:
+ self.fail('expected ami-nnnnnn, got:\n ' + output)
+
+ def test_005_you_cannot_see_my_private_image(self):
+ conn = self.connection_for('you')
+ image = conn.get_image(data['my_image_id'])
+ self.assertEqual(image, None)
+
+ def test_006_me_can_make_image_public(self):
+ conn = self.connection_for('me')
+ conn.modify_image_attribute(image_id=data['my_image_id'],
+ operation='add',
+ attribute='launchPermission',
+ groups='all')
+
+ def test_007_you_can_see_my_public_image(self):
+ conn = self.connection_for('you')
+ image = conn.get_image(data['my_image_id'])
+ self.assertEqual(image.id, data['my_image_id'])
+
+ def test_999_tearDown(self):
+ self.delete_user('me')
+ self.delete_user('you')
+
+ #if data.has_key('image_id'):
+ # deregister rebundled image
+
+ # TODO: tear down instance
+ # delete keypairs
+ data = {}
+
+# Test elastic IPs
+class ElasticIPTests(novatestcase.NovaTestCase):
+ def test_000_setUp(self):
+ data['image_id'] = 'ami-tiny'
+
+ self.create_user('me')
+ conn = self.connection_for('me')
+ self.create_key_pair(conn, 'mykey')
+
+ conn = self.connection_for('admin')
+ #data['image_id'] = self.setUp_test_image(FLAGS.bundle_image)
+
+ def test_001_me_can_launch_image_with_keypair(self):
+ conn = self.connection_for('me')
+ reservation = conn.run_instances(data['image_id'], key_name='mykey')
+ self.assertEqual(len(reservation.instances), 1)
+ data['my_instance_id'] = reservation.instances[0].id
+
+ def test_002_me_can_allocate_elastic_ip(self):
+ conn = self.connection_for('me')
+ data['my_public_ip'] = conn.allocate_address()
+ self.assert_(data['my_public_ip'].public_ip)
+
+ def test_003_me_can_associate_ip_with_instance(self):
+ self.assertTrue(data['my_public_ip'].associate(data['my_instance_id']))
+
+ def test_004_me_can_ssh_with_public_ip(self):
+ conn = self.connect_ssh(data['my_public_ip'].public_ip, 'mykey')
+ conn.close()
+
+ def test_005_me_can_disassociate_ip_from_instance(self):
+ self.assertTrue(data['my_public_ip'].disassociate())
+
+ def test_006_me_can_deallocate_elastic_ip(self):
+ self.assertTrue(data['my_public_ip'].delete())
+
+ def test_999_tearDown(self):
+ conn = self.connection_for('me')
+ self.delete_key_pair(conn, 'mykey')
+
+ conn = self.connection_for('admin')
+ #self.tearDown_test_image(conn, data['image_id'])
+ data = {}
+
+ZONE = 'nova'
+DEVICE = 'vdb'
+# Test iscsi volumes
+class VolumeTests(novatestcase.NovaTestCase):
+ def test_000_setUp(self):
+ self.create_user(test_username)
+ data['image_id'] = 'ami-tiny' # A7370FE3
+
+ conn = self.connection_for(test_username)
+ self.create_key_pair(conn, test_key)
+ reservation = conn.run_instances(data['image_id'],
+ instance_type='m1.tiny',
+ key_name=test_key)
+ data['instance_id'] = reservation.instances[0].id
+ data['private_ip'] = reservation.instances[0].private_dns_name
+ # wait for instance to show up
+ for x in xrange(120):
+ # ping waits for 1 second
+ status, output = commands.getstatusoutput(
+ 'ping -c1 -w1 %s' % data['private_ip'])
+ if status == 0:
+ break
+ else:
+ self.fail('unable to ping instance')
+
+ def test_001_me_can_create_volume(self):
+ conn = self.connection_for(test_username)
+ volume = conn.create_volume(1, ZONE)
+ self.assertEqual(volume.size, 1)
+ data['volume_id'] = volume.id
+ # give network time to find volume
+ time.sleep(5)
+
+ def test_002_me_can_attach_volume(self):
+ conn = self.connection_for(test_username)
+ conn.attach_volume(
+ volume_id = data['volume_id'],
+ instance_id = data['instance_id'],
+ device = '/dev/%s' % DEVICE
+ )
+ # give instance time to recognize volume
+ time.sleep(5)
+
+ def test_003_me_can_mount_volume(self):
+ conn = self.connect_ssh(data['private_ip'], test_key)
+ # FIXME(devcamcar): the tiny image doesn't create the node properly
+ # this will make /dev/vd* if it doesn't exist
+ stdin, stdout, stderr = conn.exec_command(
+ ('grep %s /proc/partitions |' % DEVICE) + \
+ '`awk \'{print "mknod /dev/"$4" b "$1" "$2}\'`')
+ commands = []
+ commands.append('mkdir -p /mnt/vol')
+ commands.append('mkfs.ext2 /dev/%s' % DEVICE)
+ commands.append('mount /dev/%s /mnt/vol' % DEVICE)
+ commands.append('echo success')
+ stdin, stdout, stderr = conn.exec_command(' && '.join(commands))
+ out = stdout.read()
+ conn.close()
+ if not out.strip().endswith('success'):
+ self.fail('Unable to mount: %s %s' % (out, stderr.read()))
+
+ def test_004_me_can_write_to_volume(self):
+ conn = self.connect_ssh(data['private_ip'], test_key)
+ # FIXME(devcamcar): This doesn't fail if the volume hasn't been mounted
+ stdin, stdout, stderr = conn.exec_command(
+ 'echo hello > /mnt/vol/test.txt')
+ err = stderr.read()
+ conn.close()
+ if len(err) > 0:
+ self.fail('Unable to write to mount: %s' % (err))
+
+ def test_005_volume_is_correct_size(self):
+ conn = self.connect_ssh(data['private_ip'], test_key)
+ stdin, stdout, stderr = conn.exec_command(
+ "df -h | grep %s | awk {'print $2'}" % DEVICE)
+ out = stdout.read()
+ conn.close()
+ if not out.strip() == '1007.9M':
+ self.fail('Volume is not the right size: %s %s' % (out, stderr.read()))
+
+ def test_006_me_can_umount_volume(self):
+ conn = self.connect_ssh(data['private_ip'], test_key)
+ stdin, stdout, stderr = conn.exec_command('umount /mnt/vol')
+ err = stderr.read()
+ conn.close()
+ if len(err) > 0:
+ self.fail('Unable to unmount: %s' % (err))
+
+ def test_007_me_can_detach_volume(self):
+ conn = self.connection_for(test_username)
+ self.assertTrue(conn.detach_volume(volume_id = data['volume_id']))
+
+ def test_008_me_can_delete_volume(self):
+ conn = self.connection_for(test_username)
+ self.assertTrue(conn.delete_volume(data['volume_id']))
+
+ def test_009_volume_size_must_be_int(self):
+ conn = self.connection_for(test_username)
+ self.assertRaises(Exception, conn.create_volume, 'foo', ZONE)
+
+ def test_999_tearDown(self):
+ global data
+ conn = self.connection_for(test_username)
+ self.delete_key_pair(conn, test_key)
+ if data.has_key('instance_id'):
+ conn.terminate_instances([data['instance_id']])
+ self.delete_user(test_username)
+ data = {}
+
+def build_suites():
+ return {
+ 'user': unittest.makeSuite(UserTests),
+ 'image': unittest.makeSuite(ImageTests),
+ 'security': unittest.makeSuite(SecurityTests),
+ 'public_network': unittest.makeSuite(ElasticIPTests),
+ 'volume': unittest.makeSuite(VolumeTests),
+ }
+
+def main():
+ argv = FLAGS(sys.argv)
+ suites = build_suites()
+
+ if FLAGS.suite:
+ try:
+ suite = suites[FLAGS.suite]
+ except KeyError:
+ print >> sys.stderr, 'Available test suites:', SUITE_NAMES
+ return 1
+
+ unittest.TextTestRunner(verbosity=2).run(suite)
+ else:
+ for suite in suites.itervalues():
+ unittest.TextTestRunner(verbosity=2).run(suite)
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/tools/clean-vlans b/tools/clean-vlans
new file mode 100755
index 000000000..c345de912
--- /dev/null
+++ b/tools/clean-vlans
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+sudo ifconfig -a | grep br | grep -v bridge | cut -f1 -d" " | xargs -n1 -ifoo ifconfig foo down
+sudo ifconfig -a | grep br | grep -v bridge | cut -f1 -d" " | xargs -n1 -ifoo brctl delbr foo
+sudo ifconfig -a | grep vlan | grep -v vlan124 | grep -v vlan5 | cut -f1 -d" " | xargs -n1 -ifoo ifconfig foo down
+sudo ifconfig -a | grep vlan | grep -v vlan124 | grep -v vlan5 | cut -f1 -d" " | xargs -n1 -ifoo vconfig rem foo
diff --git a/vendor/Twisted-10.0.0/twisted/internet/_sigchld.c b/vendor/Twisted-10.0.0/twisted/internet/_sigchld.c
new file mode 100644
index 000000000..660182bd2
--- /dev/null
+++ b/vendor/Twisted-10.0.0/twisted/internet/_sigchld.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2010 Twisted Matrix Laboratories.
+ * See LICENSE for details.
+ */
+
+#include <signal.h>
+#include <errno.h>
+
+#include "Python.h"
+
+static int sigchld_pipe_fd = -1;
+
+static void got_signal(int sig) {
+ int saved_errno = errno;
+ int ignored_result;
+
+ /* write() errors are unhandled. If the buffer is full, we don't
+ * care. What about other errors? */
+ ignored_result = write(sigchld_pipe_fd, "x", 1);
+
+ errno = saved_errno;
+}
+
+PyDoc_STRVAR(install_sigchld_handler_doc, "\
+install_sigchld_handler(fd)\n\
+\n\
+Installs a SIGCHLD handler which will write a byte to the given fd\n\
+whenever a SIGCHLD occurs. This is done in C code because the python\n\
+signal handling system is not reliable, and additionally cannot\n\
+specify SA_RESTART.\n\
+\n\
+Please ensure fd is in non-blocking mode.\n\
+");
+
+static PyObject *
+install_sigchld_handler(PyObject *self, PyObject *args) {
+ int fd, old_fd;
+ struct sigaction sa;
+
+ if (!PyArg_ParseTuple(args, "i:install_sigchld_handler", &fd)) {
+ return NULL;
+ }
+ old_fd = sigchld_pipe_fd;
+ sigchld_pipe_fd = fd;
+
+ if (fd == -1) {
+ sa.sa_handler = SIG_DFL;
+ } else {
+ sa.sa_handler = got_signal;
+ sa.sa_flags = SA_RESTART;
+ /* mask all signals so I don't worry about EINTR from the write. */
+ sigfillset(&sa.sa_mask);
+ }
+ if (sigaction(SIGCHLD, &sa, 0) != 0) {
+ sigchld_pipe_fd = old_fd;
+ return PyErr_SetFromErrno(PyExc_OSError);
+ }
+ return PyLong_FromLong(old_fd);
+}
+
+PyDoc_STRVAR(is_default_handler_doc, "\
+Return 1 if the SIGCHLD handler is SIG_DFL, 0 otherwise.\n\
+");
+
+static PyObject *
+is_default_handler(PyObject *self, PyObject *args) {
+ /*
+ * This implementation is necessary since the install_sigchld_handler
+ * function above bypasses the Python signal handler installation API, so
+ * CPython doesn't notice that the handler has changed and signal.getsignal
+ * won't return an accurate result.
+ */
+ struct sigaction sa;
+
+ if (sigaction(SIGCHLD, NULL, &sa) != 0) {
+ return PyErr_SetFromErrno(PyExc_OSError);
+ }
+
+ return PyLong_FromLong(sa.sa_handler == SIG_DFL);
+}
+
+static PyMethodDef sigchld_methods[] = {
+ {"installHandler", install_sigchld_handler, METH_VARARGS,
+ install_sigchld_handler_doc},
+ {"isDefaultHandler", is_default_handler, METH_NOARGS,
+ is_default_handler_doc},
+ /* sentinel */
+ {NULL, NULL, 0, NULL}
+};
+
+
+static const char _sigchld_doc[] = "\n\
+This module contains an API for receiving SIGCHLD via a file descriptor.\n\
+";
+
+PyMODINIT_FUNC
+init_sigchld(void) {
+ /* Create the module and add the functions */
+ Py_InitModule3(
+ "twisted.internet._sigchld", sigchld_methods, _sigchld_doc);
+}
diff --git a/vendor/Twisted-10.0.0/twisted/internet/_signals.py b/vendor/Twisted-10.0.0/twisted/internet/_signals.py
new file mode 100644
index 000000000..faf2e2445
--- /dev/null
+++ b/vendor/Twisted-10.0.0/twisted/internet/_signals.py
@@ -0,0 +1,184 @@
+# -*- test-case-name: twisted.test.test_process,twisted.internet.test.test_process -*-
+# Copyright (c) 2010 Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+This module provides a uniform interface to the several mechanisms which are
+possibly available for dealing with signals.
+
+This module is used to integrate child process termination into a
+reactor event loop. This is a challenging feature to provide because
+most platforms indicate process termination via SIGCHLD and do not
+provide a way to wait for that signal and arbitrary I/O events at the
+same time. The naive implementation involves installing a Python
+SIGCHLD handler; unfortunately this leads to other syscalls being
+interrupted (whenever SIGCHLD is received) and failing with EINTR
+(which almost no one is prepared to handle). This interruption can be
+disabled via siginterrupt(2) (or one of the equivalent mechanisms);
+however, if the SIGCHLD is delivered by the platform to a non-main
+thread (not a common occurrence, but difficult to prove impossible),
+the main thread (waiting on select() or another event notification
+API) may not wake up leading to an arbitrary delay before the child
+termination is noticed.
+
+The basic solution to all these issues involves enabling SA_RESTART
+(ie, disabling system call interruption) and registering a C signal
+handler which writes a byte to a pipe. The other end of the pipe is
+registered with the event loop, allowing it to wake up shortly after
+SIGCHLD is received. See L{twisted.internet.posixbase._SIGCHLDWaker}
+for the implementation of the event loop side of this solution. The
+use of a pipe this way is known as the U{self-pipe
+trick<http://cr.yp.to/docs/selfpipe.html>}.
+
+The actual solution implemented in this module depends on the version
+of Python. From version 2.6, C{signal.siginterrupt} and
+C{signal.set_wakeup_fd} allow the necessary C signal handler which
+writes to the pipe to be registered with C{SA_RESTART}. Prior to 2.6,
+the L{twisted.internet._sigchld} extension module provides similar
+functionality.
+
+If neither of these is available, a Python signal handler is used
+instead. This is essentially the naive solution mentioned above and
+has the problems described there.
+"""
+
+import os
+
+try:
+ from signal import set_wakeup_fd, siginterrupt
+except ImportError:
+ set_wakeup_fd = siginterrupt = None
+
+try:
+ import signal
+except ImportError:
+ signal = None
+
+from twisted.python.log import msg
+
+try:
+ from twisted.internet._sigchld import installHandler as _extInstallHandler, \
+ isDefaultHandler as _extIsDefaultHandler
+except ImportError:
+ _extInstallHandler = _extIsDefaultHandler = None
+
+
+class _Handler(object):
+ """
+ L{_Handler} is a signal handler which writes a byte to a file descriptor
+ whenever it is invoked.
+
+ @ivar fd: The file descriptor to which to write. If this is C{None},
+ nothing will be written.
+ """
+ def __init__(self, fd):
+ self.fd = fd
+
+
+ def __call__(self, *args):
+ """
+ L{_Handler.__call__} is the signal handler. It will write a byte to
+ the wrapped file descriptor, if there is one.
+ """
+ if self.fd is not None:
+ try:
+ os.write(self.fd, '\0')
+ except:
+ pass
+
+
+
+def _installHandlerUsingSignal(fd):
+ """
+ Install a signal handler which will write a byte to C{fd} when
+ I{SIGCHLD} is received.
+
+ This is implemented by creating an instance of L{_Handler} with C{fd}
+ and installing it as the signal handler.
+
+ @param fd: The file descriptor to which to write when I{SIGCHLD} is
+ received.
+ @type fd: C{int}
+ """
+ if fd == -1:
+ previous = signal.signal(signal.SIGCHLD, signal.SIG_DFL)
+ else:
+ previous = signal.signal(signal.SIGCHLD, _Handler(fd))
+ if isinstance(previous, _Handler):
+ return previous.fd
+ return -1
+
+
+
+def _installHandlerUsingSetWakeup(fd):
+ """
+ Install a signal handler which will write a byte to C{fd} when
+ I{SIGCHLD} is received.
+
+ This is implemented by installing an instance of L{_Handler} wrapped
+ around C{None}, setting the I{SIGCHLD} handler as not allowed to
+ interrupt system calls, and using L{signal.set_wakeup_fd} to do the
+ actual writing.
+
+ @param fd: The file descriptor to which to write when I{SIGCHLD} is
+ received.
+ @type fd: C{int}
+ """
+ if fd == -1:
+ signal.signal(signal.SIGCHLD, signal.SIG_DFL)
+ else:
+ signal.signal(signal.SIGCHLD, _Handler(None))
+ siginterrupt(signal.SIGCHLD, False)
+ return set_wakeup_fd(fd)
+
+
+
+def _isDefaultHandler():
+ """
+ Determine whether the I{SIGCHLD} handler is the default or not.
+ """
+ return signal.getsignal(signal.SIGCHLD) == signal.SIG_DFL
+
+
+
+def _cannotInstallHandler(fd):
+ """
+ Fail to install a signal handler for I{SIGCHLD}.
+
+ This implementation is used when the supporting code for the other
+ implementations is unavailable (on Python versions 2.5 and older where
+ neither the L{twisted.internet._sigchld} extension nor the standard
+ L{signal} module is available).
+
+ @param fd: Ignored; only for compatibility with the other
+ implementations of this interface.
+
+ @raise RuntimeError: Always raised to indicate no I{SIGCHLD} handler can
+ be installed.
+ """
+ raise RuntimeError("Cannot install a SIGCHLD handler")
+
+
+
+def _cannotDetermineDefault():
+ raise RuntimeError("No usable signal API available")
+
+
+
+if set_wakeup_fd is not None:
+ msg('using set_wakeup_fd')
+ installHandler = _installHandlerUsingSetWakeup
+ isDefaultHandler = _isDefaultHandler
+elif _extInstallHandler is not None:
+ msg('using _sigchld')
+ installHandler = _extInstallHandler
+ isDefaultHandler = _extIsDefaultHandler
+elif signal is not None:
+ msg('using signal module')
+ installHandler = _installHandlerUsingSignal
+ isDefaultHandler = _isDefaultHandler
+else:
+ msg('nothing unavailable')
+ installHandler = _cannotInstallHandler
+ isDefaultHandler = _cannotDetermineDefault
+
diff --git a/vendor/Twisted-10.0.0/twisted/internet/base.py b/vendor/Twisted-10.0.0/twisted/internet/base.py
index 7513f6ddd..370de7605 100644
--- a/vendor/Twisted-10.0.0/twisted/internet/base.py
+++ b/vendor/Twisted-10.0.0/twisted/internet/base.py
@@ -1,5 +1,5 @@
# -*- test-case-name: twisted.test.test_internet -*-
-# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
+# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
@@ -22,7 +22,7 @@ from twisted.internet.interfaces import IResolverSimple, IReactorPluggableResolv
from twisted.internet.interfaces import IConnector, IDelayedCall
from twisted.internet import fdesc, main, error, abstract, defer, threads
from twisted.python import log, failure, reflect
-from twisted.python.runtime import seconds as runtimeSeconds, platform, platformType
+from twisted.python.runtime import seconds as runtimeSeconds, platform
from twisted.internet.defer import Deferred, DeferredList
from twisted.persisted import styles
@@ -468,6 +468,7 @@ class ReactorBase(object):
if platform.supportsThreads():
self._initThreads()
+ self.installWaker()
# override in subclasses
@@ -889,7 +890,6 @@ class ReactorBase(object):
def _initThreads(self):
self.usingThreads = True
self.resolver = ThreadedResolver(self)
- self.installWaker()
def callFromThread(self, f, *args, **kw):
"""
@@ -914,6 +914,9 @@ class ReactorBase(object):
self.threadpoolShutdownID = self.addSystemEventTrigger(
'during', 'shutdown', self._stopThreadPool)
+ def _uninstallHandler(self):
+ pass
+
def _stopThreadPool(self):
"""
Stop the reactor threadpool. This method is only valid if there
@@ -1109,31 +1112,6 @@ class _SignalReactorMixin:
if hasattr(signal, "SIGBREAK"):
signal.signal(signal.SIGBREAK, self.sigBreak)
- if platformType == 'posix':
- signal.signal(signal.SIGCHLD, self._handleSigchld)
- # Also call the signal handler right now, in case we missed any
- # signals before we installed it. This should only happen if
- # someone used spawnProcess before calling reactor.run (and the
- # process also exited already).
- self._handleSigchld(signal.SIGCHLD, None)
-
-
- def _handleSigchld(self, signum, frame, _threadSupport=platform.supportsThreads()):
- """
- Reap all processes on SIGCHLD.
-
- This gets called on SIGCHLD. We do no processing inside a signal
- handler, as the calls we make here could occur between any two
- python bytecode instructions. Deferring processing to the next
- eventloop round prevents us from violating the state constraints
- of arbitrary classes.
- """
- from twisted.internet.process import reapAllProcesses
- if _threadSupport:
- self.callFromThread(reapAllProcesses)
- else:
- self.callLater(0, reapAllProcesses)
-
def startRunning(self, installSignalHandlers=True):
"""
diff --git a/vendor/Twisted-10.0.0/twisted/internet/gtk2reactor.py b/vendor/Twisted-10.0.0/twisted/internet/gtk2reactor.py
index bd979bcac..52796d429 100644
--- a/vendor/Twisted-10.0.0/twisted/internet/gtk2reactor.py
+++ b/vendor/Twisted-10.0.0/twisted/internet/gtk2reactor.py
@@ -1,4 +1,4 @@
-# -*- test-case-name: twisted.internet.test.test_gtk2reactor -*-
+# -*- test-case-name: twisted.internet.test -*-
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
@@ -21,8 +21,10 @@ integration.
"""
# System Imports
-import sys
+import sys, signal
+
from zope.interface import implements
+
try:
if not hasattr(sys, 'frozen'):
# Don't want to check this for py2exe
@@ -41,7 +43,7 @@ if hasattr(gobject, "threads_init"):
from twisted.python import log, runtime, failure
from twisted.python.compat import set
from twisted.internet.interfaces import IReactorFDSet
-from twisted.internet import main, posixbase, error, selectreactor
+from twisted.internet import main, base, posixbase, error, selectreactor
POLL_DISCONNECTED = gobject.IO_HUP | gobject.IO_ERR | gobject.IO_NVAL
@@ -104,6 +106,25 @@ class Gtk2Reactor(posixbase.PosixReactorBase):
self.__crash = _our_mainquit
self.__run = gtk.main
+
+ if runtime.platformType == 'posix':
+ def _handleSignals(self):
+ # Let the base class do its thing, but pygtk is probably
+ # going to stomp on us so go beyond that and set up some
+ # signal handling which pygtk won't mess with. This would
+ # be better done by letting this reactor select a
+ # different implementation of installHandler for
+ # _SIGCHLDWaker to use. Then, at least, we could fall
+ # back to our extension module. See #4286.
+ from twisted.internet.process import reapAllProcesses as _reapAllProcesses
+ base._SignalReactorMixin._handleSignals(self)
+ signal.signal(signal.SIGCHLD, lambda *a: self.callFromThread(_reapAllProcesses))
+ if getattr(signal, "siginterrupt", None) is not None:
+ signal.siginterrupt(signal.SIGCHLD, False)
+ # Like the base, reap processes now in case a process
+ # exited before the handlers above were installed.
+ _reapAllProcesses()
+
# The input_add function in pygtk1 checks for objects with a
# 'fileno' method and, if present, uses the result of that method
# as the input source. The pygtk2 input_add does not do this. The
diff --git a/vendor/Twisted-10.0.0/twisted/internet/posixbase.py b/vendor/Twisted-10.0.0/twisted/internet/posixbase.py
index b410a4ee5..6ab044842 100644
--- a/vendor/Twisted-10.0.0/twisted/internet/posixbase.py
+++ b/vendor/Twisted-10.0.0/twisted/internet/posixbase.py
@@ -41,8 +41,7 @@ except ImportError:
processEnabled = False
if platformType == 'posix':
- from twisted.internet import fdesc
- import process
+ from twisted.internet import fdesc, process, _signals
processEnabled = True
if platform.isWindows():
@@ -103,10 +102,19 @@ class _SocketWaker(log.Logger, styles.Ephemeral):
-class _PipeWaker(log.Logger, styles.Ephemeral):
+class _FDWaker(object, log.Logger, styles.Ephemeral):
"""
The I{self-pipe trick<http://cr.yp.to/docs/selfpipe.html>}, used to wake
up the main loop from another thread or a signal handler.
+
+ L{_FDWaker} is a base class for waker implementations based on
+ writing to a pipe being monitored by the reactor.
+
+ @ivar o: The file descriptor for the end of the pipe which can be
+ written to in order to wake up a reactor monitoring this waker.
+
+ @ivar i: The file descriptor which should be monitored in order to
+ be awoken by this waker.
"""
disconnected = 0
@@ -124,22 +132,13 @@ class _PipeWaker(log.Logger, styles.Ephemeral):
fdesc._setCloseOnExec(self.o)
self.fileno = lambda: self.i
+
def doRead(self):
- """Read some bytes from the pipe.
+ """
+ Read some bytes from the pipe and discard them.
"""
fdesc.readFromFD(self.fileno(), lambda data: None)
- def wakeUp(self):
- """Write one byte to the pipe, and flush it.
- """
- # We don't use fdesc.writeToFD since we need to distinguish
- # between EINTR (try again) and EAGAIN (do nothing).
- if self.o is not None:
- try:
- util.untilConcludes(os.write, self.o, 'x')
- except OSError, e:
- if e.errno != errno.EAGAIN:
- raise
def connectionLost(self, reason):
"""Close both ends of my pipe.
@@ -154,25 +153,85 @@ class _PipeWaker(log.Logger, styles.Ephemeral):
del self.i, self.o
+
+class _UnixWaker(_FDWaker):
+ """
+ This class provides a simple interface to wake up the event loop.
+
+ This is used by threads or signals to wake up the event loop.
+ """
+
+ def wakeUp(self):
+ """Write one byte to the pipe, and flush it.
+ """
+ # We don't use fdesc.writeToFD since we need to distinguish
+ # between EINTR (try again) and EAGAIN (do nothing).
+ if self.o is not None:
+ try:
+ util.untilConcludes(os.write, self.o, 'x')
+ except OSError, e:
+ # XXX There is no unit test for raising the exception
+ # for other errnos. See #4285.
+ if e.errno != errno.EAGAIN:
+ raise
+
+
+
if platformType == 'posix':
- _Waker = _PipeWaker
+ _Waker = _UnixWaker
else:
# Primarily Windows and Jython.
_Waker = _SocketWaker
+class _SIGCHLDWaker(_FDWaker):
+ """
+ L{_SIGCHLDWaker} can wake up a reactor whenever C{SIGCHLD} is
+ received.
+
+ @see: L{twisted.internet._signals}
+ """
+ def __init__(self, reactor):
+ _FDWaker.__init__(self, reactor)
+
+
+ def install(self):
+ """
+ Install the handler necessary to make this waker active.
+ """
+ _signals.installHandler(self.o)
+
+
+ def uninstall(self):
+ """
+ Remove the handler which makes this waker active.
+ """
+ _signals.installHandler(-1)
+
+
+ def doRead(self):
+ """
+ Having woken up the reactor in response to receipt of
+ C{SIGCHLD}, reap the process which exited.
+
+ This is called whenever the reactor notices the waker pipe is
+ writeable, which happens soon after any call to the C{wakeUp}
+ method.
+ """
+ _FDWaker.doRead(self)
+ process.reapAllProcesses()
+
+
+
class PosixReactorBase(_SignalReactorMixin, ReactorBase):
"""
A basis for reactors that use file descriptors.
+
+ @ivar _childWaker: C{None} or a reference to the L{_SIGCHLDWaker}
+ which is used to properly notice child process termination.
"""
implements(IReactorArbitrary, IReactorTCP, IReactorUDP, IReactorMulticast)
- def __init__(self):
- ReactorBase.__init__(self)
- if self.usingThreads or platformType == "posix":
- self.installWaker()
-
-
def _disconnectSelectable(self, selectable, why, isRead, faildict={
error.ConnectionDone: failure.Failure(error.ConnectionDone()),
error.ConnectionLost: failure.Failure(error.ConnectionLost())
@@ -209,6 +268,43 @@ class PosixReactorBase(_SignalReactorMixin, ReactorBase):
self.addReader(self.waker)
+ _childWaker = None
+ def _handleSignals(self):
+ """
+ Extend the basic signal handling logic to also support
+ handling SIGCHLD to know when to try to reap child processes.
+ """
+ _SignalReactorMixin._handleSignals(self)
+ if platformType == 'posix':
+ if not self._childWaker:
+ self._childWaker = _SIGCHLDWaker(self)
+ self._internalReaders.add(self._childWaker)
+ self.addReader(self._childWaker)
+ self._childWaker.install()
+ # Also reap all processes right now, in case we missed any
+ # signals before we installed the SIGCHLD waker/handler.
+ # This should only happen if someone used spawnProcess
+ # before calling reactor.run (and the process also exited
+ # already).
+ process.reapAllProcesses()
+
+ def _uninstallHandler(self):
+ """
+ If a child waker was created and installed, uninstall it now.
+
+ Since this disables reactor functionality and is only called
+ when the reactor is stopping, it doesn't provide any directly
+ useful functionality, but the cleanup of reactor-related
+ process-global state that it does helps in unit tests
+ involving multiple reactors and is generally just a nice
+ thing.
+ """
+ # XXX This would probably be an alright place to put all of
+ # the cleanup code for all internal readers (here and in the
+ # base class, anyway). See #3063 for that cleanup task.
+ if self._childWaker:
+ self._childWaker.uninstall()
+
# IReactorProcess
def spawnProcess(self, processProtocol, executable, args=(),
diff --git a/vendor/Twisted-10.0.0/twisted/internet/test/reactormixins.py b/vendor/Twisted-10.0.0/twisted/internet/test/reactormixins.py
index 2895daf25..09e6e55e9 100644
--- a/vendor/Twisted-10.0.0/twisted/internet/test/reactormixins.py
+++ b/vendor/Twisted-10.0.0/twisted/internet/test/reactormixins.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2008-2009 Twisted Matrix Laboratories.
+# Copyright (c) 2008-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
@@ -95,6 +95,7 @@ class ReactorBuilder:
# branch that fixes it.
#
# -exarkun
+ reactor._uninstallHandler()
if getattr(reactor, '_internalReaders', None) is not None:
for reader in reactor._internalReaders:
reactor.removeReader(reader)
@@ -119,10 +120,14 @@ class ReactorBuilder:
try:
reactor = self.reactorFactory()
except:
- # Unfortunately, not all errors which result in a reactor being
- # unusable are detectable without actually instantiating the
- # reactor. So we catch some more here and skip the test if
- # necessary.
+ # Unfortunately, not all errors which result in a reactor
+ # being unusable are detectable without actually
+ # instantiating the reactor. So we catch some more here
+ # and skip the test if necessary. We also log it to aid
+ # with debugging, but flush the logged error so the test
+ # doesn't fail.
+ log.err(None, "Failed to install reactor")
+ self.flushLoggedErrors()
raise SkipTest(Failure().getErrorMessage())
else:
if self.requiredInterface is not None:
diff --git a/vendor/Twisted-10.0.0/twisted/internet/test/test_process.py b/vendor/Twisted-10.0.0/twisted/internet/test/test_process.py
index fbfb788cd..06197c05d 100644
--- a/vendor/Twisted-10.0.0/twisted/internet/test/test_process.py
+++ b/vendor/Twisted-10.0.0/twisted/internet/test/test_process.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2008-2009 Twisted Matrix Laboratories.
+# Copyright (c) 2008-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
@@ -9,7 +9,7 @@ __metaclass__ = type
import os, sys, signal, threading
-from twisted.trial.unittest import TestCase
+from twisted.trial.unittest import TestCase, SkipTest
from twisted.internet.test.reactormixins import ReactorBuilder
from twisted.python.compat import set
from twisted.python.log import msg, err
@@ -22,6 +22,7 @@ from twisted.internet.protocol import ProcessProtocol
from twisted.internet.error import ProcessDone, ProcessTerminated
+
class _ShutdownCallbackProcessProtocol(ProcessProtocol):
"""
An L{IProcessProtocol} which fires a Deferred when the process it is
@@ -174,6 +175,50 @@ class ProcessTestsBuilderBase(ReactorBuilder):
self.runReactor(reactor)
+ def test_systemCallUninterruptedByChildExit(self):
+ """
+ If a child process exits while a system call is in progress, the system
+ call should not be interfered with. In particular, it should not fail
+ with EINTR.
+
+ Older versions of Twisted installed a SIGCHLD handler on POSIX without
+ using the feature exposed by the SA_RESTART flag to sigaction(2). The
+ most noticable problem this caused was for blocking reads and writes to
+ sometimes fail with EINTR.
+ """
+ reactor = self.buildReactor()
+
+ # XXX Since pygobject/pygtk wants to use signal.set_wakeup_fd,
+ # we aren't actually providing this functionality on the glib2
+ # or gtk2 reactors yet. See #4286 for the possibility of
+ # improving this.
+ skippedReactors = ["Glib2Reactor", "Gtk2Reactor"]
+ hasSigInterrupt = getattr(signal, "siginterrupt", None) is not None
+ reactorClassName = reactor.__class__.__name__
+ if reactorClassName in skippedReactors and not hasSigInterrupt:
+ raise SkipTest(
+ "%s is not supported without siginterrupt" % reactorClassName)
+
+ result = []
+
+ def f():
+ try:
+ f1 = os.popen('%s -c "import time; time.sleep(0.1)"' %
+ (sys.executable,))
+ f2 = os.popen('%s -c "import time; time.sleep(0.5); print \'Foo\'"' %
+ (sys.executable,))
+ # The read call below will blow up with an EINTR from the
+ # SIGCHLD from the first process exiting if we install a
+ # SIGCHLD handler without SA_RESTART. (which we used to do)
+ result.append(f2.read())
+ finally:
+ reactor.stop()
+
+ reactor.callWhenRunning(f)
+ self.runReactor(reactor)
+ self.assertEqual(result, ["Foo\n"])
+
+
class ProcessTestsBuilder(ProcessTestsBuilderBase):
"""
diff --git a/vendor/Twisted-10.0.0/twisted/internet/test/test_sigchld.py b/vendor/Twisted-10.0.0/twisted/internet/test/test_sigchld.py
new file mode 100644
index 000000000..b7e492127
--- /dev/null
+++ b/vendor/Twisted-10.0.0/twisted/internet/test/test_sigchld.py
@@ -0,0 +1,194 @@
+# Copyright (c) 2010 Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+Tests for L{twisted.internet._sigchld}, an alternate, superior SIGCHLD
+monitoring API.
+"""
+
+import os, signal, errno
+
+from twisted.python.log import msg
+from twisted.trial.unittest import TestCase
+from twisted.internet.fdesc import setNonBlocking
+from twisted.internet._signals import installHandler, isDefaultHandler
+from twisted.internet._signals import _extInstallHandler, _extIsDefaultHandler
+from twisted.internet._signals import _installHandlerUsingSetWakeup, \
+ _installHandlerUsingSignal, _isDefaultHandler
+
+
+class SIGCHLDTestsMixin:
+ """
+ Mixin for L{TestCase} subclasses which defines several tests for
+ I{installHandler} and I{isDefaultHandler}. Subclasses are expected to
+ define C{self.installHandler} and C{self.isDefaultHandler} to invoke the
+ implementation to be tested.
+ """
+
+ if getattr(signal, 'SIGCHLD', None) is None:
+ skip = "Platform does not have SIGCHLD"
+
+ def installHandler(self, fd):
+ """
+ Override in a subclass to install a SIGCHLD handler which writes a byte
+ to the given file descriptor. Return the previously registered file
+ descriptor.
+ """
+ raise NotImplementedError()
+
+
+ def isDefaultHandler(self):
+ """
+ Override in a subclass to determine if the current SIGCHLD handler is
+ SIG_DFL or not. Return True if it is SIG_DFL, False otherwise.
+ """
+ raise NotImplementedError()
+
+
+ def pipe(self):
+ """
+ Create a non-blocking pipe which will be closed after the currently
+ running test.
+ """
+ read, write = os.pipe()
+ self.addCleanup(os.close, read)
+ self.addCleanup(os.close, write)
+ setNonBlocking(read)
+ setNonBlocking(write)
+ return read, write
+
+
+ def setUp(self):
+ """
+ Save the current SIGCHLD handler as reported by L{signal.signal} and
+ the current file descriptor registered with L{installHandler}.
+ """
+ handler = signal.getsignal(signal.SIGCHLD)
+ if handler != signal.SIG_DFL:
+ self.signalModuleHandler = handler
+ signal.signal(signal.SIGCHLD, signal.SIG_DFL)
+ else:
+ self.signalModuleHandler = None
+
+ self.oldFD = self.installHandler(-1)
+
+ if self.signalModuleHandler is not None and self.oldFD != -1:
+ msg("SIGCHLD setup issue: %r %r" % (self.signalModuleHandler, self.oldFD))
+ raise RuntimeError("You used some signal APIs wrong! Try again.")
+
+
+ def tearDown(self):
+ """
+ Restore whatever signal handler was present when setUp ran.
+ """
+ # If tests set up any kind of handlers, clear them out.
+ self.installHandler(-1)
+ signal.signal(signal.SIGCHLD, signal.SIG_DFL)
+
+ # Now restore whatever the setup was before the test ran.
+ if self.signalModuleHandler is not None:
+ signal.signal(signal.SIGCHLD, self.signalModuleHandler)
+ elif self.oldFD != -1:
+ self.installHandler(self.oldFD)
+
+
+ def test_isDefaultHandler(self):
+ """
+ L{isDefaultHandler} returns true if the SIGCHLD handler is SIG_DFL,
+ false otherwise.
+ """
+ self.assertTrue(self.isDefaultHandler())
+ signal.signal(signal.SIGCHLD, signal.SIG_IGN)
+ self.assertFalse(self.isDefaultHandler())
+ signal.signal(signal.SIGCHLD, signal.SIG_DFL)
+ self.assertTrue(self.isDefaultHandler())
+ signal.signal(signal.SIGCHLD, lambda *args: None)
+ self.assertFalse(self.isDefaultHandler())
+
+
+ def test_returnOldFD(self):
+ """
+ L{installHandler} returns the previously registered file descriptor.
+ """
+ read, write = self.pipe()
+ oldFD = self.installHandler(write)
+ self.assertEqual(self.installHandler(oldFD), write)
+
+
+ def test_uninstallHandler(self):
+ """
+ C{installHandler(-1)} removes the SIGCHLD handler completely.
+ """
+ read, write = self.pipe()
+ self.assertTrue(self.isDefaultHandler())
+ self.installHandler(write)
+ self.assertFalse(self.isDefaultHandler())
+ self.installHandler(-1)
+ self.assertTrue(self.isDefaultHandler())
+
+
+ def test_installHandler(self):
+ """
+ The file descriptor passed to L{installHandler} has a byte written to
+ it when SIGCHLD is delivered to the process.
+ """
+ read, write = self.pipe()
+ self.installHandler(write)
+
+ exc = self.assertRaises(OSError, os.read, read, 1)
+ self.assertEqual(exc.errno, errno.EAGAIN)
+
+ os.kill(os.getpid(), signal.SIGCHLD)
+
+ self.assertEqual(len(os.read(read, 5)), 1)
+
+
+
+class DefaultSIGCHLDTests(SIGCHLDTestsMixin, TestCase):
+ """
+ Tests for whatever implementation is selected for the L{installHandler}
+ and L{isDefaultHandler} APIs.
+ """
+ installHandler = staticmethod(installHandler)
+ isDefaultHandler = staticmethod(isDefaultHandler)
+
+
+
+class ExtensionSIGCHLDTests(SIGCHLDTestsMixin, TestCase):
+ """
+ Tests for the L{twisted.internet._sigchld} implementation of the
+ L{installHandler} and L{isDefaultHandler} APIs.
+ """
+ try:
+ import twisted.internet._sigchld
+ except ImportError:
+ skip = "twisted.internet._sigchld is not available"
+
+ installHandler = _extInstallHandler
+ isDefaultHandler = _extIsDefaultHandler
+
+
+
+class SetWakeupSIGCHLDTests(SIGCHLDTestsMixin, TestCase):
+ """
+ Tests for the L{signal.set_wakeup_fd} implementation of the
+ L{installHandler} and L{isDefaultHandler} APIs.
+ """
+ # Check both of these. On Ubuntu 9.10 (to take an example completely at
+ # random), Python 2.5 has set_wakeup_fd but not siginterrupt.
+ if (getattr(signal, 'set_wakeup_fd', None) is None
+ or getattr(signal, 'siginterrupt', None) is None):
+ skip = "signal.set_wakeup_fd is not available"
+
+ installHandler = staticmethod(_installHandlerUsingSetWakeup)
+ isDefaultHandler = staticmethod(_isDefaultHandler)
+
+
+
+class PlainSignalModuleSIGCHLDTests(SIGCHLDTestsMixin, TestCase):
+ """
+ Tests for the L{signal.signal} implementation of the L{installHandler}
+ and L{isDefaultHandler} APIs.
+ """
+ installHandler = staticmethod(_installHandlerUsingSignal)
+ isDefaultHandler = staticmethod(_isDefaultHandler)
diff --git a/vendor/Twisted-10.0.0/twisted/topfiles/733.bugfix b/vendor/Twisted-10.0.0/twisted/topfiles/733.bugfix
new file mode 100644
index 000000000..22d80032c
--- /dev/null
+++ b/vendor/Twisted-10.0.0/twisted/topfiles/733.bugfix
@@ -0,0 +1,4 @@
+On POSIX platforms, reactors now support child processes in a way
+which doesn't cause other syscalls to sometimes fail with EINTR (if
+running on Python 2.6 or if Twisted's extension modules have been
+built).
diff --git a/vendor/Twisted-10.0.0/twisted/topfiles/setup.py b/vendor/Twisted-10.0.0/twisted/topfiles/setup.py
index a135c77fb..90ba24484 100644
--- a/vendor/Twisted-10.0.0/twisted/topfiles/setup.py
+++ b/vendor/Twisted-10.0.0/twisted/topfiles/setup.py
@@ -1,9 +1,7 @@
#!/usr/bin/env python
-
-# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
+# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
-
"""
Distutils installer for Twisted.
"""
@@ -47,9 +45,11 @@ extensions = [
'-framework','CoreServices',
'-framework','Carbon'],
condition=lambda builder: sys.platform == "darwin"),
-
Extension("twisted.python._initgroups",
["twisted/python/_initgroups.c"]),
+ Extension("twisted.internet._sigchld",
+ ["twisted/internet/_sigchld.c"],
+ condition=lambda builder: sys.platform != "win32"),
]
# Figure out which plugins to include: all plugins except subproject ones
diff --git a/vendor/tornado/demos/appengine/blog.py b/vendor/tornado/demos/appengine/blog.py
index ccaabd539..ee7e5b8af 100644
--- a/vendor/tornado/demos/appengine/blog.py
+++ b/vendor/tornado/demos/appengine/blog.py
@@ -2,17 +2,17 @@
#
# Copyright 2009 Facebook
#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
import functools
import markdown
diff --git a/vendor/tornado/demos/auth/authdemo.py b/vendor/tornado/demos/auth/authdemo.py
index e6136d1b5..f1b3c83af 100755
--- a/vendor/tornado/demos/auth/authdemo.py
+++ b/vendor/tornado/demos/auth/authdemo.py
@@ -2,17 +2,17 @@
#
# Copyright 2009 Facebook
#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
import tornado.auth
import tornado.escape
diff --git a/vendor/tornado/demos/blog/blog.py b/vendor/tornado/demos/blog/blog.py
index 808a9afc5..b6242af78 100755
--- a/vendor/tornado/demos/blog/blog.py
+++ b/vendor/tornado/demos/blog/blog.py
@@ -2,17 +2,17 @@
#
# Copyright 2009 Facebook
#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
import markdown
import os.path
diff --git a/vendor/tornado/demos/chat/chatdemo.py b/vendor/tornado/demos/chat/chatdemo.py
index 7086592ec..9843b0657 100755
--- a/vendor/tornado/demos/chat/chatdemo.py
+++ b/vendor/tornado/demos/chat/chatdemo.py
@@ -2,17 +2,17 @@
#
# Copyright 2009 Facebook
#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
import logging
import tornado.auth
diff --git a/vendor/tornado/demos/facebook/facebook.py b/vendor/tornado/demos/facebook/facebook.py
index 0c984ddaa..19b4d1a48 100755
--- a/vendor/tornado/demos/facebook/facebook.py
+++ b/vendor/tornado/demos/facebook/facebook.py
@@ -2,17 +2,17 @@
#
# Copyright 2009 Facebook
#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
import logging
import os.path
diff --git a/vendor/tornado/demos/facebook/uimodules.py b/vendor/tornado/demos/facebook/uimodules.py
index 1173db634..b4c65440b 100644
--- a/vendor/tornado/demos/facebook/uimodules.py
+++ b/vendor/tornado/demos/facebook/uimodules.py
@@ -2,17 +2,17 @@
#
# Copyright 2009 Facebook
#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
import tornado.web
diff --git a/vendor/tornado/demos/helloworld/helloworld.py b/vendor/tornado/demos/helloworld/helloworld.py
index 0f1ed61ff..e97fe68b3 100755
--- a/vendor/tornado/demos/helloworld/helloworld.py
+++ b/vendor/tornado/demos/helloworld/helloworld.py
@@ -2,17 +2,17 @@
#
# Copyright 2009 Facebook
#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
import tornado.httpserver
import tornado.ioloop
diff --git a/vendor/tornado/setup.py b/vendor/tornado/setup.py
index 5cb69df2d..444beb718 100644
--- a/vendor/tornado/setup.py
+++ b/vendor/tornado/setup.py
@@ -2,17 +2,17 @@
#
# Copyright 2009 Facebook
#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
import distutils.core
import sys
diff --git a/vendor/tornado/tornado/__init__.py b/vendor/tornado/tornado/__init__.py
index 8f73764eb..fdad0561d 100644
--- a/vendor/tornado/tornado/__init__.py
+++ b/vendor/tornado/tornado/__init__.py
@@ -2,16 +2,16 @@
#
# Copyright 2009 Facebook
#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""The Tornado web server and tools."""
diff --git a/vendor/tornado/tornado/auth.py b/vendor/tornado/tornado/auth.py
index f67d9e548..635bc218b 100644
--- a/vendor/tornado/tornado/auth.py
+++ b/vendor/tornado/tornado/auth.py
@@ -2,17 +2,17 @@
#
# Copyright 2009 Facebook
#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""Implementations of various third-party authentication schemes.
diff --git a/vendor/tornado/tornado/autoreload.py b/vendor/tornado/tornado/autoreload.py
index 231cfe892..1e9b38b13 100644
--- a/vendor/tornado/tornado/autoreload.py
+++ b/vendor/tornado/tornado/autoreload.py
@@ -2,17 +2,17 @@
#
# Copyright 2009 Facebook
#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""A module to automatically restart the server when a module is modified.
diff --git a/vendor/tornado/tornado/database.py b/vendor/tornado/tornado/database.py
index 3f78e00b9..d933c1671 100644
--- a/vendor/tornado/tornado/database.py
+++ b/vendor/tornado/tornado/database.py
@@ -2,17 +2,17 @@
#
# Copyright 2009 Facebook
#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""A lightweight wrapper around MySQLdb."""
diff --git a/vendor/tornado/tornado/escape.py b/vendor/tornado/tornado/escape.py
index bacb1c51d..9471e7e61 100644
--- a/vendor/tornado/tornado/escape.py
+++ b/vendor/tornado/tornado/escape.py
@@ -2,17 +2,17 @@
#
# Copyright 2009 Facebook
#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""Escaping/unescaping methods for HTML, JSON, URLs, and others."""
diff --git a/vendor/tornado/tornado/httpclient.py b/vendor/tornado/tornado/httpclient.py
index 2c9155eb9..fa4917ea3 100644
--- a/vendor/tornado/tornado/httpclient.py
+++ b/vendor/tornado/tornado/httpclient.py
@@ -2,17 +2,17 @@
#
# Copyright 2009 Facebook
#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""Blocking and non-blocking HTTP client implementations using pycurl."""
diff --git a/vendor/tornado/tornado/httpserver.py b/vendor/tornado/tornado/httpserver.py
index a7ec57eec..801c68aff 100644
--- a/vendor/tornado/tornado/httpserver.py
+++ b/vendor/tornado/tornado/httpserver.py
@@ -2,17 +2,17 @@
#
# Copyright 2009 Facebook
#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""A non-blocking, single-threaded HTTP server."""
diff --git a/vendor/tornado/tornado/ioloop.py b/vendor/tornado/tornado/ioloop.py
index e94c17372..6d502dc41 100644
--- a/vendor/tornado/tornado/ioloop.py
+++ b/vendor/tornado/tornado/ioloop.py
@@ -2,17 +2,17 @@
#
# Copyright 2009 Facebook
#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""A level-triggered I/O loop for non-blocking sockets."""
diff --git a/vendor/tornado/tornado/iostream.py b/vendor/tornado/tornado/iostream.py
index af7c6edbf..063fe76a5 100644
--- a/vendor/tornado/tornado/iostream.py
+++ b/vendor/tornado/tornado/iostream.py
@@ -2,17 +2,17 @@
#
# Copyright 2009 Facebook
#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""A utility class to write to and read from a non-blocking socket."""
diff --git a/vendor/tornado/tornado/locale.py b/vendor/tornado/tornado/locale.py
index 6a8537d75..b819f6cfe 100644
--- a/vendor/tornado/tornado/locale.py
+++ b/vendor/tornado/tornado/locale.py
@@ -2,17 +2,17 @@
#
# Copyright 2009 Facebook
#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""Translation methods for generating localized strings.
diff --git a/vendor/tornado/tornado/options.py b/vendor/tornado/tornado/options.py
index 66bce091e..c2c393615 100644
--- a/vendor/tornado/tornado/options.py
+++ b/vendor/tornado/tornado/options.py
@@ -2,17 +2,17 @@
#
# Copyright 2009 Facebook
#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""A command line parsing module that lets modules define their own options.
diff --git a/vendor/tornado/tornado/s3server.py b/vendor/tornado/tornado/s3server.py
index 2e8a97de2..bfbce65b9 100644
--- a/vendor/tornado/tornado/s3server.py
+++ b/vendor/tornado/tornado/s3server.py
@@ -2,17 +2,17 @@
#
# Copyright 2009 Facebook
#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""Implementation of an S3-like storage server based on local files.
diff --git a/vendor/tornado/tornado/template.py b/vendor/tornado/tornado/template.py
index 7ed56cfa6..25c00be05 100644
--- a/vendor/tornado/tornado/template.py
+++ b/vendor/tornado/tornado/template.py
@@ -2,17 +2,17 @@
#
# Copyright 2009 Facebook
#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""A simple template system that compiles templates to Python code.
diff --git a/vendor/tornado/tornado/web.py b/vendor/tornado/tornado/web.py
index 7559fae8a..06b8e9e82 100644
--- a/vendor/tornado/tornado/web.py
+++ b/vendor/tornado/tornado/web.py
@@ -2,17 +2,17 @@
#
# Copyright 2009 Facebook
#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""The Tornado web framework.
diff --git a/vendor/tornado/tornado/websocket.py b/vendor/tornado/tornado/websocket.py
index 38a58012c..5bab75c8f 100644
--- a/vendor/tornado/tornado/websocket.py
+++ b/vendor/tornado/tornado/websocket.py
@@ -2,17 +2,17 @@
#
# Copyright 2009 Facebook
#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
import functools
import logging
diff --git a/vendor/tornado/tornado/wsgi.py b/vendor/tornado/tornado/wsgi.py
index 69fa0988e..b65eaed9b 100644
--- a/vendor/tornado/tornado/wsgi.py
+++ b/vendor/tornado/tornado/wsgi.py
@@ -2,17 +2,17 @@
#
# Copyright 2009 Facebook
#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
"""WSGI support for the Tornado web framework.
diff --git a/vendor/tornado/website/website.py b/vendor/tornado/website/website.py
index f073b67e6..a07710eed 100644
--- a/vendor/tornado/website/website.py
+++ b/vendor/tornado/website/website.py
@@ -2,17 +2,17 @@
#
# Copyright 2009 Bret Taylor
#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
import markdown
import os