summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.bzrignore2
-rw-r--r--.gitignore1
-rw-r--r--CA/INTER/.placeholder (renamed from docs/_static/.gitignore)0
-rwxr-xr-xCA/geninter.sh2
-rw-r--r--CA/reqs/.placeholder (renamed from docs/_templates/.gitignore)0
-rw-r--r--MANIFEST.in22
-rw-r--r--README14
-rwxr-xr-xbin/nova-api2
-rwxr-xr-xbin/nova-compute73
-rwxr-xr-xbin/nova-dhcpbridge4
-rwxr-xr-xbin/nova-manage10
-rwxr-xr-xbin/nova-network32
-rwxr-xr-xbin/nova-rsapi12
-rwxr-xr-xbin/nova-volume49
-rw-r--r--debian/changelog232
-rw-r--r--debian/compat1
-rw-r--r--debian/control136
-rw-r--r--debian/nova-api.conf6
-rw-r--r--debian/nova-api.init69
-rw-r--r--debian/nova-api.install3
-rw-r--r--debian/nova-common.dirs11
-rw-r--r--debian/nova-common.install10
-rw-r--r--debian/nova-compute.conf8
-rw-r--r--debian/nova-compute.init69
-rw-r--r--debian/nova-compute.install2
-rw-r--r--debian/nova-dhcp.conf2
-rw-r--r--debian/nova-instancemonitor.init69
-rw-r--r--debian/nova-instancemonitor.install1
-rw-r--r--debian/nova-manage.conf4
-rw-r--r--debian/nova-objectstore.conf6
-rw-r--r--debian/nova-objectstore.init69
-rw-r--r--debian/nova-objectstore.install2
-rw-r--r--debian/nova-volume.conf6
-rw-r--r--debian/nova-volume.init69
-rw-r--r--debian/nova-volume.install2
-rw-r--r--debian/pycompat1
-rw-r--r--debian/pyversions1
-rwxr-xr-xdebian/rules4
-rw-r--r--doc/.gitignore (renamed from docs/.gitignore)0
-rw-r--r--doc/build/.gitignore (renamed from docs/_build/.gitignore)0
-rw-r--r--doc/build/.placeholder0
-rw-r--r--doc/source/Makefile (renamed from docs/Makefile)0
-rw-r--r--doc/source/_static/.gitignore0
-rw-r--r--doc/source/_static/.placeholder0
-rw-r--r--doc/source/_templates/.gitignore0
-rw-r--r--doc/source/_templates/.placeholder0
-rw-r--r--doc/source/architecture.rst (renamed from docs/architecture.rst)0
-rw-r--r--doc/source/auth.rst (renamed from docs/auth.rst)0
-rw-r--r--doc/source/binaries.rst (renamed from docs/binaries.rst)0
-rw-r--r--doc/source/compute.rst (renamed from docs/compute.rst)0
-rw-r--r--doc/source/conf.py (renamed from docs/conf.py)16
-rw-r--r--doc/source/endpoint.rst (renamed from docs/endpoint.rst)0
-rw-r--r--doc/source/fakes.rst (renamed from docs/fakes.rst)4
-rw-r--r--doc/source/getting.started.rst (renamed from docs/getting.started.rst)0
-rw-r--r--doc/source/index.rst (renamed from docs/index.rst)1
-rw-r--r--doc/source/modules.rst (renamed from docs/modules.rst)0
-rw-r--r--doc/source/network.rst (renamed from docs/network.rst)0
-rw-r--r--doc/source/nova.rst (renamed from docs/nova.rst)0
-rw-r--r--doc/source/objectstore.rst (renamed from docs/objectstore.rst)0
-rw-r--r--doc/source/packages.rst (renamed from docs/packages.rst)0
-rw-r--r--doc/source/storage.rst (renamed from docs/storage.rst)0
-rw-r--r--doc/source/volume.rst (renamed from docs/volume.rst)0
-rw-r--r--nova/adminclient.py163
-rw-r--r--nova/auth/ldapdriver.py484
-rw-r--r--nova/auth/manager.py807
-rw-r--r--nova/auth/rbac.py2
-rw-r--r--nova/auth/signer.py8
-rw-r--r--nova/auth/users.py974
-rw-r--r--nova/cloudpipe/api.py2
-rwxr-xr-xnova/cloudpipe/bootscript.sh2
-rw-r--r--nova/cloudpipe/pipelib.py4
-rw-r--r--nova/compute/disk.py7
-rw-r--r--nova/compute/instance_types.py30
-rw-r--r--nova/compute/linux_net.py9
-rw-r--r--nova/compute/model.py74
-rw-r--r--nova/compute/monitor.py127
-rw-r--r--nova/compute/network.py17
-rw-r--r--nova/compute/power_state.py41
-rw-r--r--nova/compute/service.py (renamed from nova/compute/node.py)314
-rw-r--r--nova/datastore.py31
-rw-r--r--nova/endpoint/admin.py88
-rwxr-xr-xnova/endpoint/api.py4
-rw-r--r--nova/endpoint/cloud.py64
-rw-r--r--nova/endpoint/rackspace.py7
-rw-r--r--nova/exception.py6
-rw-r--r--nova/fakevirt.py112
-rw-r--r--nova/flags.py10
-rw-r--r--nova/network/__init__.py32
-rw-r--r--nova/network/service.py35
-rw-r--r--nova/objectstore/bucket.py4
-rw-r--r--nova/objectstore/handler.py84
-rw-r--r--nova/process.py12
-rw-r--r--nova/rpc.py3
-rw-r--r--nova/service.py103
-rw-r--r--nova/test.py8
-rw-r--r--nova/tests/access_unittest.py8
-rw-r--r--nova/tests/api_unittest.py31
-rw-r--r--nova/tests/auth_unittest.py (renamed from nova/tests/users_unittest.py)109
-rw-r--r--nova/tests/cloud_unittest.py41
-rw-r--r--nova/tests/compute_unittest.py (renamed from nova/tests/node_unittest.py)45
-rw-r--r--nova/tests/fake_flags.py4
-rw-r--r--nova/tests/future_unittest.py75
-rw-r--r--nova/tests/model_unittest.py101
-rw-r--r--nova/tests/network_unittest.py97
-rw-r--r--nova/tests/objectstore_unittest.py186
-rw-r--r--nova/tests/process_unittest.py11
-rw-r--r--nova/tests/real_flags.py3
-rw-r--r--nova/tests/storage_unittest.py2
-rw-r--r--nova/tests/volume_unittest.py115
-rw-r--r--nova/twistd.py33
-rw-r--r--nova/utils.py17
-rw-r--r--nova/virt/__init__.py15
-rw-r--r--nova/virt/connection.py45
-rw-r--r--nova/virt/fake.py81
-rw-r--r--nova/virt/images.py72
-rw-r--r--nova/virt/libvirt_conn.py355
-rw-r--r--nova/virt/xenapi.py152
-rw-r--r--nova/volume/service.py (renamed from nova/volume/storage.py)63
-rw-r--r--run_tests.py6
-rwxr-xr-xrun_tests.sh13
-rw-r--r--setup.cfg12
-rw-r--r--setup.py45
-rw-r--r--tools/install_venv.py94
-rw-r--r--tools/pip-requires15
-rwxr-xr-xtools/with_venv.sh4
125 files changed, 3676 insertions, 2869 deletions
diff --git a/.bzrignore b/.bzrignore
index 93fc868a3..ab099d3e3 100644
--- a/.bzrignore
+++ b/.bzrignore
@@ -1 +1,3 @@
run_tests.err.log
+.nova-venv
+ChangeLog
diff --git a/.gitignore b/.gitignore
index 9db87ac29..2afc7a32c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,3 +9,4 @@ keys
build/*
build-stamp
nova.egg-info
+.nova-venv
diff --git a/docs/_static/.gitignore b/CA/INTER/.placeholder
index e69de29bb..e69de29bb 100644
--- a/docs/_static/.gitignore
+++ b/CA/INTER/.placeholder
diff --git a/CA/geninter.sh b/CA/geninter.sh
index 6c0528d1b..7d6c280d5 100755
--- a/CA/geninter.sh
+++ b/CA/geninter.sh
@@ -17,7 +17,7 @@
# under the License.
# ARG is the id of the user
-export SUBJ=/C=US/ST=California/L=Mountain View/O=Anso Labs/OU=Nova Dev/CN=customer-intCA-$3
+export SUBJ="/C=US/ST=California/L=MountainView/O=AnsoLabs/OU=NovaDev/CN=customer-intCA-$1"
mkdir INTER/$1
cd INTER/$1
cp ../../openssl.cnf.tmpl openssl.cnf
diff --git a/docs/_templates/.gitignore b/CA/reqs/.placeholder
index e69de29bb..e69de29bb 100644
--- a/docs/_templates/.gitignore
+++ b/CA/reqs/.placeholder
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 000000000..4eb28bde6
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,22 @@
+include HACKING LICENSE run_tests.py run_tests.sh
+include README builddeb.sh exercise_rsapi.py
+include ChangeLog
+graft CA
+graft doc
+graft smoketests
+graft tools
+include nova/auth/novarc.template
+include nova/auth/slap.sh
+include nova/cloudpipe/bootscript.sh
+include nova/cloudpipe/client.ovpn.template
+include nova/compute/fakevirtinstance.xml
+include nova/compute/interfaces.template
+include nova/compute/libvirt.xml.template
+include nova/tests/CA/
+include nova/tests/CA/cacert.pem
+include nova/tests/CA/private/
+include nova/tests/CA/private/cakey.pem
+include nova/tests/bundle/
+include nova/tests/bundle/1mb.manifest.xml
+include nova/tests/bundle/1mb.part.0
+include nova/tests/bundle/1mb.part.1
diff --git a/README b/README
index f7d21f400..851bca9db 100644
--- a/README
+++ b/README
@@ -6,15 +6,19 @@ The Choose Your Own Adventure README for Nova:
To monitor it from a distance: follow @novacc on twitter
-To tame it for use in your own cloud: read http://docs.novacc.org/getting.started.html
+To tame it for use in your own cloud: read http://nova.openstack.org/getting.started.html
-To study its anatomy: read http://docs.novacc.org/architecture.html
+To study its anatomy: read http://nova.openstack.org/architecture.html
-To disect it in detail: visit http://github.com/nova/cc
+To disect it in detail: visit http://code.launchpad.net/nova
-To taunt it with its weaknesses: use http://github.com/nova/cc/issues
+To taunt it with its weaknesses: use http://bugs.launchpad.net/nova
+
+To watch it: http://hudson.openstack.org
To hack at it: read HACKING
-To watch it: http://test.novacc.org/waterfall
+To laugh at its PEP8 problems: http://hudson.openstack.org/job/nova-pep8/violations
+
+To cry over its pylint problems: http://hudson.openstack.org/job/nova-pylint/violations
diff --git a/bin/nova-api b/bin/nova-api
index 26f5dbc87..1f2009c30 100755
--- a/bin/nova-api
+++ b/bin/nova-api
@@ -29,7 +29,7 @@ from nova import flags
from nova import rpc
from nova import server
from nova import utils
-from nova.auth import users
+from nova.auth import manager
from nova.compute import model
from nova.endpoint import admin
from nova.endpoint import api
diff --git a/bin/nova-compute b/bin/nova-compute
index 5635efbaf..e0c12354f 100755
--- a/bin/nova-compute
+++ b/bin/nova-compute
@@ -19,83 +19,14 @@
"""
Twistd daemon for the nova compute nodes.
- Receives messages via AMQP, manages pool of worker threads
- for async tasks.
"""
-import logging
-import os
-import sys
-
-# NOTE(termie): kludge so that we can run this from the bin directory in the
-# checkout without having to screw with paths
-NOVA_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'nova')
-if os.path.exists(NOVA_PATH):
- sys.path.insert(0, os.path.dirname(NOVA_PATH))
-
-
-from carrot import connection
-from carrot import messaging
-from twisted.internet import task
-from twisted.application import service
-
-from nova import flags
-from nova import rpc
from nova import twistd
-from nova.compute import node
-
-
-FLAGS = flags.FLAGS
-# NOTE(termie): This file will necessarily be re-imported under different
-# context when the twistd.serve() call is made below so any
-# flags we define here will have to be conditionally defined,
-# flags defined by imported modules are safe.
-if 'node_report_state_interval' not in FLAGS:
- flags.DEFINE_integer('node_report_state_interval', 10,
- 'seconds between nodes reporting state to cloud',
- lower_bound=1)
-logging.getLogger().setLevel(logging.DEBUG)
-
-def main():
- logging.warn('Starting compute node')
- n = node.Node()
- d = n.adopt_instances()
- d.addCallback(lambda x: logging.info('Adopted %d instances', x))
-
- conn = rpc.Connection.instance()
- consumer_all = rpc.AdapterConsumer(
- connection=conn,
- topic='%s' % FLAGS.compute_topic,
- proxy=n)
-
- consumer_node = rpc.AdapterConsumer(
- connection=conn,
- topic='%s.%s' % (FLAGS.compute_topic, FLAGS.node_name),
- proxy=n)
-
- bin_name = os.path.basename(__file__)
- pulse = task.LoopingCall(n.report_state, FLAGS.node_name, bin_name)
- pulse.start(interval=FLAGS.node_report_state_interval, now=False)
-
- injected = consumer_all.attach_to_twisted()
- injected = consumer_node.attach_to_twisted()
-
- # This is the parent service that twistd will be looking for when it
- # parses this file, return it so that we can get it into globals below
- application = service.Application(bin_name)
- n.setServiceParent(application)
- return application
+from nova.compute import service
-# NOTE(termie): When this script is executed from the commandline what it will
-# actually do is tell the twistd application runner that it
-# should run this file as a twistd application (see below).
if __name__ == '__main__':
twistd.serve(__file__)
-# NOTE(termie): When this script is loaded by the twistd application runner
-# this code path will be executed and twistd will expect a
-# variable named 'application' to be available, it will then
-# handle starting it and stopping it.
if __name__ == '__builtin__':
- application = main()
+ application = service.ComputeService.create()
diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index d15924e57..0db241b5e 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -76,9 +76,9 @@ def main():
FLAGS.fake_rabbit = True
FLAGS.redis_db = 8
FLAGS.network_size = 32
- FLAGS.fake_libvirt=True
+ FLAGS.connection_type = 'fake'
FLAGS.fake_network=True
- FLAGS.fake_users = True
+ FLAGS.auth_driver='nova.auth.ldapdriver.FakeLdapDriver'
action = argv[1]
if action in ['add','del','old']:
mac = argv[2]
diff --git a/bin/nova-manage b/bin/nova-manage
index 61ac86db6..1f7f808f1 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -27,7 +27,7 @@ import time
from nova import flags
from nova import utils
-from nova.auth import users
+from nova.auth import manager
from nova.compute import model
from nova.compute import network
from nova.cloudpipe import pipelib
@@ -42,7 +42,7 @@ class NetworkCommands(object):
class VpnCommands(object):
def __init__(self):
- self.manager = users.UserManager.instance()
+ self.manager = manager.AuthManager()
self.instdir = model.InstanceDirectory()
self.pipe = pipelib.CloudPipe(cloud.CloudController())
@@ -90,7 +90,7 @@ class VpnCommands(object):
class RoleCommands(object):
def __init__(self):
- self.manager = users.UserManager.instance()
+ self.manager = manager.AuthManager()
def add(self, user, role, project=None):
"""adds role to user
@@ -113,7 +113,7 @@ class RoleCommands(object):
class UserCommands(object):
def __init__(self):
- self.manager = users.UserManager.instance()
+ self.manager = manager.AuthManager()
def __print_export(self, user):
print 'export EC2_ACCESS_KEY=%s' % user.access
@@ -153,7 +153,7 @@ class UserCommands(object):
class ProjectCommands(object):
def __init__(self):
- self.manager = users.UserManager.instance()
+ self.manager = manager.AuthManager()
def add(self, project, user):
"""adds user to project
diff --git a/bin/nova-network b/bin/nova-network
new file mode 100755
index 000000000..52d6cb70a
--- /dev/null
+++ b/bin/nova-network
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+ Twistd daemon for the nova network nodes.
+"""
+
+from nova import twistd
+from nova.network import service
+
+
+if __name__ == '__main__':
+ twistd.serve(__file__)
+
+if __name__ == '__builtin__':
+ application = service.NetworkService.create()
diff --git a/bin/nova-rsapi b/bin/nova-rsapi
index 5cbe2d8c1..306a1fc60 100755
--- a/bin/nova-rsapi
+++ b/bin/nova-rsapi
@@ -4,20 +4,20 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
-#
+#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
-#
+#
# http://www.apache.org/licenses/LICENSE-2.0
-#
+#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
- WSGI daemon for the main API endpoint.
+ WSGI daemon for the main API endpoint.
"""
import logging
@@ -28,14 +28,14 @@ from nova import flags
from nova import rpc
from nova import server
from nova import utils
-from nova.auth import users
+from nova.auth import manager
from nova.endpoint import rackspace
FLAGS = flags.FLAGS
flags.DEFINE_integer('cc_port', 8773, 'cloud controller port')
def main(_argv):
- user_manager = users.UserManager()
+ user_manager = manager.AuthManager()
api_instance = rackspace.Api(user_manager)
conn = rpc.Connection.instance()
rpc_consumer = rpc.AdapterConsumer(connection=conn,
diff --git a/bin/nova-volume b/bin/nova-volume
index df9fb5c7a..f7a8fad37 100755
--- a/bin/nova-volume
+++ b/bin/nova-volume
@@ -18,52 +18,15 @@
# under the License.
"""
- Tornado Storage daemon manages AoE volumes via AMQP messaging.
+ Twistd daemon for the nova volume nodes.
"""
-import logging
-from tornado import ioloop
-
-from nova import flags
-from nova import rpc
-from nova import server
-from nova import utils
-from nova.volume import storage
-
-
-FLAGS = flags.FLAGS
-flags.DEFINE_integer('storage_report_state_interval', 10,
- 'seconds between broadcasting state to cloud',
- lower_bound=1)
-
-
-def main(argv):
- bs = storage.BlockStore()
-
- conn = rpc.Connection.instance()
- consumer_all = rpc.AdapterConsumer(
- connection=conn,
- topic='%s' % FLAGS.storage_topic,
- proxy=bs)
-
- consumer_node = rpc.AdapterConsumer(
- connection=conn,
- topic='%s.%s' % (FLAGS.storage_topic, FLAGS.node_name),
- proxy=bs)
-
- io_inst = ioloop.IOLoop.instance()
- scheduler = ioloop.PeriodicCallback(
- lambda: bs.report_state(),
- FLAGS.storage_report_state_interval * 1000,
- io_loop=io_inst)
-
- injected = consumer_all.attachToTornado(io_inst)
- injected = consumer_node.attachToTornado(io_inst)
- scheduler.start()
- io_inst.start()
+from nova import twistd
+from nova.volume import service
if __name__ == '__main__':
- utils.default_flagfile()
- server.serve('nova-volume', main)
+ twistd.serve(__file__)
+if __name__ == '__builtin__':
+ application = service.VolumeService.create()
diff --git a/debian/changelog b/debian/changelog
deleted file mode 100644
index 31dd5e91e..000000000
--- a/debian/changelog
+++ /dev/null
@@ -1,232 +0,0 @@
-nova (0.2.3-1) UNRELEASED; urgency=low
-
- * Relax the Twisted dependency to python-twisted-core (rather than the
- full stack).
- * Move nova related configuration files into /etc/nova/.
- * Add a dependency on nginx from nova-objectsstore and install a
- suitable configuration file.
- * Ship the CA directory in nova-common.
- * Add a default flag file for nova-manage to help it find the CA.
- * If set, pass KernelId and RamdiskId from RunInstances call to the
- target compute node.
- * Added --network_path setting to nova-compute's flagfile.
- * Move templates from python directories to /usr/share/nova.
- * Add debian/nova-common.dirs to create
- var/lib/nova/{buckets,CA,images,instances,keys,networks}
- * Don't pass --daemonize=1 to nova-compute. It's already daemonising
- by default.
-
- -- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 14 Jul 2010 12:00:00 -0700
-
-nova (0.2.2-10) UNRELEASED; urgency=low
-
- * Fixed extra space in vblade-persist
-
- -- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 13 Jul 2010 19:00:00 -0700
-
-nova (0.2.2-9) UNRELEASED; urgency=low
-
- * Fixed invalid dn bug in ldap for adding roles
-
- -- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 12 Jul 2010 15:20:00 -0700
-
-nova (0.2.2-8) UNRELEASED; urgency=low
-
- * Added a missing comma
-
- -- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 08 Jul 2010 10:05:00 -0700
-
-nova (0.2.2-7) UNRELEASED; urgency=low
-
- * Missing files from twisted patch
- * License upedates
- * Reformatting/cleanup
- * Users/ldap bugfixes
- * Merge fixes
- * Documentation updates
- * Vpn key creation fix
- * Multiple shelves for volumes
-
- -- Vishvananda Ishaya <vishvananda@gmail.com> Wed, 07 Jul 2010 18:45:00 -0700
-
-nova (0.2.2-6) UNRELEASED; urgency=low
-
- * Fix to make Key Injection work again
-
- -- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 14 Jun 2010 21:35:00 -0700
-
-nova (0.2.2-5) UNRELEASED; urgency=low
-
- * Lowered message callback frequency to stop compute and volume
- from eating tons of cpu
-
- -- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 14 Jun 2010 14:15:00 -0700
-
-nova (0.2.2-4) UNRELEASED; urgency=low
-
- * Documentation fixes
- * Uncaught exceptions now log properly
- * Nova Manage zip exporting works again
- * Twisted threads no longer interrupt system calls
-
- -- Vishvananda Ishaya <vishvananda@gmail.com> Sun, 13 Jun 2010 01:40:00 -0700
-
-nova (0.2.2-3) UNRELEASED; urgency=low
-
- * Fixes to api calls
- * More accurate documentation
- * Removal of buggy multiprocessing
- * Asynchronus execution of shell commands
- * Fix of messaging race condition
- * Test redis database cleaned out on each run of tests
- * Smoketest updates
-
- -- Vishvananda Ishaya <vishvananda@gmail.com> Fri, 12 Jun 2010 20:10:00 -0700
-
-nova (0.2.2-2) UNRELEASED; urgency=low
-
- * Bugfixes to volume code
- * Instances no longer use keeper
- * Sectors off by one fix
- * State reported properly by instances
-
- -- Vishvananda Ishaya <vishvananda@gmail.com> Wed, 03 Jun 2010 15:21:00 -0700
-
-nova (0.2.2-1) UNRELEASED; urgency=low
-
- * First release based on nova/cc
- * Major rewrites to volumes and instances
- * Addition of cloudpipe and rbac
- * Major bugfixes
-
- -- Vishvananda Ishaya <vishvananda@gmail.com> Wed, 02 Jun 2010 17:42:00 -0700
-
-nova (0.2.1-1) UNRELEASED; urgency=low
-
- * Support ephemeral (local) space for instances
- * instance related fixes
- * fix network & cloudpipe bugs
-
- -- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 25 May 2010 12:14:00 -0700
-
-nova (0.2.0-20) UNRELEASED; urgency=low
-
- * template files are in proper folder
-
- -- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 25 May 2010 12:14:00 -0700
-
-nova (0.2.0-19) UNRELEASED; urgency=low
-
- * removed mox dependency and added templates to install
-
- -- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 25 May 2010 11:53:00 -0700
-
-nova (0.2.0-18) UNRELEASED; urgency=low
-
- * api server properly sends instance status code
-
- -- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 24 May 2010 17:18:00 -0700
-
-nova (0.2.0-17) UNRELEASED; urgency=low
-
- * redis-backed datastore
-
- -- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 24 May 2010 16:28:00 -0700
-
-nova (0.2.0-16) UNRELEASED; urgency=low
-
- * make sure twistd.pid is really overriden
-
- -- Manish Singh <yosh@gimp.org> Sun, 23 May 2010 22:18:47 -0700
-
-nova (0.2.0-15) UNRELEASED; urgency=low
-
- * rpc shouldn't require tornado unless you are using attach_to_tornado
-
- -- Jesse Andrews <anotherjesse@gmail.com> Sun, 23 May 2010 21:59:00 -0700
-
-nova (0.2.0-14) UNRELEASED; urgency=low
-
- * quicky init scripts for the other services, based on nova-objectstore
-
- -- Manish Singh <yosh@gimp.org> Sun, 23 May 2010 21:49:43 -0700
-
-nova (0.2.0-13) UNRELEASED; urgency=low
-
- * init script for nova-objectstore
-
- -- Manish Singh <yosh@gimp.org> Sun, 23 May 2010 21:33:25 -0700
-
-nova (0.2.0-12) UNRELEASED; urgency=low
-
- * kvm, kpartx required for nova-compute
-
- -- Jesse Andrews <anotherjesse@gmail.com> Sun, 23 May 2010 21:32:00 -0700
-
-nova (0.2.0-11) UNRELEASED; urgency=low
-
- * Need to include the python modules in nova-common.install as well.
-
- -- Manish Singh <yosh@gimp.org> Sun, 23 May 2010 20:04:27 -0700
-
-nova (0.2.0-10) UNRELEASED; urgency=low
-
- * add more requirements to bin packages
-
- -- Jesse Andrews <anotherjesse@gmail.com> Sun, 23 May 2010 19:54:00 -0700
-
-nova (0.2.0-9) UNRELEASED; urgency=low
-
- * nova bin packages should depend on the same version of nova-common they
- were built from.
-
- -- Manish Singh <yosh@gimp.org> Sun, 23 May 2010 18:46:34 -0700
-
-nova (0.2.0-8) UNRELEASED; urgency=low
-
- * Require libvirt 0.8.1 or newer for nova-compute
-
- -- Jesse Andrews <anotherjesse@gmail.com> Sun, 23 May 2010 18:33:00 -0700
-
-nova (0.2.0-7) UNRELEASED; urgency=low
-
- * Split bins into separate packages
-
- -- Manish Singh <yosh@gimp.org> Sun, 23 May 2010 18:46:34 -0700
-
-nova (0.2.0-6) UNRELEASED; urgency=low
-
- * Add python-m2crypto to deps
-
- -- Jesse Andrews <anotherjesse@gmail.com> Sun, 23 May 2010 18:33:00 -0700
-
-nova (0.2.0-5) UNRELEASED; urgency=low
-
- * Add python-gflags to deps
-
- -- Manish Singh <yosh@gimp.org> Sun, 23 May 2010 18:28:50 -0700
-
-nova (0.2.0-4) UNRELEASED; urgency=low
-
- * install scripts
-
- -- Manish Singh <yosh@gimp.org> Sun, 23 May 2010 18:16:27 -0700
-
-nova (0.2.0-3) UNRELEASED; urgency=low
-
- * debian build goop
-
- -- Manish Singh <yosh@gimp.org> Sun, 23 May 2010 18:06:37 -0700
-
-nova (0.2.0-2) UNRELEASED; urgency=low
-
- * improved requirements
-
- -- Jesse Andrews <anotherjesse@gmail.com> Sun, 23 May 2010 17:42:00 -0700
-
-nova (0.2.0-1) UNRELEASED; urgency=low
-
- * initial release
-
- -- Jesse Andrews <anotherjesse@gmail.com> Fri, 21 May 2010 12:28:00 -0700
-
diff --git a/debian/compat b/debian/compat
deleted file mode 100644
index 7f8f011eb..000000000
--- a/debian/compat
+++ /dev/null
@@ -1 +0,0 @@
-7
diff --git a/debian/control b/debian/control
deleted file mode 100644
index a6d12f36e..000000000
--- a/debian/control
+++ /dev/null
@@ -1,136 +0,0 @@
-Source: nova
-Section: net
-Priority: extra
-Maintainer: Jesse Andrews <jesse@ansolabs.com>
-Build-Depends: debhelper (>= 7), redis-server (>=2:2.0.0~rc1), python-m2crypto
-Build-Depends-Indep: python-support, python-setuptools
-Standards-Version: 3.8.4
-XS-Python-Version: 2.6
-
-Package: nova-common
-Architecture: all
-Depends: ${python:Depends}, aoetools, vlan, python-ipy, python-boto, python-m2crypto, python-pycurl, python-twisted-core, python-daemon, python-redis, python-carrot, python-lockfile, python-gflags, python-tornado, ${misc:Depends}
-Provides: ${python:Provides}
-Description: Nova Cloud Computing - common files
- Nova is a cloud computing fabric controller (the main part of an IaaS
- system) built to match the popular AWS EC2 and S3 APIs. It is written in
- Python, using the Tornado and Twisted frameworks, and relies on the
- standard AMQP messaging protocol, and the Redis distributed KVS.
- .
- Nova is intended to be easy to extend, and adapt. For example, it
- currently uses an LDAP server for users and groups, but also includes a
- fake LDAP server, that stores data in Redis. It has extensive test
- coverage, and uses the Sphinx toolkit (the same as Python itself) for code
- and user documentation.
- .
- While Nova is currently in Beta use within several organizations, the
- codebase is very much under active development.
- .
- This package contains things that are needed by all parts of Nova.
-
-Package: nova-compute
-Architecture: all
-Depends: nova-common (= ${binary:Version}), kpartx, kvm, python-libvirt, libvirt-bin (>= 0.7.5), curl, ${python:Depends}, ${misc:Depends}
-Description: Nova Cloud Computing - compute node
- Nova is a cloud computing fabric controller (the main part of an IaaS
- system) built to match the popular AWS EC2 and S3 APIs. It is written in
- Python, using the Tornado and Twisted frameworks, and relies on the
- standard AMQP messaging protocol, and the Redis distributed KVS.
- .
- Nova is intended to be easy to extend, and adapt. For example, it
- currently uses an LDAP server for users and groups, but also includes a
- fake LDAP server, that stores data in Redis. It has extensive test
- coverage, and uses the Sphinx toolkit (the same as Python itself) for code
- and user documentation.
- .
- While Nova is currently in Beta use within several organizations, the
- codebase is very much under active development.
- .
- This is the package you will install on the nodes that will run your
- virtual machines.
-
-Package: nova-volume
-Architecture: all
-Depends: nova-common (= ${binary:Version}), vblade, vblade-persist, ${python:Depends}, ${misc:Depends}
-Description: Nova Cloud Computing - storage
- Nova is a cloud computing fabric controller (the main part of an IaaS
- system) built to match the popular AWS EC2 and S3 APIs. It is written in
- Python, using the Tornado and Twisted frameworks, and relies on the
- standard AMQP messaging protocol, and the Redis distributed KVS.
- .
- Nova is intended to be easy to extend, and adapt. For example, it
- currently uses an LDAP server for users and groups, but also includes a
- fake LDAP server, that stores data in Redis. It has extensive test
- coverage, and uses the Sphinx toolkit (the same as Python itself) for code
- and user documentation.
- .
- While Nova is currently in Beta use within several organizations, the
- codebase is very much under active development.
- .
- This is the package you will install on your storage nodes.
-
-Package: nova-api
-Architecture: all
-Depends: nova-common (= ${binary:Version}), ${python:Depends}, ${misc:Depends}
-Description: Nova Cloud Computing - API frontend
- Nova is a cloud computing fabric controller (the main part of an IaaS
- system) built to match the popular AWS EC2 and S3 APIs. It is written in
- Python, using the Tornado and Twisted frameworks, and relies on the
- standard AMQP messaging protocol, and the Redis distributed KVS.
- .
- Nova is intended to be easy to extend, and adapt. For example, it
- currently uses an LDAP server for users and groups, but also includes a
- fake LDAP server, that stores data in Redis. It has extensive test
- coverage, and uses the Sphinx toolkit (the same as Python itself) for code
- and user documentation.
- .
- While Nova is currently in Beta use within several organizations, the
- codebase is very much under active development.
- .
- This package provides the API frontend.
-
-Package: nova-objectstore
-Architecture: all
-Depends: nova-common (= ${binary:Version}), ${python:Depends}, ${misc:Depends}
-Description: Nova Cloud Computing - object store
- Nova is a cloud computing fabric controller (the main part of an IaaS
- system) built to match the popular AWS EC2 and S3 APIs. It is written in
- Python, using the Tornado and Twisted frameworks, and relies on the
- standard AMQP messaging protocol, and the Redis distributed KVS.
- .
- Nova is intended to be easy to extend, and adapt. For example, it
- currently uses an LDAP server for users and groups, but also includes a
- fake LDAP server, that stores data in Redis. It has extensive test
- coverage, and uses the Sphinx toolkit (the same as Python itself) for code
- and user documentation.
- .
- While Nova is currently in Beta use within several organizations, the
- codebase is very much under active development.
- .
- This is the package you will install on the nodes that will contain your
- object store.
-
-Package: nova-instancemonitor
-Architecture: all
-Depends: nova-common (= ${binary:Version}), ${python:Depends}, ${misc:Depends}
-Description: Nova instance monitor
-
-Package: nova-tools
-Architecture: all
-Depends: python-boto, ${python:Depends}, ${misc:Depends}
-Description: Nova Cloud Computing - management tools
- Nova is a cloud computing fabric controller (the main part of an IaaS
- system) built to match the popular AWS EC2 and S3 APIs. It is written in
- Python, using the Tornado and Twisted frameworks, and relies on the
- standard AMQP messaging protocol, and the Redis distributed KVS.
- .
- Nova is intended to be easy to extend, and adapt. For example, it
- currently uses an LDAP server for users and groups, but also includes a
- fake LDAP server, that stores data in Redis. It has extensive test
- coverage, and uses the Sphinx toolkit (the same as Python itself) for code
- and user documentation.
- .
- While Nova is currently in Beta use within several organizations, the
- codebase is very much under active development.
- .
- This package contains admin tools for Nova.
diff --git a/debian/nova-api.conf b/debian/nova-api.conf
deleted file mode 100644
index d0b796878..000000000
--- a/debian/nova-api.conf
+++ /dev/null
@@ -1,6 +0,0 @@
---daemonize=1
---ca_path=/var/lib/nova/CA
---keys_path=/var/lib/nova/keys
---networks_path=/var/lib/nova/networks
---dhcpbridge_flagfile=/etc/nova/nova-dhcpbridge.conf
---fake_users=1
diff --git a/debian/nova-api.init b/debian/nova-api.init
deleted file mode 100644
index 597fbef95..000000000
--- a/debian/nova-api.init
+++ /dev/null
@@ -1,69 +0,0 @@
-#! /bin/sh
-### BEGIN INIT INFO
-# Provides: nova-api
-# Required-Start: $remote_fs $syslog
-# Required-Stop: $remote_fs $syslog
-# Default-Start: 2 3 4 5
-# Default-Stop: 0 1 6
-# Short-Description: nova-api
-# Description: nova-api
-### END INIT INFO
-
-
-set -e
-
-DAEMON=/usr/bin/nova-api
-DAEMON_ARGS="--flagfile=/etc/nova/nova-api.conf"
-PIDFILE=/var/run/nova-api.pid
-
-ENABLED=true
-
-if test -f /etc/default/nova-api; then
- . /etc/default/nova-api
-fi
-
-. /lib/lsb/init-functions
-
-export PATH="${PATH:+$PATH:}/usr/sbin:/sbin"
-
-case "$1" in
- start)
- test "$ENABLED" = "true" || exit 0
- log_daemon_msg "Starting nova api" "nova-api"
- cd /var/run
- if $DAEMON $DAEMON_ARGS start; then
- log_end_msg 0
- else
- log_end_msg 1
- fi
- ;;
- stop)
- test "$ENABLED" = "true" || exit 0
- log_daemon_msg "Stopping nova api" "nova-api"
- cd /var/run
- if $DAEMON $DAEMON_ARGS stop; then
- log_end_msg 0
- else
- log_end_msg 1
- fi
- ;;
- restart|force-reload)
- test "$ENABLED" = "true" || exit 1
- cd /var/run
- if $DAEMON $DAEMON_ARGS restart; then
- log_end_msg 0
- else
- log_end_msg 1
- fi
- ;;
- status)
- test "$ENABLED" = "true" || exit 0
- status_of_proc -p $PIDFILE $DAEMON nova-api && exit 0 || exit $?
- ;;
- *)
- log_action_msg "Usage: /etc/init.d/nova-api {start|stop|restart|force-reload|status}"
- exit 1
- ;;
-esac
-
-exit 0
diff --git a/debian/nova-api.install b/debian/nova-api.install
deleted file mode 100644
index 89615d302..000000000
--- a/debian/nova-api.install
+++ /dev/null
@@ -1,3 +0,0 @@
-bin/nova-api usr/bin
-debian/nova-api.conf etc/nova
-debian/nova-dhcpbridge.conf etc/nova
diff --git a/debian/nova-common.dirs b/debian/nova-common.dirs
deleted file mode 100644
index b58fe8b7f..000000000
--- a/debian/nova-common.dirs
+++ /dev/null
@@ -1,11 +0,0 @@
-etc/nova
-var/lib/nova/buckets
-var/lib/nova/CA
-var/lib/nova/CA/INTER
-var/lib/nova/CA/newcerts
-var/lib/nova/CA/private
-var/lib/nova/CA/reqs
-var/lib/nova/images
-var/lib/nova/instances
-var/lib/nova/keys
-var/lib/nova/networks
diff --git a/debian/nova-common.install b/debian/nova-common.install
deleted file mode 100644
index 9b1bbf147..000000000
--- a/debian/nova-common.install
+++ /dev/null
@@ -1,10 +0,0 @@
-bin/nova-manage usr/bin
-debian/nova-manage.conf etc/nova
-nova/auth/novarc.template usr/share/nova
-nova/cloudpipe/client.ovpn.template usr/share/nova
-nova/compute/libvirt.xml.template usr/share/nova
-nova/compute/interfaces.template usr/share/nova
-usr/lib/python*/*-packages/nova/*
-CA/openssl.cnf.tmpl var/lib/nova/CA
-CA/geninter.sh var/lib/nova/CA
-CA/genrootca.sh var/lib/nova/CA
diff --git a/debian/nova-compute.conf b/debian/nova-compute.conf
deleted file mode 100644
index d862f2328..000000000
--- a/debian/nova-compute.conf
+++ /dev/null
@@ -1,8 +0,0 @@
---ca_path=/var/lib/nova/CA
---keys_path=/var/lib/nova/keys
---instances_path=/var/lib/nova/instances
---simple_network_template=/usr/share/nova/interfaces.template
---libvirt_xml_template=/usr/share/nova/libvirt.xml.template
---vpn_client_template=/usr/share/nova/client.ovpn.template
---credentials_template=/usr/share/nova/novarc.template
---fake_users=1
diff --git a/debian/nova-compute.init b/debian/nova-compute.init
deleted file mode 100644
index d0f093a7a..000000000
--- a/debian/nova-compute.init
+++ /dev/null
@@ -1,69 +0,0 @@
-#! /bin/sh
-### BEGIN INIT INFO
-# Provides: nova-compute
-# Required-Start: $remote_fs $syslog
-# Required-Stop: $remote_fs $syslog
-# Default-Start: 2 3 4 5
-# Default-Stop: 0 1 6
-# Short-Description: nova-compute
-# Description: nova-compute
-### END INIT INFO
-
-
-set -e
-
-DAEMON=/usr/bin/nova-compute
-DAEMON_ARGS="--flagfile=/etc/nova/nova-compute.conf"
-PIDFILE=/var/run/nova-compute.pid
-
-ENABLED=true
-
-if test -f /etc/default/nova-compute; then
- . /etc/default/nova-compute
-fi
-
-. /lib/lsb/init-functions
-
-export PATH="${PATH:+$PATH:}/usr/sbin:/sbin"
-
-case "$1" in
- start)
- test "$ENABLED" = "true" || exit 0
- log_daemon_msg "Starting nova compute" "nova-compute"
- cd /var/run
- if $DAEMON $DAEMON_ARGS start; then
- log_end_msg 0
- else
- log_end_msg 1
- fi
- ;;
- stop)
- test "$ENABLED" = "true" || exit 0
- log_daemon_msg "Stopping nova compute" "nova-compute"
- cd /var/run
- if $DAEMON $DAEMON_ARGS stop; then
- log_end_msg 0
- else
- log_end_msg 1
- fi
- ;;
- restart|force-reload)
- test "$ENABLED" = "true" || exit 1
- cd /var/run
- if $DAEMON $DAEMON_ARGS restart; then
- log_end_msg 0
- else
- log_end_msg 1
- fi
- ;;
- status)
- test "$ENABLED" = "true" || exit 0
- status_of_proc -p $PIDFILE $DAEMON nova-compute && exit 0 || exit $?
- ;;
- *)
- log_action_msg "Usage: /etc/init.d/nova-compute {start|stop|restart|force-reload|status}"
- exit 1
- ;;
-esac
-
-exit 0
diff --git a/debian/nova-compute.install b/debian/nova-compute.install
deleted file mode 100644
index 5f9df46a8..000000000
--- a/debian/nova-compute.install
+++ /dev/null
@@ -1,2 +0,0 @@
-bin/nova-compute usr/bin
-debian/nova-compute.conf etc/nova
diff --git a/debian/nova-dhcp.conf b/debian/nova-dhcp.conf
deleted file mode 100644
index 0aafe7549..000000000
--- a/debian/nova-dhcp.conf
+++ /dev/null
@@ -1,2 +0,0 @@
---networks_path=/var/lib/nova/networks
---fake_users=1
diff --git a/debian/nova-instancemonitor.init b/debian/nova-instancemonitor.init
deleted file mode 100644
index 2865fc334..000000000
--- a/debian/nova-instancemonitor.init
+++ /dev/null
@@ -1,69 +0,0 @@
-#! /bin/sh
-### BEGIN INIT INFO
-# Provides: nova-instancemonitor
-# Required-Start: $remote_fs $syslog
-# Required-Stop: $remote_fs $syslog
-# Default-Start: 2 3 4 5
-# Default-Stop: 0 1 6
-# Short-Description: nova-instancemonitor
-# Description: nova-instancemonitor
-### END INIT INFO
-
-
-set -e
-
-DAEMON=/usr/bin/nova-instancemonitor
-DAEMON_ARGS="--flagfile=/etc/nova.conf"
-PIDFILE=/var/run/nova-instancemonitor.pid
-
-ENABLED=false
-
-if test -f /etc/default/nova-instancemonitor; then
- . /etc/default/nova-instancemonitor
-fi
-
-. /lib/lsb/init-functions
-
-export PATH="${PATH:+$PATH:}/usr/sbin:/sbin"
-
-case "$1" in
- start)
- test "$ENABLED" = "true" || exit 0
- log_daemon_msg "Starting nova compute" "nova-instancemonitor"
- cd /var/run
- if $DAEMON $DAEMON_ARGS start; then
- log_end_msg 0
- else
- log_end_msg 1
- fi
- ;;
- stop)
- test "$ENABLED" = "true" || exit 0
- log_daemon_msg "Stopping nova compute" "nova-instancemonitor"
- cd /var/run
- if $DAEMON $DAEMON_ARGS stop; then
- log_end_msg 0
- else
- log_end_msg 1
- fi
- ;;
- restart|force-reload)
- test "$ENABLED" = "true" || exit 1
- cd /var/run
- if $DAEMON $DAEMON_ARGS restart; then
- log_end_msg 0
- else
- log_end_msg 1
- fi
- ;;
- status)
- test "$ENABLED" = "true" || exit 0
- status_of_proc -p $PIDFILE $DAEMON nova-instancemonitor && exit 0 || exit $?
- ;;
- *)
- log_action_msg "Usage: /etc/init.d/nova-instancemonitor {start|stop|restart|force-reload|status}"
- exit 1
- ;;
-esac
-
-exit 0
diff --git a/debian/nova-instancemonitor.install b/debian/nova-instancemonitor.install
deleted file mode 100644
index 48e7884b4..000000000
--- a/debian/nova-instancemonitor.install
+++ /dev/null
@@ -1 +0,0 @@
-bin/nova-instancemonitor usr/bin
diff --git a/debian/nova-manage.conf b/debian/nova-manage.conf
deleted file mode 100644
index 5ccda7ecf..000000000
--- a/debian/nova-manage.conf
+++ /dev/null
@@ -1,4 +0,0 @@
---ca_path=/var/lib/nova/CA
---credentials_template=/usr/share/nova/novarc.template
---keys_path=/var/lib/nova/keys
---vpn_client_template=/usr/share/nova/client.ovpn.template
diff --git a/debian/nova-objectstore.conf b/debian/nova-objectstore.conf
deleted file mode 100644
index 03f5df051..000000000
--- a/debian/nova-objectstore.conf
+++ /dev/null
@@ -1,6 +0,0 @@
---daemonize=1
---ca_path=/var/lib/nova/CA
---keys_path=/var/lib/nova/keys
---fake_users=1
---images_path=/var/lib/nova/images
---buckets_path=/var/lib/nova/buckets
diff --git a/debian/nova-objectstore.init b/debian/nova-objectstore.init
deleted file mode 100644
index 9676345ad..000000000
--- a/debian/nova-objectstore.init
+++ /dev/null
@@ -1,69 +0,0 @@
-#! /bin/sh
-### BEGIN INIT INFO
-# Provides: nova-objectstore
-# Required-Start: $remote_fs $syslog
-# Required-Stop: $remote_fs $syslog
-# Default-Start: 2 3 4 5
-# Default-Stop: 0 1 6
-# Short-Description: nova-objectstore
-# Description: nova-objectstore
-### END INIT INFO
-
-
-set -e
-
-DAEMON=/usr/bin/nova-objectstore
-DAEMON_ARGS="--flagfile=/etc/nova/nova-objectstore.conf"
-PIDFILE=/var/run/nova-objectstore.pid
-
-ENABLED=true
-
-if test -f /etc/default/nova-objectstore; then
- . /etc/default/nova-objectstore
-fi
-
-. /lib/lsb/init-functions
-
-export PATH="${PATH:+$PATH:}/usr/sbin:/sbin"
-
-case "$1" in
- start)
- test "$ENABLED" = "true" || exit 0
- log_daemon_msg "Starting nova objectstore" "nova-objectstore"
- cd /var/run
- if $DAEMON $DAEMON_ARGS start; then
- log_end_msg 0
- else
- log_end_msg 1
- fi
- ;;
- stop)
- test "$ENABLED" = "true" || exit 0
- log_daemon_msg "Stopping nova objectstore" "nova-objectstore"
- cd /var/run
- if $DAEMON $DAEMON_ARGS stop; then
- log_end_msg 0
- else
- log_end_msg 1
- fi
- ;;
- restart|force-reload)
- test "$ENABLED" = "true" || exit 1
- cd /var/run
- if $DAEMON $DAEMON_ARGS restart; then
- log_end_msg 0
- else
- log_end_msg 1
- fi
- ;;
- status)
- test "$ENABLED" = "true" || exit 0
- status_of_proc -p $PIDFILE $DAEMON nova-objectstore && exit 0 || exit $?
- ;;
- *)
- log_action_msg "Usage: /etc/init.d/nova-objectstore {start|stop|restart|force-reload|status}"
- exit 1
- ;;
-esac
-
-exit 0
diff --git a/debian/nova-objectstore.install b/debian/nova-objectstore.install
deleted file mode 100644
index c5b3d997a..000000000
--- a/debian/nova-objectstore.install
+++ /dev/null
@@ -1,2 +0,0 @@
-bin/nova-objectstore usr/bin
-debian/nova-objectstore.conf etc/nova
diff --git a/debian/nova-volume.conf b/debian/nova-volume.conf
deleted file mode 100644
index 03f5df051..000000000
--- a/debian/nova-volume.conf
+++ /dev/null
@@ -1,6 +0,0 @@
---daemonize=1
---ca_path=/var/lib/nova/CA
---keys_path=/var/lib/nova/keys
---fake_users=1
---images_path=/var/lib/nova/images
---buckets_path=/var/lib/nova/buckets
diff --git a/debian/nova-volume.init b/debian/nova-volume.init
deleted file mode 100644
index d5c2dddf8..000000000
--- a/debian/nova-volume.init
+++ /dev/null
@@ -1,69 +0,0 @@
-#! /bin/sh
-### BEGIN INIT INFO
-# Provides: nova-volume
-# Required-Start: $remote_fs $syslog
-# Required-Stop: $remote_fs $syslog
-# Default-Start: 2 3 4 5
-# Default-Stop: 0 1 6
-# Short-Description: nova-volume
-# Description: nova-volume
-### END INIT INFO
-
-
-set -e
-
-DAEMON=/usr/bin/nova-volume
-DAEMON_ARGS="--flagfile=/etc/nova/nova-volume.conf"
-PIDFILE=/var/run/nova-volume.pid
-
-ENABLED=true
-
-if test -f /etc/default/nova-volume; then
- . /etc/default/nova-volume
-fi
-
-. /lib/lsb/init-functions
-
-export PATH="${PATH:+$PATH:}/usr/sbin:/sbin"
-
-case "$1" in
- start)
- test "$ENABLED" = "true" || exit 0
- log_daemon_msg "Starting nova volume" "nova-volume"
- cd /var/run
- if $DAEMON $DAEMON_ARGS start; then
- log_end_msg 0
- else
- log_end_msg 1
- fi
- ;;
- stop)
- test "$ENABLED" = "true" || exit 0
- log_daemon_msg "Stopping nova volume" "nova-volume"
- cd /var/run
- if $DAEMON $DAEMON_ARGS stop; then
- log_end_msg 0
- else
- log_end_msg 1
- fi
- ;;
- restart|force-reload)
- test "$ENABLED" = "true" || exit 1
- cd /var/run
- if $DAEMON $DAEMON_ARGS restart; then
- log_end_msg 0
- else
- log_end_msg 1
- fi
- ;;
- status)
- test "$ENABLED" = "true" || exit 0
- status_of_proc -p $PIDFILE $DAEMON nova-volume && exit 0 || exit $?
- ;;
- *)
- log_action_msg "Usage: /etc/init.d/nova-volume {start|stop|restart|force-reload|status}"
- exit 1
- ;;
-esac
-
-exit 0
diff --git a/debian/nova-volume.install b/debian/nova-volume.install
deleted file mode 100644
index 9a840c78e..000000000
--- a/debian/nova-volume.install
+++ /dev/null
@@ -1,2 +0,0 @@
-bin/nova-volume usr/bin
-debian/nova-volume.conf etc/nova
diff --git a/debian/pycompat b/debian/pycompat
deleted file mode 100644
index 0cfbf0888..000000000
--- a/debian/pycompat
+++ /dev/null
@@ -1 +0,0 @@
-2
diff --git a/debian/pyversions b/debian/pyversions
deleted file mode 100644
index 0c043f18c..000000000
--- a/debian/pyversions
+++ /dev/null
@@ -1 +0,0 @@
-2.6-
diff --git a/debian/rules b/debian/rules
deleted file mode 100755
index 2d33f6ac8..000000000
--- a/debian/rules
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/usr/bin/make -f
-
-%:
- dh $@
diff --git a/docs/.gitignore b/doc/.gitignore
index 88f9974bd..88f9974bd 100644
--- a/docs/.gitignore
+++ b/doc/.gitignore
diff --git a/docs/_build/.gitignore b/doc/build/.gitignore
index 72e8ffc0d..72e8ffc0d 100644
--- a/docs/_build/.gitignore
+++ b/doc/build/.gitignore
diff --git a/doc/build/.placeholder b/doc/build/.placeholder
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/doc/build/.placeholder
diff --git a/docs/Makefile b/doc/source/Makefile
index b2f74e85a..b2f74e85a 100644
--- a/docs/Makefile
+++ b/doc/source/Makefile
diff --git a/doc/source/_static/.gitignore b/doc/source/_static/.gitignore
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/doc/source/_static/.gitignore
diff --git a/doc/source/_static/.placeholder b/doc/source/_static/.placeholder
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/doc/source/_static/.placeholder
diff --git a/doc/source/_templates/.gitignore b/doc/source/_templates/.gitignore
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/doc/source/_templates/.gitignore
diff --git a/doc/source/_templates/.placeholder b/doc/source/_templates/.placeholder
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/doc/source/_templates/.placeholder
diff --git a/docs/architecture.rst b/doc/source/architecture.rst
index 11813d2c8..11813d2c8 100644
--- a/docs/architecture.rst
+++ b/doc/source/architecture.rst
diff --git a/docs/auth.rst b/doc/source/auth.rst
index 70aca704a..70aca704a 100644
--- a/docs/auth.rst
+++ b/doc/source/auth.rst
diff --git a/docs/binaries.rst b/doc/source/binaries.rst
index 90a9581f7..90a9581f7 100644
--- a/docs/binaries.rst
+++ b/doc/source/binaries.rst
diff --git a/docs/compute.rst b/doc/source/compute.rst
index 5b08dbd5b..5b08dbd5b 100644
--- a/docs/compute.rst
+++ b/doc/source/compute.rst
diff --git a/docs/conf.py b/doc/source/conf.py
index fb3fd1a30..349d23af2 100644
--- a/docs/conf.py
+++ b/doc/source/conf.py
@@ -16,8 +16,7 @@ import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
-sys.path.append(os.path.abspath('/Users/jmckenty/Projects/cc'))
-sys.path.append([os.path.abspath('../nova'),os.path.abspath('../'),os.path.abspath('../vendor')])
+sys.path.append([os.path.abspath('../nova'), os.path.abspath('..'), os.path.abspath('../bin')])
# -- General configuration -----------------------------------------------------
@@ -25,7 +24,6 @@ sys.path.append([os.path.abspath('../nova'),os.path.abspath('../'),os.path.abspa
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig']
-#sphinx_to_github = False
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
@@ -49,9 +47,9 @@ copyright = u'2010, United States Government as represented by the Administrator
# built documents.
#
# The short X.Y version.
-version = '0.42'
+version = '0.9'
# The full version, including alpha/beta/rc tags.
-release = '0.42'
+release = '0.9.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@@ -68,7 +66,7 @@ release = '0.42'
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
-exclude_trees = ['_build']
+exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
@@ -176,7 +174,7 @@ htmlhelp_basename = 'novadoc'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
- ('index', 'nova.tex', u'nova Documentation',
+ ('index', 'Nova.tex', u'Nova Documentation',
u'Anso Labs, LLC', 'manual'),
]
@@ -199,4 +197,6 @@ latex_documents = [
# Example configuration for intersphinx: refer to the Python standard library.
-intersphinx_mapping = {'http://docs.python.org/': None}
+intersphinx_mapping = {'python': ('http://docs.python.org/', None),
+ 'swift': ('http://swift.openstack.org', None)}
+
diff --git a/docs/endpoint.rst b/doc/source/endpoint.rst
index 399df4161..399df4161 100644
--- a/docs/endpoint.rst
+++ b/doc/source/endpoint.rst
diff --git a/docs/fakes.rst b/doc/source/fakes.rst
index bea8bc4e9..a993fb4c8 100644
--- a/docs/fakes.rst
+++ b/doc/source/fakes.rst
@@ -18,10 +18,10 @@
Nova Fakes
==========
-The :mod:`fakevirt` Module
+The :mod:`virt.fake` Module
--------------------------
-.. automodule:: nova.fakevirt
+.. automodule:: nova.virt.fake
:members:
:undoc-members:
:show-inheritance:
diff --git a/docs/getting.started.rst b/doc/source/getting.started.rst
index 3eadd0882..3eadd0882 100644
--- a/docs/getting.started.rst
+++ b/doc/source/getting.started.rst
diff --git a/docs/index.rst b/doc/source/index.rst
index ef2e8f63e..6627fe066 100644
--- a/docs/index.rst
+++ b/doc/source/index.rst
@@ -43,7 +43,6 @@ Contents:
nova
fakes
binaries
- todo
modules
packages
diff --git a/docs/modules.rst b/doc/source/modules.rst
index 82c61f008..82c61f008 100644
--- a/docs/modules.rst
+++ b/doc/source/modules.rst
diff --git a/docs/network.rst b/doc/source/network.rst
index 357a0517f..357a0517f 100644
--- a/docs/network.rst
+++ b/doc/source/network.rst
diff --git a/docs/nova.rst b/doc/source/nova.rst
index 4b9c44a5f..4b9c44a5f 100644
--- a/docs/nova.rst
+++ b/doc/source/nova.rst
diff --git a/docs/objectstore.rst b/doc/source/objectstore.rst
index 6b8d293f4..6b8d293f4 100644
--- a/docs/objectstore.rst
+++ b/doc/source/objectstore.rst
diff --git a/docs/packages.rst b/doc/source/packages.rst
index 6029ad7d7..6029ad7d7 100644
--- a/docs/packages.rst
+++ b/doc/source/packages.rst
diff --git a/docs/storage.rst b/doc/source/storage.rst
index f77e5f0e5..f77e5f0e5 100644
--- a/docs/storage.rst
+++ b/doc/source/storage.rst
diff --git a/docs/volume.rst b/doc/source/volume.rst
index 619968458..619968458 100644
--- a/docs/volume.rst
+++ b/doc/source/volume.rst
diff --git a/nova/adminclient.py b/nova/adminclient.py
index db392feb1..fceeac274 100644
--- a/nova/adminclient.py
+++ b/nova/adminclient.py
@@ -23,6 +23,7 @@ import base64
import boto
from boto.ec2.regioninfo import RegionInfo
+
class UserInfo(object):
"""
Information about a Nova user, as parsed through SAX
@@ -56,6 +57,64 @@ class UserInfo(object):
elif name == 'secretkey':
self.secretkey = str(value)
+class ProjectInfo(object):
+ """
+ Information about a Nova project, as parsed through SAX
+ Fields include:
+ projectname
+ description
+ projectManagerId
+ memberIds
+ """
+
+ def __init__(self, connection=None):
+ self.connection = connection
+ self.projectname = None
+ self.description = None
+ self.projectManagerId = None
+ self.memberIds = []
+
+ def __repr__(self):
+ return 'ProjectInfo:%s' % self.projectname
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'projectname':
+ self.projectname = value
+ elif name == 'description':
+ self.description = value
+ elif name == 'projectManagerId':
+ self.projectManagerId = value
+ elif name == 'memberId':
+ self.memberIds.append(value)
+ else:
+ setattr(self, name, str(value))
+
+class ProjectMember(object):
+ """
+ Information about a Nova project member, as parsed through SAX.
+ Fields include:
+ memberId
+ """
+ def __init__(self, connection=None):
+ self.connection = connection
+ self.memberId = None
+
+ def __repr__(self):
+ return 'ProjectMember:%s' % self.memberId
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'member':
+ self.memberId = value
+ else:
+ setattr(self, name, str(value))
+
+
class HostInfo(object):
"""
Information about a Nova Host, as parsed through SAX:
@@ -99,20 +158,20 @@ class NovaAdminClient(object):
**kwargs)
self.apiconn.APIVersion = 'nova'
- def connection_for(self, username, **kwargs):
+ def connection_for(self, username, project, **kwargs):
"""
Returns a boto ec2 connection for the given username.
"""
user = self.get_user(username)
+ access_key = '%s:%s' % (user.accesskey, project)
return boto.connect_ec2(
- aws_access_key_id=user.accesskey,
+ aws_access_key_id=access_key,
aws_secret_access_key=user.secretkey,
is_secure=False,
region=RegionInfo(None, self.region, self.clc_ip),
port=8773,
path='/services/Cloud',
- **kwargs
- )
+ **kwargs)
def get_users(self):
""" grabs the list of all users """
@@ -137,6 +196,102 @@ class NovaAdminClient(object):
""" deletes a user """
return self.apiconn.get_object('DeregisterUser', {'Name': username}, UserInfo)
+ def add_user_role(self, user, role, project=None):
+ """
+ Add a role to a user either globally or for a specific project.
+ """
+ return self.modify_user_role(user, role, project=project,
+ operation='add')
+
+ def remove_user_role(self, user, role, project=None):
+ """
+ Remove a role from a user either globally or for a specific project.
+ """
+ return self.modify_user_role(user, role, project=project,
+ operation='remove')
+
+ def modify_user_role(self, user, role, project=None, operation='add',
+ **kwargs):
+ """
+ Add or remove a role for a user and project.
+ """
+ params = {'User': user,
+ 'Role': role,
+ 'Project': project,
+ 'Operation': operation}
+ return self.apiconn.get_status('ModifyUserRole', params)
+
+ def get_projects(self, user=None):
+ """
+ Returns a list of all projects.
+ """
+ if user:
+ params = {'User': user}
+ else:
+ params = {}
+ return self.apiconn.get_list('DescribeProjects',
+ params,
+ [('item', ProjectInfo)])
+
+ def get_project(self, name):
+ """
+ Returns a single project with the specified name.
+ """
+ project = self.apiconn.get_object('DescribeProject',
+ {'Name': name},
+ ProjectInfo)
+
+ if project.projectname != None:
+ return project
+
+ def create_project(self, projectname, manager_user, description=None,
+ member_users=None):
+ """
+ Creates a new project.
+ """
+ params = {'Name': projectname,
+ 'ManagerUser': manager_user,
+ 'Description': description,
+ 'MemberUsers': member_users}
+ return self.apiconn.get_object('RegisterProject', params, ProjectInfo)
+
+ def delete_project(self, projectname):
+ """
+ Permanently deletes the specified project.
+ """
+ return self.apiconn.get_object('DeregisterProject',
+ {'Name': projectname},
+ ProjectInfo)
+
+ def get_project_members(self, name):
+ """
+ Returns a list of members of a project.
+ """
+ return self.apiconn.get_list('DescribeProjectMembers',
+ {'Name': name},
+ [('item', ProjectMember)])
+
+ def add_project_member(self, user, project):
+ """
+ Adds a user to a project.
+ """
+ return self.modify_project_member(user, project, operation='add')
+
+ def remove_project_member(self, user, project):
+ """
+ Removes a user from a project.
+ """
+ return self.modify_project_member(user, project, operation='remove')
+
+ def modify_project_member(self, user, project, operation='add'):
+ """
+ Adds or removes a user from a project.
+ """
+ params = {'User': user,
+ 'Project': project,
+ 'Operation': operation}
+ return self.apiconn.get_status('ModifyProjectMember', params)
+
def get_zip(self, username):
""" returns the content of a zip file containing novarc and access credentials. """
return self.apiconn.get_object('GenerateX509ForUser', {'Name': username}, UserInfo).file
diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py
new file mode 100644
index 000000000..055e8332b
--- /dev/null
+++ b/nova/auth/ldapdriver.py
@@ -0,0 +1,484 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Auth driver for ldap. Includes FakeLdapDriver.
+
+It should be easy to create a replacement for this driver supporting
+other backends by creating another class that exposes the same
+public methods.
+"""
+
+import logging
+import sys
+
+from nova import exception
+from nova import flags
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('ldap_url', 'ldap://localhost',
+ 'Point this at your ldap server')
+flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password')
+flags.DEFINE_string('ldap_user_dn', 'cn=Manager,dc=example,dc=com',
+ 'DN of admin user')
+flags.DEFINE_string('ldap_user_unit', 'Users', 'OID for Users')
+flags.DEFINE_string('ldap_user_subtree', 'ou=Users,dc=example,dc=com',
+ 'OU for Users')
+flags.DEFINE_string('ldap_project_subtree', 'ou=Groups,dc=example,dc=com',
+ 'OU for Projects')
+flags.DEFINE_string('role_project_subtree', 'ou=Groups,dc=example,dc=com',
+ 'OU for Roles')
+
+# NOTE(vish): mapping with these flags is necessary because we're going
+# to tie in to an existing ldap schema
+flags.DEFINE_string('ldap_cloudadmin',
+ 'cn=cloudadmins,ou=Groups,dc=example,dc=com', 'cn for Cloud Admins')
+flags.DEFINE_string('ldap_itsec',
+ 'cn=itsec,ou=Groups,dc=example,dc=com', 'cn for ItSec')
+flags.DEFINE_string('ldap_sysadmin',
+ 'cn=sysadmins,ou=Groups,dc=example,dc=com', 'cn for Sysadmins')
+flags.DEFINE_string('ldap_netadmin',
+ 'cn=netadmins,ou=Groups,dc=example,dc=com', 'cn for NetAdmins')
+flags.DEFINE_string('ldap_developer',
+ 'cn=developers,ou=Groups,dc=example,dc=com', 'cn for Developers')
+
+
+# TODO(vish): make an abstract base class with the same public methods
+# to define a set interface for AuthDrivers. I'm delaying
+# creating this now because I'm expecting an auth refactor
+# in which we may want to change the interface a bit more.
+class LdapDriver(object):
+ """Ldap Auth driver
+
+ Defines enter and exit and therefore supports the with/as syntax.
+ """
+ def __init__(self):
+ """Imports the LDAP module"""
+ self.ldap = __import__('ldap')
+
+ def __enter__(self):
+ """Creates the connection to LDAP"""
+ self.conn = self.ldap.initialize(FLAGS.ldap_url)
+ self.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password)
+ return self
+
+ def __exit__(self, type, value, traceback):
+ """Destroys the connection to LDAP"""
+ self.conn.unbind_s()
+ return False
+
+ def get_user(self, uid):
+ """Retrieve user by id"""
+ attr = self.__find_object(self.__uid_to_dn(uid),
+ '(objectclass=novaUser)')
+ return self.__to_user(attr)
+
+ def get_user_from_access_key(self, access):
+ """Retrieve user by access key"""
+ query = '(accessKey=%s)' % access
+ dn = FLAGS.ldap_user_subtree
+ return self.__to_user(self.__find_object(dn, query))
+
+ def get_key_pair(self, uid, key_name):
+ """Retrieve key pair by uid and key name"""
+ dn = 'cn=%s,%s' % (key_name,
+ self.__uid_to_dn(uid))
+ attr = self.__find_object(dn, '(objectclass=novaKeyPair)')
+ return self.__to_key_pair(uid, attr)
+
+ def get_project(self, pid):
+ """Retrieve project by id"""
+ dn = 'cn=%s,%s' % (pid,
+ FLAGS.ldap_project_subtree)
+ attr = self.__find_object(dn, '(objectclass=novaProject)')
+ return self.__to_project(attr)
+
+ def get_users(self):
+ """Retrieve list of users"""
+ attrs = self.__find_objects(FLAGS.ldap_user_subtree,
+ '(objectclass=novaUser)')
+ return [self.__to_user(attr) for attr in attrs]
+
+ def get_key_pairs(self, uid):
+ """Retrieve list of key pairs"""
+ attrs = self.__find_objects(self.__uid_to_dn(uid),
+ '(objectclass=novaKeyPair)')
+ return [self.__to_key_pair(uid, attr) for attr in attrs]
+
+ def get_projects(self, uid=None):
+ """Retrieve list of projects"""
+ filter = '(objectclass=novaProject)'
+ if uid:
+ filter = "(&%s(member=%s))" % (filter, self.__uid_to_dn(uid))
+ attrs = self.__find_objects(FLAGS.ldap_project_subtree,
+ filter)
+ return [self.__to_project(attr) for attr in attrs]
+
+ def create_user(self, name, access_key, secret_key, is_admin):
+ """Create a user"""
+ if self.__user_exists(name):
+ raise exception.Duplicate("LDAP user %s already exists" % name)
+ attr = [
+ ('objectclass', ['person',
+ 'organizationalPerson',
+ 'inetOrgPerson',
+ 'novaUser']),
+ ('ou', [FLAGS.ldap_user_unit]),
+ ('uid', [name]),
+ ('sn', [name]),
+ ('cn', [name]),
+ ('secretKey', [secret_key]),
+ ('accessKey', [access_key]),
+ ('isAdmin', [str(is_admin).upper()]),
+ ]
+ self.conn.add_s(self.__uid_to_dn(name), attr)
+ return self.__to_user(dict(attr))
+
+ def create_key_pair(self, uid, key_name, public_key, fingerprint):
+ """Create a key pair"""
+ # TODO(vish): possibly refactor this to store keys in their own ou
+ # and put dn reference in the user object
+ attr = [
+ ('objectclass', ['novaKeyPair']),
+ ('cn', [key_name]),
+ ('sshPublicKey', [public_key]),
+ ('keyFingerprint', [fingerprint]),
+ ]
+ self.conn.add_s('cn=%s,%s' % (key_name,
+ self.__uid_to_dn(uid)),
+ attr)
+ return self.__to_key_pair(uid, dict(attr))
+
+ def create_project(self, name, manager_uid,
+ description=None, member_uids=None):
+ """Create a project"""
+ if self.__project_exists(name):
+ raise exception.Duplicate("Project can't be created because "
+ "project %s already exists" % name)
+ if not self.__user_exists(manager_uid):
+ raise exception.NotFound("Project can't be created because "
+ "manager %s doesn't exist" % manager_uid)
+ manager_dn = self.__uid_to_dn(manager_uid)
+ # description is a required attribute
+ if description is None:
+ description = name
+ members = []
+ if member_uids != None:
+ for member_uid in member_uids:
+ if not self.__user_exists(member_uid):
+ raise exception.NotFound("Project can't be created "
+ "because user %s doesn't exist" % member_uid)
+ members.append(self.__uid_to_dn(member_uid))
+ # always add the manager as a member because members is required
+ if not manager_dn in members:
+ members.append(manager_dn)
+ attr = [
+ ('objectclass', ['novaProject']),
+ ('cn', [name]),
+ ('description', [description]),
+ ('projectManager', [manager_dn]),
+ ('member', members)
+ ]
+ self.conn.add_s('cn=%s,%s' % (name, FLAGS.ldap_project_subtree), attr)
+ return self.__to_project(dict(attr))
+
+ def add_to_project(self, uid, project_id):
+ """Add user to project"""
+ dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
+ return self.__add_to_group(uid, dn)
+
+ def remove_from_project(self, uid, project_id):
+ """Remove user from project"""
+ dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
+ return self.__remove_from_group(uid, dn)
+
+ def is_in_project(self, uid, project_id):
+ """Check if user is in project"""
+ dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
+ return self.__is_in_group(uid, dn)
+
+ def has_role(self, uid, role, project_id=None):
+ """Check if user has role
+
+ If project is specified, it checks for local role, otherwise it
+ checks for global role
+ """
+ role_dn = self.__role_to_dn(role, project_id)
+ return self.__is_in_group(uid, role_dn)
+
+ def add_role(self, uid, role, project_id=None):
+ """Add role for user (or user and project)"""
+ role_dn = self.__role_to_dn(role, project_id)
+ if not self.__group_exists(role_dn):
+ # create the role if it doesn't exist
+ description = '%s role for %s' % (role, project_id)
+ self.__create_group(role_dn, role, uid, description)
+ else:
+ return self.__add_to_group(uid, role_dn)
+
+ def remove_role(self, uid, role, project_id=None):
+ """Remove role for user (or user and project)"""
+ role_dn = self.__role_to_dn(role, project_id)
+ return self.__remove_from_group(uid, role_dn)
+
+ def delete_user(self, uid):
+ """Delete a user"""
+ if not self.__user_exists(uid):
+ raise exception.NotFound("User %s doesn't exist" % uid)
+ self.__delete_key_pairs(uid)
+ self.__remove_from_all(uid)
+ self.conn.delete_s('uid=%s,%s' % (uid,
+ FLAGS.ldap_user_subtree))
+
+ def delete_key_pair(self, uid, key_name):
+ """Delete a key pair"""
+ if not self.__key_pair_exists(uid, key_name):
+ raise exception.NotFound("Key Pair %s doesn't exist for user %s" %
+ (key_name, uid))
+ self.conn.delete_s('cn=%s,uid=%s,%s' % (key_name, uid,
+ FLAGS.ldap_user_subtree))
+
+ def delete_project(self, name):
+ """Delete a project"""
+ project_dn = 'cn=%s,%s' % (name, FLAGS.ldap_project_subtree)
+ self.__delete_roles(project_dn)
+ self.__delete_group(project_dn)
+
+ def __user_exists(self, name):
+ """Check if user exists"""
+ return self.get_user(name) != None
+
+ def __key_pair_exists(self, uid, key_name):
+ """Check if key pair exists"""
+ return self.get_user(uid) != None
+ return self.get_key_pair(uid, key_name) != None
+
+ def __project_exists(self, name):
+ """Check if project exists"""
+ return self.get_project(name) != None
+
+ def __find_object(self, dn, query = None):
+ """Find an object by dn and query"""
+ objects = self.__find_objects(dn, query)
+ if len(objects) == 0:
+ return None
+ return objects[0]
+
+ def __find_dns(self, dn, query=None):
+ """Find dns by query"""
+ try:
+ res = self.conn.search_s(dn, self.ldap.SCOPE_SUBTREE, query)
+ except self.ldap.NO_SUCH_OBJECT:
+ return []
+ # just return the DNs
+ return [dn for dn, attributes in res]
+
+ def __find_objects(self, dn, query = None):
+ """Find objects by query"""
+ try:
+ res = self.conn.search_s(dn, self.ldap.SCOPE_SUBTREE, query)
+ except self.ldap.NO_SUCH_OBJECT:
+ return []
+ # just return the attributes
+ return [attributes for dn, attributes in res]
+
+ def __find_role_dns(self, tree):
+ """Find dns of role objects in given tree"""
+ return self.__find_dns(tree,
+ '(&(objectclass=groupOfNames)(!(objectclass=novaProject)))')
+
+ def __find_group_dns_with_member(self, tree, uid):
+ """Find dns of group objects in a given tree that contain member"""
+ dns = self.__find_dns(tree,
+ '(&(objectclass=groupOfNames)(member=%s))' %
+ self.__uid_to_dn(uid))
+ return dns
+
+ def __group_exists(self, dn):
+ """Check if group exists"""
+ return self.__find_object(dn, '(objectclass=groupOfNames)') != None
+
+ def __delete_key_pairs(self, uid):
+ """Delete all key pairs for user"""
+ keys = self.get_key_pairs(uid)
+ if keys != None:
+ for key in keys:
+ self.delete_key_pair(uid, key['name'])
+
+ def __role_to_dn(self, role, project_id=None):
+ """Convert role to corresponding dn"""
+ if project_id == None:
+ return FLAGS.__getitem__("ldap_%s" % role).value
+ else:
+ return 'cn=%s,cn=%s,%s' % (role,
+ project_id,
+ FLAGS.ldap_project_subtree)
+
+ def __create_group(self, group_dn, name, uid,
+ description, member_uids = None):
+ """Create a group"""
+ if self.__group_exists(group_dn):
+ raise exception.Duplicate("Group can't be created because "
+ "group %s already exists" % name)
+ members = []
+ if member_uids != None:
+ for member_uid in member_uids:
+ if not self.__user_exists(member_uid):
+ raise exception.NotFound("Group can't be created "
+ "because user %s doesn't exist" % member_uid)
+ members.append(self.__uid_to_dn(member_uid))
+ dn = self.__uid_to_dn(uid)
+ if not dn in members:
+ members.append(dn)
+ attr = [
+ ('objectclass', ['groupOfNames']),
+ ('cn', [name]),
+ ('description', [description]),
+ ('member', members)
+ ]
+ self.conn.add_s(group_dn, attr)
+
+ def __is_in_group(self, uid, group_dn):
+ """Check if user is in group"""
+ if not self.__user_exists(uid):
+ raise exception.NotFound("User %s can't be searched in group "
+ "becuase the user doesn't exist" % (uid,))
+ if not self.__group_exists(group_dn):
+ return False
+ res = self.__find_object(group_dn,
+ '(member=%s)' % self.__uid_to_dn(uid))
+ return res != None
+
+ def __add_to_group(self, uid, group_dn):
+ """Add user to group"""
+ if not self.__user_exists(uid):
+ raise exception.NotFound("User %s can't be added to the group "
+ "becuase the user doesn't exist" % (uid,))
+ if not self.__group_exists(group_dn):
+ raise exception.NotFound("The group at dn %s doesn't exist" %
+ (group_dn,))
+ if self.__is_in_group(uid, group_dn):
+ raise exception.Duplicate("User %s is already a member of "
+ "the group %s" % (uid, group_dn))
+ attr = [
+ (self.ldap.MOD_ADD, 'member', self.__uid_to_dn(uid))
+ ]
+ self.conn.modify_s(group_dn, attr)
+
+ def __remove_from_group(self, uid, group_dn):
+ """Remove user from group"""
+ if not self.__group_exists(group_dn):
+ raise exception.NotFound("The group at dn %s doesn't exist" %
+ (group_dn,))
+ if not self.__user_exists(uid):
+ raise exception.NotFound("User %s can't be removed from the "
+ "group because the user doesn't exist" % (uid,))
+ if not self.__is_in_group(uid, group_dn):
+ raise exception.NotFound("User %s is not a member of the group" %
+ (uid,))
+ self.__safe_remove_from_group(uid, group_dn)
+
+ def __safe_remove_from_group(self, uid, group_dn):
+ """Remove user from group, deleting group if user is last member"""
+ # FIXME(vish): what if deleted user is a project manager?
+ attr = [(self.ldap.MOD_DELETE, 'member', self.__uid_to_dn(uid))]
+ try:
+ self.conn.modify_s(group_dn, attr)
+ except self.ldap.OBJECT_CLASS_VIOLATION:
+ logging.debug("Attempted to remove the last member of a group. "
+ "Deleting the group at %s instead." % group_dn )
+ self.__delete_group(group_dn)
+
+ def __remove_from_all(self, uid):
+ """Remove user from all roles and projects"""
+ if not self.__user_exists(uid):
+ raise exception.NotFound("User %s can't be removed from all "
+ "because the user doesn't exist" % (uid,))
+ dn = self.__uid_to_dn(uid)
+ role_dns = self.__find_group_dns_with_member(
+ FLAGS.role_project_subtree, uid)
+ for role_dn in role_dns:
+ self.__safe_remove_from_group(uid, role_dn)
+ project_dns = self.__find_group_dns_with_member(
+ FLAGS.ldap_project_subtree, uid)
+ for project_dn in project_dns:
+ self.__safe_remove_from_group(uid, role_dn)
+
+ def __delete_group(self, group_dn):
+ """Delete Group"""
+ if not self.__group_exists(group_dn):
+ raise exception.NotFound("Group at dn %s doesn't exist" % group_dn)
+ self.conn.delete_s(group_dn)
+
+ def __delete_roles(self, project_dn):
+ """Delete all roles for project"""
+ for role_dn in self.__find_role_dns(project_dn):
+ self.__delete_group(role_dn)
+
+ def __to_user(self, attr):
+ """Convert ldap attributes to User object"""
+ if attr == None:
+ return None
+ return {
+ 'id': attr['uid'][0],
+ 'name': attr['cn'][0],
+ 'access': attr['accessKey'][0],
+ 'secret': attr['secretKey'][0],
+ 'admin': (attr['isAdmin'][0] == 'TRUE')
+ }
+
+ def __to_key_pair(self, owner, attr):
+ """Convert ldap attributes to KeyPair object"""
+ if attr == None:
+ return None
+ return {
+ 'id': attr['cn'][0],
+ 'name': attr['cn'][0],
+ 'owner_id': owner,
+ 'public_key': attr['sshPublicKey'][0],
+ 'fingerprint': attr['keyFingerprint'][0],
+ }
+
+ def __to_project(self, attr):
+ """Convert ldap attributes to Project object"""
+ if attr == None:
+ return None
+ member_dns = attr.get('member', [])
+ return {
+ 'id': attr['cn'][0],
+ 'name': attr['cn'][0],
+ 'project_manager_id': self.__dn_to_uid(attr['projectManager'][0]),
+ 'description': attr.get('description', [None])[0],
+ 'member_ids': [self.__dn_to_uid(x) for x in member_dns]
+ }
+
+ def __dn_to_uid(self, dn):
+ """Convert user dn to uid"""
+ return dn.split(',')[0].split('=')[1]
+
+ def __uid_to_dn(self, dn):
+ """Convert uid to dn"""
+ return 'uid=%s,%s' % (dn, FLAGS.ldap_user_subtree)
+
+
+class FakeLdapDriver(LdapDriver):
+ """Fake Ldap Auth driver"""
+ def __init__(self):
+ __import__('nova.auth.fakeldap')
+ self.ldap = sys.modules['nova.auth.fakeldap']
+
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
new file mode 100644
index 000000000..2da53a736
--- /dev/null
+++ b/nova/auth/manager.py
@@ -0,0 +1,807 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Nova authentication management
+"""
+
+import logging
+import os
+import shutil
+import string
+import sys
+import tempfile
+import uuid
+import zipfile
+
+from nova import crypto
+from nova import datastore
+from nova import exception
+from nova import flags
+from nova import objectstore # for flags
+from nova import utils
+from nova.auth import ldapdriver # for flags
+from nova.auth import signer
+
+FLAGS = flags.FLAGS
+
+# NOTE(vish): a user with one of these roles will be a superuser and
+# have access to all api commands
+flags.DEFINE_list('superuser_roles', ['cloudadmin'],
+ 'Roles that ignore rbac checking completely')
+
+# NOTE(vish): a user with one of these roles will have it for every
+# project, even if he or she is not a member of the project
+flags.DEFINE_list('global_roles', ['cloudadmin', 'itsec'],
+ 'Roles that apply to all projects')
+
+
+flags.DEFINE_bool('use_vpn', True, 'Support per-project vpns')
+flags.DEFINE_string('credentials_template',
+ utils.abspath('auth/novarc.template'),
+ 'Template for creating users rc file')
+flags.DEFINE_string('vpn_client_template',
+ utils.abspath('cloudpipe/client.ovpn.template'),
+ 'Template for creating users vpn file')
+flags.DEFINE_string('credential_key_file', 'pk.pem',
+ 'Filename of private key in credentials zip')
+flags.DEFINE_string('credential_cert_file', 'cert.pem',
+ 'Filename of certificate in credentials zip')
+flags.DEFINE_string('credential_rc_file', 'novarc',
+ 'Filename of rc in credentials zip')
+
+flags.DEFINE_integer('vpn_start_port', 1000,
+ 'Start port for the cloudpipe VPN servers')
+flags.DEFINE_integer('vpn_end_port', 2000,
+ 'End port for the cloudpipe VPN servers')
+
+flags.DEFINE_string('credential_cert_subject',
+ '/C=US/ST=California/L=MountainView/O=AnsoLabs/'
+ 'OU=NovaDev/CN=%s-%s',
+ 'Subject for certificate for users')
+
+flags.DEFINE_string('vpn_ip', '127.0.0.1',
+ 'Public IP for the cloudpipe VPN servers')
+
+flags.DEFINE_string('auth_driver', 'nova.auth.ldapdriver.FakeLdapDriver',
+ 'Driver that auth manager uses')
+
+class AuthBase(object):
+ """Base class for objects relating to auth
+
+ Objects derived from this class should be stupid data objects with
+ an id member. They may optionally contain methods that delegate to
+ AuthManager, but should not implement logic themselves.
+ """
+ @classmethod
+ def safe_id(cls, obj):
+ """Safe get object id
+
+ This method will return the id of the object if the object
+ is of this class, otherwise it will return the original object.
+ This allows methods to accept objects or ids as paramaters.
+
+ """
+ if isinstance(obj, cls):
+ return obj.id
+ else:
+ return obj
+
+
+class User(AuthBase):
+ """Object representing a user"""
+ def __init__(self, id, name, access, secret, admin):
+ self.id = id
+ self.name = name
+ self.access = access
+ self.secret = secret
+ self.admin = admin
+
+ def is_superuser(self):
+ return AuthManager().is_superuser(self)
+
+ def is_admin(self):
+ return AuthManager().is_admin(self)
+
+ def has_role(self, role):
+ return AuthManager().has_role(self, role)
+
+ def add_role(self, role):
+ return AuthManager().add_role(self, role)
+
+ def remove_role(self, role):
+ return AuthManager().remove_role(self, role)
+
+ def is_project_member(self, project):
+ return AuthManager().is_project_member(self, project)
+
+ def is_project_manager(self, project):
+ return AuthManager().is_project_manager(self, project)
+
+ def generate_key_pair(self, name):
+ return AuthManager().generate_key_pair(self.id, name)
+
+ def create_key_pair(self, name, public_key, fingerprint):
+ return AuthManager().create_key_pair(self.id,
+ name,
+ public_key,
+ fingerprint)
+
+ def get_key_pair(self, name):
+ return AuthManager().get_key_pair(self.id, name)
+
+ def delete_key_pair(self, name):
+ return AuthManager().delete_key_pair(self.id, name)
+
+ def get_key_pairs(self):
+ return AuthManager().get_key_pairs(self.id)
+
+ def __repr__(self):
+ return "User('%s', '%s', '%s', '%s', %s)" % (self.id,
+ self.name,
+ self.access,
+ self.secret,
+ self.admin)
+
+
+class KeyPair(AuthBase):
+ """Represents an ssh key returned from the datastore
+
+ Even though this object is named KeyPair, only the public key and
+ fingerprint is stored. The user's private key is not saved.
+ """
+ def __init__(self, id, name, owner_id, public_key, fingerprint):
+ self.id = id
+ self.name = name
+ self.owner_id = owner_id
+ self.public_key = public_key
+ self.fingerprint = fingerprint
+
+ def __repr__(self):
+ return "KeyPair('%s', '%s', '%s', '%s', '%s')" % (self.id,
+ self.name,
+ self.owner_id,
+ self.public_key,
+ self.fingerprint)
+
+
+class Project(AuthBase):
+ """Represents a Project returned from the datastore"""
+ def __init__(self, id, name, project_manager_id, description, member_ids):
+ self.id = id
+ self.name = name
+ self.project_manager_id = project_manager_id
+ self.description = description
+ self.member_ids = member_ids
+
+ @property
+ def project_manager(self):
+ return AuthManager().get_user(self.project_manager_id)
+
+ @property
+ def vpn_ip(self):
+ ip, port = AuthManager().get_project_vpn_data(self)
+ return ip
+
+ @property
+ def vpn_port(self):
+ ip, port = AuthManager().get_project_vpn_data(self)
+ return port
+
+ def has_manager(self, user):
+ return AuthManager().is_project_manager(user, self)
+
+ def has_member(self, user):
+ return AuthManager().is_project_member(user, self)
+
+ def add_role(self, user, role):
+ return AuthManager().add_role(user, role, self)
+
+ def remove_role(self, user, role):
+ return AuthManager().remove_role(user, role, self)
+
+ def has_role(self, user, role):
+ return AuthManager().has_role(user, role, self)
+
+ def get_credentials(self, user):
+ return AuthManager().get_credentials(user, self)
+
+ def __repr__(self):
+ return "Project('%s', '%s', '%s', '%s', %s)" % (self.id,
+ self.name,
+ self.project_manager_id,
+ self.description,
+ self.member_ids)
+
+
+class NoMorePorts(exception.Error):
+ pass
+
+
+class Vpn(datastore.BasicModel):
+ """Manages vpn ips and ports for projects"""
+ def __init__(self, project_id):
+ self.project_id = project_id
+ super(Vpn, self).__init__()
+
+ @property
+ def identifier(self):
+ """Identifier used for key in redis"""
+ return self.project_id
+
+ @classmethod
+ def create(cls, project_id):
+ """Creates a vpn for project
+
+ This method finds a free ip and port and stores the associated
+ values in the datastore.
+ """
+ # TODO(vish): get list of vpn ips from redis
+ port = cls.find_free_port_for_ip(FLAGS.vpn_ip)
+ vpn = cls(project_id)
+ # save ip for project
+ vpn['project'] = project_id
+ vpn['ip'] = FLAGS.vpn_ip
+ vpn['port'] = port
+ vpn.save()
+ return vpn
+
+ @classmethod
+ def find_free_port_for_ip(cls, ip):
+ """Finds a free port for a given ip from the redis set"""
+ # TODO(vish): these redis commands should be generalized and
+ # placed into a base class. Conceptually, it is
+ # similar to an association, but we are just
+ # storing a set of values instead of keys that
+ # should be turned into objects.
+ redis = datastore.Redis.instance()
+ key = 'ip:%s:ports' % ip
+ # TODO(vish): these ports should be allocated through an admin
+ # command instead of a flag
+ if (not redis.exists(key) and
+ not redis.exists(cls._redis_association_name('ip', ip))):
+ for i in range(FLAGS.vpn_start_port, FLAGS.vpn_end_port + 1):
+ redis.sadd(key, i)
+
+ port = redis.spop(key)
+ if not port:
+ raise NoMorePorts()
+ return port
+
+ @classmethod
+ def num_ports_for_ip(cls, ip):
+ """Calculates the number of free ports for a given ip"""
+ return datastore.Redis.instance().scard('ip:%s:ports' % ip)
+
+ @property
+ def ip(self):
+ """The ip assigned to the project"""
+ return self['ip']
+
+ @property
+ def port(self):
+ """The port assigned to the project"""
+ return int(self['port'])
+
+ def save(self):
+ """Saves the association to the given ip"""
+ self.associate_with('ip', self.ip)
+ super(Vpn, self).save()
+
+ def destroy(self):
+ """Cleans up datastore and adds port back to pool"""
+ self.unassociate_with('ip', self.ip)
+ datastore.Redis.instance().sadd('ip:%s:ports' % self.ip, self.port)
+ super(Vpn, self).destroy()
+
+
+class AuthManager(object):
+ """Manager Singleton for dealing with Users, Projects, and Keypairs
+
+ Methods accept objects or ids.
+
+ AuthManager uses a driver object to make requests to the data backend.
+ See ldapdriver for reference.
+
+ AuthManager also manages associated data related to Auth objects that
+ need to be more accessible, such as vpn ips and ports.
+ """
+ _instance=None
+ def __new__(cls, *args, **kwargs):
+ """Returns the AuthManager singleton"""
+ if not cls._instance:
+ cls._instance = super(AuthManager, cls).__new__(
+ cls, *args, **kwargs)
+ return cls._instance
+
+ def __init__(self, driver=None, *args, **kwargs):
+ """Inits the driver from parameter or flag
+
+ __init__ is run every time AuthManager() is called, so we only
+ reset the driver if it is not set or a new driver is specified.
+ """
+ if driver or not getattr(self, 'driver', None):
+ self.driver = utils.import_class(driver or FLAGS.auth_driver)
+
+ def authenticate(self, access, signature, params, verb='GET',
+ server_string='127.0.0.1:8773', path='/',
+ check_type='ec2', headers=None):
+ """Authenticates AWS request using access key and signature
+
+ If the project is not specified, attempts to authenticate to
+ a project with the same name as the user. This way, older tools
+ that have no project knowledge will still work.
+
+ @type access: str
+ @param access: Access key for user in the form "access:project".
+
+ @type signature: str
+ @param signature: Signature of the request.
+
+ @type params: list of str
+ @param params: Web paramaters used for the signature.
+
+ @type verb: str
+ @param verb: Web request verb ('GET' or 'POST').
+
+ @type server_string: str
+ @param server_string: Web request server string.
+
+ @type path: str
+ @param path: Web request path.
+
+ @type check_type: str
+ @param check_type: Type of signature to check. 'ec2' for EC2, 's3' for
+ S3. Any other value will cause signature not to be
+ checked.
+
+ @type headers: list
+ @param headers: HTTP headers passed with the request (only needed for
+ s3 signature checks)
+
+ @rtype: tuple (User, Project)
+ @return: User and project that the request represents.
+ """
+ # TODO(vish): check for valid timestamp
+ (access_key, sep, project_id) = access.partition(':')
+
+ logging.info('Looking up user: %r', access_key)
+ user = self.get_user_from_access_key(access_key)
+ logging.info('user: %r', user)
+ if user == None:
+ raise exception.NotFound('No user found for access key %s' %
+ access_key)
+
+ # NOTE(vish): if we stop using project name as id we need better
+ # logic to find a default project for user
+ if project_id is '':
+ project_id = user.name
+
+ project = self.get_project(project_id)
+ if project == None:
+ raise exception.NotFound('No project called %s could be found' %
+ project_id)
+ if not self.is_admin(user) and not self.is_project_member(user,
+ project):
+ raise exception.NotFound('User %s is not a member of project %s' %
+ (user.id, project.id))
+ if check_type == 's3':
+ expected_signature = signer.Signer(user.secret.encode()).s3_authorization(headers, verb, path)
+ logging.debug('user.secret: %s', user.secret)
+ logging.debug('expected_signature: %s', expected_signature)
+ logging.debug('signature: %s', signature)
+ if signature != expected_signature:
+ raise exception.NotAuthorized('Signature does not match')
+ elif check_type == 'ec2':
+ # NOTE(vish): hmac can't handle unicode, so encode ensures that
+ # secret isn't unicode
+ expected_signature = signer.Signer(user.secret.encode()).generate(
+ params, verb, server_string, path)
+ logging.debug('user.secret: %s', user.secret)
+ logging.debug('expected_signature: %s', expected_signature)
+ logging.debug('signature: %s', signature)
+ if signature != expected_signature:
+ raise exception.NotAuthorized('Signature does not match')
+ return (user, project)
+
+ def is_superuser(self, user):
+ """Checks for superuser status, allowing user to bypass rbac
+
+ @type user: User or uid
+ @param user: User to check.
+
+ @rtype: bool
+ @return: True for superuser.
+ """
+ if not isinstance(user, User):
+ user = self.get_user(user)
+ # NOTE(vish): admin flag on user represents superuser
+ if user.admin:
+ return True
+ for role in FLAGS.superuser_roles:
+ if self.has_role(user, role):
+ return True
+
+ def is_admin(self, user):
+ """Checks for admin status, allowing user to access all projects
+
+ @type user: User or uid
+ @param user: User to check.
+
+ @rtype: bool
+ @return: True for admin.
+ """
+ if not isinstance(user, User):
+ user = self.get_user(user)
+ if self.is_superuser(user):
+ return True
+ for role in FLAGS.global_roles:
+ if self.has_role(user, role):
+ return True
+
+ def has_role(self, user, role, project=None):
+ """Checks existence of role for user
+
+ If project is not specified, checks for a global role. If project
+ is specified, checks for the union of the global role and the
+ project role.
+
+ Role 'projectmanager' only works for projects and simply checks to
+ see if the user is the project_manager of the specified project. It
+ is the same as calling is_project_manager(user, project).
+
+ @type user: User or uid
+ @param user: User to check.
+
+ @type role: str
+ @param role: Role to check.
+
+ @type project: Project or project_id
+ @param project: Project in which to look for local role.
+
+ @rtype: bool
+ @return: True if the user has the role.
+ """
+ with self.driver() as drv:
+ if role == 'projectmanager':
+ if not project:
+ raise exception.Error("Must specify project")
+ return self.is_project_manager(user, project)
+
+ global_role = drv.has_role(User.safe_id(user),
+ role,
+ None)
+ if not global_role:
+ return global_role
+
+ if not project or role in FLAGS.global_roles:
+ return global_role
+
+ return drv.has_role(User.safe_id(user),
+ role,
+ Project.safe_id(project))
+
+ def add_role(self, user, role, project=None):
+ """Adds role for user
+
+ If project is not specified, adds a global role. If project
+ is specified, adds a local role.
+
+ The 'projectmanager' role is special and can't be added or removed.
+
+ @type user: User or uid
+ @param user: User to which to add role.
+
+ @type role: str
+ @param role: Role to add.
+
+ @type project: Project or project_id
+ @param project: Project in which to add local role.
+ """
+ with self.driver() as drv:
+ drv.add_role(User.safe_id(user), role, Project.safe_id(project))
+
+ def remove_role(self, user, role, project=None):
+ """Removes role for user
+
+ If project is not specified, removes a global role. If project
+ is specified, removes a local role.
+
+ The 'projectmanager' role is special and can't be added or removed.
+
+ @type user: User or uid
+ @param user: User from which to remove role.
+
+ @type role: str
+ @param role: Role to remove.
+
+ @type project: Project or project_id
+ @param project: Project in which to remove local role.
+ """
+ with self.driver() as drv:
+ drv.remove_role(User.safe_id(user), role, Project.safe_id(project))
+
+ def get_project(self, pid):
+ """Get project object by id"""
+ with self.driver() as drv:
+ project_dict = drv.get_project(pid)
+ if project_dict:
+ return Project(**project_dict)
+
+ def get_projects(self, user=None):
+ """Retrieves list of projects, optionally filtered by user"""
+ with self.driver() as drv:
+ project_list = drv.get_projects(User.safe_id(user))
+ if not project_list:
+ return []
+ return [Project(**project_dict) for project_dict in project_list]
+
+    def create_project(self, name, manager_user,
+                       description=None, member_users=None):
+        """Create a project
+
+        @type name: str
+        @param name: Name of the project to create. The name will also be
+        used as the project id.
+
+        @type manager_user: User or uid
+        @param manager_user: This user will be the project manager.
+
+        @type description: str
+        @param description: Description of the project. If no description is
+        specified, the name of the project will be used.
+
+        @type member_users: list of User or uid
+        @param member_users: Initial project members. The project manager will
+        always be added as a member, even if he isn't specified in this list.
+
+        @rtype: Project
+        @return: The new project.
+        """
+        if member_users:
+            member_users = [User.safe_id(u) for u in member_users]
+        with self.driver() as drv:
+            project_dict = drv.create_project(name,
+                                              User.safe_id(manager_user),
+                                              description,
+                                              member_users)
+        if project_dict:
+            if FLAGS.use_vpn:
+                Vpn.create(project_dict['id'])
+            return Project(**project_dict)
+
+ def add_to_project(self, user, project):
+ """Add user to project"""
+ with self.driver() as drv:
+ return drv.add_to_project(User.safe_id(user),
+ Project.safe_id(project))
+
+ def is_project_manager(self, user, project):
+ """Checks if user is project manager"""
+ if not isinstance(project, Project):
+ project = self.get_project(project)
+ return User.safe_id(user) == project.project_manager_id
+
+ def is_project_member(self, user, project):
+ """Checks to see if user is a member of project"""
+ if not isinstance(project, Project):
+ project = self.get_project(project)
+ return User.safe_id(user) in project.member_ids
+
+ def remove_from_project(self, user, project):
+ """Removes a user from a project"""
+ with self.driver() as drv:
+ return drv.remove_from_project(User.safe_id(user),
+ Project.safe_id(project))
+
+ def get_project_vpn_data(self, project):
+ """Gets vpn ip and port for project
+
+ @type project: Project or project_id
+ @param project: Project from which to get associated vpn data
+
+ @rvalue: tuple of (str, str)
+ @return: A tuple containing (ip, port) or None, None if vpn has
+ not been allocated for user.
+ """
+ vpn = Vpn.lookup(Project.safe_id(project))
+ if not vpn:
+ return None, None
+ return (vpn.ip, vpn.port)
+
+ def delete_project(self, project):
+ """Deletes a project"""
+ with self.driver() as drv:
+ return drv.delete_project(Project.safe_id(project))
+
+ def get_user(self, uid):
+ """Retrieves a user by id"""
+ with self.driver() as drv:
+ user_dict = drv.get_user(uid)
+ if user_dict:
+ return User(**user_dict)
+
+ def get_user_from_access_key(self, access_key):
+ """Retrieves a user by access key"""
+ with self.driver() as drv:
+ user_dict = drv.get_user_from_access_key(access_key)
+ if user_dict:
+ return User(**user_dict)
+
+ def get_users(self):
+ """Retrieves a list of all users"""
+ with self.driver() as drv:
+ user_list = drv.get_users()
+ if not user_list:
+ return []
+ return [User(**user_dict) for user_dict in user_list]
+
+    def create_user(self, name, access=None, secret=None, admin=False):
+        """Creates a user
+
+        @type name: str
+        @param name: Name of the user to create.
+
+        @type access: str
+        @param access: Access Key (defaults to a random uuid)
+
+        @type secret: str
+        @param secret: Secret Key (defaults to a random uuid)
+
+        @type admin: bool
+        @param admin: Whether to set the admin flag. The admin flag gives
+        superuser status regardless of roles specified for the user.
+
+        @rtype: User
+        @return: The new user.
+        """
+        if access == None: access = str(uuid.uuid4())
+        if secret == None: secret = str(uuid.uuid4())
+        with self.driver() as drv:
+            user_dict = drv.create_user(name, access, secret, admin)
+        if user_dict:
+            return User(**user_dict)
+
+ def delete_user(self, user):
+ """Deletes a user"""
+ with self.driver() as drv:
+ drv.delete_user(User.safe_id(user))
+
+ def generate_key_pair(self, user, key_name):
+ """Generates a key pair for a user
+
+ Generates a public and private key, stores the public key using the
+ key_name, and returns the private key and fingerprint.
+
+ @type user: User or uid
+ @param user: User for which to create key pair.
+
+ @type key_name: str
+ @param key_name: Name to use for the generated KeyPair.
+
+ @rtype: tuple (private_key, fingerprint)
+ @return: A tuple containing the private_key and fingerprint.
+ """
+ # NOTE(vish): generating key pair is slow so check for legal
+ # creation before creating keypair
+ uid = User.safe_id(user)
+ with self.driver() as drv:
+ if not drv.get_user(uid):
+ raise exception.NotFound("User %s doesn't exist" % user)
+ if drv.get_key_pair(uid, key_name):
+ raise exception.Duplicate("The keypair %s already exists"
+ % key_name)
+ private_key, public_key, fingerprint = crypto.generate_key_pair()
+ self.create_key_pair(uid, key_name, public_key, fingerprint)
+ return private_key, fingerprint
+
+ def create_key_pair(self, user, key_name, public_key, fingerprint):
+ """Creates a key pair for user"""
+ with self.driver() as drv:
+ kp_dict = drv.create_key_pair(User.safe_id(user),
+ key_name,
+ public_key,
+ fingerprint)
+ if kp_dict:
+ return KeyPair(**kp_dict)
+
+ def get_key_pair(self, user, key_name):
+ """Retrieves a key pair for user"""
+ with self.driver() as drv:
+ kp_dict = drv.get_key_pair(User.safe_id(user), key_name)
+ if kp_dict:
+ return KeyPair(**kp_dict)
+
+ def get_key_pairs(self, user):
+ """Retrieves all key pairs for user"""
+ with self.driver() as drv:
+ kp_list = drv.get_key_pairs(User.safe_id(user))
+ if not kp_list:
+ return []
+ return [KeyPair(**kp_dict) for kp_dict in kp_list]
+
+ def delete_key_pair(self, user, key_name):
+ """Deletes a key pair for user"""
+ with self.driver() as drv:
+ drv.delete_key_pair(User.safe_id(user), key_name)
+
+ def get_credentials(self, user, project=None):
+ """Get credential zip for user in project"""
+ if not isinstance(user, User):
+ user = self.get_user(user)
+ if project is None:
+ project = user.id
+ pid = Project.safe_id(project)
+ rc = self.__generate_rc(user.access, user.secret, pid)
+ private_key, signed_cert = self._generate_x509_cert(user.id, pid)
+
+ vpn = Vpn.lookup(pid)
+ if not vpn:
+ raise exception.Error("No vpn data allocated for project %s" %
+ project.name)
+ configfile = open(FLAGS.vpn_client_template,"r")
+ s = string.Template(configfile.read())
+ configfile.close()
+ config = s.substitute(keyfile=FLAGS.credential_key_file,
+ certfile=FLAGS.credential_cert_file,
+ ip=vpn.ip,
+ port=vpn.port)
+
+ tmpdir = tempfile.mkdtemp()
+ zf = os.path.join(tmpdir, "temp.zip")
+ zippy = zipfile.ZipFile(zf, 'w')
+ zippy.writestr(FLAGS.credential_rc_file, rc)
+ zippy.writestr(FLAGS.credential_key_file, private_key)
+ zippy.writestr(FLAGS.credential_cert_file, signed_cert)
+ zippy.writestr("nebula-client.conf", config)
+ zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(user.id))
+ zippy.close()
+ with open(zf, 'rb') as f:
+ buffer = f.read()
+
+ shutil.rmtree(tmpdir)
+ return buffer
+
+ def __generate_rc(self, access, secret, pid):
+ """Generate rc file for user"""
+ rc = open(FLAGS.credentials_template).read()
+ rc = rc % { 'access': access,
+ 'project': pid,
+ 'secret': secret,
+ 'ec2': FLAGS.ec2_url,
+ 's3': 'http://%s:%s' % (FLAGS.s3_host, FLAGS.s3_port),
+ 'nova': FLAGS.ca_file,
+ 'cert': FLAGS.credential_cert_file,
+ 'key': FLAGS.credential_key_file,
+ }
+ return rc
+
+ def _generate_x509_cert(self, uid, pid):
+ """Generate x509 cert for user"""
+ (private_key, csr) = crypto.generate_x509_cert(
+ self.__cert_subject(uid))
+ # TODO(joshua): This should be async call back to the cloud controller
+ signed_cert = crypto.sign_csr(csr, pid)
+ return (private_key, signed_cert)
+
+ def __cert_subject(self, uid):
+ """Helper to generate cert subject"""
+ return FLAGS.credential_cert_subject % (uid, utils.isotime())
diff --git a/nova/auth/rbac.py b/nova/auth/rbac.py
index 9e2bb830c..7fab9419f 100644
--- a/nova/auth/rbac.py
+++ b/nova/auth/rbac.py
@@ -17,7 +17,7 @@
# under the License.
from nova import exception
-from nova.auth import users
+from nova.auth import manager
def allow(*roles):
diff --git a/nova/auth/signer.py b/nova/auth/signer.py
index 83831bfac..7d7471575 100644
--- a/nova/auth/signer.py
+++ b/nova/auth/signer.py
@@ -48,6 +48,7 @@ import hashlib
import hmac
import logging
import urllib
+import boto.utils
from nova.exception import Error
@@ -59,6 +60,13 @@ class Signer(object):
if hashlib.sha256:
self.hmac_256 = hmac.new(secret_key, digestmod=hashlib.sha256)
+ def s3_authorization(self, headers, verb, path):
+ c_string = boto.utils.canonical_string(verb, path, headers)
+ hmac = self.hmac.copy()
+ hmac.update(c_string)
+ b64_hmac = base64.encodestring(hmac.digest()).strip()
+ return b64_hmac
+
def generate(self, params, verb, server_string, path):
if params['SignatureVersion'] == '0':
return self._calc_signature_0(params)
diff --git a/nova/auth/users.py b/nova/auth/users.py
deleted file mode 100644
index fc08dc34d..000000000
--- a/nova/auth/users.py
+++ /dev/null
@@ -1,974 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Nova users and user management, including RBAC hooks.
-"""
-
-import datetime
-import logging
-import os
-import shutil
-import signer
-import string
-import tempfile
-import uuid
-import zipfile
-
-try:
- import ldap
-except Exception, e:
- import fakeldap as ldap
-
-import fakeldap
-
-# TODO(termie): clean up these imports
-from nova import datastore
-from nova import exception
-from nova import flags
-from nova import crypto
-from nova import utils
-from nova import objectstore # for flags
-
-FLAGS = flags.FLAGS
-
-flags.DEFINE_string('ldap_url', 'ldap://localhost',
- 'Point this at your ldap server')
-flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password')
-flags.DEFINE_string('user_dn', 'cn=Manager,dc=example,dc=com',
- 'DN of admin user')
-flags.DEFINE_string('user_unit', 'Users', 'OID for Users')
-flags.DEFINE_string('user_ldap_subtree', 'ou=Users,dc=example,dc=com',
- 'OU for Users')
-flags.DEFINE_string('project_ldap_subtree', 'ou=Groups,dc=example,dc=com',
- 'OU for Projects')
-flags.DEFINE_string('role_ldap_subtree', 'ou=Groups,dc=example,dc=com',
- 'OU for Roles')
-
-# NOTE(vish): mapping with these flags is necessary because we're going
-# to tie in to an existing ldap schema
-flags.DEFINE_string('ldap_cloudadmin',
- 'cn=cloudadmins,ou=Groups,dc=example,dc=com', 'cn for Cloud Admins')
-flags.DEFINE_string('ldap_itsec',
- 'cn=itsec,ou=Groups,dc=example,dc=com', 'cn for ItSec')
-flags.DEFINE_string('ldap_sysadmin',
- 'cn=sysadmins,ou=Groups,dc=example,dc=com', 'cn for Sysadmins')
-flags.DEFINE_string('ldap_netadmin',
- 'cn=netadmins,ou=Groups,dc=example,dc=com', 'cn for NetAdmins')
-flags.DEFINE_string('ldap_developer',
- 'cn=developers,ou=Groups,dc=example,dc=com', 'cn for Developers')
-
-# NOTE(vish): a user with one of these roles will be a superuser and
-# have access to all api commands
-flags.DEFINE_list('superuser_roles', ['cloudadmin'],
- 'roles that ignore rbac checking completely')
-
-# NOTE(vish): a user with one of these roles will have it for every
-# project, even if he or she is not a member of the project
-flags.DEFINE_list('global_roles', ['cloudadmin', 'itsec'],
- 'roles that apply to all projects')
-
-flags.DEFINE_string('credentials_template',
- utils.abspath('auth/novarc.template'),
- 'Template for creating users rc file')
-flags.DEFINE_string('vpn_client_template',
- utils.abspath('cloudpipe/client.ovpn.template'),
- 'Template for creating users vpn file')
-flags.DEFINE_string('credential_key_file', 'pk.pem',
- 'Filename of private key in credentials zip')
-flags.DEFINE_string('credential_cert_file', 'cert.pem',
- 'Filename of certificate in credentials zip')
-flags.DEFINE_string('credential_rc_file', 'novarc',
- 'Filename of rc in credentials zip')
-
-flags.DEFINE_integer('vpn_start_port', 1000,
- 'Start port for the cloudpipe VPN servers')
-flags.DEFINE_integer('vpn_end_port', 2000,
- 'End port for the cloudpipe VPN servers')
-
-flags.DEFINE_string('credential_cert_subject',
- '/C=US/ST=California/L=MountainView/O=AnsoLabs/'
- 'OU=NovaDev/CN=%s-%s',
- 'Subject for certificate for users')
-
-flags.DEFINE_string('vpn_ip', '127.0.0.1',
- 'Public IP for the cloudpipe VPN servers')
-
-
-class AuthBase(object):
- @classmethod
- def safe_id(cls, obj):
- """Safe get object id.
-
- This method will return the id of the object if the object
- is of this class, otherwise it will return the original object.
- This allows methods to accept objects or ids as paramaters.
-
- """
- if isinstance(obj, cls):
- return obj.id
- else:
- return obj
-
-
-class User(AuthBase):
- """id and name are currently the same"""
- def __init__(self, id, name, access, secret, admin):
- self.id = id
- self.name = name
- self.access = access
- self.secret = secret
- self.admin = admin
-
- def is_superuser(self):
- """allows user to bypass rbac completely"""
- if self.admin:
- return True
- for role in FLAGS.superuser_roles:
- if self.has_role(role):
- return True
-
- def is_admin(self):
- """allows user to see objects from all projects"""
- if self.is_superuser():
- return True
- for role in FLAGS.global_roles:
- if self.has_role(role):
- return True
-
- def has_role(self, role):
- return UserManager.instance().has_role(self, role)
-
- def add_role(self, role):
- return UserManager.instance().add_role(self, role)
-
- def remove_role(self, role):
- return UserManager.instance().remove_role(self, role)
-
- def is_project_member(self, project):
- return UserManager.instance().is_project_member(self, project)
-
- def is_project_manager(self, project):
- return UserManager.instance().is_project_manager(self, project)
-
- def generate_rc(self, project=None):
- if project is None:
- project = self.id
- rc = open(FLAGS.credentials_template).read()
- rc = rc % { 'access': self.access,
- 'project': project,
- 'secret': self.secret,
- 'ec2': FLAGS.ec2_url,
- 's3': 'http://%s:%s' % (FLAGS.s3_host, FLAGS.s3_port),
- 'nova': FLAGS.ca_file,
- 'cert': FLAGS.credential_cert_file,
- 'key': FLAGS.credential_key_file,
- }
- return rc
-
- def generate_key_pair(self, name):
- return UserManager.instance().generate_key_pair(self.id, name)
-
- def create_key_pair(self, name, public_key, fingerprint):
- return UserManager.instance().create_key_pair(self.id,
- name,
- public_key,
- fingerprint)
-
- def get_key_pair(self, name):
- return UserManager.instance().get_key_pair(self.id, name)
-
- def delete_key_pair(self, name):
- return UserManager.instance().delete_key_pair(self.id, name)
-
- def get_key_pairs(self):
- return UserManager.instance().get_key_pairs(self.id)
-
- def __repr__(self):
- return "User('%s', '%s', '%s', '%s', %s)" % (
- self.id, self.name, self.access, self.secret, self.admin)
-
-
-class KeyPair(AuthBase):
- def __init__(self, id, owner_id, public_key, fingerprint):
- self.id = id
- self.name = id
- self.owner_id = owner_id
- self.public_key = public_key
- self.fingerprint = fingerprint
-
- def delete(self):
- return UserManager.instance().delete_key_pair(self.owner, self.name)
-
- def __repr__(self):
- return "KeyPair('%s', '%s', '%s', '%s')" % (
- self.id, self.owner_id, self.public_key, self.fingerprint)
-
-
-class Group(AuthBase):
- """id and name are currently the same"""
- def __init__(self, id, description = None, member_ids = None):
- self.id = id
- self.name = id
- self.description = description
- self.member_ids = member_ids
-
- def has_member(self, user):
- return User.safe_id(user) in self.member_ids
-
- def __repr__(self):
- return "Group('%s', '%s', %s)" % (
- self.id, self.description, self.member_ids)
-
-
-class Project(Group):
- def __init__(self, id, project_manager_id, description, member_ids):
- self.project_manager_id = project_manager_id
- super(Project, self).__init__(id, description, member_ids)
-
- @property
- def project_manager(self):
- return UserManager.instance().get_user(self.project_manager_id)
-
- def has_manager(self, user):
- return User.safe_id(user) == self.project_manager_id
-
- def add_role(self, user, role):
- return UserManager.instance().add_role(user, role, self)
-
- def remove_role(self, user, role):
- return UserManager.instance().remove_role(user, role, self)
-
- def has_role(self, user, role):
- return UserManager.instance().has_role(user, role, self)
-
- @property
- def vpn_ip(self):
- return Vpn(self.id).ip
-
- @property
- def vpn_port(self):
- return Vpn(self.id).port
-
- def get_credentials(self, user):
- if not isinstance(user, User):
- user = UserManager.instance().get_user(user)
- rc = user.generate_rc(self.id)
- private_key, signed_cert = self.generate_x509_cert(user)
-
- configfile = open(FLAGS.vpn_client_template,"r")
- s = string.Template(configfile.read())
- configfile.close()
- config = s.substitute(keyfile=FLAGS.credential_key_file,
- certfile=FLAGS.credential_cert_file,
- ip=self.vpn_ip,
- port=self.vpn_port)
-
- tmpdir = tempfile.mkdtemp()
- zf = os.path.join(tmpdir, "temp.zip")
- zippy = zipfile.ZipFile(zf, 'w')
- zippy.writestr(FLAGS.credential_rc_file, rc)
- zippy.writestr(FLAGS.credential_key_file, private_key)
- zippy.writestr(FLAGS.credential_cert_file, signed_cert)
- zippy.writestr("nebula-client.conf", config)
- zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(self.id))
- zippy.close()
- with open(zf, 'rb') as f:
- buffer = f.read()
-
- shutil.rmtree(tmpdir)
- return buffer
-
- def generate_x509_cert(self, user):
- return UserManager.instance().generate_x509_cert(user, self)
-
- def __repr__(self):
- return "Project('%s', '%s', '%s', %s)" % (
- self.id, self.project_manager_id,
- self.description, self.member_ids)
-
-
-class NoMorePorts(exception.Error):
- pass
-
-
-class Vpn(datastore.BasicModel):
- def __init__(self, project_id):
- self.project_id = project_id
- super(Vpn, self).__init__()
-
- @property
- def identifier(self):
- return self.project_id
-
- @classmethod
- def create(cls, project_id):
- # TODO(vish): get list of vpn ips from redis
- port = cls.find_free_port_for_ip(FLAGS.vpn_ip)
- vpn = cls(project_id)
- # save ip for project
- vpn['project'] = project_id
- vpn['ip'] = FLAGS.vpn_ip
- vpn['port'] = port
- vpn.save()
- return vpn
-
- @classmethod
- def find_free_port_for_ip(cls, ip):
- # TODO(vish): these redis commands should be generalized and
- # placed into a base class. Conceptually, it is
- # similar to an association, but we are just
- # storing a set of values instead of keys that
- # should be turned into objects.
- redis = datastore.Redis.instance()
- key = 'ip:%s:ports' % ip
- # TODO(vish): these ports should be allocated through an admin
- # command instead of a flag
- if (not redis.exists(key) and
- not redis.exists(cls._redis_association_name('ip', ip))):
- for i in range(FLAGS.vpn_start_port, FLAGS.vpn_end_port + 1):
- redis.sadd(key, i)
-
- port = redis.spop(key)
- if not port:
- raise NoMorePorts()
- return port
-
- @classmethod
- def num_ports_for_ip(cls, ip):
- return datastore.Redis.instance().scard('ip:%s:ports' % ip)
-
- @property
- def ip(self):
- return self['ip']
-
- @property
- def port(self):
- return int(self['port'])
-
- def save(self):
- self.associate_with('ip', self.ip)
- super(Vpn, self).save()
-
- def destroy(self):
- self.unassociate_with('ip', self.ip)
- datastore.Redis.instance().sadd('ip:%s:ports' % self.ip, self.port)
- super(Vpn, self).destroy()
-
-
-class UserManager(object):
- def __init__(self):
- if hasattr(self.__class__, '_instance'):
- raise Exception('Attempted to instantiate singleton')
-
- @classmethod
- def instance(cls):
- if not hasattr(cls, '_instance'):
- inst = UserManager()
- cls._instance = inst
- if FLAGS.fake_users:
- try:
- inst.create_user('fake', 'fake', 'fake')
- except: pass
- try:
- inst.create_user('user', 'user', 'user')
- except: pass
- try:
- inst.create_user('admin', 'admin', 'admin', True)
- except: pass
- return cls._instance
-
- def authenticate(self, access, signature, params, verb='GET',
- server_string='127.0.0.1:8773', path='/',
- verify_signature=True):
- # TODO: Check for valid timestamp
- (access_key, sep, project_name) = access.partition(':')
-
- user = self.get_user_from_access_key(access_key)
- if user == None:
- raise exception.NotFound('No user found for access key %s' %
- access_key)
- if project_name is '':
- project_name = user.name
-
- project = self.get_project(project_name)
- if project == None:
- raise exception.NotFound('No project called %s could be found' %
- project_name)
- if not user.is_admin() and not project.has_member(user):
- raise exception.NotFound('User %s is not a member of project %s' %
- (user.id, project.id))
- if verify_signature:
- # NOTE(vish): hmac can't handle unicode, so encode ensures that
- # secret isn't unicode
- expected_signature = signer.Signer(user.secret.encode()).generate(
- params, verb, server_string, path)
- logging.debug('user.secret: %s', user.secret)
- logging.debug('expected_signature: %s', expected_signature)
- logging.debug('signature: %s', signature)
- if signature != expected_signature:
- raise exception.NotAuthorized('Signature does not match')
- return (user, project)
-
- def has_role(self, user, role, project=None):
- with LDAPWrapper() as conn:
- if role == 'projectmanager':
- if not project:
- raise exception.Error("Must specify project")
- return self.is_project_manager(user, project)
-
- global_role = conn.has_role(User.safe_id(user),
- role,
- None)
- if not global_role:
- return global_role
-
- if not project or role in FLAGS.global_roles:
- return global_role
-
- return conn.has_role(User.safe_id(user),
- role,
- Project.safe_id(project))
-
- def add_role(self, user, role, project=None):
- with LDAPWrapper() as conn:
- return conn.add_role(User.safe_id(user), role,
- Project.safe_id(project))
-
- def remove_role(self, user, role, project=None):
- with LDAPWrapper() as conn:
- return conn.remove_role(User.safe_id(user), role,
- Project.safe_id(project))
-
- def create_project(self, name, manager_user,
- description=None, member_users=None):
- if member_users:
- member_users = [User.safe_id(u) for u in member_users]
- # NOTE(vish): try to associate a vpn ip and port first because
- # if it throws an exception, we save having to
- # create and destroy a project
- Vpn.create(name)
- with LDAPWrapper() as conn:
- return conn.create_project(name,
- User.safe_id(manager_user),
- description,
- member_users)
-
-
- def get_projects(self):
- with LDAPWrapper() as conn:
- return conn.find_projects()
-
-
- def get_project(self, project):
- with LDAPWrapper() as conn:
- return conn.find_project(Project.safe_id(project))
-
- def add_to_project(self, user, project):
- with LDAPWrapper() as conn:
- return conn.add_to_project(User.safe_id(user),
- Project.safe_id(project))
-
- def is_project_manager(self, user, project):
- if not isinstance(project, Project):
- project = self.get_project(project)
- return project.has_manager(user)
-
- def is_project_member(self, user, project):
- if isinstance(project, Project):
- return project.has_member(user)
- else:
- with LDAPWrapper() as conn:
- return conn.is_in_project(User.safe_id(user), project)
-
- def remove_from_project(self, user, project):
- with LDAPWrapper() as conn:
- return conn.remove_from_project(User.safe_id(user),
- Project.safe_id(project))
-
- def delete_project(self, project):
- with LDAPWrapper() as conn:
- return conn.delete_project(Project.safe_id(project))
-
- def get_user(self, uid):
- with LDAPWrapper() as conn:
- return conn.find_user(uid)
-
- def get_user_from_access_key(self, access_key):
- with LDAPWrapper() as conn:
- return conn.find_user_by_access_key(access_key)
-
- def get_users(self):
- with LDAPWrapper() as conn:
- return conn.find_users()
-
- def create_user(self, user, access=None, secret=None,
- admin=False, create_project=True):
- if access == None: access = str(uuid.uuid4())
- if secret == None: secret = str(uuid.uuid4())
- with LDAPWrapper() as conn:
- user = User.safe_id(user)
- result = conn.create_user(user, access, secret, admin)
- if create_project:
- # NOTE(vish): if the project creation fails, we delete
- # the user and return an exception
- try:
- conn.create_project(user, user, user)
- except Exception:
- with LDAPWrapper() as conn:
- conn.delete_user(user)
- raise
- return result
-
- def delete_user(self, user, delete_project=True):
- with LDAPWrapper() as conn:
- user = User.safe_id(user)
- if delete_project:
- try:
- conn.delete_project(user)
- except exception.NotFound:
- pass
- conn.delete_user(user)
-
- def generate_key_pair(self, user, key_name):
- # generating key pair is slow so delay generation
- # until after check
- user = User.safe_id(user)
- with LDAPWrapper() as conn:
- if not conn.user_exists(user):
- raise exception.NotFound("User %s doesn't exist" % user)
- if conn.key_pair_exists(user, key_name):
- raise exception.Duplicate("The keypair %s already exists"
- % key_name)
- private_key, public_key, fingerprint = crypto.generate_key_pair()
- self.create_key_pair(User.safe_id(user), key_name,
- public_key, fingerprint)
- return private_key, fingerprint
-
- def create_key_pair(self, user, key_name, public_key, fingerprint):
- with LDAPWrapper() as conn:
- return conn.create_key_pair(User.safe_id(user), key_name,
- public_key, fingerprint)
-
- def get_key_pair(self, user, key_name):
- with LDAPWrapper() as conn:
- return conn.find_key_pair(User.safe_id(user), key_name)
-
- def get_key_pairs(self, user):
- with LDAPWrapper() as conn:
- return conn.find_key_pairs(User.safe_id(user))
-
- def delete_key_pair(self, user, key_name):
- with LDAPWrapper() as conn:
- conn.delete_key_pair(User.safe_id(user), key_name)
-
- def generate_x509_cert(self, user, project):
- (private_key, csr) = crypto.generate_x509_cert(
- self.__cert_subject(User.safe_id(user)))
- # TODO - This should be async call back to the cloud controller
- signed_cert = crypto.sign_csr(csr, Project.safe_id(project))
- return (private_key, signed_cert)
-
- def __cert_subject(self, uid):
- # FIXME(ja) - this should be pulled from a global configuration
- return FLAGS.credential_cert_subject % (uid, utils.isotime())
-
-
-class LDAPWrapper(object):
- def __init__(self):
- self.user = FLAGS.user_dn
- self.passwd = FLAGS.ldap_password
-
- def __enter__(self):
- self.connect()
- return self
-
- def __exit__(self, type, value, traceback):
- self.conn.unbind_s()
- return False
-
- def connect(self):
- """ connect to ldap as admin user """
- if FLAGS.fake_users:
- self.NO_SUCH_OBJECT = fakeldap.NO_SUCH_OBJECT
- self.OBJECT_CLASS_VIOLATION = fakeldap.OBJECT_CLASS_VIOLATION
- self.conn = fakeldap.initialize(FLAGS.ldap_url)
- else:
- self.NO_SUCH_OBJECT = ldap.NO_SUCH_OBJECT
- self.OBJECT_CLASS_VIOLATION = ldap.OBJECT_CLASS_VIOLATION
- self.conn = ldap.initialize(FLAGS.ldap_url)
- self.conn.simple_bind_s(self.user, self.passwd)
-
- def find_object(self, dn, query = None):
- objects = self.find_objects(dn, query)
- if len(objects) == 0:
- return None
- return objects[0]
-
- def find_dns(self, dn, query=None):
- try:
- res = self.conn.search_s(dn, ldap.SCOPE_SUBTREE, query)
- except self.NO_SUCH_OBJECT:
- return []
- # just return the DNs
- return [dn for dn, attributes in res]
-
- def find_objects(self, dn, query = None):
- try:
- res = self.conn.search_s(dn, ldap.SCOPE_SUBTREE, query)
- except self.NO_SUCH_OBJECT:
- return []
- # just return the attributes
- return [attributes for dn, attributes in res]
-
- def find_users(self):
- attrs = self.find_objects(FLAGS.user_ldap_subtree,
- '(objectclass=novaUser)')
- return [self.__to_user(attr) for attr in attrs]
-
- def find_key_pairs(self, uid):
- attrs = self.find_objects(self.__uid_to_dn(uid),
- '(objectclass=novaKeyPair)')
- return [self.__to_key_pair(uid, attr) for attr in attrs]
-
- def find_projects(self):
- attrs = self.find_objects(FLAGS.project_ldap_subtree,
- '(objectclass=novaProject)')
- return [self.__to_project(attr) for attr in attrs]
-
- def find_roles(self, tree):
- attrs = self.find_objects(tree,
- '(&(objectclass=groupOfNames)(!(objectclass=novaProject)))')
- return [self.__to_group(attr) for attr in attrs]
-
- def find_group_dns_with_member(self, tree, uid):
- dns = self.find_dns(tree,
- '(&(objectclass=groupOfNames)(member=%s))' %
- self.__uid_to_dn(uid))
- return dns
-
- def find_user(self, uid):
- attr = self.find_object(self.__uid_to_dn(uid),
- '(objectclass=novaUser)')
- return self.__to_user(attr)
-
- def find_key_pair(self, uid, key_name):
- dn = 'cn=%s,%s' % (key_name,
- self.__uid_to_dn(uid))
- attr = self.find_object(dn, '(objectclass=novaKeyPair)')
- return self.__to_key_pair(uid, attr)
-
- def find_group(self, dn):
- """uses dn directly instead of custructing it from name"""
- attr = self.find_object(dn, '(objectclass=groupOfNames)')
- return self.__to_group(attr)
-
- def find_project(self, name):
- dn = 'cn=%s,%s' % (name,
- FLAGS.project_ldap_subtree)
- attr = self.find_object(dn, '(objectclass=novaProject)')
- return self.__to_project(attr)
-
- def user_exists(self, name):
- return self.find_user(name) != None
-
- def key_pair_exists(self, uid, key_name):
- return self.find_key_pair(uid, key_name) != None
-
- def project_exists(self, name):
- return self.find_project(name) != None
-
- def group_exists(self, dn):
- return self.find_group(dn) != None
-
- def delete_key_pairs(self, uid):
- keys = self.find_key_pairs(uid)
- if keys != None:
- for key in keys:
- self.delete_key_pair(uid, key.name)
-
- def create_user(self, name, access_key, secret_key, is_admin):
- if self.user_exists(name):
- raise exception.Duplicate("LDAP user %s already exists" % name)
- attr = [
- ('objectclass', ['person',
- 'organizationalPerson',
- 'inetOrgPerson',
- 'novaUser']),
- ('ou', [FLAGS.user_unit]),
- ('uid', [name]),
- ('sn', [name]),
- ('cn', [name]),
- ('secretKey', [secret_key]),
- ('accessKey', [access_key]),
- ('isAdmin', [str(is_admin).upper()]),
- ]
- self.conn.add_s(self.__uid_to_dn(name), attr)
- return self.__to_user(dict(attr))
-
- def create_project(self, name, manager_uid,
- description=None, member_uids=None):
- if self.project_exists(name):
- raise exception.Duplicate("Project can't be created because "
- "project %s already exists" % name)
- if not self.user_exists(manager_uid):
- raise exception.NotFound("Project can't be created because "
- "manager %s doesn't exist" % manager_uid)
- manager_dn = self.__uid_to_dn(manager_uid)
- # description is a required attribute
- if description is None:
- description = name
- members = []
- if member_uids != None:
- for member_uid in member_uids:
- if not self.user_exists(member_uid):
- raise exception.NotFound("Project can't be created "
- "because user %s doesn't exist" % member_uid)
- members.append(self.__uid_to_dn(member_uid))
- # always add the manager as a member because members is required
- if not manager_dn in members:
- members.append(manager_dn)
- attr = [
- ('objectclass', ['novaProject']),
- ('cn', [name]),
- ('description', [description]),
- ('projectManager', [manager_dn]),
- ('member', members)
- ]
- self.conn.add_s('cn=%s,%s' % (name, FLAGS.project_ldap_subtree), attr)
- return self.__to_project(dict(attr))
-
- def add_to_project(self, uid, project_id):
- dn = 'cn=%s,%s' % (project_id, FLAGS.project_ldap_subtree)
- return self.add_to_group(uid, dn)
-
- def remove_from_project(self, uid, project_id):
- dn = 'cn=%s,%s' % (project_id, FLAGS.project_ldap_subtree)
- return self.remove_from_group(uid, dn)
-
- def is_in_project(self, uid, project_id):
- dn = 'cn=%s,%s' % (project_id, FLAGS.project_ldap_subtree)
- return self.is_in_group(uid, dn)
-
- def __role_to_dn(self, role, project_id=None):
- if project_id == None:
- return FLAGS.__getitem__("ldap_%s" % role).value
- else:
- return 'cn=%s,cn=%s,%s' % (role,
- project_id,
- FLAGS.project_ldap_subtree)
-
- def __create_group(self, group_dn, name, uid,
- description, member_uids = None):
- if self.group_exists(group_dn):
- raise exception.Duplicate("Group can't be created because "
- "group %s already exists" % name)
- members = []
- if member_uids != None:
- for member_uid in member_uids:
- if not self.user_exists(member_uid):
- raise exception.NotFound("Group can't be created "
- "because user %s doesn't exist" % member_uid)
- members.append(self.__uid_to_dn(member_uid))
- dn = self.__uid_to_dn(uid)
- if not dn in members:
- members.append(dn)
- attr = [
- ('objectclass', ['groupOfNames']),
- ('cn', [name]),
- ('description', [description]),
- ('member', members)
- ]
- self.conn.add_s(group_dn, attr)
- return self.__to_group(dict(attr))
-
- def has_role(self, uid, role, project_id=None):
- role_dn = self.__role_to_dn(role, project_id)
- return self.is_in_group(uid, role_dn)
-
- def add_role(self, uid, role, project_id=None):
- role_dn = self.__role_to_dn(role, project_id)
- if not self.group_exists(role_dn):
- # create the role if it doesn't exist
- description = '%s role for %s' % (role, project_id)
- self.__create_group(role_dn, role, uid, description)
- else:
- return self.add_to_group(uid, role_dn)
-
- def remove_role(self, uid, role, project_id=None):
- role_dn = self.__role_to_dn(role, project_id)
- return self.remove_from_group(uid, role_dn)
-
- def is_in_group(self, uid, group_dn):
- if not self.user_exists(uid):
- raise exception.NotFound("User %s can't be searched in group "
- "becuase the user doesn't exist" % (uid,))
- if not self.group_exists(group_dn):
- return False
- res = self.find_object(group_dn,
- '(member=%s)' % self.__uid_to_dn(uid))
- return res != None
-
- def add_to_group(self, uid, group_dn):
- if not self.user_exists(uid):
- raise exception.NotFound("User %s can't be added to the group "
- "becuase the user doesn't exist" % (uid,))
- if not self.group_exists(group_dn):
- raise exception.NotFound("The group at dn %s doesn't exist" %
- (group_dn,))
- if self.is_in_group(uid, group_dn):
- raise exception.Duplicate("User %s is already a member of "
- "the group %s" % (uid, group_dn))
- attr = [
- (ldap.MOD_ADD, 'member', self.__uid_to_dn(uid))
- ]
- self.conn.modify_s(group_dn, attr)
-
- def remove_from_group(self, uid, group_dn):
- if not self.group_exists(group_dn):
- raise exception.NotFound("The group at dn %s doesn't exist" %
- (group_dn,))
- if not self.user_exists(uid):
- raise exception.NotFound("User %s can't be removed from the "
- "group because the user doesn't exist" % (uid,))
- if not self.is_in_group(uid, group_dn):
- raise exception.NotFound("User %s is not a member of the group" %
- (uid,))
- self._safe_remove_from_group(group_dn, uid)
-
- def _safe_remove_from_group(self, group_dn, uid):
- # FIXME(vish): what if deleted user is a project manager?
- attr = [(ldap.MOD_DELETE, 'member', self.__uid_to_dn(uid))]
- try:
- self.conn.modify_s(group_dn, attr)
- except self.OBJECT_CLASS_VIOLATION:
- logging.debug("Attempted to remove the last member of a group. "
- "Deleting the group at %s instead." % group_dn )
- self.delete_group(group_dn)
-
- def remove_from_all(self, uid):
- if not self.user_exists(uid):
- raise exception.NotFound("User %s can't be removed from all "
- "because the user doesn't exist" % (uid,))
- dn = self.__uid_to_dn(uid)
- role_dns = self.find_group_dns_with_member(
- FLAGS.role_ldap_subtree, uid)
- for role_dn in role_dns:
- self._safe_remove_from_group(role_dn, uid)
- project_dns = self.find_group_dns_with_member(
- FLAGS.project_ldap_subtree, uid)
- for project_dn in project_dns:
- self._safe_remove_from_group(project_dn, uid)
-
- def create_key_pair(self, uid, key_name, public_key, fingerprint):
- """create's a public key in the directory underneath the user"""
- # TODO(vish): possibly refactor this to store keys in their own ou
- # and put dn reference in the user object
- attr = [
- ('objectclass', ['novaKeyPair']),
- ('cn', [key_name]),
- ('sshPublicKey', [public_key]),
- ('keyFingerprint', [fingerprint]),
- ]
- self.conn.add_s('cn=%s,%s' % (key_name,
- self.__uid_to_dn(uid)),
- attr)
- return self.__to_key_pair(uid, dict(attr))
-
- def find_user_by_access_key(self, access):
- query = '(accessKey=%s)' % access
- dn = FLAGS.user_ldap_subtree
- return self.__to_user(self.find_object(dn, query))
-
- def delete_user(self, uid):
- if not self.user_exists(uid):
- raise exception.NotFound("User %s doesn't exist" % uid)
- self.delete_key_pairs(uid)
- self.remove_from_all(uid)
- self.conn.delete_s('uid=%s,%s' % (uid,
- FLAGS.user_ldap_subtree))
-
- def delete_key_pair(self, uid, key_name):
- if not self.key_pair_exists(uid, key_name):
- raise exception.NotFound("Key Pair %s doesn't exist for user %s" %
- (key_name, uid))
- self.conn.delete_s('cn=%s,uid=%s,%s' % (key_name, uid,
- FLAGS.user_ldap_subtree))
-
- def delete_group(self, group_dn):
- if not self.group_exists(group_dn):
- raise exception.NotFound("Group at dn %s doesn't exist" % group_dn)
- self.conn.delete_s(group_dn)
-
- def delete_roles(self, project_dn):
- roles = self.find_roles(project_dn)
- for role in roles:
- self.delete_group('cn=%s,%s' % (role.id, project_dn))
-
- def delete_project(self, name):
- project_dn = 'cn=%s,%s' % (name, FLAGS.project_ldap_subtree)
- self.delete_roles(project_dn)
- self.delete_group(project_dn)
-
- def __to_user(self, attr):
- if attr == None:
- return None
- return User(
- id = attr['uid'][0],
- name = attr['cn'][0],
- access = attr['accessKey'][0],
- secret = attr['secretKey'][0],
- admin = (attr['isAdmin'][0] == 'TRUE')
- )
-
- def __to_key_pair(self, owner, attr):
- if attr == None:
- return None
- return KeyPair(
- id = attr['cn'][0],
- owner_id = owner,
- public_key = attr['sshPublicKey'][0],
- fingerprint = attr['keyFingerprint'][0],
- )
-
- def __to_group(self, attr):
- if attr == None:
- return None
- member_dns = attr.get('member', [])
- return Group(
- id = attr['cn'][0],
- description = attr.get('description', [None])[0],
- member_ids = [self.__dn_to_uid(x) for x in member_dns]
- )
-
- def __to_project(self, attr):
- if attr == None:
- return None
- member_dns = attr.get('member', [])
- return Project(
- id = attr['cn'][0],
- project_manager_id = self.__dn_to_uid(attr['projectManager'][0]),
- description = attr.get('description', [None])[0],
- member_ids = [self.__dn_to_uid(x) for x in member_dns]
- )
-
- def __dn_to_uid(self, dn):
- return dn.split(',')[0].split('=')[1]
-
- def __uid_to_dn(self, dn):
- return 'uid=%s,%s' % (dn, FLAGS.user_ldap_subtree)
diff --git a/nova/cloudpipe/api.py b/nova/cloudpipe/api.py
index a5f78a16d..0bffe9aa3 100644
--- a/nova/cloudpipe/api.py
+++ b/nova/cloudpipe/api.py
@@ -25,7 +25,7 @@ import tornado.web
import urllib
from nova import crypto
-from nova.auth import users
+from nova.auth import manager
_log = logging.getLogger("api")
diff --git a/nova/cloudpipe/bootscript.sh b/nova/cloudpipe/bootscript.sh
index 43fc2ecab..82ec2012a 100755
--- a/nova/cloudpipe/bootscript.sh
+++ b/nova/cloudpipe/bootscript.sh
@@ -24,7 +24,7 @@ export VPN_IP=`ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f2
export BROADCAST=`ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f3 | awk '{print $1}'`
export DHCP_MASK=`ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f4 | awk '{print $1}'`
export GATEWAY=`netstat -r | grep default | cut -d' ' -f10`
-export SUBJ=/C=US/ST=California/L=Mountain View/O=Anso Labs/OU=Nova Dev/CN=customer-vpn-$VPN_IP
+export SUBJ="/C=US/ST=California/L=MountainView/O=AnsoLabs/OU=NovaDev/CN=customer-vpn-$VPN_IP"
DHCP_LOWER=`echo $BROADCAST | awk -F. '{print $1"."$2"."$3"." $4 - 10 }'`
DHCP_UPPER=`echo $BROADCAST | awk -F. '{print $1"."$2"."$3"." $4 - 1 }'`
diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py
index 63f7ae222..5b0ed3471 100644
--- a/nova/cloudpipe/pipelib.py
+++ b/nova/cloudpipe/pipelib.py
@@ -31,7 +31,7 @@ import zipfile
from nova import exception
from nova import flags
from nova import utils
-from nova.auth import users
+from nova.auth import manager
from nova.endpoint import api
@@ -44,7 +44,7 @@ flags.DEFINE_string('boot_script_template',
class CloudPipe(object):
def __init__(self, cloud_controller):
self.controller = cloud_controller
- self.manager = users.UserManager.instance()
+ self.manager = manager.AuthManager()
def launch_vpn_instance(self, project_id):
logging.debug( "Launching VPN for %s" % (project_id))
diff --git a/nova/compute/disk.py b/nova/compute/disk.py
index 08a22556e..1ffcca685 100644
--- a/nova/compute/disk.py
+++ b/nova/compute/disk.py
@@ -40,7 +40,8 @@ def partition(infile, outfile, local_bytes=0, local_type='ext2', execute=None):
formatted as ext2.
In the diagram below, dashes represent drive sectors.
- 0 a b c d e
+ +-----+------. . .-------+------. . .------+
+ | 0 a| b c|d e|
+-----+------. . .-------+------. . .------+
| mbr | primary partiton | local partition |
+-----+------. . .-------+------. . .------+
@@ -64,8 +65,8 @@ def partition(infile, outfile, local_bytes=0, local_type='ext2', execute=None):
last_sector = local_last # e
# create an empty file
- execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
- % (outfile, last_sector, sector_size))
+ yield execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
+ % (outfile, last_sector, sector_size))
# make mbr partition
yield execute('parted --script %s mklabel msdos' % outfile)
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
new file mode 100644
index 000000000..439be3c7d
--- /dev/null
+++ b/nova/compute/instance_types.py
@@ -0,0 +1,30 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+The built-in instance properties.
+"""
+
+INSTANCE_TYPES = {}
+INSTANCE_TYPES['m1.tiny'] = {'memory_mb': 512, 'vcpus': 1, 'local_gb': 0}
+INSTANCE_TYPES['m1.small'] = {'memory_mb': 1024, 'vcpus': 1, 'local_gb': 10}
+INSTANCE_TYPES['m1.medium'] = {'memory_mb': 2048, 'vcpus': 2, 'local_gb': 10}
+INSTANCE_TYPES['m1.large'] = {'memory_mb': 4096, 'vcpus': 4, 'local_gb': 10}
+INSTANCE_TYPES['m1.xlarge'] = {'memory_mb': 8192, 'vcpus': 4, 'local_gb': 10}
+INSTANCE_TYPES['c1.medium'] = {'memory_mb': 2048, 'vcpus': 4, 'local_gb': 10}
diff --git a/nova/compute/linux_net.py b/nova/compute/linux_net.py
index 48e07da66..4a4b4c8a8 100644
--- a/nova/compute/linux_net.py
+++ b/nova/compute/linux_net.py
@@ -29,7 +29,7 @@ from nova import flags
FLAGS=flags.FLAGS
flags.DEFINE_string('dhcpbridge_flagfile',
- '/etc/nova-dhcpbridge.conf',
+ '/etc/nova/nova-dhcpbridge.conf',
'location of flagfile for dhcpbridge')
def execute(cmd, addl_env=None):
@@ -94,7 +94,7 @@ def bridge_create(net):
execute("sudo ifconfig %s up" % net['bridge_name'])
def dnsmasq_cmd(net):
- cmd = ['sudo dnsmasq',
+ cmd = ['sudo -E dnsmasq',
' --strict-order',
' --bind-interfaces',
' --conf-file=',
@@ -143,8 +143,9 @@ def start_dnsmasq(network):
if os.path.exists(lease_file):
os.unlink(lease_file)
- # FLAGFILE in env
- env = {'FLAGFILE' : FLAGS.dhcpbridge_flagfile}
+ # FLAGFILE and DNSMASQ_INTERFACE in env
+ env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile,
+ 'DNSMASQ_INTERFACE': network['bridge_name']}
execute(dnsmasq_cmd(network), addl_env=env)
def stop_dnsmasq(network):
diff --git a/nova/compute/model.py b/nova/compute/model.py
index cda188183..212830d3c 100644
--- a/nova/compute/model.py
+++ b/nova/compute/model.py
@@ -40,9 +40,11 @@ True
True
"""
+import datetime
import logging
import time
import redis
+import uuid
from nova import datastore
from nova import exception
@@ -228,6 +230,78 @@ class Daemon(datastore.BasicModel):
for x in cls.associated_to("host", hostname):
yield x
+class SessionToken(datastore.BasicModel):
+ """This is a short-lived auth token that is passed through web requests"""
+
+ def __init__(self, session_token):
+ self.token = session_token
+ self.default_ttl = FLAGS.auth_token_ttl
+ super(SessionToken, self).__init__()
+
+ @property
+ def identifier(self):
+ return self.token
+
+ def default_state(self):
+ now = datetime.datetime.utcnow()
+ diff = datetime.timedelta(seconds=self.default_ttl)
+ expires = now + diff
+ return {'user': None, 'session_type': None, 'token': self.token,
+ 'expiry': expires.strftime(utils.TIME_FORMAT)}
+
+ def save(self):
+ """Call into superclass to save object, then save associations"""
+ if not self['user']:
+ raise exception.Invalid("SessionToken requires a User association")
+ success = super(SessionToken, self).save()
+ if success:
+ self.associate_with("user", self['user'])
+ return True
+
+ @classmethod
+ def lookup(cls, key):
+ token = super(SessionToken, cls).lookup(key)
+ if token:
+ expires_at = utils.parse_isotime(token['expiry'])
+ if datetime.datetime.utcnow() >= expires_at:
+ token.destroy()
+ return None
+ return token
+
+ @classmethod
+ def generate(cls, userid, session_type=None):
+ """make a new token for the given user"""
+ token = str(uuid.uuid4())
+ while cls.lookup(token):
+ token = str(uuid.uuid4())
+ instance = cls(token)
+ instance['user'] = userid
+ instance['session_type'] = session_type
+ instance.save()
+ return instance
+
+ def update_expiry(self, **kwargs):
+ """updates the expirty attribute, but doesn't save"""
+ if not kwargs:
+ kwargs['seconds'] = self.default_ttl
+ time = datetime.datetime.utcnow()
+ diff = datetime.timedelta(**kwargs)
+ expires = time + diff
+ self['expiry'] = expires.strftime(utils.TIME_FORMAT)
+
+ def is_expired(self):
+ now = datetime.datetime.utcnow()
+ expires = utils.parse_isotime(self['expiry'])
+ return expires <= now
+
+ def ttl(self):
+ """number of seconds remaining before expiration"""
+ now = datetime.datetime.utcnow()
+ expires = utils.parse_isotime(self['expiry'])
+ delta = expires - now
+ return (delta.seconds + (delta.days * 24 * 3600))
+
+
if __name__ == "__main__":
import doctest
doctest.testmod()
diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py
index fdc86b031..19e1a483d 100644
--- a/nova/compute/monitor.py
+++ b/nova/compute/monitor.py
@@ -27,7 +27,6 @@ Instance Monitoring:
import boto
import boto.s3
import datetime
-import libxml2
import logging
import os
import rrdtool
@@ -37,12 +36,8 @@ from twisted.internet import defer
from twisted.internet import task
from twisted.application import service
-try:
- import libvirt
-except Exception, err:
- logging.warning('no libvirt found')
-
from nova import flags
+from nova.virt import connection as virt_connection
FLAGS = flags.FLAGS
@@ -130,83 +125,6 @@ def init_rrd(instance, name):
*RRD_VALUES[name]
)
-def get_disks(domain):
- """
- Returns a list of all block devices for this domain.
- """
- # TODO(devcamcar): Replace libxml2 with etree.
- xml = domain.XMLDesc(0)
- doc = None
-
- try:
- doc = libxml2.parseDoc(xml)
- except:
- return []
-
- ctx = doc.xpathNewContext()
- disks = []
-
- try:
- ret = ctx.xpathEval('/domain/devices/disk')
-
- for node in ret:
- devdst = None
-
- for child in node.children:
- if child.name == 'target':
- devdst = child.prop('dev')
-
- if devdst == None:
- continue
-
- disks.append(devdst)
- finally:
- if ctx != None:
- ctx.xpathFreeContext()
- if doc != None:
- doc.freeDoc()
-
- return disks
-
-def get_interfaces(domain):
- """
- Returns a list of all network interfaces for this instance.
- """
- # TODO(devcamcar): Replace libxml2 with etree.
- xml = domain.XMLDesc(0)
- doc = None
-
- try:
- doc = libxml2.parseDoc(xml)
- except:
- return []
-
- ctx = doc.xpathNewContext()
- interfaces = []
-
- try:
- ret = ctx.xpathEval('/domain/devices/interface')
-
- for node in ret:
- devdst = None
-
- for child in node.children:
- if child.name == 'target':
- devdst = child.prop('dev')
-
- if devdst == None:
- continue
-
- interfaces.append(devdst)
- finally:
- if ctx != None:
- ctx.xpathFreeContext()
- if doc != None:
- doc.freeDoc()
-
- return interfaces
-
-
def graph_cpu(instance, duration):
"""
Creates a graph of cpu usage for the specified instance and duration.
@@ -317,10 +235,9 @@ def store_graph(instance_id, filename):
class Instance(object):
- def __init__(self, conn, domain):
+ def __init__(self, conn, instance_id):
self.conn = conn
- self.domain = domain
- self.instance_id = domain.name()
+ self.instance_id = instance_id
self.last_updated = datetime.datetime.min
self.cputime = 0
self.cputime_last_updated = None
@@ -385,14 +302,14 @@ class Instance(object):
"""
Returns cpu usage statistics for this instance.
"""
- info = self.domain.info()
+ info = self.conn.get_info(self.instance_id)
# Get the previous values.
cputime_last = self.cputime
cputime_last_updated = self.cputime_last_updated
# Get the raw CPU time used in nanoseconds.
- self.cputime = float(info[4])
+ self.cputime = float(info['cpu_time'])
self.cputime_last_updated = utcnow()
logging.debug('CPU: %d', self.cputime)
@@ -413,8 +330,8 @@ class Instance(object):
logging.debug('cputime_delta = %s', cputime_delta)
# Get the number of virtual cpus in this domain.
- vcpus = int(info[3])
-
+ vcpus = int(info['num_cpu'])
+
logging.debug('vcpus = %d', vcpus)
# Calculate CPU % used and cap at 100.
@@ -427,14 +344,13 @@ class Instance(object):
rd = 0
wr = 0
- # Get a list of block devices for this instance.
- disks = get_disks(self.domain)
+ disks = self.conn.get_disks(self.instance_id)
# Aggregate the read and write totals.
for disk in disks:
try:
rd_req, rd_bytes, wr_req, wr_bytes, errs = \
- self.domain.blockStats(disk)
+ self.conn.block_stats(self.instance_id, disk)
rd += rd_bytes
wr += wr_bytes
except TypeError:
@@ -451,13 +367,12 @@ class Instance(object):
rx = 0
tx = 0
- # Get a list of all network interfaces for this instance.
- interfaces = get_interfaces(self.domain)
+ interfaces = self.conn.get_interfaces(self.instance_id)
# Aggregate the in and out totals.
for interface in interfaces:
try:
- stats = self.domain.interfaceStats(interface)
+ stats = self.conn.interface_stats(self.instance_id, interface)
rx += stats[0]
tx += stats[4]
except TypeError:
@@ -493,20 +408,24 @@ class InstanceMonitor(object, service.Service):
Update resource usage for all running instances.
"""
try:
- conn = libvirt.openReadOnly(None)
- except libvirt.libvirtError:
- logging.exception('unexpected libvirt error')
+ conn = virt_connection.get_connection(read_only=True)
+ except Exception, exn:
+ logging.exception('unexpected exception getting connection')
time.sleep(FLAGS.monitoring_instances_delay)
return
- domain_ids = conn.listDomainsID()
-
+ domain_ids = conn.list_instances()
+ try:
+ self.updateInstances_(conn, domain_ids)
+ except Exception, exn:
+ logging.exception('updateInstances_')
+
+ def updateInstances_(self, conn, domain_ids):
for domain_id in domain_ids:
if not domain_id in self._instances:
- domain = conn.lookupByID(domain_id)
- instance = Instance(conn, domain)
+ instance = Instance(conn, domain_id)
self._instances[domain_id] = instance
- logging.debug('Found instance: %s', instance.instance_id)
+ logging.debug('Found instance: %s', domain_id)
for key in self._instances.keys():
instance = self._instances[key]
diff --git a/nova/compute/network.py b/nova/compute/network.py
index 43011f696..62d892e58 100644
--- a/nova/compute/network.py
+++ b/nova/compute/network.py
@@ -29,7 +29,7 @@ from nova import datastore
from nova import exception
from nova import flags
from nova import utils
-from nova.auth import users
+from nova.auth import manager
from nova.compute import exception as compute_exception
from nova.compute import linux_net
@@ -144,7 +144,7 @@ class Vlan(datastore.BasicModel):
@datastore.absorb_connection_error
def destroy(self):
set_name = self._redis_set_name(self.__class__.__name__)
- datastore.Redis.instance().hdel(set_name, self.project)
+ datastore.Redis.instance().hdel(set_name, self.project_id)
def subnet(self):
vlan = int(self.vlan_id)
@@ -210,11 +210,11 @@ class BaseNetwork(datastore.BasicModel):
@property
def user(self):
- return users.UserManager.instance().get_user(self['user_id'])
+ return manager.AuthManager().get_user(self['user_id'])
@property
def project(self):
- return users.UserManager.instance().get_project(self['project_id'])
+ return manager.AuthManager().get_project(self['project_id'])
@property
def _hosts_key(self):
@@ -516,7 +516,7 @@ def get_vlan_for_project(project_id):
if not known_vlans.has_key(vstr):
return Vlan.create(project_id, vnum)
old_project_id = known_vlans[vstr]
- if not users.UserManager.instance().get_project(old_project_id):
+ if not manager.AuthManager().get_project(old_project_id):
vlan = Vlan.lookup(old_project_id)
if vlan:
# NOTE(todd): This doesn't check for vlan id match, because
@@ -529,6 +529,7 @@ def get_vlan_for_project(project_id):
# don't orphan any VLANs. It is basically
# garbage collection for after projects abandoned
# their reference.
+ vlan.destroy()
vlan.project_id = project_id
vlan.save()
return vlan
@@ -542,7 +543,7 @@ def get_network_by_interface(iface, security_group='default'):
def get_network_by_address(address):
logging.debug("Get Network By Address: %s" % address)
- for project in users.UserManager.instance().get_projects():
+ for project in manager.AuthManager().get_projects():
net = get_project_network(project.id)
if address in net.assigned:
logging.debug("Found %s in %s" % (address, project.id))
@@ -582,7 +583,7 @@ def get_project_network(project_id, security_group='default'):
""" get a project's private network, allocating one if needed """
# TODO(todd): It looks goofy to get a project from a UserManager.
# Refactor to still use the LDAP backend, but not User specific.
- project = users.UserManager.instance().get_project(project_id)
+ project = manager.AuthManager().get_project(project_id)
if not project:
raise exception.Error("Project %s doesn't exist, uhoh." %
project_id)
@@ -592,5 +593,5 @@ def get_project_network(project_id, security_group='default'):
def restart_nets():
""" Ensure the network for each user is enabled"""
- for project in users.UserManager.instance().get_projects():
+ for project in manager.AuthManager().get_projects():
get_project_network(project.id).express()
diff --git a/nova/compute/power_state.py b/nova/compute/power_state.py
new file mode 100644
index 000000000..b27aa4677
--- /dev/null
+++ b/nova/compute/power_state.py
@@ -0,0 +1,41 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""The various power states that a VM can be in."""
+
+NOSTATE = 0x00
+RUNNING = 0x01
+BLOCKED = 0x02
+PAUSED = 0x03
+SHUTDOWN = 0x04
+SHUTOFF = 0x05
+CRASHED = 0x06
+
+
+def name(code):
+ d = {
+ NOSTATE : 'pending',
+ RUNNING : 'running',
+ BLOCKED : 'blocked',
+ PAUSED : 'paused',
+ SHUTDOWN: 'shutdown',
+ SHUTOFF : 'shutdown',
+ CRASHED : 'crashed',
+ }
+ return d[code]
diff --git a/nova/compute/node.py b/nova/compute/service.py
index 4683f1c8d..9b162edc7 100644
--- a/nova/compute/node.py
+++ b/nova/compute/service.py
@@ -17,10 +17,10 @@
# under the License.
"""
-Compute Node:
+Compute Service:
- Runs on each compute node, managing the
- hypervisor using libvirt.
+ Runs on each compute host, managing the
+ hypervisor using the virt module.
"""
@@ -28,84 +28,42 @@ import base64
import json
import logging
import os
-import shutil
import sys
from twisted.internet import defer
from twisted.internet import task
-from twisted.application import service
-
-
-try:
- import libvirt
-except Exception, err:
- logging.warning('no libvirt found')
from nova import exception
-from nova import fakevirt
from nova import flags
from nova import process
+from nova import service
from nova import utils
from nova.compute import disk
from nova.compute import model
from nova.compute import network
+from nova.compute import power_state
+from nova.compute.instance_types import INSTANCE_TYPES
from nova.objectstore import image # for image_path flag
-from nova.volume import storage
+from nova.virt import connection as virt_connection
+from nova.volume import service as volume_service
FLAGS = flags.FLAGS
-flags.DEFINE_string('libvirt_xml_template',
- utils.abspath('compute/libvirt.xml.template'),
- 'Libvirt XML Template')
-flags.DEFINE_bool('use_s3', True,
- 'whether to get images from s3 or use local copy')
flags.DEFINE_string('instances_path', utils.abspath('../instances'),
'where instances are stored on disk')
-INSTANCE_TYPES = {}
-INSTANCE_TYPES['m1.tiny'] = {'memory_mb': 512, 'vcpus': 1, 'local_gb': 0}
-INSTANCE_TYPES['m1.small'] = {'memory_mb': 1024, 'vcpus': 1, 'local_gb': 10}
-INSTANCE_TYPES['m1.medium'] = {'memory_mb': 2048, 'vcpus': 2, 'local_gb': 10}
-INSTANCE_TYPES['m1.large'] = {'memory_mb': 4096, 'vcpus': 4, 'local_gb': 10}
-INSTANCE_TYPES['m1.xlarge'] = {'memory_mb': 8192, 'vcpus': 4, 'local_gb': 10}
-INSTANCE_TYPES['c1.medium'] = {'memory_mb': 2048, 'vcpus': 4, 'local_gb': 10}
-
-
-def _image_path(path=''):
- return os.path.join(FLAGS.images_path, path)
-
-
-def _image_url(path):
- return "%s:%s/_images/%s" % (FLAGS.s3_host, FLAGS.s3_port, path)
-
-class Node(object, service.Service):
+class ComputeService(service.Service):
"""
Manages the running instances.
"""
def __init__(self):
- """ load configuration options for this node and connect to libvirt """
- super(Node, self).__init__()
+ """ load configuration options for this node and connect to the hypervisor"""
+ super(ComputeService, self).__init__()
self._instances = {}
- self._conn = self._get_connection()
+ self._conn = virt_connection.get_connection()
self.instdir = model.InstanceDirectory()
# TODO(joshua): This needs to ensure system state, specifically: modprobe aoe
- def _get_connection(self):
- """ returns a libvirt connection object """
- # TODO(termie): maybe lazy load after initial check for permissions
- # TODO(termie): check whether we can be disconnected
- if FLAGS.fake_libvirt:
- conn = fakevirt.FakeVirtConnection.instance()
- else:
- auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT],
- 'root',
- None]
- conn = libvirt.openAuth('qemu:///system', auth, 0)
- if conn == None:
- logging.error('Failed to open connection to the hypervisor')
- sys.exit(1)
- return conn
-
def noop(self):
""" simple test of an AMQP message call """
return defer.succeed('PONG')
@@ -121,8 +79,7 @@ class Node(object, service.Service):
def adopt_instances(self):
""" if there are instances already running, adopt them """
return defer.succeed(0)
- instance_names = [self._conn.lookupByID(x).name()
- for x in self._conn.listDomainsID()]
+ instance_names = self._conn.list_instances()
for name in instance_names:
try:
new_inst = Instance.fromName(self._conn, name)
@@ -155,7 +112,7 @@ class Node(object, service.Service):
logging.exception("model server went away")
yield
- # @exception.wrap_exception
+ @exception.wrap_exception
def run_instance(self, instance_id, **_kwargs):
""" launch a new instance with specified options """
logging.debug("Starting instance %s..." % (instance_id))
@@ -173,8 +130,7 @@ class Node(object, service.Service):
logging.info("Instances current state is %s", new_inst.state)
if new_inst.is_running():
raise exception.Error("Instance is already running")
- d = new_inst.spawn()
- return d
+ new_inst.spawn()
@exception.wrap_exception
def terminate_instance(self, instance_id):
@@ -221,29 +177,33 @@ class Node(object, service.Service):
@exception.wrap_exception
def attach_volume(self, instance_id = None,
volume_id = None, mountpoint = None):
- volume = storage.get_volume(volume_id)
+ volume = volume_service.get_volume(volume_id)
yield self._init_aoe()
- yield utils.runthis("Attached Volume: %s",
- "sudo virsh attach-disk %s /dev/etherd/%s %s"
- % (instance_id, volume['aoe_device'], mountpoint.split("/")[-1]))
+ yield process.simple_execute(
+ "sudo virsh attach-disk %s /dev/etherd/%s %s" %
+ (instance_id,
+ volume['aoe_device'],
+ mountpoint.rpartition('/dev/')[2]))
volume.finish_attach()
defer.returnValue(True)
+ @defer.inlineCallbacks
def _init_aoe(self):
- utils.runthis("Doin an AoE discover, returns %s", "sudo aoe-discover")
- utils.runthis("Doin an AoE stat, returns %s", "sudo aoe-stat")
+ yield process.simple_execute("sudo aoe-discover")
+ yield process.simple_execute("sudo aoe-stat")
+ @defer.inlineCallbacks
@exception.wrap_exception
def detach_volume(self, instance_id, volume_id):
""" detach a volume from an instance """
# despite the documentation, virsh detach-disk just wants the device
# name without the leading /dev/
- volume = storage.get_volume(volume_id)
+ volume = volume_service.get_volume(volume_id)
target = volume['mountpoint'].rpartition('/dev/')[2]
- utils.runthis("Detached Volume: %s", "sudo virsh detach-disk %s %s "
- % (instance_id, target))
+ yield process.simple_execute(
+ "sudo virsh detach-disk %s %s " % (instance_id, target))
volume.finish_detach()
- return defer.succeed(True)
+ defer.returnValue(True)
class Group(object):
@@ -305,20 +265,6 @@ class Instance(object):
self.datamodel.save()
logging.debug("Finished init of Instance with id of %s" % name)
- def toXml(self):
- # TODO(termie): cache?
- logging.debug("Starting the toXML method")
- libvirt_xml = open(FLAGS.libvirt_xml_template).read()
- xml_info = self.datamodel.copy()
- # TODO(joshua): Make this xml express the attached disks as well
-
- # TODO(termie): lazy lazy hack because xml is annoying
- xml_info['nova'] = json.dumps(self.datamodel.copy())
- libvirt_xml = libvirt_xml % xml_info
- logging.debug("Finished the toXML method")
-
- return libvirt_xml
-
@classmethod
def fromName(cls, conn, name):
""" use the saved data for reloading the instance """
@@ -329,7 +275,7 @@ class Instance(object):
def set_state(self, state_code, state_description=None):
self.datamodel['state'] = state_code
if not state_description:
- state_description = STATE_NAMES[state_code]
+ state_description = power_state.name(state_code)
self.datamodel['state_description'] = state_description
self.datamodel.save()
@@ -343,37 +289,29 @@ class Instance(object):
return self.datamodel['name']
def is_pending(self):
- return (self.state == Instance.NOSTATE or self.state == 'pending')
+ return (self.state == power_state.NOSTATE or self.state == 'pending')
def is_destroyed(self):
- return self.state == Instance.SHUTOFF
+ return self.state == power_state.SHUTOFF
def is_running(self):
logging.debug("Instance state is: %s" % self.state)
- return (self.state == Instance.RUNNING or self.state == 'running')
+ return (self.state == power_state.RUNNING or self.state == 'running')
def describe(self):
return self.datamodel
def info(self):
- logging.debug("Getting info for dom %s" % self.name)
- virt_dom = self._conn.lookupByName(self.name)
- (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info()
- return {'state': state,
- 'max_mem': max_mem,
- 'mem': mem,
- 'num_cpu': num_cpu,
- 'cpu_time': cpu_time,
- 'node_name': FLAGS.node_name}
-
- def basepath(self, path=''):
- return os.path.abspath(os.path.join(self.datamodel['basepath'], path))
+ result = self._conn.get_info(self.name)
+ result['node_name'] = FLAGS.node_name
+ return result
def update_state(self):
self.datamodel.update(self.info())
self.set_state(self.state)
self.datamodel.save() # Extra, but harmless
+ @defer.inlineCallbacks
@exception.wrap_exception
def destroy(self):
if self.is_destroyed():
@@ -381,38 +319,9 @@ class Instance(object):
raise exception.Error('trying to destroy already destroyed'
' instance: %s' % self.name)
- self.set_state(Instance.NOSTATE, 'shutting_down')
- try:
- virt_dom = self._conn.lookupByName(self.name)
- virt_dom.destroy()
- except Exception, _err:
- pass
- # If the instance is already terminated, we're still happy
- d = defer.Deferred()
- d.addCallback(lambda x: self._cleanup())
- d.addCallback(lambda x: self.datamodel.destroy())
- # TODO(termie): short-circuit me for tests
- # WE'LL save this for when we do shutdown,
- # instead of destroy - but destroy returns immediately
- timer = task.LoopingCall(f=None)
- def _wait_for_shutdown():
- try:
- self.update_state()
- if self.state == Instance.SHUTDOWN:
- timer.stop()
- d.callback(None)
- except Exception:
- self.set_state(Instance.SHUTDOWN)
- timer.stop()
- d.callback(None)
- timer.f = _wait_for_shutdown
- timer.start(interval=0.5, now=True)
- return d
-
- def _cleanup(self):
- target = os.path.abspath(self.datamodel['basepath'])
- logging.info("Deleting instance files at %s", target)
- shutil.rmtree(target)
+ self.set_state(power_state.NOSTATE, 'shutting_down')
+ yield self._conn.destroy(self)
+ self.datamodel.destroy()
@defer.inlineCallbacks
@exception.wrap_exception
@@ -423,141 +332,26 @@ class Instance(object):
'instance: %s (state: %s)' % (self.name, self.state))
logging.debug('rebooting instance %s' % self.name)
- self.set_state(Instance.NOSTATE, 'rebooting')
- yield self._conn.lookupByName(self.name).destroy()
- self._conn.createXML(self.toXml(), 0)
-
- d = defer.Deferred()
- timer = task.LoopingCall(f=None)
- def _wait_for_reboot():
- try:
- self.update_state()
- if self.is_running():
- logging.debug('rebooted instance %s' % self.name)
- timer.stop()
- d.callback(None)
- except Exception:
- self.set_state(Instance.SHUTDOWN)
- timer.stop()
- d.callback(None)
- timer.f = _wait_for_reboot
- timer.start(interval=0.5, now=True)
- yield d
-
- def _fetch_s3_image(self, image, path):
- url = _image_url('%s/image' % image)
- d = process.simple_execute(
- 'curl --silent %s -o %s' % (url, path))
- return d
-
- def _fetch_local_image(self, image, path):
- source = _image_path('%s/image' % image)
- d = process.simple_execute('cp %s %s' % (source, path))
- return d
-
- @defer.inlineCallbacks
- def _create_image(self, libvirt_xml):
- # syntactic nicety
- data = self.datamodel
- basepath = self.basepath
-
- # ensure directories exist and are writable
- yield process.simple_execute(
- 'mkdir -p %s' % basepath())
- yield process.simple_execute(
- 'chmod 0777 %s' % basepath())
-
-
- # TODO(termie): these are blocking calls, it would be great
- # if they weren't.
- logging.info('Creating image for: %s', data['instance_id'])
- f = open(basepath('libvirt.xml'), 'w')
- f.write(libvirt_xml)
- f.close()
-
- if FLAGS.fake_libvirt:
- logging.info('fake_libvirt, nothing to do for create_image')
- raise defer.returnValue(None);
-
- if FLAGS.use_s3:
- _fetch_file = self._fetch_s3_image
- else:
- _fetch_file = self._fetch_local_image
-
- if not os.path.exists(basepath('disk')):
- yield _fetch_file(data['image_id'], basepath('disk-raw'))
- if not os.path.exists(basepath('kernel')):
- yield _fetch_file(data['kernel_id'], basepath('kernel'))
- if not os.path.exists(basepath('ramdisk')):
- yield _fetch_file(data['ramdisk_id'], basepath('ramdisk'))
-
- execute = lambda cmd, input=None: \
- process.simple_execute(cmd=cmd,
- input=input,
- error_ok=1)
-
- key = data['key_data']
- net = None
- if FLAGS.simple_network:
- with open(FLAGS.simple_network_template) as f:
- net = f.read() % {'address': data['private_dns_name'],
- 'network': FLAGS.simple_network_network,
- 'netmask': FLAGS.simple_network_netmask,
- 'gateway': FLAGS.simple_network_gateway,
- 'broadcast': FLAGS.simple_network_broadcast,
- 'dns': FLAGS.simple_network_dns}
- if key or net:
- logging.info('Injecting data into image %s', data['image_id'])
- yield disk.inject_data(basepath('disk-raw'), key, net, execute=execute)
-
- if os.path.exists(basepath('disk')):
- yield process.simple_execute(
- 'rm -f %s' % basepath('disk'))
-
- bytes = (INSTANCE_TYPES[data['instance_type']]['local_gb']
- * 1024 * 1024 * 1024)
- yield disk.partition(
- basepath('disk-raw'), basepath('disk'), bytes, execute=execute)
+ self.set_state(power_state.NOSTATE, 'rebooting')
+ yield self._conn.reboot(self)
+ self.update_state()
@defer.inlineCallbacks
@exception.wrap_exception
def spawn(self):
- self.set_state(Instance.NOSTATE, 'spawning')
+ self.set_state(power_state.NOSTATE, 'spawning')
logging.debug("Starting spawn in Instance")
-
- xml = self.toXml()
- self.set_state(Instance.NOSTATE, 'launching')
- logging.info('self %s', self)
try:
- yield self._create_image(xml)
- self._conn.createXML(xml, 0)
- # TODO(termie): this should actually register
- # a callback to check for successful boot
- logging.debug("Instance is running")
-
- local_d = defer.Deferred()
- timer = task.LoopingCall(f=None)
- def _wait_for_boot():
- try:
- self.update_state()
- if self.is_running():
- logging.debug('booted instance %s' % self.name)
- timer.stop()
- local_d.callback(None)
- except Exception:
- self.set_state(Instance.SHUTDOWN)
- logging.error('Failed to boot instance %s' % self.name)
- timer.stop()
- local_d.callback(None)
- timer.f = _wait_for_boot
- timer.start(interval=0.5, now=True)
+ yield self._conn.spawn(self)
except Exception, ex:
logging.debug(ex)
- self.set_state(Instance.SHUTDOWN)
+ self.set_state(power_state.SHUTDOWN)
+ self.update_state()
@exception.wrap_exception
def console_output(self):
- if not FLAGS.fake_libvirt:
+ # FIXME: Abstract this for Xen
+ if FLAGS.connection_type == 'libvirt':
fname = os.path.abspath(
os.path.join(self.datamodel['basepath'], 'console.log'))
with open(fname, 'r') as f:
@@ -565,13 +359,3 @@ class Instance(object):
else:
console = 'FAKE CONSOLE OUTPUT'
return defer.succeed(console)
-
-STATE_NAMES = {
- Instance.NOSTATE : 'pending',
- Instance.RUNNING : 'running',
- Instance.BLOCKED : 'blocked',
- Instance.PAUSED : 'paused',
- Instance.SHUTDOWN : 'shutdown',
- Instance.SHUTOFF : 'shutdown',
- Instance.CRASHED : 'crashed',
-}
diff --git a/nova/datastore.py b/nova/datastore.py
index e57177e04..9c2592334 100644
--- a/nova/datastore.py
+++ b/nova/datastore.py
@@ -103,7 +103,7 @@ class BasicModel(object):
@classmethod
def _redis_name(cls):
- return cls.override_type or cls.__name__
+ return cls.override_type or cls.__name__.lower()
@classmethod
def lookup(cls, identifier):
@@ -184,21 +184,19 @@ class BasicModel(object):
@absorb_connection_error
def add_to_index(self):
+ """Each insance of Foo has its id tracked int the set named Foos"""
set_name = self.__class__._redis_set_name(self.__class__.__name__)
Redis.instance().sadd(set_name, self.identifier)
@absorb_connection_error
def remove_from_index(self):
- set_name = self.__class__._redis_set_name(self.__class__.__name__)
- Redis.instance().srem(set_name, self.identifier)
-
- @absorb_connection_error
- def remove_from_index(self):
+ """Remove id of this instance from the set tracking ids of this type"""
set_name = self.__class__._redis_set_name(self.__class__.__name__)
Redis.instance().srem(set_name, self.identifier)
@absorb_connection_error
def associate_with(self, foreign_type, foreign_id):
+ """Add this class id into the set foreign_type:foreign_id:this_types"""
# note the extra 's' on the end is for plurality
# to match the old data without requiring a migration of any sort
self.add_associated_model_to_its_set(foreign_type, foreign_id)
@@ -208,21 +206,24 @@ class BasicModel(object):
@absorb_connection_error
def unassociate_with(self, foreign_type, foreign_id):
+ """Delete from foreign_type:foreign_id:this_types set"""
redis_set = self.__class__._redis_association_name(foreign_type,
foreign_id)
Redis.instance().srem(redis_set, self.identifier)
- def add_associated_model_to_its_set(self, my_type, my_id):
+ def add_associated_model_to_its_set(self, model_type, model_id):
+ """
+ When associating an X to a Y, save Y so that it gets a newer timestamp,
+ and to make sure Y is saved if it is a new record.
+ If the model_type isn't found as a usable class, ignore it, this can
+ happen when associating to things stored in LDAP (user, project, ...).
+ """
table = globals()
- klsname = my_type.capitalize()
+ klsname = model_type.capitalize()
if table.has_key(klsname):
- my_class = table[klsname]
- my_inst = my_class(my_id)
- my_inst.save()
- else:
- logging.warning("no model class for %s when building"
- " association from %s",
- klsname, self)
+ model_class = table[klsname]
+ model_inst = model_class(model_id)
+ model_inst.save()
@absorb_connection_error
def save(self):
diff --git a/nova/endpoint/admin.py b/nova/endpoint/admin.py
index b97a6727f..c4b8c05ca 100644
--- a/nova/endpoint/admin.py
+++ b/nova/endpoint/admin.py
@@ -22,9 +22,10 @@ Admin API controller, exposed through http via the api worker.
import base64
-from nova.auth import users
+from nova.auth import manager
from nova.compute import model
+
def user_dict(user, base64_file=None):
"""Convert the user object to a result dict"""
if user:
@@ -32,8 +33,17 @@ def user_dict(user, base64_file=None):
'username': user.id,
'accesskey': user.access,
'secretkey': user.secret,
- 'file': base64_file,
- }
+ 'file': base64_file}
+ else:
+ return {}
+
+def project_dict(project):
+ """Convert the project object to a result dict"""
+ if project:
+ return {
+ 'projectname': project.id,
+ 'project_manager_id': project.project_manager_id,
+ 'description': project.description}
else:
return {}
@@ -69,18 +79,18 @@ class AdminController(object):
@admin_only
def describe_user(self, _context, name, **_kwargs):
"""Returns user data, including access and secret keys."""
- return user_dict(users.UserManager.instance().get_user(name))
+ return user_dict(manager.AuthManager().get_user(name))
@admin_only
def describe_users(self, _context, **_kwargs):
"""Returns all users - should be changed to deal with a list."""
return {'userSet':
- [user_dict(u) for u in users.UserManager.instance().get_users()] }
+ [user_dict(u) for u in manager.AuthManager().get_users()] }
@admin_only
def register_user(self, _context, name, **_kwargs):
"""Creates a new user, and returns generated credentials."""
- return user_dict(users.UserManager.instance().create_user(name))
+ return user_dict(manager.AuthManager().create_user(name))
@admin_only
def deregister_user(self, _context, name, **_kwargs):
@@ -88,7 +98,20 @@ class AdminController(object):
Should throw an exception if the user has instances,
volumes, or buckets remaining.
"""
- users.UserManager.instance().delete_user(name)
+ manager.AuthManager().delete_user(name)
+
+ return True
+
+ @admin_only
+ def modify_user_role(self, context, user, role, project=None,
+ operation='add', **kwargs):
+ """Add or remove a role for a user and project."""
+ if operation == 'add':
+ manager.AuthManager().add_role(user, role, project)
+ elif operation == 'remove':
+ manager.AuthManager().remove_role(user, role, project)
+ else:
+ raise exception.ApiError('operation must be add or remove')
return True
@@ -100,11 +123,58 @@ class AdminController(object):
"""
if project is None:
project = name
- project = users.UserManager.instance().get_project(project)
- user = users.UserManager.instance().get_user(name)
+ project = manager.AuthManager().get_project(project)
+ user = manager.AuthManager().get_user(name)
return user_dict(user, base64.b64encode(project.get_credentials(user)))
@admin_only
+ def describe_project(self, context, name, **kwargs):
+ """Returns project data, including member ids."""
+ return project_dict(manager.AuthManager().get_project(name))
+
+ @admin_only
+ def describe_projects(self, context, user=None, **kwargs):
+ """Returns all projects - should be changed to deal with a list."""
+ return {'projectSet':
+ [project_dict(u) for u in
+ manager.AuthManager().get_projects(user=user)]}
+
+ @admin_only
+ def register_project(self, context, name, manager_user, description=None,
+ member_users=None, **kwargs):
+ """Creates a new project"""
+ return project_dict(
+ manager.AuthManager().create_project(
+ name,
+ manager_user,
+ description=description,
+ member_users=member_users))
+
+ @admin_only
+ def deregister_project(self, context, name):
+ """Permanently deletes a project."""
+ manager.AuthManager().delete_project(name)
+ return True
+
+ @admin_only
+ def describe_project_members(self, context, name, **kwargs):
+ project = manager.AuthManager().get_project(name)
+ result = {
+ 'members': [{'member': m} for m in project.member_ids]}
+ return result
+
+ @admin_only
+ def modify_project_member(self, context, user, project, operation, **kwargs):
+ """Add or remove a user from a project."""
+ if operation =='add':
+ manager.AuthManager().add_to_project(user, project)
+ elif operation == 'remove':
+ manager.AuthManager().remove_from_project(user, project)
+ else:
+ raise exception.ApiError('operation must be add or remove')
+ return True
+
+ @admin_only
def describe_hosts(self, _context, **_kwargs):
"""Returns status info for all nodes. Includes:
* Disk Space
diff --git a/nova/endpoint/api.py b/nova/endpoint/api.py
index 79a2aaddb..78a18b9ea 100755
--- a/nova/endpoint/api.py
+++ b/nova/endpoint/api.py
@@ -35,7 +35,7 @@ from nova import crypto
from nova import exception
from nova import flags
from nova import utils
-from nova.auth import users
+from nova.auth import manager
import nova.cloudpipe.api
from nova.endpoint import cloud
@@ -266,7 +266,7 @@ class APIRequestHandler(tornado.web.RequestHandler):
# Authenticate the request.
try:
- (user, project) = users.UserManager.instance().authenticate(
+ (user, project) = manager.AuthManager().authenticate(
access,
signature,
auth_params,
diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py
index 3b7b4804b..67fc04502 100644
--- a/nova/endpoint/cloud.py
+++ b/nova/endpoint/cloud.py
@@ -23,7 +23,6 @@ datastore.
"""
import base64
-import json
import logging
import os
import time
@@ -35,12 +34,13 @@ from nova import flags
from nova import rpc
from nova import utils
from nova.auth import rbac
-from nova.auth import users
+from nova.auth import manager
from nova.compute import model
from nova.compute import network
-from nova.compute import node
+from nova.compute.instance_types import INSTANCE_TYPES
+from nova.compute import service as compute_service
from nova.endpoint import images
-from nova.volume import storage
+from nova.volume import service as volume_service
FLAGS = flags.FLAGS
@@ -48,10 +48,10 @@ FLAGS = flags.FLAGS
flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on')
def _gen_key(user_id, key_name):
- """ Tuck this into UserManager """
+ """ Tuck this into AuthManager """
try:
- manager = users.UserManager.instance()
- private_key, fingerprint = manager.generate_key_pair(user_id, key_name)
+ mgr = manager.AuthManager()
+ private_key, fingerprint = mgr.generate_key_pair(user_id, key_name)
except Exception as ex:
return {'exception': ex}
return {'private_key': private_key, 'fingerprint': fingerprint}
@@ -76,7 +76,7 @@ class CloudController(object):
def volumes(self):
""" returns a list of all volumes """
for volume_id in datastore.Redis.instance().smembers("volumes"):
- volume = storage.get_volume(volume_id)
+ volume = volume_service.get_volume(volume_id)
yield volume
def __str__(self):
@@ -103,7 +103,7 @@ class CloudController(object):
result = {}
for instance in self.instdir.all:
if instance['project_id'] == project_id:
- line = '%s slots=%d' % (instance['private_dns_name'], node.INSTANCE_TYPES[instance['instance_type']]['vcpus'])
+ line = '%s slots=%d' % (instance['private_dns_name'], INSTANCE_TYPES[instance['instance_type']]['vcpus'])
if instance['key_name'] in result:
result[instance['key_name']].append(line)
else:
@@ -296,8 +296,8 @@ class CloudController(object):
@rbac.allow('projectmanager', 'sysadmin')
def create_volume(self, context, size, **kwargs):
- # TODO(vish): refactor this to create the volume object here and tell storage to create it
- res = rpc.call(FLAGS.storage_topic, {"method": "create_volume",
+ # TODO(vish): refactor this to create the volume object here and tell service to create it
+ res = rpc.call(FLAGS.volume_topic, {"method": "create_volume",
"args" : {"size": size,
"user_id": context.user.id,
"project_id": context.project.id}})
@@ -331,7 +331,7 @@ class CloudController(object):
raise exception.NotFound('Instance %s could not be found' % instance_id)
def _get_volume(self, context, volume_id):
- volume = storage.get_volume(volume_id)
+ volume = volume_service.get_volume(volume_id)
if context.user.is_admin() or volume['project_id'] == context.project.id:
return volume
raise exception.NotFound('Volume %s could not be found' % volume_id)
@@ -453,21 +453,21 @@ class CloudController(object):
def format_addresses(self, context):
addresses = []
- # TODO(vish): move authorization checking into network.py
for address in self.network.host_objs:
- #logging.debug(address_record)
- address_rv = {
- 'public_ip': address['address'],
- 'instance_id' : address.get('instance_id', 'free')
- }
- if context.user.is_admin():
- address_rv['instance_id'] = "%s (%s, %s)" % (
- address['instance_id'],
- address['user_id'],
- address['project_id'],
- )
+ # TODO(vish): implement a by_project iterator for addresses
+ if (context.user.is_admin() or
+ address['project_id'] == context.project.id):
+ address_rv = {
+ 'public_ip': address['address'],
+ 'instance_id' : address.get('instance_id', 'free')
+ }
+ if context.user.is_admin():
+ address_rv['instance_id'] = "%s (%s, %s)" % (
+ address['instance_id'],
+ address['user_id'],
+ address['project_id'],
+ )
addresses.append(address_rv)
- # logging.debug(addresses)
return {'addressesSet': addresses}
@rbac.allow('netadmin')
@@ -516,13 +516,17 @@ class CloudController(object):
# get defaults from imagestore
image_id = image['imageId']
- kernel_id = image.get('kernelId', None)
- ramdisk_id = image.get('ramdiskId', None)
+ kernel_id = image.get('kernelId', FLAGS.default_kernel)
+ ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk)
# API parameters overrides of defaults
kernel_id = kwargs.get('kernel_id', kernel_id)
ramdisk_id = kwargs.get('ramdisk_id', ramdisk_id)
+ # make sure we have access to kernel and ramdisk
+ self._get_image(context, kernel_id)
+ self._get_image(context, ramdisk_id)
+
logging.debug("Going to run instances...")
reservation_id = utils.generate_uid('r')
launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
@@ -578,7 +582,7 @@ class CloudController(object):
"args": {"instance_id" : inst.instance_id}})
logging.debug("Casting to node for %s's instance with IP of %s" %
(context.user.name, inst['private_dns_name']))
- # TODO: Make the NetworkComputeNode figure out the network name from ip.
+ # TODO: Make Network figure out the network name from ip.
return defer.succeed(self._format_instances(
context, reservation_id))
@@ -628,8 +632,8 @@ class CloudController(object):
def delete_volume(self, context, volume_id, **kwargs):
# TODO: return error if not authorized
volume = self._get_volume(context, volume_id)
- storage_node = volume['node_name']
- rpc.cast('%s.%s' % (FLAGS.storage_topic, storage_node),
+ volume_node = volume['node_name']
+ rpc.cast('%s.%s' % (FLAGS.volume_topic, volume_node),
{"method": "delete_volume",
"args" : {"volume_id": volume_id}})
return defer.succeed(True)
diff --git a/nova/endpoint/rackspace.py b/nova/endpoint/rackspace.py
index 9208ddab7..de05ba2da 100644
--- a/nova/endpoint/rackspace.py
+++ b/nova/endpoint/rackspace.py
@@ -34,12 +34,11 @@ from nova import exception
from nova import flags
from nova import rpc
from nova import utils
-from nova.auth import users
+from nova.auth import manager
from nova.compute import model
from nova.compute import network
from nova.endpoint import images
from nova.endpoint import wsgi
-from nova.volume import storage
FLAGS = flags.FLAGS
@@ -78,11 +77,11 @@ class Api(object):
def build_context(self, env):
rv = {}
if env.has_key("HTTP_X_AUTH_TOKEN"):
- rv['user'] = users.UserManager.instance().get_user_from_access_key(
+ rv['user'] = manager.AuthManager().get_user_from_access_key(
env['HTTP_X_AUTH_TOKEN']
)
if rv['user']:
- rv['project'] = users.UserManager.instance().get_project(
+ rv['project'] = manager.AuthManager().get_project(
rv['user'].name
)
return rv
diff --git a/nova/exception.py b/nova/exception.py
index bda002d1e..52497a19e 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -44,6 +44,12 @@ class Duplicate(Error):
class NotAuthorized(Error):
pass
+class NotEmpty(Error):
+ pass
+
+class Invalid(Error):
+ pass
+
def wrap_exception(f):
def _wrap(*args, **kw):
try:
diff --git a/nova/fakevirt.py b/nova/fakevirt.py
deleted file mode 100644
index bcbeae548..000000000
--- a/nova/fakevirt.py
+++ /dev/null
@@ -1,112 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-A fake (in-memory) hypervisor+api. Allows nova testing w/o KVM and libvirt.
-"""
-
-import StringIO
-from xml.etree import ElementTree
-
-
-class FakeVirtConnection(object):
- # FIXME: networkCreateXML, listNetworks don't do anything since
- # they aren't exercised in tests yet
-
- def __init__(self):
- self.next_index = 0
- self.instances = {}
-
- @classmethod
- def instance(cls):
- if not hasattr(cls, '_instance'):
- cls._instance = cls()
- return cls._instance
-
- def lookupByID(self, i):
- return self.instances[str(i)]
-
- def listDomainsID(self):
- return self.instances.keys()
-
- def listNetworks(self):
- return []
-
- def lookupByName(self, instance_id):
- for x in self.instances.values():
- if x.name() == instance_id:
- return x
- raise Exception('no instance found for instance_id: %s' % instance_id)
-
- def networkCreateXML(self, xml):
- pass
-
- def createXML(self, xml, flags):
- # parse the xml :(
- xml_stringio = StringIO.StringIO(xml)
-
- my_xml = ElementTree.parse(xml_stringio)
- name = my_xml.find('name').text
-
- fake_instance = FakeVirtInstance(conn=self,
- index=str(self.next_index),
- name=name,
- xml=my_xml)
- self.instances[str(self.next_index)] = fake_instance
- self.next_index += 1
-
- def _removeInstance(self, i):
- self.instances.pop(str(i))
-
-
-class FakeVirtInstance(object):
- NOSTATE = 0x00
- RUNNING = 0x01
- BLOCKED = 0x02
- PAUSED = 0x03
- SHUTDOWN = 0x04
- SHUTOFF = 0x05
- CRASHED = 0x06
-
- def __init__(self, conn, index, name, xml):
- self._conn = conn
- self._destroyed = False
- self._name = name
- self._index = index
- self._state = self.RUNNING
-
- def name(self):
- return self._name
-
- def destroy(self):
- if self._state == self.SHUTOFF:
- raise Exception('instance already destroyed: %s' % self.name())
- self._state = self.SHUTDOWN
- self._conn._removeInstance(self._index)
-
- def info(self):
- return [self._state, 0, 2, 0, 0]
-
- def XMLDesc(self, flags):
- return open('fakevirtinstance.xml', 'r').read()
-
- def blockStats(self, disk):
- return [0L, 0L, 0L, 0L, null]
-
- def interfaceStats(self, iface):
- return [0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L]
diff --git a/nova/flags.py b/nova/flags.py
index 06ea1e007..f35f5fa10 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -36,17 +36,17 @@ DEFINE_bool = DEFINE_bool
# Define any app-specific flags in their own files, docs at:
# http://code.google.com/p/python-gflags/source/browse/trunk/gflags.py#39
+DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake')
DEFINE_integer('s3_port', 3333, 's3 port')
DEFINE_string('s3_host', '127.0.0.1', 's3 host')
#DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on')
DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on')
-DEFINE_string('storage_topic', 'storage', 'the topic storage nodes listen on')
-DEFINE_bool('fake_libvirt', False,
- 'whether to use a fake libvirt or not')
+DEFINE_string('volume_topic', 'volume', 'the topic volume nodes listen on')
+DEFINE_string('network_topic', 'network', 'the topic network nodes listen on')
+
DEFINE_bool('verbose', False, 'show debug output')
DEFINE_boolean('fake_rabbit', False, 'use a fake rabbit')
DEFINE_bool('fake_network', False, 'should we use fake network devices and addresses')
-DEFINE_bool('fake_users', False, 'use fake users')
DEFINE_string('rabbit_host', 'localhost', 'rabbit host')
DEFINE_integer('rabbit_port', 5672, 'rabbit port')
DEFINE_string('rabbit_userid', 'guest', 'rabbit userid')
@@ -75,6 +75,8 @@ DEFINE_string('vpn_key_suffix',
'-key',
'Suffix to add to project name for vpn key')
+DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger')
+
# UNUSED
DEFINE_string('node_availability_zone',
'nova',
diff --git a/nova/network/__init__.py b/nova/network/__init__.py
new file mode 100644
index 000000000..dcc54db09
--- /dev/null
+++ b/nova/network/__init__.py
@@ -0,0 +1,32 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+:mod:`nova.network` -- Network Nodes
+=====================================================
+
+.. automodule:: nova.network
+ :platform: Unix
+ :synopsis: Network is responsible for managing networking
+.. moduleauthor:: Jesse Andrews <jesse@ansolabs.com>
+.. moduleauthor:: Devin Carlen <devin.carlen@gmail.com>
+.. moduleauthor:: Vishvananda Ishaya <vishvananda@yahoo.com>
+.. moduleauthor:: Joshua McKenty <joshua@cognition.ca>
+.. moduleauthor:: Manish Singh <yosh@gimp.org>
+.. moduleauthor:: Andy Smith <andy@anarkystic.com>
+"""
diff --git a/nova/network/service.py b/nova/network/service.py
new file mode 100644
index 000000000..9d87e05e6
--- /dev/null
+++ b/nova/network/service.py
@@ -0,0 +1,35 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Network Nodes are responsible for allocating ips and setting up network
+"""
+
+import logging
+
+from nova import flags
+from nova import service
+
+
+FLAGS = flags.FLAGS
+
+class NetworkService(service.Service):
+ """Allocates ips and sets up networks"""
+
+ def __init__(self):
+ logging.debug("Network node working")
diff --git a/nova/objectstore/bucket.py b/nova/objectstore/bucket.py
index 090ef4e61..b42a96233 100644
--- a/nova/objectstore/bucket.py
+++ b/nova/objectstore/bucket.py
@@ -107,7 +107,7 @@ class Bucket(object):
try:
return context.user.is_admin() or self.owner_id == context.project.id
except Exception, e:
- pass
+ return False
def list_keys(self, prefix='', marker=None, max_keys=1000, terse=False):
object_names = []
@@ -161,7 +161,7 @@ class Bucket(object):
def delete(self):
if len(os.listdir(self.path)) > 0:
- raise exception.NotAuthorized()
+ raise exception.NotEmpty()
os.rmdir(self.path)
os.remove(self.path+'.json')
diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py
index c670ee02f..b4d7e6179 100644
--- a/nova/objectstore/handler.py
+++ b/nova/objectstore/handler.py
@@ -47,12 +47,12 @@ import urllib
from twisted.application import internet, service
from twisted.web.resource import Resource
-from twisted.web import server, static
+from twisted.web import server, static, error
from nova import exception
from nova import flags
-from nova.auth import users
+from nova.auth import manager
from nova.endpoint import api
from nova.objectstore import bucket
from nova.objectstore import image
@@ -107,28 +107,46 @@ def get_context(request):
if not authorization_header:
raise exception.NotAuthorized
access, sep, secret = authorization_header.split(' ')[1].rpartition(':')
- um = users.UserManager.instance()
- print 'um %s' % um
- (user, project) = um.authenticate(access, secret, {}, request.method, request.host, request.uri, False)
- # FIXME: check signature here!
+ (user, project) = manager.AuthManager().authenticate(access,
+ secret,
+ {},
+ request.method,
+ request.getRequestHostname(),
+ request.uri,
+ headers=request.getAllHeaders(),
+ check_type='s3')
return api.APIRequestContext(None, user, project)
except exception.Error as ex:
logging.debug("Authentication Failure: %s" % ex)
raise exception.NotAuthorized
-class S3(Resource):
+class ErrorHandlingResource(Resource):
+ """Maps exceptions to 404 / 401 codes. Won't work for exceptions thrown after NOT_DONE_YET is returned."""
+ # TODO(unassigned) (calling-all-twisted-experts): This needs to be plugged in to the right place in twisted...
+ # This doesn't look like it's the right place (consider exceptions in getChild; or after NOT_DONE_YET is returned
+ def render(self, request):
+ try:
+ return Resource.render(self, request)
+ except exception.NotFound:
+ request.setResponseCode(404)
+ return ''
+ except exception.NotAuthorized:
+ request.setResponseCode(403)
+ return ''
+
+class S3(ErrorHandlingResource):
"""Implementation of an S3-like storage server based on local files."""
def getChild(self, name, request):
request.context = get_context(request)
-
if name == '':
return self
elif name == '_images':
- return ImageResource()
+ return ImagesResource()
else:
return BucketResource(name)
def render_GET(self, request):
+ logging.debug('List of buckets requested')
buckets = [b for b in bucket.Bucket.all() if b.is_authorized(request.context)]
render_xml(request, {"ListAllMyBucketsResult": {
@@ -136,7 +154,7 @@ class S3(Resource):
}})
return server.NOT_DONE_YET
-class BucketResource(Resource):
+class BucketResource(ErrorHandlingResource):
def __init__(self, name):
Resource.__init__(self)
self.name = name
@@ -150,7 +168,10 @@ class BucketResource(Resource):
def render_GET(self, request):
logging.debug("List keys for bucket %s" % (self.name))
- bucket_object = bucket.Bucket(self.name)
+ try:
+ bucket_object = bucket.Bucket(self.name)
+ except exception.NotFound, e:
+ return error.NoResource(message="No such bucket").render(request)
if not bucket_object.is_authorized(request.context):
raise exception.NotAuthorized
@@ -166,13 +187,10 @@ class BucketResource(Resource):
def render_PUT(self, request):
logging.debug("Creating bucket %s" % (self.name))
- try:
- print 'user is %s' % request.context
- except Exception as e:
- logging.exception(e)
logging.debug("calling bucket.Bucket.create(%r, %r)" % (self.name, request.context))
bucket.Bucket.create(self.name, request.context)
- return ''
+ request.finish()
+ return server.NOT_DONE_YET
def render_DELETE(self, request):
logging.debug("Deleting bucket %s" % (self.name))
@@ -186,7 +204,7 @@ class BucketResource(Resource):
return ''
-class ObjectResource(Resource):
+class ObjectResource(ErrorHandlingResource):
def __init__(self, bucket, name):
Resource.__init__(self)
self.bucket = bucket
@@ -227,16 +245,22 @@ class ObjectResource(Resource):
request.setResponseCode(204)
return ''
-class ImageResource(Resource):
+class ImageResource(ErrorHandlingResource):
isLeaf = True
+ def __init__(self, name):
+ Resource.__init__(self)
+ self.img = image.Image(name)
+
+ def render_GET(self, request):
+ return static.File(self.img.image_path, defaultType='application/octet-stream').render_GET(request)
+
+class ImagesResource(Resource):
def getChild(self, name, request):
if name == '':
return self
else:
- request.setHeader("Content-Type", "application/octet-stream")
- img = image.Image(name)
- return static.File(img.image_path)
+ return ImageResource(name)
def render_GET(self, request):
""" returns a json listing of all images
@@ -273,12 +297,12 @@ class ImageResource(Resource):
def render_POST(self, request):
""" update image attributes: public/private """
- image_id = self.get_argument('image_id', u'')
- operation = self.get_argument('operation', u'')
+ image_id = get_argument(request, 'image_id', u'')
+ operation = get_argument(request, 'operation', u'')
image_object = image.Image(image_id)
- if not image.is_authorized(request.context):
+ if not image_object.is_authorized(request.context):
raise exception.NotAuthorized
image_object.set_public(operation=='add')
@@ -287,10 +311,10 @@ class ImageResource(Resource):
def render_DELETE(self, request):
""" delete a registered image """
- image_id = self.get_argument("image_id", u"")
+ image_id = get_argument(request, "image_id", u"")
image_object = image.Image(image_id)
- if not image.is_authorized(request.context):
+ if not image_object.is_authorized(request.context):
raise exception.NotAuthorized
image_object.delete()
@@ -298,9 +322,13 @@ class ImageResource(Resource):
request.setResponseCode(204)
return ''
-def get_application():
+def get_site():
root = S3()
- factory = server.Site(root)
+ site = server.Site(root)
+ return site
+
+def get_application():
+ factory = get_site()
application = service.Application("objectstore")
objectStoreService = internet.TCPServer(FLAGS.s3_port, factory)
objectStoreService.setServiceParent(application)
diff --git a/nova/process.py b/nova/process.py
index d3558ed2e..2dc56372f 100644
--- a/nova/process.py
+++ b/nova/process.py
@@ -205,13 +205,13 @@ class ProcessPool(object):
self._pool.release()
return rv
-class SharedPool(ProcessPool):
+class SharedPool(object):
_instance = None
- def __new__(cls, *args, **kwargs):
- if not cls._instance:
- cls._instance = super(SharedPool, cls).__new__(
- cls, *args, **kwargs)
- return cls._instance
+ def __init__(self):
+ if SharedPool._instance is None:
+ self.__class__._instance = ProcessPool()
+ def __getattr__(self, key):
+ return getattr(self._instance, key)
def simple_execute(cmd, **kwargs):
return SharedPool().simple_execute(cmd, **kwargs)
diff --git a/nova/rpc.py b/nova/rpc.py
index ef463e84b..ebf140d92 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -151,6 +151,7 @@ class TopicPublisher(Publisher):
def __init__(self, connection=None, topic="broadcast"):
self.routing_key = topic
self.exchange = FLAGS.control_exchange
+ self.durable = False
super(TopicPublisher, self).__init__(connection=connection)
@@ -242,7 +243,7 @@ def send_message(topic, message, wait=True):
consumer.register_callback(generic_response)
publisher = messaging.Publisher(connection=Connection.instance(),
- exchange="nova",
+ exchange=FLAGS.control_exchange,
exchange_type="topic",
routing_key=topic)
publisher.send(message)
diff --git a/nova/service.py b/nova/service.py
new file mode 100644
index 000000000..96281bc6b
--- /dev/null
+++ b/nova/service.py
@@ -0,0 +1,103 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Generic Node baseclass for all workers that run on hosts
+"""
+
+import inspect
+import logging
+import os
+
+from twisted.internet import defer
+from twisted.internet import task
+from twisted.application import service
+
+from nova import datastore
+from nova import flags
+from nova import rpc
+from nova.compute import model
+
+
+FLAGS = flags.FLAGS
+
+flags.DEFINE_integer('report_interval', 10,
+ 'seconds between nodes reporting state to cloud',
+ lower_bound=1)
+
+class Service(object, service.Service):
+ """Base class for workers that run on hosts"""
+
+ @classmethod
+ def create(cls,
+ report_interval=None, # defaults to flag
+ bin_name=None, # defaults to basename of executable
+ topic=None): # defaults to basename - "nova-" part
+ """Instantiates class and passes back application object"""
+ if not report_interval:
+ # NOTE(vish): set here because if it is set to flag in the
+ # parameter list, it wrongly uses the default
+ report_interval = FLAGS.report_interval
+ # NOTE(vish): magic to automatically determine bin_name and topic
+ if not bin_name:
+ bin_name = os.path.basename(inspect.stack()[-1][1])
+ if not topic:
+ topic = bin_name.rpartition("nova-")[2]
+ logging.warn("Starting %s node" % topic)
+ node_instance = cls()
+
+ conn = rpc.Connection.instance()
+ consumer_all = rpc.AdapterConsumer(
+ connection=conn,
+ topic='%s' % topic,
+ proxy=node_instance)
+
+ consumer_node = rpc.AdapterConsumer(
+ connection=conn,
+ topic='%s.%s' % (topic, FLAGS.node_name),
+ proxy=node_instance)
+
+ pulse = task.LoopingCall(node_instance.report_state,
+ FLAGS.node_name,
+ bin_name)
+ pulse.start(interval=report_interval, now=False)
+
+ consumer_all.attach_to_twisted()
+ consumer_node.attach_to_twisted()
+
+ # This is the parent service that twistd will be looking for when it
+ # parses this file, return it so that we can get it into globals below
+ application = service.Application(bin_name)
+ node_instance.setServiceParent(application)
+ return application
+
+ @defer.inlineCallbacks
+ def report_state(self, nodename, daemon):
+ # TODO(termie): make this pattern be more elegant. -todd
+ try:
+ record = model.Daemon(nodename, daemon)
+ record.heartbeat()
+ if getattr(self, "model_disconnected", False):
+ self.model_disconnected = False
+ logging.error("Recovered model server connection!")
+
+ except datastore.ConnectionError, ex:
+ if not getattr(self, "model_disconnected", False):
+ self.model_disconnected = True
+ logging.exception("model server went away")
+ yield
diff --git a/nova/test.py b/nova/test.py
index 5dcf0b9b0..6fbcab5e4 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -156,9 +156,9 @@ class BaseTestCase(TrialTestCase):
Example (callback chain, ugly):
- d = self.node.terminate_instance(instance_id) # a Deferred instance
+ d = self.compute.terminate_instance(instance_id) # a Deferred instance
def _describe(_):
- d_desc = self.node.describe_instances() # another Deferred instance
+ d_desc = self.compute.describe_instances() # another Deferred instance
return d_desc
def _checkDescribe(rv):
self.assertEqual(rv, [])
@@ -169,8 +169,8 @@ class BaseTestCase(TrialTestCase):
Example (inline callbacks! yay!):
- yield self.node.terminate_instance(instance_id)
- rv = yield self.node.describe_instances()
+ yield self.compute.terminate_instance(instance_id)
+ rv = yield self.compute.describe_instances()
self.assertEqual(rv, [])
If the test fits the Inline Callbacks pattern we will automatically
diff --git a/nova/tests/access_unittest.py b/nova/tests/access_unittest.py
index 8500dd0cb..fa0a090a0 100644
--- a/nova/tests/access_unittest.py
+++ b/nova/tests/access_unittest.py
@@ -22,7 +22,7 @@ import logging
from nova import exception
from nova import flags
from nova import test
-from nova.auth.users import UserManager
+from nova.auth import manager
from nova.auth import rbac
@@ -33,9 +33,9 @@ class Context(object):
class AccessTestCase(test.BaseTestCase):
def setUp(self):
super(AccessTestCase, self).setUp()
- FLAGS.fake_libvirt = True
+ FLAGS.connection_type = 'fake'
FLAGS.fake_storage = True
- um = UserManager.instance()
+ um = manager.AuthManager()
# Make test users
try:
self.testadmin = um.create_user('testadmin')
@@ -79,7 +79,7 @@ class AccessTestCase(test.BaseTestCase):
#user is set in each test
def tearDown(self):
- um = UserManager.instance()
+ um = manager.AuthManager()
# Delete the test project
um.delete_project('testproj')
# Delete the test user
diff --git a/nova/tests/api_unittest.py b/nova/tests/api_unittest.py
index e5e2afe26..9d072866c 100644
--- a/nova/tests/api_unittest.py
+++ b/nova/tests/api_unittest.py
@@ -26,7 +26,7 @@ from twisted.internet import defer
from nova import flags
from nova import test
-from nova.auth import users
+from nova.auth import manager
from nova.endpoint import api
from nova.endpoint import cloud
@@ -43,7 +43,11 @@ def boto_to_tornado(method, path, headers, data, host, connection=None):
connection should be a FakeTornadoHttpConnection instance
"""
- headers = httpserver.HTTPHeaders()
+ try:
+ headers = httpserver.HTTPHeaders()
+ except AttributeError:
+ from tornado import httputil
+ headers = httputil.HTTPHeaders()
for k, v in headers.iteritems():
headers[k] = v
@@ -150,7 +154,7 @@ class ApiEc2TestCase(test.BaseTestCase):
def setUp(self):
super(ApiEc2TestCase, self).setUp()
- self.users = users.UserManager.instance()
+ self.manager = manager.AuthManager()
self.cloud = cloud.CloudController()
self.host = '127.0.0.1'
@@ -175,25 +179,22 @@ class ApiEc2TestCase(test.BaseTestCase):
def test_describe_instances(self):
self.expect_http()
self.mox.ReplayAll()
- try:
- self.users.create_user('fake', 'fake', 'fake')
- except Exception, _err:
- pass # User may already exist
+ user = self.manager.create_user('fake', 'fake', 'fake')
+ project = self.manager.create_project('fake', 'fake', 'fake')
self.assertEqual(self.ec2.get_all_instances(), [])
- self.users.delete_user('fake')
+ self.manager.delete_project(project)
+ self.manager.delete_user(user)
def test_get_all_key_pairs(self):
self.expect_http()
self.mox.ReplayAll()
keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd") for x in range(random.randint(4, 8)))
- try:
- self.users.create_user('fake', 'fake', 'fake')
- except Exception, _err:
- pass # User may already exist
- self.users.generate_key_pair('fake', keyname)
+ user = self.manager.create_user('fake', 'fake', 'fake')
+ project = self.manager.create_project('fake', 'fake', 'fake')
+ self.manager.generate_key_pair(user.id, keyname)
rv = self.ec2.get_all_key_pairs()
self.assertTrue(filter(lambda k: k.name == keyname, rv))
- self.users.delete_user('fake')
-
+ self.manager.delete_project(project)
+ self.manager.delete_user(user)
diff --git a/nova/tests/users_unittest.py b/nova/tests/auth_unittest.py
index 301721075..2167c2385 100644
--- a/nova/tests/users_unittest.py
+++ b/nova/tests/auth_unittest.py
@@ -25,41 +25,41 @@ import unittest
from nova import crypto
from nova import flags
from nova import test
-from nova.auth import users
+from nova.auth import manager
from nova.endpoint import cloud
FLAGS = flags.FLAGS
-class UserTestCase(test.BaseTestCase):
+class AuthTestCase(test.BaseTestCase):
flush_db = False
def setUp(self):
- super(UserTestCase, self).setUp()
- self.flags(fake_libvirt=True,
+ super(AuthTestCase, self).setUp()
+ self.flags(connection_type='fake',
fake_storage=True)
- self.users = users.UserManager.instance()
+ self.manager = manager.AuthManager()
def test_001_can_create_users(self):
- self.users.create_user('test1', 'access', 'secret')
- self.users.create_user('test2')
+ self.manager.create_user('test1', 'access', 'secret')
+ self.manager.create_user('test2')
def test_002_can_get_user(self):
- user = self.users.get_user('test1')
+ user = self.manager.get_user('test1')
def test_003_can_retreive_properties(self):
- user = self.users.get_user('test1')
+ user = self.manager.get_user('test1')
self.assertEqual('test1', user.id)
self.assertEqual('access', user.access)
self.assertEqual('secret', user.secret)
def test_004_signature_is_valid(self):
- #self.assertTrue(self.users.authenticate( **boto.generate_url ... ? ? ? ))
+ #self.assertTrue(self.manager.authenticate( **boto.generate_url ... ? ? ? ))
pass
#raise NotImplementedError
def test_005_can_get_credentials(self):
return
- credentials = self.users.get_user('test1').get_credentials()
+ credentials = self.manager.get_user('test1').get_credentials()
self.assertEqual(credentials,
'export EC2_ACCESS_KEY="access"\n' +
'export EC2_SECRET_KEY="secret"\n' +
@@ -68,14 +68,14 @@ class UserTestCase(test.BaseTestCase):
'export EC2_USER_ID="test1"\n')
def test_006_test_key_storage(self):
- user = self.users.get_user('test1')
+ user = self.manager.get_user('test1')
user.create_key_pair('public', 'key', 'fingerprint')
key = user.get_key_pair('public')
self.assertEqual('key', key.public_key)
self.assertEqual('fingerprint', key.fingerprint)
def test_007_test_key_generation(self):
- user = self.users.get_user('test1')
+ user = self.manager.get_user('test1')
private_key, fingerprint = user.generate_key_pair('public2')
key = RSA.load_key_string(private_key, callback=lambda: None)
bio = BIO.MemoryBuffer()
@@ -87,71 +87,71 @@ class UserTestCase(test.BaseTestCase):
converted.split(" ")[1].strip())
def test_008_can_list_key_pairs(self):
- keys = self.users.get_user('test1').get_key_pairs()
+ keys = self.manager.get_user('test1').get_key_pairs()
self.assertTrue(filter(lambda k: k.name == 'public', keys))
self.assertTrue(filter(lambda k: k.name == 'public2', keys))
def test_009_can_delete_key_pair(self):
- self.users.get_user('test1').delete_key_pair('public')
- keys = self.users.get_user('test1').get_key_pairs()
+ self.manager.get_user('test1').delete_key_pair('public')
+ keys = self.manager.get_user('test1').get_key_pairs()
self.assertFalse(filter(lambda k: k.name == 'public', keys))
def test_010_can_list_users(self):
- users = self.users.get_users()
+ users = self.manager.get_users()
logging.warn(users)
self.assertTrue(filter(lambda u: u.id == 'test1', users))
def test_101_can_add_user_role(self):
- self.assertFalse(self.users.has_role('test1', 'itsec'))
- self.users.add_role('test1', 'itsec')
- self.assertTrue(self.users.has_role('test1', 'itsec'))
+ self.assertFalse(self.manager.has_role('test1', 'itsec'))
+ self.manager.add_role('test1', 'itsec')
+ self.assertTrue(self.manager.has_role('test1', 'itsec'))
def test_199_can_remove_user_role(self):
- self.assertTrue(self.users.has_role('test1', 'itsec'))
- self.users.remove_role('test1', 'itsec')
- self.assertFalse(self.users.has_role('test1', 'itsec'))
+ self.assertTrue(self.manager.has_role('test1', 'itsec'))
+ self.manager.remove_role('test1', 'itsec')
+ self.assertFalse(self.manager.has_role('test1', 'itsec'))
def test_201_can_create_project(self):
- project = self.users.create_project('testproj', 'test1', 'A test project', ['test1'])
- self.assertTrue(filter(lambda p: p.name == 'testproj', self.users.get_projects()))
+ project = self.manager.create_project('testproj', 'test1', 'A test project', ['test1'])
+ self.assertTrue(filter(lambda p: p.name == 'testproj', self.manager.get_projects()))
self.assertEqual(project.name, 'testproj')
self.assertEqual(project.description, 'A test project')
self.assertEqual(project.project_manager_id, 'test1')
self.assertTrue(project.has_member('test1'))
def test_202_user1_is_project_member(self):
- self.assertTrue(self.users.get_user('test1').is_project_member('testproj'))
+ self.assertTrue(self.manager.get_user('test1').is_project_member('testproj'))
def test_203_user2_is_not_project_member(self):
- self.assertFalse(self.users.get_user('test2').is_project_member('testproj'))
+ self.assertFalse(self.manager.get_user('test2').is_project_member('testproj'))
def test_204_user1_is_project_manager(self):
- self.assertTrue(self.users.get_user('test1').is_project_manager('testproj'))
+ self.assertTrue(self.manager.get_user('test1').is_project_manager('testproj'))
def test_205_user2_is_not_project_manager(self):
- self.assertFalse(self.users.get_user('test2').is_project_manager('testproj'))
+ self.assertFalse(self.manager.get_user('test2').is_project_manager('testproj'))
def test_206_can_add_user_to_project(self):
- self.users.add_to_project('test2', 'testproj')
- self.assertTrue(self.users.get_project('testproj').has_member('test2'))
+ self.manager.add_to_project('test2', 'testproj')
+ self.assertTrue(self.manager.get_project('testproj').has_member('test2'))
def test_208_can_remove_user_from_project(self):
- self.users.remove_from_project('test2', 'testproj')
- self.assertFalse(self.users.get_project('testproj').has_member('test2'))
+ self.manager.remove_from_project('test2', 'testproj')
+ self.assertFalse(self.manager.get_project('testproj').has_member('test2'))
def test_209_can_generate_x509(self):
# MUST HAVE RUN CLOUD SETUP BY NOW
self.cloud = cloud.CloudController()
self.cloud.setup()
- private_key, signed_cert_string = self.users.get_project('testproj').generate_x509_cert('test1')
- logging.debug(signed_cert_string)
+ _key, cert_str = self.manager._generate_x509_cert('test1', 'testproj')
+ logging.debug(cert_str)
# Need to verify that it's signed by the right intermediate CA
full_chain = crypto.fetch_ca(project_id='testproj', chain=True)
int_cert = crypto.fetch_ca(project_id='testproj', chain=False)
cloud_cert = crypto.fetch_ca()
logging.debug("CA chain:\n\n =====\n%s\n\n=====" % full_chain)
- signed_cert = X509.load_cert_string(signed_cert_string)
+ signed_cert = X509.load_cert_string(cert_str)
chain_cert = X509.load_cert_string(full_chain)
int_cert = X509.load_cert_string(int_cert)
cloud_cert = X509.load_cert_string(cloud_cert)
@@ -164,42 +164,51 @@ class UserTestCase(test.BaseTestCase):
self.assertFalse(signed_cert.verify(cloud_cert.get_pubkey()))
def test_210_can_add_project_role(self):
- project = self.users.get_project('testproj')
+ project = self.manager.get_project('testproj')
self.assertFalse(project.has_role('test1', 'sysadmin'))
- self.users.add_role('test1', 'sysadmin')
+ self.manager.add_role('test1', 'sysadmin')
self.assertFalse(project.has_role('test1', 'sysadmin'))
project.add_role('test1', 'sysadmin')
self.assertTrue(project.has_role('test1', 'sysadmin'))
def test_211_can_remove_project_role(self):
- project = self.users.get_project('testproj')
+ project = self.manager.get_project('testproj')
self.assertTrue(project.has_role('test1', 'sysadmin'))
project.remove_role('test1', 'sysadmin')
self.assertFalse(project.has_role('test1', 'sysadmin'))
- self.users.remove_role('test1', 'sysadmin')
+ self.manager.remove_role('test1', 'sysadmin')
self.assertFalse(project.has_role('test1', 'sysadmin'))
def test_212_vpn_ip_and_port_looks_valid(self):
- project = self.users.get_project('testproj')
+ project = self.manager.get_project('testproj')
self.assert_(project.vpn_ip)
self.assert_(project.vpn_port >= FLAGS.vpn_start_port)
self.assert_(project.vpn_port <= FLAGS.vpn_end_port)
def test_213_too_many_vpns(self):
- for i in xrange(users.Vpn.num_ports_for_ip(FLAGS.vpn_ip)):
- users.Vpn.create("vpnuser%s" % i)
- self.assertRaises(users.NoMorePorts, users.Vpn.create, "boom")
+ vpns = []
+ for i in xrange(manager.Vpn.num_ports_for_ip(FLAGS.vpn_ip)):
+ vpns.append(manager.Vpn.create("vpnuser%s" % i))
+ self.assertRaises(manager.NoMorePorts, manager.Vpn.create, "boom")
+ for vpn in vpns:
+ vpn.destroy()
+
+ def test_214_can_retrieve_project_by_user(self):
+ project = self.manager.create_project('testproj2', 'test2', 'Another test project', ['test2'])
+ self.assert_(len(self.manager.get_projects()) > 1)
+ self.assertEqual(len(self.manager.get_projects('test2')), 1)
def test_299_can_delete_project(self):
- self.users.delete_project('testproj')
- self.assertFalse(filter(lambda p: p.name == 'testproj', self.users.get_projects()))
+ self.manager.delete_project('testproj')
+ self.assertFalse(filter(lambda p: p.name == 'testproj', self.manager.get_projects()))
+ self.manager.delete_project('testproj2')
def test_999_can_delete_users(self):
- self.users.delete_user('test1')
- users = self.users.get_users()
+ self.manager.delete_user('test1')
+ users = self.manager.get_users()
self.assertFalse(filter(lambda u: u.id == 'test1', users))
- self.users.delete_user('test2')
- self.assertEqual(self.users.get_user('test2'), None)
+ self.manager.delete_user('test2')
+ self.assertEqual(self.manager.get_user('test2'), None)
if __name__ == "__main__":
diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py
index b8614fdc8..40837405c 100644
--- a/nova/tests/cloud_unittest.py
+++ b/nova/tests/cloud_unittest.py
@@ -27,8 +27,8 @@ from xml.etree import ElementTree
from nova import flags
from nova import rpc
from nova import test
-from nova.auth import users
-from nova.compute import node
+from nova.auth import manager
+from nova.compute import service
from nova.endpoint import api
from nova.endpoint import cloud
@@ -39,9 +39,8 @@ FLAGS = flags.FLAGS
class CloudTestCase(test.BaseTestCase):
def setUp(self):
super(CloudTestCase, self).setUp()
- self.flags(fake_libvirt=True,
- fake_storage=True,
- fake_users=True)
+ self.flags(connection_type='fake',
+ fake_storage=True)
self.conn = rpc.Connection.instance()
logging.getLogger().setLevel(logging.DEBUG)
@@ -53,37 +52,37 @@ class CloudTestCase(test.BaseTestCase):
proxy=self.cloud)
self.injected.append(self.cloud_consumer.attach_to_tornado(self.ioloop))
- # set up a node
- self.node = node.Node()
- self.node_consumer = rpc.AdapterConsumer(connection=self.conn,
+ # set up a service
+ self.compute = service.ComputeService()
+ self.compute_consumer = rpc.AdapterConsumer(connection=self.conn,
topic=FLAGS.compute_topic,
- proxy=self.node)
- self.injected.append(self.node_consumer.attach_to_tornado(self.ioloop))
+ proxy=self.compute)
+ self.injected.append(self.compute_consumer.attach_to_tornado(self.ioloop))
try:
- users.UserManager.instance().create_user('admin', 'admin', 'admin')
+ manager.AuthManager().create_user('admin', 'admin', 'admin')
except: pass
- admin = users.UserManager.instance().get_user('admin')
- project = users.UserManager.instance().create_project('proj', 'admin', 'proj')
+ admin = manager.AuthManager().get_user('admin')
+ project = manager.AuthManager().create_project('proj', 'admin', 'proj')
self.context = api.APIRequestContext(handler=None,project=project,user=admin)
def tearDown(self):
- users.UserManager.instance().delete_project('proj')
- users.UserManager.instance().delete_user('admin')
+ manager.AuthManager().delete_project('proj')
+ manager.AuthManager().delete_user('admin')
def test_console_output(self):
- if FLAGS.fake_libvirt:
+ if FLAGS.connection_type == 'fake':
logging.debug("Can't test instances without a real virtual env.")
return
instance_id = 'foo'
- inst = yield self.node.run_instance(instance_id)
+ inst = yield self.compute.run_instance(instance_id)
output = yield self.cloud.get_console_output(self.context, [instance_id])
logging.debug(output)
self.assert_(output)
- rv = yield self.node.terminate_instance(instance_id)
+ rv = yield self.compute.terminate_instance(instance_id)
def test_run_instances(self):
- if FLAGS.fake_libvirt:
+ if FLAGS.connection_type == 'fake':
logging.debug("Can't test instances without a real virtual env.")
return
image_id = FLAGS.default_image
@@ -104,7 +103,7 @@ class CloudTestCase(test.BaseTestCase):
break
self.assert_(rv)
- if not FLAGS.fake_libvirt:
+        if FLAGS.connection_type != 'fake':
time.sleep(45) # Should use boto for polling here
for reservations in rv['reservationSet']:
# for res_id in reservations.keys():
@@ -112,7 +111,7 @@ class CloudTestCase(test.BaseTestCase):
# for instance in reservations[res_id]:
for instance in reservations[reservations.keys()[0]]:
logging.debug("Terminating instance %s" % instance['instance_id'])
- rv = yield self.node.terminate_instance(instance['instance_id'])
+ rv = yield self.compute.terminate_instance(instance['instance_id'])
def test_instance_update_state(self):
def instance(num):
diff --git a/nova/tests/node_unittest.py b/nova/tests/compute_unittest.py
index 93942d79e..da0f82e3a 100644
--- a/nova/tests/node_unittest.py
+++ b/nova/tests/compute_unittest.py
@@ -26,7 +26,7 @@ from nova import flags
from nova import test
from nova import utils
from nova.compute import model
-from nova.compute import node
+from nova.compute import service
FLAGS = flags.FLAGS
@@ -53,14 +53,13 @@ class InstanceXmlTestCase(test.TrialTestCase):
# rv = yield first_node.terminate_instance(instance_id)
-class NodeConnectionTestCase(test.TrialTestCase):
+class ComputeConnectionTestCase(test.TrialTestCase):
def setUp(self):
logging.getLogger().setLevel(logging.DEBUG)
- super(NodeConnectionTestCase, self).setUp()
- self.flags(fake_libvirt=True,
- fake_storage=True,
- fake_users=True)
- self.node = node.Node()
+ super(ComputeConnectionTestCase, self).setUp()
+ self.flags(connection_type='fake',
+ fake_storage=True)
+ self.compute = service.ComputeService()
def create_instance(self):
instdir = model.InstanceDirectory()
@@ -81,48 +80,48 @@ class NodeConnectionTestCase(test.TrialTestCase):
def test_run_describe_terminate(self):
instance_id = self.create_instance()
- rv = yield self.node.run_instance(instance_id)
+ rv = yield self.compute.run_instance(instance_id)
- rv = yield self.node.describe_instances()
+ rv = yield self.compute.describe_instances()
logging.info("Running instances: %s", rv)
self.assertEqual(rv[instance_id].name, instance_id)
- rv = yield self.node.terminate_instance(instance_id)
+ rv = yield self.compute.terminate_instance(instance_id)
- rv = yield self.node.describe_instances()
+ rv = yield self.compute.describe_instances()
logging.info("After terminating instances: %s", rv)
self.assertEqual(rv, {})
@defer.inlineCallbacks
def test_reboot(self):
instance_id = self.create_instance()
- rv = yield self.node.run_instance(instance_id)
+ rv = yield self.compute.run_instance(instance_id)
- rv = yield self.node.describe_instances()
+ rv = yield self.compute.describe_instances()
self.assertEqual(rv[instance_id].name, instance_id)
- yield self.node.reboot_instance(instance_id)
+ yield self.compute.reboot_instance(instance_id)
- rv = yield self.node.describe_instances()
+ rv = yield self.compute.describe_instances()
self.assertEqual(rv[instance_id].name, instance_id)
- rv = yield self.node.terminate_instance(instance_id)
+ rv = yield self.compute.terminate_instance(instance_id)
@defer.inlineCallbacks
def test_console_output(self):
instance_id = self.create_instance()
- rv = yield self.node.run_instance(instance_id)
+ rv = yield self.compute.run_instance(instance_id)
- console = yield self.node.get_console_output(instance_id)
+ console = yield self.compute.get_console_output(instance_id)
self.assert_(console)
- rv = yield self.node.terminate_instance(instance_id)
+ rv = yield self.compute.terminate_instance(instance_id)
@defer.inlineCallbacks
def test_run_instance_existing(self):
instance_id = self.create_instance()
- rv = yield self.node.run_instance(instance_id)
+ rv = yield self.compute.run_instance(instance_id)
- rv = yield self.node.describe_instances()
+ rv = yield self.compute.describe_instances()
self.assertEqual(rv[instance_id].name, instance_id)
- self.assertRaises(exception.Error, self.node.run_instance, instance_id)
- rv = yield self.node.terminate_instance(instance_id)
+ self.assertRaises(exception.Error, self.compute.run_instance, instance_id)
+ rv = yield self.compute.terminate_instance(instance_id)
diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py
index d32f40d8f..a7310fb26 100644
--- a/nova/tests/fake_flags.py
+++ b/nova/tests/fake_flags.py
@@ -20,9 +20,9 @@ from nova import flags
FLAGS = flags.FLAGS
-FLAGS.fake_libvirt = True
+FLAGS.connection_type = 'fake'
FLAGS.fake_storage = True
FLAGS.fake_rabbit = True
FLAGS.fake_network = True
-FLAGS.fake_users = True
+FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
FLAGS.verbose = True
diff --git a/nova/tests/future_unittest.py b/nova/tests/future_unittest.py
deleted file mode 100644
index da5470ffe..000000000
--- a/nova/tests/future_unittest.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import logging
-import mox
-import StringIO
-import time
-from tornado import ioloop
-from twisted.internet import defer
-import unittest
-from xml.etree import ElementTree
-
-from nova import cloud
-from nova import exception
-from nova import flags
-from nova import node
-from nova import rpc
-from nova import test
-
-
-FLAGS = flags.FLAGS
-
-
-class AdminTestCase(test.BaseTestCase):
- def setUp(self):
- super(AdminTestCase, self).setUp()
- self.flags(fake_libvirt=True,
- fake_rabbit=True)
-
- self.conn = rpc.Connection.instance()
-
- logging.getLogger().setLevel(logging.INFO)
-
- # set up our cloud
- self.cloud = cloud.CloudController()
- self.cloud_consumer = rpc.AdapterConsumer(connection=self.conn,
- topic=FLAGS.cloud_topic,
- proxy=self.cloud)
- self.injected.append(self.cloud_consumer.attach_to_tornado(self.ioloop))
-
- # set up a node
- self.node = node.Node()
- self.node_consumer = rpc.AdapterConsumer(connection=self.conn,
- topic=FLAGS.compute_topic,
- proxy=self.node)
- self.injected.append(self.node_consumer.attach_to_tornado(self.ioloop))
-
- def test_flush_terminated(self):
- # Launch an instance
-
- # Wait until it's running
-
- # Terminate it
-
- # Wait until it's terminated
-
- # Flush terminated nodes
-
- # ASSERT that it's gone
- pass
diff --git a/nova/tests/model_unittest.py b/nova/tests/model_unittest.py
index 1bd7e527f..6825cfe2a 100644
--- a/nova/tests/model_unittest.py
+++ b/nova/tests/model_unittest.py
@@ -16,6 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from datetime import datetime, timedelta
import logging
import time
from twisted.internet import defer
@@ -25,7 +26,6 @@ from nova import flags
from nova import test
from nova import utils
from nova.compute import model
-from nova.compute import node
FLAGS = flags.FLAGS
@@ -34,9 +34,8 @@ FLAGS = flags.FLAGS
class ModelTestCase(test.TrialTestCase):
def setUp(self):
super(ModelTestCase, self).setUp()
- self.flags(fake_libvirt=True,
- fake_storage=True,
- fake_users=True)
+ self.flags(connection_type='fake',
+ fake_storage=True)
def tearDown(self):
model.Instance('i-test').destroy()
@@ -66,6 +65,12 @@ class ModelTestCase(test.TrialTestCase):
daemon.save()
return daemon
+ def create_session_token(self):
+ session_token = model.SessionToken('tk12341234')
+ session_token['user'] = 'testuser'
+ session_token.save()
+ return session_token
+
@defer.inlineCallbacks
def test_create_instance(self):
"""store with create_instace, then test that a load finds it"""
@@ -204,3 +209,91 @@ class ModelTestCase(test.TrialTestCase):
if x.identifier == 'testhost:nova-testdaemon':
found = True
self.assertTrue(found)
+
+ @defer.inlineCallbacks
+ def test_create_session_token(self):
+ """create"""
+ d = yield self.create_session_token()
+ d = model.SessionToken(d.token)
+ self.assertFalse(d.is_new_record())
+
+ @defer.inlineCallbacks
+ def test_delete_session_token(self):
+ """create, then destroy, then make sure loads a new record"""
+ instance = yield self.create_session_token()
+ yield instance.destroy()
+ newinst = yield model.SessionToken(instance.token)
+ self.assertTrue(newinst.is_new_record())
+
+ @defer.inlineCallbacks
+ def test_session_token_added_to_set(self):
+ """create, then check that it is included in list"""
+ instance = yield self.create_session_token()
+ found = False
+ for x in model.SessionToken.all():
+ if x.identifier == instance.token:
+ found = True
+ self.assert_(found)
+
+ @defer.inlineCallbacks
+ def test_session_token_associates_user(self):
+ """create, then check that it is listed for the user"""
+ instance = yield self.create_session_token()
+ found = False
+ for x in model.SessionToken.associated_to('user', 'testuser'):
+ if x.identifier == instance.identifier:
+ found = True
+ self.assertTrue(found)
+
+ @defer.inlineCallbacks
+ def test_session_token_generation(self):
+ instance = yield model.SessionToken.generate('username', 'TokenType')
+ self.assertFalse(instance.is_new_record())
+
+ @defer.inlineCallbacks
+ def test_find_generated_session_token(self):
+ instance = yield model.SessionToken.generate('username', 'TokenType')
+ found = yield model.SessionToken.lookup(instance.identifier)
+ self.assert_(found)
+
+ def test_update_session_token_expiry(self):
+ instance = model.SessionToken('tk12341234')
+ oldtime = datetime.utcnow()
+ instance['expiry'] = oldtime.strftime(utils.TIME_FORMAT)
+ instance.update_expiry()
+ expiry = utils.parse_isotime(instance['expiry'])
+ self.assert_(expiry > datetime.utcnow())
+
+ @defer.inlineCallbacks
+ def test_session_token_lookup_when_expired(self):
+ instance = yield model.SessionToken.generate("testuser")
+ instance['expiry'] = datetime.utcnow().strftime(utils.TIME_FORMAT)
+ instance.save()
+ inst = model.SessionToken.lookup(instance.identifier)
+ self.assertFalse(inst)
+
+ @defer.inlineCallbacks
+ def test_session_token_lookup_when_not_expired(self):
+ instance = yield model.SessionToken.generate("testuser")
+ inst = model.SessionToken.lookup(instance.identifier)
+ self.assert_(inst)
+
+ @defer.inlineCallbacks
+ def test_session_token_is_expired_when_expired(self):
+ instance = yield model.SessionToken.generate("testuser")
+ instance['expiry'] = datetime.utcnow().strftime(utils.TIME_FORMAT)
+ self.assert_(instance.is_expired())
+
+ @defer.inlineCallbacks
+ def test_session_token_is_expired_when_not_expired(self):
+ instance = yield model.SessionToken.generate("testuser")
+ self.assertFalse(instance.is_expired())
+
+ @defer.inlineCallbacks
+ def test_session_token_ttl(self):
+ instance = yield model.SessionToken.generate("testuser")
+ now = datetime.utcnow()
+ delta = timedelta(hours=1)
+ instance['expiry'] = (now + delta).strftime(utils.TIME_FORMAT)
+ # give 5 seconds of fuzziness
+ self.assert_(abs(instance.ttl() - FLAGS.auth_token_ttl) < 5)
diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py
index a1d1789e2..f24eefb0d 100644
--- a/nova/tests/network_unittest.py
+++ b/nova/tests/network_unittest.py
@@ -23,7 +23,7 @@ import logging
from nova import flags
from nova import test
from nova import utils
-from nova.auth import users
+from nova.auth import manager
from nova.compute import network
from nova.compute.exception import NoMoreAddresses
@@ -32,66 +32,71 @@ FLAGS = flags.FLAGS
class NetworkTestCase(test.TrialTestCase):
def setUp(self):
super(NetworkTestCase, self).setUp()
- self.flags(fake_libvirt=True,
+ # NOTE(vish): if you change these flags, make sure to change the
+ # flags in the corresponding section in nova-dhcpbridge
+ self.flags(connection_type='fake',
fake_storage=True,
fake_network=True,
+ auth_driver='nova.auth.ldapdriver.FakeLdapDriver',
network_size=32)
logging.getLogger().setLevel(logging.DEBUG)
- self.manager = users.UserManager.instance()
+ self.manager = manager.AuthManager()
self.dnsmasq = FakeDNSMasq()
- try:
- self.manager.create_user('netuser', 'netuser', 'netuser')
- except: pass
+ self.user = self.manager.create_user('netuser', 'netuser', 'netuser')
+ self.projects = []
+ self.projects.append(self.manager.create_project('netuser',
+ 'netuser',
+ 'netuser'))
for i in range(0, 6):
name = 'project%s' % i
- if not self.manager.get_project(name):
- self.manager.create_project(name, 'netuser', name)
+ self.projects.append(self.manager.create_project(name,
+ 'netuser',
+ name))
self.network = network.PublicNetworkController()
def tearDown(self):
super(NetworkTestCase, self).tearDown()
- for i in range(0, 6):
- name = 'project%s' % i
- self.manager.delete_project(name)
- self.manager.delete_user('netuser')
+ for project in self.projects:
+ self.manager.delete_project(project)
+ self.manager.delete_user(self.user)
def test_public_network_allocation(self):
pubnet = IPy.IP(flags.FLAGS.public_range)
- address = self.network.allocate_ip("netuser", "project0", "public")
+ address = self.network.allocate_ip(self.user.id, self.projects[0].id, "public")
self.assertTrue(IPy.IP(address) in pubnet)
self.assertTrue(IPy.IP(address) in self.network.network)
def test_allocate_deallocate_ip(self):
address = network.allocate_ip(
- "netuser", "project0", utils.generate_mac())
+ self.user.id, self.projects[0].id, utils.generate_mac())
logging.debug("Was allocated %s" % (address))
- net = network.get_project_network("project0", "default")
- self.assertEqual(True, is_in_project(address, "project0"))
+ net = network.get_project_network(self.projects[0].id, "default")
+ self.assertEqual(True, is_in_project(address, self.projects[0].id))
mac = utils.generate_mac()
hostname = "test-host"
self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name)
rv = network.deallocate_ip(address)
# Doesn't go away until it's dhcp released
- self.assertEqual(True, is_in_project(address, "project0"))
+ self.assertEqual(True, is_in_project(address, self.projects[0].id))
self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
- self.assertEqual(False, is_in_project(address, "project0"))
+ self.assertEqual(False, is_in_project(address, self.projects[0].id))
def test_range_allocation(self):
mac = utils.generate_mac()
secondmac = utils.generate_mac()
hostname = "test-host"
address = network.allocate_ip(
- "netuser", "project0", mac)
+ self.user.id, self.projects[0].id, mac)
secondaddress = network.allocate_ip(
- "netuser", "project1", secondmac)
- net = network.get_project_network("project0", "default")
- secondnet = network.get_project_network("project1", "default")
+            self.user.id, self.projects[1].id, secondmac)
+ net = network.get_project_network(self.projects[0].id, "default")
+ secondnet = network.get_project_network(self.projects[1].id, "default")
- self.assertEqual(True, is_in_project(address, "project0"))
- self.assertEqual(True, is_in_project(secondaddress, "project1"))
- self.assertEqual(False, is_in_project(address, "project1"))
+ self.assertEqual(True, is_in_project(address, self.projects[0].id))
+ self.assertEqual(True, is_in_project(secondaddress, self.projects[1].id))
+ self.assertEqual(False, is_in_project(address, self.projects[1].id))
# Addresses are allocated before they're issued
self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name)
@@ -100,34 +105,34 @@ class NetworkTestCase(test.TrialTestCase):
rv = network.deallocate_ip(address)
self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
- self.assertEqual(False, is_in_project(address, "project0"))
+ self.assertEqual(False, is_in_project(address, self.projects[0].id))
# First address release shouldn't affect the second
- self.assertEqual(True, is_in_project(secondaddress, "project1"))
+ self.assertEqual(True, is_in_project(secondaddress, self.projects[1].id))
rv = network.deallocate_ip(secondaddress)
self.dnsmasq.release_ip(secondmac, secondaddress,
hostname, secondnet.bridge_name)
- self.assertEqual(False, is_in_project(secondaddress, "project1"))
+ self.assertEqual(False, is_in_project(secondaddress, self.projects[1].id))
def test_subnet_edge(self):
- secondaddress = network.allocate_ip("netuser", "project0",
+ secondaddress = network.allocate_ip(self.user.id, self.projects[0].id,
utils.generate_mac())
hostname = "toomany-hosts"
- for project in range(1,5):
- project_id = "project%s" % (project)
+ for i in range(1,5):
+ project_id = self.projects[i].id
mac = utils.generate_mac()
mac2 = utils.generate_mac()
mac3 = utils.generate_mac()
address = network.allocate_ip(
- "netuser", project_id, mac)
+ self.user.id, project_id, mac)
address2 = network.allocate_ip(
- "netuser", project_id, mac2)
+ self.user.id, project_id, mac2)
address3 = network.allocate_ip(
- "netuser", project_id, mac3)
- self.assertEqual(False, is_in_project(address, "project0"))
- self.assertEqual(False, is_in_project(address2, "project0"))
- self.assertEqual(False, is_in_project(address3, "project0"))
+ self.user.id, project_id, mac3)
+ self.assertEqual(False, is_in_project(address, self.projects[0].id))
+ self.assertEqual(False, is_in_project(address2, self.projects[0].id))
+ self.assertEqual(False, is_in_project(address3, self.projects[0].id))
rv = network.deallocate_ip(address)
rv = network.deallocate_ip(address2)
rv = network.deallocate_ip(address3)
@@ -135,7 +140,7 @@ class NetworkTestCase(test.TrialTestCase):
self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
self.dnsmasq.release_ip(mac2, address2, hostname, net.bridge_name)
self.dnsmasq.release_ip(mac3, address3, hostname, net.bridge_name)
- net = network.get_project_network("project0", "default")
+ net = network.get_project_network(self.projects[0].id, "default")
rv = network.deallocate_ip(secondaddress)
self.dnsmasq.release_ip(mac, secondaddress, hostname, net.bridge_name)
@@ -153,34 +158,36 @@ class NetworkTestCase(test.TrialTestCase):
environment's setup.
Network size is set in test fixture's setUp method.
-
+
There are FLAGS.cnt_vpn_clients addresses reserved for VPN (NUM_RESERVED_VPN_IPS)
And there are NUM_STATIC_IPS that are always reserved by Nova for the necessary
services (gateway, CloudPipe, etc)
- So we should get flags.network_size - (NUM_STATIC_IPS +
- NUM_PREALLOCATED_IPS +
+ So we should get flags.network_size - (NUM_STATIC_IPS +
+ NUM_PREALLOCATED_IPS +
NUM_RESERVED_VPN_IPS)
usable addresses
"""
- net = network.get_project_network("project0", "default")
+ net = network.get_project_network(self.projects[0].id, "default")
# Determine expected number of available IP addresses
num_static_ips = net.num_static_ips
num_preallocated_ips = len(net.hosts.keys())
num_reserved_vpn_ips = flags.FLAGS.cnt_vpn_clients
- num_available_ips = flags.FLAGS.network_size - (num_static_ips + num_preallocated_ips + num_reserved_vpn_ips)
+ num_available_ips = flags.FLAGS.network_size - (num_static_ips +
+ num_preallocated_ips +
+ num_reserved_vpn_ips)
hostname = "toomany-hosts"
macs = {}
addresses = {}
for i in range(0, (num_available_ips - 1)):
macs[i] = utils.generate_mac()
- addresses[i] = network.allocate_ip("netuser", "project0", macs[i])
+ addresses[i] = network.allocate_ip(self.user.id, self.projects[0].id, macs[i])
self.dnsmasq.issue_ip(macs[i], addresses[i], hostname, net.bridge_name)
- self.assertRaises(NoMoreAddresses, network.allocate_ip, "netuser", "project0", utils.generate_mac())
+ self.assertRaises(NoMoreAddresses, network.allocate_ip, self.user.id, self.projects[0].id, utils.generate_mac())
for i in range(0, (num_available_ips - 1)):
rv = network.deallocate_ip(addresses[i])
diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py
index f47ca7f00..dd00377e7 100644
--- a/nova/tests/objectstore_unittest.py
+++ b/nova/tests/objectstore_unittest.py
@@ -16,6 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import boto
import glob
import hashlib
import logging
@@ -25,9 +26,16 @@ import tempfile
from nova import flags
from nova import objectstore
+from nova.objectstore import bucket # for buckets_path flag
+from nova.objectstore import image # for images_path flag
from nova import test
-from nova.auth import users
+from nova.auth import manager
+from nova.objectstore.handler import S3
+from nova.exception import NotEmpty, NotFound, NotAuthorized
+from boto.s3.connection import S3Connection, OrdinaryCallingFormat
+from twisted.internet import reactor, threads, defer
+from twisted.web import http, server
FLAGS = flags.FLAGS
@@ -51,13 +59,12 @@ os.makedirs(os.path.join(oss_tempdir, 'buckets'))
class ObjectStoreTestCase(test.BaseTestCase):
def setUp(self):
super(ObjectStoreTestCase, self).setUp()
- self.flags(fake_users=True,
- buckets_path=os.path.join(oss_tempdir, 'buckets'),
+ self.flags(buckets_path=os.path.join(oss_tempdir, 'buckets'),
images_path=os.path.join(oss_tempdir, 'images'),
ca_path=os.path.join(os.path.dirname(__file__), 'CA'))
logging.getLogger().setLevel(logging.DEBUG)
- self.um = users.UserManager.instance()
+ self.um = manager.AuthManager()
try:
self.um.create_user('user1')
except: pass
@@ -96,49 +103,37 @@ class ObjectStoreTestCase(test.BaseTestCase):
# another user is not authorized
self.context.user = self.um.get_user('user2')
self.context.project = self.um.get_project('proj2')
- self.assert_(bucket.is_authorized(self.context) == False)
+ self.assertFalse(bucket.is_authorized(self.context))
# admin is authorized to use bucket
self.context.user = self.um.get_user('admin_user')
self.context.project = None
- self.assert_(bucket.is_authorized(self.context))
+ self.assertTrue(bucket.is_authorized(self.context))
# new buckets are empty
- self.assert_(bucket.list_keys()['Contents'] == [])
+ self.assertTrue(bucket.list_keys()['Contents'] == [])
# storing keys works
bucket['foo'] = "bar"
- self.assert_(len(bucket.list_keys()['Contents']) == 1)
+ self.assertEquals(len(bucket.list_keys()['Contents']), 1)
- self.assert_(bucket['foo'].read() == 'bar')
+ self.assertEquals(bucket['foo'].read(), 'bar')
# md5 of key works
- self.assert_(bucket['foo'].md5 == hashlib.md5('bar').hexdigest())
-
- # deleting non-empty bucket throws exception
- exception = False
- try:
- bucket.delete()
- except:
- exception = True
+ self.assertEquals(bucket['foo'].md5, hashlib.md5('bar').hexdigest())
- self.assert_(exception)
+ # deleting non-empty bucket should throw a NotEmpty exception
+ self.assertRaises(NotEmpty, bucket.delete)
# deleting key
del bucket['foo']
- # deleting empty button
+ # deleting empty bucket
bucket.delete()
# accessing deleted bucket throws exception
- exception = False
- try:
- objectstore.bucket.Bucket('new_bucket')
- except:
- exception = True
-
- self.assert_(exception)
+ self.assertRaises(NotFound, objectstore.bucket.Bucket, 'new_bucket')
def test_images(self):
self.context.user = self.um.get_user('user1')
@@ -167,37 +162,108 @@ class ObjectStoreTestCase(test.BaseTestCase):
# verify image permissions
self.context.user = self.um.get_user('user2')
self.context.project = self.um.get_project('proj2')
- self.assert_(my_img.is_authorized(self.context) == False)
-
-# class ApiObjectStoreTestCase(test.BaseTestCase):
-# def setUp(self):
-# super(ApiObjectStoreTestCase, self).setUp()
-# FLAGS.fake_users = True
-# FLAGS.buckets_path = os.path.join(tempdir, 'buckets')
-# FLAGS.images_path = os.path.join(tempdir, 'images')
-# FLAGS.ca_path = os.path.join(os.path.dirname(__file__), 'CA')
-#
-# self.users = users.UserManager.instance()
-# self.app = handler.Application(self.users)
-#
-# self.host = '127.0.0.1'
-#
-# self.conn = boto.s3.connection.S3Connection(
-# aws_access_key_id=user.access,
-# aws_secret_access_key=user.secret,
-# is_secure=False,
-# calling_format=boto.s3.connection.OrdinaryCallingFormat(),
-# port=FLAGS.s3_port,
-# host=FLAGS.s3_host)
-#
-# self.mox.StubOutWithMock(self.ec2, 'new_http_connection')
-#
-# def tearDown(self):
-# FLAGS.Reset()
-# super(ApiObjectStoreTestCase, self).tearDown()
-#
-# def test_describe_instances(self):
-# self.expect_http()
-# self.mox.ReplayAll()
-#
-# self.assertEqual(self.ec2.get_all_instances(), [])
+ self.assertFalse(my_img.is_authorized(self.context))
+
+
+class TestHTTPChannel(http.HTTPChannel):
+ # Otherwise we end up with an unclean reactor
+ def checkPersistence(self, _, __):
+ return False
+
+
+class TestSite(server.Site):
+ protocol = TestHTTPChannel
+
+
+class S3APITestCase(test.TrialTestCase):
+ def setUp(self):
+ super(S3APITestCase, self).setUp()
+
+ FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
+ FLAGS.buckets_path = os.path.join(oss_tempdir, 'buckets')
+
+ self.um = manager.AuthManager()
+ self.admin_user = self.um.create_user('admin', admin=True)
+ self.admin_project = self.um.create_project('admin', self.admin_user)
+
+ shutil.rmtree(FLAGS.buckets_path)
+ os.mkdir(FLAGS.buckets_path)
+
+ root = S3()
+ self.site = TestSite(root)
+ self.listening_port = reactor.listenTCP(0, self.site, interface='127.0.0.1')
+ self.tcp_port = self.listening_port.getHost().port
+
+
+ if not boto.config.has_section('Boto'):
+ boto.config.add_section('Boto')
+ boto.config.set('Boto', 'num_retries', '0')
+ self.conn = S3Connection(aws_access_key_id=self.admin_user.access,
+ aws_secret_access_key=self.admin_user.secret,
+ host='127.0.0.1',
+ port=self.tcp_port,
+ is_secure=False,
+ calling_format=OrdinaryCallingFormat())
+
+ # Don't attempt to reuse connections
+ def get_http_connection(host, is_secure):
+ return self.conn.new_http_connection(host, is_secure)
+ self.conn.get_http_connection = get_http_connection
+
+ def _ensure_empty_list(self, l):
+ self.assertEquals(len(l), 0, "List was not empty")
+ return True
+
+ def _ensure_only_bucket(self, l, name):
+ self.assertEquals(len(l), 1, "List didn't have exactly one element in it")
+ self.assertEquals(l[0].name, name, "Wrong name")
+
+ def test_000_list_buckets(self):
+ d = threads.deferToThread(self.conn.get_all_buckets)
+ d.addCallback(self._ensure_empty_list)
+ return d
+
+ def test_001_create_and_delete_bucket(self):
+ bucket_name = 'testbucket'
+
+ d = threads.deferToThread(self.conn.create_bucket, bucket_name)
+ d.addCallback(lambda _:threads.deferToThread(self.conn.get_all_buckets))
+
+ def ensure_only_bucket(l, name):
+ self.assertEquals(len(l), 1, "List didn't have exactly one element in it")
+ self.assertEquals(l[0].name, name, "Wrong name")
+ d.addCallback(ensure_only_bucket, bucket_name)
+
+ d.addCallback(lambda _:threads.deferToThread(self.conn.delete_bucket, bucket_name))
+ d.addCallback(lambda _:threads.deferToThread(self.conn.get_all_buckets))
+ d.addCallback(self._ensure_empty_list)
+ return d
+
+ def test_002_create_bucket_and_key_and_delete_key_again(self):
+ bucket_name = 'testbucket'
+ key_name = 'somekey'
+ key_contents = 'somekey'
+
+ d = threads.deferToThread(self.conn.create_bucket, bucket_name)
+ d.addCallback(lambda b:threads.deferToThread(b.new_key, key_name))
+ d.addCallback(lambda k:threads.deferToThread(k.set_contents_from_string, key_contents))
+ def ensure_key_contents(bucket_name, key_name, contents):
+ bucket = self.conn.get_bucket(bucket_name)
+ key = bucket.get_key(key_name)
+ self.assertEquals(key.get_contents_as_string(), contents, "Bad contents")
+ d.addCallback(lambda _:threads.deferToThread(ensure_key_contents, bucket_name, key_name, key_contents))
+ def delete_key(bucket_name, key_name):
+ bucket = self.conn.get_bucket(bucket_name)
+ key = bucket.get_key(key_name)
+ key.delete()
+ d.addCallback(lambda _:threads.deferToThread(delete_key, bucket_name, key_name))
+ d.addCallback(lambda _:threads.deferToThread(self.conn.get_bucket, bucket_name))
+ d.addCallback(lambda b:threads.deferToThread(b.get_all_keys))
+ d.addCallback(self._ensure_empty_list)
+ return d
+
+ def tearDown(self):
+ self.um.delete_user('admin')
+ self.um.delete_project('admin')
+ return defer.DeferredList([defer.maybeDeferred(self.listening_port.stopListening)])
+ super(S3APITestCase, self).tearDown()
diff --git a/nova/tests/process_unittest.py b/nova/tests/process_unittest.py
index 1c15b69a0..75187e1fc 100644
--- a/nova/tests/process_unittest.py
+++ b/nova/tests/process_unittest.py
@@ -118,5 +118,12 @@ class ProcessTestCase(test.TrialTestCase):
def test_shared_pool_is_singleton(self):
pool1 = process.SharedPool()
pool2 = process.SharedPool()
- self.assert_(id(pool1) == id(pool2))
-
+ self.assertEqual(id(pool1._instance), id(pool2._instance))
+
+ def test_shared_pool_works_as_singleton(self):
+ d1 = process.simple_execute('sleep 1')
+ d2 = process.simple_execute('sleep 0.005')
+ # lp609749: would have failed with
+ # exceptions.AssertionError: Someone released me too many times:
+ # too many tokens!
+ return d1
diff --git a/nova/tests/real_flags.py b/nova/tests/real_flags.py
index 9e106f227..121f4eb41 100644
--- a/nova/tests/real_flags.py
+++ b/nova/tests/real_flags.py
@@ -20,9 +20,8 @@ from nova import flags
FLAGS = flags.FLAGS
-FLAGS.fake_libvirt = False
+FLAGS.connection_type = 'libvirt'
FLAGS.fake_storage = False
FLAGS.fake_rabbit = False
FLAGS.fake_network = False
-FLAGS.fake_users = False
FLAGS.verbose = False
diff --git a/nova/tests/storage_unittest.py b/nova/tests/storage_unittest.py
index 60576d74f..f400cd2fd 100644
--- a/nova/tests/storage_unittest.py
+++ b/nova/tests/storage_unittest.py
@@ -34,7 +34,7 @@ class StorageTestCase(test.TrialTestCase):
super(StorageTestCase, self).setUp()
self.mynode = node.Node()
self.mystorage = None
- self.flags(fake_libvirt=True,
+ self.flags(connection_type='fake',
fake_storage=True)
self.mystorage = storage.BlockStore()
diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py
new file mode 100644
index 000000000..b536ac383
--- /dev/null
+++ b/nova/tests/volume_unittest.py
@@ -0,0 +1,115 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+
+from nova import compute
+from nova import exception
+from nova import flags
+from nova import test
+from nova.volume import service as volume_service
+
+
+FLAGS = flags.FLAGS
+
+
+class VolumeTestCase(test.TrialTestCase):
+ def setUp(self):
+ logging.getLogger().setLevel(logging.DEBUG)
+ super(VolumeTestCase, self).setUp()
+ self.compute = compute.service.ComputeService()
+ self.volume = None
+ self.flags(connection_type='fake',
+ fake_storage=True)
+ self.volume = volume_service.VolumeService()
+
+ def test_run_create_volume(self):
+ vol_size = '0'
+ user_id = 'fake'
+ project_id = 'fake'
+ volume_id = self.volume.create_volume(vol_size, user_id, project_id)
+ # TODO(termie): get_volume returns differently than create_volume
+ self.assertEqual(volume_id,
+ volume_service.get_volume(volume_id)['volume_id'])
+
+ rv = self.volume.delete_volume(volume_id)
+ self.assertRaises(exception.Error,
+ volume_service.get_volume,
+ volume_id)
+
+ def test_too_big_volume(self):
+ vol_size = '1001'
+ user_id = 'fake'
+ project_id = 'fake'
+ self.assertRaises(TypeError,
+ self.volume.create_volume,
+ vol_size, user_id, project_id)
+
+ def test_too_many_volumes(self):
+ vol_size = '1'
+ user_id = 'fake'
+ project_id = 'fake'
+ num_shelves = FLAGS.last_shelf_id - FLAGS.first_shelf_id + 1
+ total_slots = FLAGS.slots_per_shelf * num_shelves
+ vols = []
+ for i in xrange(total_slots):
+ vid = self.volume.create_volume(vol_size, user_id, project_id)
+ vols.append(vid)
+ self.assertRaises(volume_service.NoMoreVolumes,
+ self.volume.create_volume,
+ vol_size, user_id, project_id)
+ for id in vols:
+ self.volume.delete_volume(id)
+
+ def test_run_attach_detach_volume(self):
+ # Create one volume and one compute to test with
+ instance_id = "storage-test"
+ vol_size = "5"
+ user_id = "fake"
+ project_id = 'fake'
+ mountpoint = "/dev/sdf"
+ volume_id = self.volume.create_volume(vol_size, user_id, project_id)
+
+ volume_obj = volume_service.get_volume(volume_id)
+ volume_obj.start_attach(instance_id, mountpoint)
+ rv = yield self.compute.attach_volume(volume_id,
+ instance_id,
+ mountpoint)
+ self.assertEqual(volume_obj['status'], "in-use")
+ self.assertEqual(volume_obj['attachStatus'], "attached")
+ self.assertEqual(volume_obj['instance_id'], instance_id)
+ self.assertEqual(volume_obj['mountpoint'], mountpoint)
+
+ self.assertRaises(exception.Error,
+ self.volume.delete_volume,
+ volume_id)
+
+ rv = yield self.volume.detach_volume(volume_id)
+ volume_obj = volume_service.get_volume(volume_id)
+ self.assertEqual(volume_obj['status'], "available")
+
+ rv = self.volume.delete_volume(volume_id)
+ self.assertRaises(exception.Error,
+ volume_service.get_volume,
+ volume_id)
+
+ def test_multi_node(self):
+ # TODO(termie): Figure out how to test with two nodes,
+ # each of them having a different FLAG for storage_node
+ # This will allow us to test cross-node interactions
+ pass
diff --git a/nova/twistd.py b/nova/twistd.py
index 32a46ce03..ecb6e2892 100644
--- a/nova/twistd.py
+++ b/nova/twistd.py
@@ -22,7 +22,6 @@ manage pid files and support syslogging.
"""
import logging
-import logging.handlers
import os
import signal
import sys
@@ -32,7 +31,6 @@ from twisted.python import log
from twisted.python import reflect
from twisted.python import runtime
from twisted.python import usage
-import UserDict
from nova import flags
@@ -161,6 +159,13 @@ def WrapTwistedOptions(wrapped):
except (AttributeError, KeyError):
self._data[key] = value
+ def get(self, key, default):
+ key = key.replace('-', '_')
+ try:
+ return getattr(FLAGS, key)
+ except (AttributeError, KeyError):
+ return self._data.get(key, default)
+
return TwistedOptionsToFlags
@@ -209,9 +214,14 @@ def serve(filename):
FLAGS.pidfile = '%s.pid' % name
elif FLAGS.pidfile.endswith('twistd.pid'):
FLAGS.pidfile = FLAGS.pidfile.replace('twistd.pid', '%s.pid' % name)
-
if not FLAGS.logfile:
FLAGS.logfile = '%s.log' % name
+ elif FLAGS.logfile.endswith('twistd.log'):
+ FLAGS.logfile = FLAGS.logfile.replace('twistd.log', '%s.log' % name)
+ if not FLAGS.prefix:
+ FLAGS.prefix = name
+ elif FLAGS.prefix.endswith('twisted'):
+ FLAGS.prefix = FLAGS.prefix.replace('twisted', name)
action = 'start'
if len(argv) > 1:
@@ -228,8 +238,16 @@ def serve(filename):
print 'usage: %s [options] [start|stop|restart]' % argv[0]
sys.exit(1)
- formatter = logging.Formatter(
- name + '(%(name)s): %(levelname)s %(message)s')
+ class NoNewlineFormatter(logging.Formatter):
+ """Strips newlines from default formatter"""
+ def format(self, record):
+ """Grabs default formatter's output and strips newlines"""
+ data = logging.Formatter.format(self, record)
+ return data.replace("\n", "--")
+
+ # NOTE(vish): syslog-ng doesn't handle newlines from tracebacks very well
+ formatter = NoNewlineFormatter(
+ '(%(name)s): %(levelname)s %(message)s')
handler = logging.StreamHandler(log.StdioOnnaStick())
handler.setFormatter(formatter)
logging.getLogger().addHandler(handler)
@@ -239,11 +257,6 @@ def serve(filename):
else:
logging.getLogger().setLevel(logging.WARNING)
- if FLAGS.syslog:
- syslog = logging.handlers.SysLogHandler(address='/dev/log')
- syslog.setFormatter(formatter)
- logging.getLogger().addHandler(syslog)
-
logging.debug("Full set of FLAGS:")
for flag in FLAGS:
logging.debug("%s : %s" % (flag, FLAGS.get(flag, None)))
diff --git a/nova/utils.py b/nova/utils.py
index d01c33042..fd30f1f2d 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -20,7 +20,7 @@
System-level utilities and helper functions.
"""
-from datetime import datetime
+from datetime import datetime, timedelta
import inspect
import logging
import os
@@ -29,10 +29,20 @@ import subprocess
import socket
import sys
+from nova import exception
from nova import flags
FLAGS = flags.FLAGS
+TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
+def import_class(import_str):
+ """Returns a class from a string including module and class"""
+ mod_str, _sep, class_str = import_str.rpartition('.')
+ try:
+ __import__(mod_str)
+ return getattr(sys.modules[mod_str], class_str)
+ except (ImportError, AttributeError):
+ raise exception.NotFound('Class %s cannot be found' % class_str)
def fetchfile(url, target):
logging.debug("Fetching %s" % url)
@@ -123,4 +133,7 @@ def get_my_ip():
def isotime(at=None):
if not at:
at = datetime.utcnow()
- return at.strftime("%Y-%m-%dT%H:%M:%SZ")
+ return at.strftime(TIME_FORMAT)
+
+def parse_isotime(timestr):
+ return datetime.strptime(timestr, TIME_FORMAT)
diff --git a/nova/virt/__init__.py b/nova/virt/__init__.py
new file mode 100644
index 000000000..3d598c463
--- /dev/null
+++ b/nova/virt/__init__.py
@@ -0,0 +1,15 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/nova/virt/connection.py b/nova/virt/connection.py
new file mode 100644
index 000000000..004adb19d
--- /dev/null
+++ b/nova/virt/connection.py
@@ -0,0 +1,45 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import flags
+from nova.virt import fake
+from nova.virt import libvirt_conn
+from nova.virt import xenapi
+
+
+FLAGS = flags.FLAGS
+
+
+def get_connection(read_only=False):
+ # TODO(termie): maybe lazy load after initial check for permissions
+ # TODO(termie): check whether we can be disconnected
+ t = FLAGS.connection_type
+ if t == 'fake':
+ conn = fake.get_connection(read_only)
+ elif t == 'libvirt':
+ conn = libvirt_conn.get_connection(read_only)
+ elif t == 'xenapi':
+ conn = xenapi.get_connection(read_only)
+ else:
+ raise Exception('Unknown connection type "%s"' % t)
+
+ if conn is None:
+ logging.error('Failed to open connection to the hypervisor')
+ sys.exit(1)
+ return conn
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
new file mode 100644
index 000000000..d9ae5ac96
--- /dev/null
+++ b/nova/virt/fake.py
@@ -0,0 +1,81 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+A fake (in-memory) hypervisor+api. Allows nova testing w/o a hypervisor.
+"""
+
+import logging
+
+from nova.compute import power_state
+
+
+def get_connection(_):
+ # The read_only parameter is ignored.
+ return FakeConnection.instance()
+
+
+class FakeConnection(object):
+ def __init__(self):
+ self.instances = {}
+
+ @classmethod
+ def instance(cls):
+ if not hasattr(cls, '_instance'):
+ cls._instance = cls()
+ return cls._instance
+
+ def list_instances(self):
+ return self.instances.keys()
+
+ def spawn(self, instance):
+ fake_instance = FakeInstance()
+ self.instances[instance.name] = fake_instance
+ fake_instance._state = power_state.RUNNING
+
+ def reboot(self, instance):
+ pass
+
+ def destroy(self, instance):
+ del self.instances[instance.name]
+
+ def get_info(self, instance_id):
+ i = self.instances[instance_id]
+ return {'state': i._state,
+ 'max_mem': 0,
+ 'mem': 0,
+ 'num_cpu': 2,
+ 'cpu_time': 0}
+
+ def list_disks(self, instance_id):
+ return ['A_DISK']
+
+ def list_interfaces(self, instance_id):
+ return ['A_VIF']
+
+ def block_stats(self, instance_id, disk_id):
+ return [0L, 0L, 0L, 0L, None]
+
+ def interface_stats(self, instance_id, iface_id):
+ return [0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L]
+
+
+class FakeInstance(object):
+ def __init__(self):
+ self._state = power_state.NOSTATE
diff --git a/nova/virt/images.py b/nova/virt/images.py
new file mode 100644
index 000000000..92210e242
--- /dev/null
+++ b/nova/virt/images.py
@@ -0,0 +1,72 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Handling of VM disk images.
+"""
+
+import os.path
+import time
+
+from nova import flags
+from nova import process
+from nova.auth import signer
+
+FLAGS = flags.FLAGS
+
+flags.DEFINE_bool('use_s3', True,
+ 'whether to get images from s3 or use local copy')
+
+
+def fetch(image, path, user):
+ if FLAGS.use_s3:
+ f = _fetch_s3_image
+ else:
+ f = _fetch_local_image
+ return f(image, path, user)
+
+def _fetch_s3_image(image, path, user):
+ url = _image_url('%s/image' % image)
+
+ # This should probably move somewhere else, like e.g. a download_as
+ # method on User objects and at the same time get rewritten to use
+ # twisted web client.
+ headers = {}
+ headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
+
+ uri = '/' + url.partition('/')[2]
+ auth = signer.Signer(user.secret.encode()).s3_authorization(headers, 'GET', uri)
+ headers['Authorization'] = 'AWS %s:%s' % (user.access, auth)
+
+ cmd = ['/usr/bin/curl', '--silent', url]
+ for (k,v) in headers.iteritems():
+ cmd += ['-H', '%s: %s' % (k,v)]
+
+ cmd += ['-o', path]
+ return process.SharedPool().execute(executable=cmd[0], args=cmd[1:])
+
+def _fetch_local_image(image, path, _):
+ source = _image_path('%s/image' % image)
+ return process.simple_execute('cp %s %s' % (source, path))
+
+def _image_path(path):
+ return os.path.join(FLAGS.images_path, path)
+
+def _image_url(path):
+ return "%s:%s/_images/%s" % (FLAGS.s3_host, FLAGS.s3_port, path)
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
new file mode 100644
index 000000000..c545e4190
--- /dev/null
+++ b/nova/virt/libvirt_conn.py
@@ -0,0 +1,355 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+A connection to a hypervisor (e.g. KVM) through libvirt.
+"""
+
+import json
+import logging
+import os.path
+import shutil
+import sys
+
+from twisted.internet import defer
+from twisted.internet import task
+
+from nova import exception
+from nova import flags
+from nova import process
+from nova import utils
+from nova.auth import manager
+from nova.compute import disk
+from nova.compute import instance_types
+from nova.compute import power_state
+from nova.virt import images
+
# Module handles for the lazily-imported libraries; populated by
# get_connection() on first use so they need not be installed otherwise.
libvirt = None
libxml2 = None

FLAGS = flags.FLAGS
flags.DEFINE_string('libvirt_xml_template',
                    utils.abspath('compute/libvirt.xml.template'),
                    'Libvirt XML Template')
+
def get_connection(read_only):
    """Return a LibvirtConnection, importing libvirt/libxml2 on first call.

    These are loaded late so that there's no need to install the
    libraries when not using libvirt.
    """
    global libvirt
    global libxml2
    # A loaded module is always truthy, so `or` only imports when still None.
    libvirt = libvirt or __import__('libvirt')
    libxml2 = libxml2 or __import__('libxml2')
    return LibvirtConnection(read_only)
+
+
class LibvirtConnection(object):
    """A connection to a libvirt-managed hypervisor (qemu:///system)."""

    def __init__(self, read_only):
        """Open a read-only or authenticated read-write libvirt connection."""
        auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT],
                'root',
                None]
        if read_only:
            self._conn = libvirt.openReadOnly('qemu:///system')
        else:
            self._conn = libvirt.openAuth('qemu:///system', auth, 0)

    def list_instances(self):
        """Return the names of all domains currently known to libvirt."""
        return [self._conn.lookupByID(x).name()
                for x in self._conn.listDomainsID()]

    def destroy(self, instance):
        """Forcibly terminate instance's domain and remove its files.

        Returns a Deferred that fires (after _cleanup) once the instance
        reaches the SHUTDOWN state; state is polled every 0.5s.
        """
        try:
            virt_dom = self._conn.lookupByName(instance.name)
            virt_dom.destroy()
        except Exception:
            # If the instance is already terminated, we're still happy
            pass
        d = defer.Deferred()
        d.addCallback(lambda _: self._cleanup(instance))
        # FIXME: What does this comment mean?
        # TODO(termie): short-circuit me for tests
        # WE'LL save this for when we do shutdown,
        # instead of destroy - but destroy returns immediately
        timer = task.LoopingCall(f=None)

        def _wait_for_shutdown():
            try:
                instance.update_state()
                if instance.state == power_state.SHUTDOWN:
                    timer.stop()
                    d.callback(None)
            except Exception:
                # Polling failed -- presumably the domain is gone; treat
                # that the same as a clean shutdown.
                instance.set_state(power_state.SHUTDOWN)
                timer.stop()
                d.callback(None)

        timer.f = _wait_for_shutdown
        timer.start(interval=0.5, now=True)
        return d

    def _cleanup(self, instance):
        """Delete the instance's on-disk directory tree."""
        target = os.path.abspath(instance.datamodel['basepath'])
        logging.info("Deleting instance files at %s", target)
        shutil.rmtree(target)

    @defer.inlineCallbacks
    @exception.wrap_exception
    def reboot(self, instance):
        """Destroy and re-create the domain from fresh XML, then wait
        (polling every 0.5s) until it reports as running again."""
        xml = self.toXml(instance)
        yield self._conn.lookupByName(instance.name).destroy()
        yield self._conn.createXML(xml, 0)

        d = defer.Deferred()
        timer = task.LoopingCall(f=None)

        def _wait_for_reboot():
            try:
                instance.update_state()
                if instance.is_running():
                    logging.debug('rebooted instance %s' % instance.name)
                    timer.stop()
                    d.callback(None)
            except Exception as exn:
                logging.error('_wait_for_reboot failed: %s' % exn)
                instance.set_state(power_state.SHUTDOWN)
                timer.stop()
                d.callback(None)

        timer.f = _wait_for_reboot
        timer.start(interval=0.5, now=True)
        yield d

    @defer.inlineCallbacks
    @exception.wrap_exception
    def spawn(self, instance):
        """Build the instance's images, define+start the domain, and wait
        (polling every 0.5s) until it reports as running."""
        xml = self.toXml(instance)
        instance.set_state(power_state.NOSTATE, 'launching')
        yield self._create_image(instance, xml)
        yield self._conn.createXML(xml, 0)
        # TODO(termie): this should actually register
        # a callback to check for successful boot
        logging.debug("Instance is running")

        local_d = defer.Deferred()
        timer = task.LoopingCall(f=None)

        def _wait_for_boot():
            try:
                instance.update_state()
                if instance.is_running():
                    logging.debug('booted instance %s' % instance.name)
                    timer.stop()
                    local_d.callback(None)
            except Exception as exn:
                logging.error("_wait_for_boot exception %s" % exn)
                # BUG FIX: set_state lives on the instance, not on this
                # connection object (was: self.set_state -> AttributeError).
                instance.set_state(power_state.SHUTDOWN)
                logging.error('Failed to boot instance %s' % instance.name)
                timer.stop()
                local_d.callback(None)

        timer.f = _wait_for_boot
        timer.start(interval=0.5, now=True)
        yield local_d

    @defer.inlineCallbacks
    def _create_image(self, instance, libvirt_xml):
        """Lay down everything needed to boot: the libvirt XML, disk,
        kernel and ramdisk images; inject key/network data and partition
        the raw disk image."""
        # syntactic nicety
        data = instance.datamodel
        basepath = lambda x='': self.basepath(instance, x)

        # ensure directories exist and are writable
        yield process.simple_execute('mkdir -p %s' % basepath())
        yield process.simple_execute('chmod 0777 %s' % basepath())

        # TODO(termie): these are blocking calls, it would be great
        # if they weren't.
        logging.info('Creating image for: %s', data['instance_id'])
        f = open(basepath('libvirt.xml'), 'w')
        f.write(libvirt_xml)
        f.close()

        user = manager.AuthManager().get_user(data['user_id'])
        # Only fetch images that are not already present on disk.
        if not os.path.exists(basepath('disk')):
            yield images.fetch(data['image_id'], basepath('disk-raw'), user)
        if not os.path.exists(basepath('kernel')):
            yield images.fetch(data['kernel_id'], basepath('kernel'), user)
        if not os.path.exists(basepath('ramdisk')):
            yield images.fetch(data['ramdisk_id'], basepath('ramdisk'), user)

        execute = lambda cmd, input=None: \
                  process.simple_execute(cmd=cmd,
                                         input=input,
                                         error_ok=1)

        key = data['key_data']
        net = None
        if FLAGS.simple_network:
            with open(FLAGS.simple_network_template) as f:
                net = f.read() % {'address': data['private_dns_name'],
                                  'network': FLAGS.simple_network_network,
                                  'netmask': FLAGS.simple_network_netmask,
                                  'gateway': FLAGS.simple_network_gateway,
                                  'broadcast': FLAGS.simple_network_broadcast,
                                  'dns': FLAGS.simple_network_dns}
        if key or net:
            logging.info('Injecting data into image %s', data['image_id'])
            yield disk.inject_data(basepath('disk-raw'), key, net,
                                   execute=execute)

        if os.path.exists(basepath('disk')):
            yield process.simple_execute('rm -f %s' % basepath('disk'))

        bytes = (instance_types.INSTANCE_TYPES[data['instance_type']]['local_gb']
                 * 1024 * 1024 * 1024)
        yield disk.partition(
            basepath('disk-raw'), basepath('disk'), bytes, execute=execute)

    def basepath(self, instance, path=''):
        """Return an absolute path under the instance's base directory."""
        return os.path.abspath(os.path.join(instance.datamodel['basepath'],
                                            path))

    def toXml(self, instance):
        """Render the libvirt XML template with the instance's datamodel."""
        # TODO(termie): cache?
        logging.debug("Starting the toXML method")
        libvirt_xml = open(FLAGS.libvirt_xml_template).read()
        xml_info = instance.datamodel.copy()
        # TODO(joshua): Make this xml express the attached disks as well

        # TODO(termie): lazy lazy hack because xml is annoying
        xml_info['nova'] = json.dumps(instance.datamodel.copy())
        libvirt_xml = libvirt_xml % xml_info
        logging.debug("Finished the toXML method")

        return libvirt_xml

    def get_info(self, instance_id):
        """Return state/memory/cpu statistics for the named domain."""
        virt_dom = self._conn.lookupByName(instance_id)
        (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info()
        return {'state': state,
                'max_mem': max_mem,
                'mem': mem,
                'num_cpu': num_cpu,
                'cpu_time': cpu_time}

    def get_disks(self, instance_id):
        """
        Note that this function takes an instance ID, not an Instance, so
        that it can be called by monitor.

        Returns a list of all block devices for this domain.
        """
        domain = self._conn.lookupByName(instance_id)
        # TODO(devcamcar): Replace libxml2 with etree.
        xml = domain.XMLDesc(0)
        doc = None

        try:
            doc = libxml2.parseDoc(xml)
        except Exception:
            # Unparseable domain XML: report no devices rather than fail.
            return []

        ctx = doc.xpathNewContext()
        disks = []

        try:
            ret = ctx.xpathEval('/domain/devices/disk')

            for node in ret:
                devdst = None

                for child in node.children:
                    if child.name == 'target':
                        devdst = child.prop('dev')

                if devdst is None:
                    continue

                disks.append(devdst)
        finally:
            # libxml2 objects are not garbage collected; free explicitly.
            if ctx is not None:
                ctx.xpathFreeContext()
            if doc is not None:
                doc.freeDoc()

        return disks

    def get_interfaces(self, instance_id):
        """
        Note that this function takes an instance ID, not an Instance, so
        that it can be called by monitor.

        Returns a list of all network interfaces for this instance.
        """
        domain = self._conn.lookupByName(instance_id)
        # TODO(devcamcar): Replace libxml2 with etree.
        xml = domain.XMLDesc(0)
        doc = None

        try:
            doc = libxml2.parseDoc(xml)
        except Exception:
            return []

        ctx = doc.xpathNewContext()
        interfaces = []

        try:
            ret = ctx.xpathEval('/domain/devices/interface')

            for node in ret:
                devdst = None

                for child in node.children:
                    if child.name == 'target':
                        devdst = child.prop('dev')

                if devdst is None:
                    continue

                interfaces.append(devdst)
        finally:
            if ctx is not None:
                ctx.xpathFreeContext()
            if doc is not None:
                doc.freeDoc()

        return interfaces

    def block_stats(self, instance_id, disk):
        """
        Note that this function takes an instance ID, not an Instance, so
        that it can be called by monitor.
        """
        domain = self._conn.lookupByName(instance_id)
        return domain.blockStats(disk)

    def interface_stats(self, instance_id, interface):
        """
        Note that this function takes an instance ID, not an Instance, so
        that it can be called by monitor.
        """
        domain = self._conn.lookupByName(instance_id)
        return domain.interfaceStats(interface)
diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py
new file mode 100644
index 000000000..dc372e3e3
--- /dev/null
+++ b/nova/virt/xenapi.py
@@ -0,0 +1,152 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+A connection to XenServer or Xen Cloud Platform.
+"""
+
+import logging
+
+from twisted.internet import defer
+from twisted.internet import task
+
+from nova import exception
+from nova import flags
+from nova import process
+from nova.compute import power_state
+
# Module handle for the lazily-imported XenAPI library; populated by
# get_connection() on first use so it need not be installed otherwise.
XenAPI = None

FLAGS = flags.FLAGS
flags.DEFINE_string('xenapi_connection_url',
                    None,
                    'URL for connection to XenServer/Xen Cloud Platform. Required if connection_type=xenapi.')
flags.DEFINE_string('xenapi_connection_username',
                    'root',
                    'Username for connection to XenServer/Xen Cloud Platform. Used only if connection_type=xenapi.')
flags.DEFINE_string('xenapi_connection_password',
                    None,
                    'Password for connection to XenServer/Xen Cloud Platform. Used only if connection_type=xenapi.')
+
+
def get_connection(_):
    """Note that XenAPI doesn't have a read-only connection mode, so
    the read_only parameter is ignored."""
    global XenAPI
    if XenAPI is None:
        # Loaded late so that there's no need to install this library
        # when not using XenAPI.
        XenAPI = __import__('XenAPI')
    url = FLAGS.xenapi_connection_url
    username = FLAGS.xenapi_connection_username
    password = FLAGS.xenapi_connection_password
    if not url or password is None:
        raise Exception('Must specify xenapi_connection_url, '
                        'xenapi_connection_username (optionally), and '
                        'xenapi_connection_password to use '
                        'connection_type=xenapi')
    return XenAPIConnection(url, username, password)
+
+
class XenAPIConnection(object):
    """Hypervisor connection backed by a XenServer / Xen Cloud Platform
    session (XenAPI)."""

    # Map XenAPI power-state strings onto nova power states.
    power_state_from_xenapi = {
        'Halted'   : power_state.RUNNING, #FIXME
        'Running'  : power_state.RUNNING,
        'Paused'   : power_state.PAUSED,
        'Suspended': power_state.SHUTDOWN, # FIXME
        'Crashed'  : power_state.CRASHED
    }

    def __init__(self, url, user, pw):
        """Open and authenticate a XenAPI session."""
        self._conn = XenAPI.Session(url)
        self._conn.login_with_password(user, pw)

    def list_instances(self):
        """Return the name labels of all VMs known to the server."""
        # BUG FIX: the list was previously built but never returned.
        return [self._conn.xenapi.VM.get_name_label(vm)
                for vm in self._conn.xenapi.VM.get_all()]

    @defer.inlineCallbacks
    @exception.wrap_exception
    def spawn(self, instance):
        """Create (but do not yet start) a paravirtualized VM for instance.

        Raises if a VM with the instance's name already exists.
        """
        vm = self.lookup(instance.name)
        if vm is not None:
            raise Exception('Attempted to create non-unique name %s' %
                            instance.name)
        mem = str(long(instance.datamodel['memory_kb']) * 1024)
        vcpus = str(instance.datamodel['vcpus'])
        rec = {
            'name_label': instance.name,
            'name_description': '',
            'is_a_template': False,
            'memory_static_min': '0',
            'memory_static_max': mem,
            'memory_dynamic_min': mem,
            'memory_dynamic_max': mem,
            'VCPUs_at_startup': vcpus,
            'VCPUs_max': vcpus,
            'VCPUs_params': {},
            'actions_after_shutdown': 'destroy',
            'actions_after_reboot': 'restart',
            'actions_after_crash': 'destroy',
            'PV_bootloader': '',
            'PV_kernel': instance.datamodel['kernel_id'],
            'PV_ramdisk': instance.datamodel['ramdisk_id'],
            'PV_args': '',
            'PV_bootloader_args': '',
            'PV_legacy_args': '',
            'HVM_boot_policy': '',
            'HVM_boot_params': {},
            'platform': {},
            'PCI_bus': '',
            'recommendations': '',
            'affinity': '',
            'user_version': '0',
            'other_config': {},
        }
        vm = yield self._conn.xenapi.VM.create(rec)
        #yield self._conn.xenapi.VM.start(vm, False, False)

    @defer.inlineCallbacks
    def reboot(self, instance):
        """Cleanly reboot the VM for instance; raises if it doesn't exist.

        BUG FIX: without @defer.inlineCallbacks this method was a bare
        generator that callers never iterated, so the reboot never ran.
        """
        vm = self.lookup(instance.name)
        if vm is None:
            raise Exception('instance not present %s' % instance.name)
        yield self._conn.xenapi.VM.clean_reboot(vm)

    @defer.inlineCallbacks
    def destroy(self, instance):
        """Destroy the VM for instance; raises if it doesn't exist.

        BUG FIX: without @defer.inlineCallbacks this method was a bare
        generator that callers never iterated, so the destroy never ran.
        """
        vm = self.lookup(instance.name)
        if vm is None:
            raise Exception('instance not present %s' % instance.name)
        yield self._conn.xenapi.VM.destroy(vm)

    def get_info(self, instance_id):
        """Return state/memory/cpu statistics for the named VM."""
        vm = self.lookup(instance_id)
        if vm is None:
            # BUG FIX: previously formatted an undefined `instance` name.
            raise Exception('instance not present %s' % instance_id)
        rec = self._conn.xenapi.VM.get_record(vm)
        # BUG FIX: power_state_from_xenapi is a class attribute and must be
        # reached through self (a bare reference raised NameError).
        return {'state': self.power_state_from_xenapi[rec['power_state']],
                'max_mem': long(rec['memory_static_max']) >> 10,
                'mem': long(rec['memory_dynamic_max']) >> 10,
                'num_cpu': rec['VCPUs_max'],
                'cpu_time': 0}

    def lookup(self, i):
        """Return the VM reference with name label i, or None.

        Raises if the label is ambiguous (more than one match).
        """
        vms = self._conn.xenapi.VM.get_by_name_label(i)
        n = len(vms)
        if n == 0:
            return None
        elif n > 1:
            raise Exception('duplicate name found: %s' % i)
        else:
            return vms[0]
diff --git a/nova/volume/storage.py b/nova/volume/service.py
index de20f30b5..87a47f40a 100644
--- a/nova/volume/storage.py
+++ b/nova/volume/service.py
@@ -28,13 +28,14 @@ import os
import shutil
import socket
import tempfile
-import time
-from tornado import ioloop
+
from twisted.internet import defer
from nova import datastore
from nova import exception
from nova import flags
+from nova import process
+from nova import service
from nova import utils
from nova import validate
@@ -48,13 +49,13 @@ flags.DEFINE_string('aoe_eth_dev', 'eth0',
'Which device to export the volumes on')
flags.DEFINE_string('storage_name',
socket.gethostname(),
- 'name of this node')
+ 'name of this service')
flags.DEFINE_integer('first_shelf_id',
utils.last_octet(utils.get_my_ip()) * 10,
- 'AoE starting shelf_id for this node')
+ 'AoE starting shelf_id for this service')
flags.DEFINE_integer('last_shelf_id',
utils.last_octet(utils.get_my_ip()) * 10 + 9,
- 'AoE starting shelf_id for this node')
+ 'AoE starting shelf_id for this service')
flags.DEFINE_string('aoe_export_dir',
'/var/lib/vblade-persist/vblades',
'AoE directory where exports are created')
@@ -63,7 +64,7 @@ flags.DEFINE_integer('slots_per_shelf',
'Number of AoE slots per shelf')
flags.DEFINE_string('storage_availability_zone',
'nova',
- 'availability zone of this node')
+ 'availability zone of this service')
flags.DEFINE_boolean('fake_storage', False,
'Should we make real storage volumes to attach?')
@@ -80,14 +81,14 @@ def get_volume(volume_id):
return volume_class(volume_id=volume_id)
raise exception.Error("Volume does not exist")
-class BlockStore(object):
+class VolumeService(service.Service):
"""
- There is one BlockStore running on each volume node.
- However, each BlockStore can report on the state of
+ There is one VolumeNode running on each host.
+ However, each VolumeNode can report on the state of
*all* volumes in the cluster.
"""
def __init__(self):
- super(BlockStore, self).__init__()
+ super(VolumeService, self).__init__()
self.volume_class = Volume
if FLAGS.fake_storage:
FLAGS.aoe_export_dir = tempfile.mkdtemp()
@@ -102,10 +103,6 @@ class BlockStore(object):
except Exception, err:
pass
- def report_state(self):
- #TODO: aggregate the state of the system
- pass
-
@validate.rangetest(size=(0, 1000))
def create_volume(self, size, user_id, project_id):
"""
@@ -143,17 +140,24 @@ class BlockStore(object):
datastore.Redis.instance().srem('volumes:%s' % (FLAGS.storage_name), vol['volume_id'])
return True
+ @defer.inlineCallbacks
def _restart_exports(self):
if FLAGS.fake_storage:
return
- utils.runthis("Setting exports to auto: %s", "sudo vblade-persist auto all")
- utils.runthis("Starting all exports: %s", "sudo vblade-persist start all")
+ yield process.simple_execute(
+ "sudo vblade-persist auto all")
+ yield process.simple_execute(
+ "sudo vblade-persist start all")
+ @defer.inlineCallbacks
def _init_volume_group(self):
if FLAGS.fake_storage:
return
- utils.runthis("PVCreate returned: %s", "sudo pvcreate %s" % (FLAGS.storage_dev))
- utils.runthis("VGCreate returned: %s", "sudo vgcreate %s %s" % (FLAGS.volume_group, FLAGS.storage_dev))
+ yield process.simple_execute(
+ "sudo pvcreate %s" % (FLAGS.storage_dev))
+ yield process.simple_execute(
+ "sudo vgcreate %s %s" % (FLAGS.volume_group,
+ FLAGS.storage_dev))
class Volume(datastore.BasicModel):
@@ -227,15 +231,22 @@ class Volume(datastore.BasicModel):
self._delete_lv()
super(Volume, self).destroy()
+ @defer.inlineCallbacks
def create_lv(self):
if str(self['size']) == '0':
sizestr = '100M'
else:
sizestr = '%sG' % self['size']
- utils.runthis("Creating LV: %s", "sudo lvcreate -L %s -n %s %s" % (sizestr, self['volume_id'], FLAGS.volume_group))
+ yield process.simple_execute(
+ "sudo lvcreate -L %s -n %s %s" % (sizestr,
+ self['volume_id'],
+ FLAGS.volume_group))
+ @defer.inlineCallbacks
def _delete_lv(self):
- utils.runthis("Removing LV: %s", "sudo lvremove -f %s/%s" % (FLAGS.volume_group, self['volume_id']))
+ yield process.simple_execute(
+ "sudo lvremove -f %s/%s" % (FLAGS.volume_group,
+ self['volume_id']))
def _setup_export(self):
(shelf_id, blade_id) = get_next_aoe_numbers()
@@ -245,8 +256,9 @@ class Volume(datastore.BasicModel):
self.save()
self._exec_export()
+ @defer.inlineCallbacks
def _exec_export(self):
- utils.runthis("Creating AOE export: %s",
+ yield process.simple_execute(
"sudo vblade-persist setup %s %s %s /dev/%s/%s" %
(self['shelf_id'],
self['blade_id'],
@@ -254,9 +266,14 @@ class Volume(datastore.BasicModel):
FLAGS.volume_group,
self['volume_id']))
+ @defer.inlineCallbacks
def _remove_export(self):
- utils.runthis("Stopped AOE export: %s", "sudo vblade-persist stop %s %s" % (self['shelf_id'], self['blade_id']))
- utils.runthis("Destroyed AOE export: %s", "sudo vblade-persist destroy %s %s" % (self['shelf_id'], self['blade_id']))
+ yield process.simple_execute(
+ "sudo vblade-persist stop %s %s" % (self['shelf_id'],
+ self['blade_id']))
+ yield process.simple_execute(
+ "sudo vblade-persist destroy %s %s" % (self['shelf_id'],
+ self['blade_id']))
class FakeVolume(Volume):
diff --git a/run_tests.py b/run_tests.py
index db8a582ea..5a8966f02 100644
--- a/run_tests.py
+++ b/run_tests.py
@@ -50,16 +50,16 @@ from nova import flags
from nova import twistd
from nova.tests.access_unittest import *
+from nova.tests.auth_unittest import *
from nova.tests.api_unittest import *
from nova.tests.cloud_unittest import *
+from nova.tests.compute_unittest import *
from nova.tests.model_unittest import *
from nova.tests.network_unittest import *
-from nova.tests.node_unittest import *
from nova.tests.objectstore_unittest import *
from nova.tests.process_unittest import *
-from nova.tests.storage_unittest import *
-from nova.tests.users_unittest import *
from nova.tests.validator_unittest import *
+from nova.tests.volume_unittest import *
FLAGS = flags.FLAGS
diff --git a/run_tests.sh b/run_tests.sh
new file mode 100755
index 000000000..1bf3d1a79
--- /dev/null
+++ b/run_tests.sh
@@ -0,0 +1,13 @@
#!/bin/bash
# Run the test suite inside the Nova development virtualenv.

venv=.nova-venv
with_venv=tools/with_venv.sh

# Bail out early when the virtualenv has not been created yet.
if [ ! -e ${venv} ]; then
  echo "You need to install the Nova virtualenv before you can run this."
  echo ""
  echo "Please run tools/install_venv.py"
  exit 1
fi

${with_venv} python run_tests.py
diff --git a/setup.cfg b/setup.cfg
index 278586962..14dcb5c8e 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,4 +1,10 @@
[build_sphinx]
-source-dir = docs
-build-dir = docs/_build
-all_files = 1
+all_files = 1
+build-dir = doc/build
+source-dir = doc/source
+
+[egg_info]
+tag_build =
+tag_date = 0
+tag_svn_revision = 0
+
diff --git a/setup.py b/setup.py
index eb42283ea..0fd286f7d 100644
--- a/setup.py
+++ b/setup.py
@@ -16,20 +16,41 @@
# License for the specific language governing permissions and limitations
# under the License.
-import glob
+from setuptools import setup, find_packages
+from setuptools.command.sdist import sdist
+
import os
-import sys
+import subprocess
-from setuptools import setup, find_packages
-srcdir = os.path.join(os.path.dirname(sys.argv[0]), 'src')
+class local_sdist(sdist):
+ """Customized sdist hook - builds the ChangeLog file from VC first"""
-setup(name='nova',
- version='0.3.0',
- description='None Other, Vaguely Awesome',
- author='nova-core',
- author_email='nova-core@googlegroups.com',
- url='http://novacc.org/',
- packages = find_packages(),
+ def run(self):
+ if os.path.isdir('.bzr'):
+ # We're in a bzr branch
+ log_cmd = subprocess.Popen(["bzr", "log", "--gnu"],
+ stdout=subprocess.PIPE)
+ changelog = log_cmd.communicate()[0]
+ with open("ChangeLog", "w") as changelog_file:
+ changelog_file.write(changelog)
+ sdist.run(self)
- )
+setup(name='nova',
+ version='0.9.1',
+ description='cloud computing fabric controller',
+ author='OpenStack',
+ author_email='nova@lists.launchpad.net',
+ url='http://www.openstack.org/',
+ cmdclass={'sdist': local_sdist},
+ packages=find_packages(exclude=['bin', 'smoketests']),
+ scripts=['bin/nova-api',
+ 'bin/nova-compute',
+ 'bin/nova-dhcpbridge',
+ 'bin/nova-import-canonical-imagestore',
+ 'bin/nova-instancemonitor',
+ 'bin/nova-manage',
+ 'bin/nova-network',
+ 'bin/nova-objectstore',
+ 'bin/nova-rsapi',
+ 'bin/nova-volume'])
diff --git a/tools/install_venv.py b/tools/install_venv.py
new file mode 100644
index 000000000..0b35fc8e9
--- /dev/null
+++ b/tools/install_venv.py
@@ -0,0 +1,94 @@
+"""
+Installation script for Nova's development virtualenv
+"""
+
+import os
+import subprocess
+import sys
+
+
# Project root (the parent of tools/), the virtualenv location, and the
# pip requirements file used to populate it.
ROOT = os.path.dirname(os.path.dirname(__file__))
VENV = os.path.join(ROOT, '.nova-venv')
PIP_REQUIRES = os.path.join(ROOT, 'tools', 'pip-requires')
# Nova's patched Twisted tarball, installed into the venv separately.
TWISTED_NOVA='http://nova.openstack.org/Twisted-10.0.0Nova.tar.gz'
+
+
def die(message, *args):
    """Print the %-formatted message to stderr and exit with status 1."""
    print >>sys.stderr, message % args
    sys.exit(1)
+
+
def run_command(cmd, redirect_output=True, error_ok=False):
    """Run cmd (an argv list), returning captured stdout.

    When redirect_output is False the child inherits our stdout and None
    is returned. Unless error_ok is set, a non-zero exit status aborts
    the installer via die().
    """
    # Useful for debugging:
    #print >>sys.stderr, ' '.join(cmd)
    stdout = subprocess.PIPE if redirect_output else None

    proc = subprocess.Popen(cmd, stdout=stdout)
    output = proc.communicate()[0]
    if proc.returncode != 0 and not error_ok:
        die('Command "%s" failed.\n%s', ' '.join(cmd), output)
    return output
+
+
def check_dependencies():
    """Make sure pip and virtualenv are on the path."""
    # `which` prints nothing when the binary is absent, so an empty
    # (stripped) result means the tool is missing.
    print 'Checking for pip...',
    if not run_command(['which', 'pip']).strip():
        die('ERROR: pip not found.\n\nNova development requires pip,'
            ' please install it using your favorite package management tool')
    print 'done.'

    print 'Checking for virtualenv...',
    if not run_command(['which', 'virtualenv']).strip():
        die('ERROR: virtualenv not found.\n\nNova development requires virtualenv,'
            ' please install it using your favorite package management tool')
    print 'done.'
+
+
+def create_virtualenv(venv=VENV):
+ print 'Creating venv...',
+ run_command(['virtualenv', '-q', '--no-site-packages', VENV])
+ print 'done.'
+
+
+def install_dependencies(venv=VENV):
+ print 'Installing dependencies with pip (this can take a while)...'
+ run_command(['pip', 'install', '-E', venv, '-r', PIP_REQUIRES],
+ redirect_output=False)
+ run_command(['pip', 'install', '-E', venv, TWISTED_NOVA],
+ redirect_output=False)
+
+
+def print_help():
+ help = """
+ Nova development environment setup is complete.
+
+ Nova development uses virtualenv to track and manage Python dependencies
+ while in development and testing.
+
+ To activate the Nova virtualenv for the extent of your current shell session
+ you can run:
+
+ $ source .nova-venv/bin/activate
+
+ Or, if you prefer, you can run commands in the virtualenv on a case by case
+ basis by running:
+
+ $ tools/with_venv.sh <your command>
+
+ Also, make test will automatically use the virtualenv.
+ """
+ print help
+
+
def main(argv):
    """Drive the full setup: check tools, build the venv, install deps."""
    # argv is currently unused; accepted for conventional script entry.
    check_dependencies()
    create_virtualenv()
    install_dependencies()
    print_help()

if __name__ == '__main__':
    main(sys.argv)
diff --git a/tools/pip-requires b/tools/pip-requires
new file mode 100644
index 000000000..4eb47ca2b
--- /dev/null
+++ b/tools/pip-requires
@@ -0,0 +1,15 @@
+IPy==0.70
+M2Crypto==0.20.2
+amqplib==0.6.1
+anyjson==0.2.4
+boto==2.0b1
+carrot==0.10.5
+lockfile==0.8
+python-daemon==1.5.5
+python-gflags==1.3
+redis==2.0.0
+tornado==1.0
+wsgiref==0.1.2
+zope.interface==3.6.1
+mox==0.5.0
+-f http://pymox.googlecode.com/files/mox-0.5.0.tar.gz
diff --git a/tools/with_venv.sh b/tools/with_venv.sh
new file mode 100755
index 000000000..99d1ac18f
--- /dev/null
+++ b/tools/with_venv.sh
@@ -0,0 +1,4 @@
#!/bin/bash
# Run the supplied command with the Nova virtualenv activated.
TOOLS=`dirname $0`
VENV=$TOOLS/../.nova-venv
# BUG FIX: "$@" (quoted) preserves argument boundaries; the previous
# unquoted $@ re-split any argument containing whitespace.
source $VENV/bin/activate && "$@"