author    Todd Willey <todd@rubidine.com>  2010-07-15 00:21:17 -0400
committer Todd Willey <todd@rubidine.com>  2010-07-15 00:21:17 -0400
commit    fbf23a4f5f7d10429f26b2f43d2d203a39712acd (patch)
tree      e5ed888998e97d7d16bfc9d80210609a4e3d13fb
parent    9ef046e77e18806264c92e90b20dc84fb2a9d369 (diff)
parent    892ca58c0642db19e57a89d7a2ae5466971249cf (diff)
download  nova-fbf23a4f5f7d10429f26b2f43d2d203a39712acd.tar.gz
          nova-fbf23a4f5f7d10429f26b2f43d2d203a39712acd.tar.xz
          nova-fbf23a4f5f7d10429f26b2f43d2d203a39712acd.zip
fix merge errors
-rw-r--r--  README                               3
-rwxr-xr-x  bin/dhcpleasor.py                   92
-rwxr-xr-x  bin/nova-api                         8
-rwxr-xr-x  bin/nova-compute                     5
-rwxr-xr-x  bin/nova-manage                      4
-rw-r--r--  debian/changelog                    51
-rw-r--r--  debian/control                     113
-rw-r--r--  debian/nova-api.conf                 5
-rw-r--r--  debian/nova-api.init                 4
-rw-r--r--  debian/nova-api.install              1
-rw-r--r--  debian/nova-common.dirs             11
-rw-r--r--  debian/nova-common.install          11
-rw-r--r--  debian/nova-compute.conf            10
-rw-r--r--  debian/nova-compute.init             4
-rw-r--r--  debian/nova-compute.install          1
-rw-r--r--  debian/nova-manage.conf              4
-rw-r--r--  debian/nova-objectstore.conf         7
-rw-r--r--  debian/nova-objectstore.init         4
-rw-r--r--  debian/nova-objectstore.install      2
-rw-r--r--  debian/nova-objectstore.links        1
-rw-r--r--  debian/nova-objectstore.nginx.conf  17
-rw-r--r--  debian/nova-volume.conf              7
-rw-r--r--  debian/nova-volume.init              4
-rw-r--r--  debian/nova-volume.install           1
-rw-r--r--  docs/conf.py                         2
-rw-r--r--  docs/getting.started.rst            25
-rw-r--r--  nova/adminclient.py                  7
-rw-r--r--  nova/auth/fakeldap.py               62
-rw-r--r--  nova/auth/users.py                  63
-rw-r--r--  nova/cloudpipe/pipelib.py            5
-rw-r--r--  nova/compute/disk.py                30
-rw-r--r--  nova/compute/interfaces.template    18
-rw-r--r--  nova/compute/linux_net.py           12
-rw-r--r--  nova/compute/model.py              231
-rw-r--r--  nova/compute/network.py            120
-rw-r--r--  nova/compute/node.py                48
-rw-r--r--  nova/crypto.py                      21
-rw-r--r--  nova/datastore.py                  198
-rw-r--r--  nova/endpoint/admin.py              23
-rwxr-xr-x  nova/endpoint/api.py                 3
-rw-r--r--  nova/endpoint/cloud.py              85
-rw-r--r--  nova/flags.py                        3
-rw-r--r--  nova/rpc.py                         28
-rw-r--r--  nova/tests/api_unittest.py           2
-rw-r--r--  nova/tests/network_unittest.py     141
-rw-r--r--  nova/tests/objectstore_unittest.py   2
-rw-r--r--  nova/tests/storage_unittest.py      17
-rw-r--r--  nova/tests/validator_unittest.py     3
-rw-r--r--  nova/utils.py                       17
-rw-r--r--  nova/volume/storage.py              87
-rw-r--r--  run_tests.py                         3
-rw-r--r--  smoketests/flags.py                 14
-rw-r--r--  smoketests/novatestcase.py          26
-rw-r--r--  smoketests/smoketest.py            131
54 files changed, 1189 insertions(+), 608 deletions(-)
diff --git a/README b/README
index 192711204..f7d21f400 100644
--- a/README
+++ b/README
@@ -15,3 +15,6 @@ To dissect it in detail: visit http://github.com/nova/cc
To taunt it with its weaknesses: use http://github.com/nova/cc/issues
To hack at it: read HACKING
+
+To watch it: http://test.novacc.org/waterfall
+
diff --git a/bin/dhcpleasor.py b/bin/dhcpleasor.py
new file mode 100755
index 000000000..30f8fbdc3
--- /dev/null
+++ b/bin/dhcpleasor.py
@@ -0,0 +1,92 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 Anso Labs, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+dhcpleasor.py
+
+Handle lease database updates from DHCP servers.
+"""
+
+import sys
+import os
+import logging
+sys.path.append(os.path.abspath(os.path.join(__file__, "../../")))
+
+logging.debug(sys.path)
+import getopt
+from os import environ
+from nova.compute import linux_net
+from nova.compute import network
+from nova import rpc
+
+from nova import flags
+FLAGS = flags.FLAGS
+
+
+def add_lease(mac, ip, hostname, interface):
+ if FLAGS.fake_rabbit:
+ network.lease_ip(ip)
+ else:
+ rpc.cast(FLAGS.cloud_topic, {"method": "lease_ip",
+ "args" : {"address": ip}})
+
+def old_lease(mac, ip, hostname, interface):
+ logging.debug("Adopted old lease or got a change of mac/hostname")
+
+def del_lease(mac, ip, hostname, interface):
+ if FLAGS.fake_rabbit:
+ network.release_ip(ip)
+ else:
+ rpc.cast(FLAGS.cloud_topic, {"method": "release_ip",
+ "args" : {"address": ip}})
+
+def init_leases(interface):
+ net = network.get_network_by_interface(interface)
+ res = ""
+ for host_name in net.hosts:
+ res += "%s\n" % linux_net.hostDHCP(net, host_name, net.hosts[host_name])
+ return res
+
+
+def main(argv=None):
+ if argv is None:
+ argv = sys.argv
+ interface = environ.get('DNSMASQ_INTERFACE', 'br0')
+ if int(environ.get('TESTING', '0')):
+ FLAGS.fake_rabbit = True
+ FLAGS.redis_db = 8
+ FLAGS.network_size = 32
+ FLAGS.fake_libvirt=True
+ FLAGS.fake_network=True
+ FLAGS.fake_users = True
+ action = argv[1]
+ if action in ['add','del','old']:
+ mac = argv[2]
+ ip = argv[3]
+ hostname = argv[4]
+ logging.debug("Called %s for mac %s with ip %s and hostname %s on interface %s" % (action, mac, ip, hostname, interface))
+ globals()[action+'_lease'](mac, ip, hostname, interface)
+ else:
+ print init_leases(interface)
+ exit(0)
+
+if __name__ == "__main__":
+ sys.exit(main())
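For context: dnsmasq invokes this script once per lease event, passing the interface through the environment and the action plus lease details as positional arguments, exactly as main() above expects. A rough sketch of that invocation (the MAC, IP, and hostname values are illustrative):

    import os
    import subprocess

    # dnsmasq exports the interface in the environment and passes
    # action, mac, ip and hostname as arguments
    env = dict(os.environ, DNSMASQ_INTERFACE='br100')
    subprocess.call(['dhcpleasor.py', 'add',   # or 'old' / 'del'
                     '02:16:3e:00:00:01',      # mac
                     '10.0.0.5',               # ip
                     'i-00000001'],            # hostname
                    env=env)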
diff --git a/bin/nova-api b/bin/nova-api
index f0f79a236..1bef778c5 100755
--- a/bin/nova-api
+++ b/bin/nova-api
@@ -20,7 +20,7 @@
# under the License.
"""
-Tornado daemon for the main API endpoint.
+Tornado daemon for the main API endpoint.
"""
import logging
@@ -43,13 +43,11 @@ FLAGS = flags.FLAGS
def main(_argv):
- user_manager = users.UserManager()
- host_manager = model.Host
controllers = {
'Cloud': cloud.CloudController(),
- 'Admin': admin.AdminController(user_manager, host_manager)
+ 'Admin': admin.AdminController()
}
- _app = api.APIServerApplication(user_manager, controllers)
+ _app = api.APIServerApplication(controllers)
conn = rpc.Connection.instance()
consumer = rpc.AdapterConsumer(connection=conn,
diff --git a/bin/nova-compute b/bin/nova-compute
index ed829ecc8..1b438f6a7 100755
--- a/bin/nova-compute
+++ b/bin/nova-compute
@@ -75,7 +75,8 @@ def main():
topic='%s.%s' % (FLAGS.compute_topic, FLAGS.node_name),
proxy=n)
- pulse = task.LoopingCall(n.report_state, FLAGS.node_name, 'nova-compute')
+ bin_name = os.path.basename(__file__)
+ pulse = task.LoopingCall(n.report_state, FLAGS.node_name, bin_name)
pulse.start(interval=FLAGS.node_report_state_interval, now=False)
injected = consumer_all.attach_to_twisted()
@@ -83,7 +84,7 @@ def main():
# This is the parent service that twistd will be looking for when it
# parses this file, return it so that we can get it into globals below
- application = service.Application('nova-compute')
+ application = service.Application(bin_name)
n.setServiceParent(application)
return application
diff --git a/bin/nova-manage b/bin/nova-manage
index f418e162b..12e4c9324 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -62,6 +62,8 @@ class VpnCommands(object):
net = 'up'
else:
net = 'down'
+ print vpn['private_dns_name'],
+ print vpn['node_name'],
print vpn['instance_id'],
print vpn['state_description'],
print net
@@ -218,7 +220,7 @@ def methods_of(obj):
if __name__ == '__main__':
- utils.default_flagfile()
+ utils.default_flagfile('/etc/nova/nova-manage.conf')
argv = FLAGS(sys.argv)
script_name = argv.pop(0)
if len(argv) < 1:
diff --git a/debian/changelog b/debian/changelog
index 789dad36d..31dd5e91e 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,54 @@
+nova (0.2.3-1) UNRELEASED; urgency=low
+
+ * Relax the Twisted dependency to python-twisted-core (rather than the
+ full stack).
+ * Move nova related configuration files into /etc/nova/.
+ * Add a dependency on nginx from nova-objectstore and install a
+ suitable configuration file.
+ * Ship the CA directory in nova-common.
+ * Add a default flag file for nova-manage to help it find the CA.
+ * If set, pass KernelId and RamdiskId from RunInstances call to the
+ target compute node.
+ * Added --network_path setting to nova-compute's flagfile.
+ * Move templates from python directories to /usr/share/nova.
+ * Add debian/nova-common.dirs to create
+ var/lib/nova/{buckets,CA,images,instances,keys,networks}
+ * Don't pass --daemonize=1 to nova-compute. It's already daemonising
+ by default.
+
+ -- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 14 Jul 2010 12:00:00 -0700
+
+nova (0.2.2-10) UNRELEASED; urgency=low
+
+ * Fixed extra space in vblade-persist
+
+ -- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 13 Jul 2010 19:00:00 -0700
+
+nova (0.2.2-9) UNRELEASED; urgency=low
+
+ * Fixed invalid dn bug in ldap for adding roles
+
+ -- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 12 Jul 2010 15:20:00 -0700
+
+nova (0.2.2-8) UNRELEASED; urgency=low
+
+ * Added a missing comma
+
+ -- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 08 Jul 2010 10:05:00 -0700
+
+nova (0.2.2-7) UNRELEASED; urgency=low
+
+ * Missing files from twisted patch
+ * License updates
+ * Reformatting/cleanup
+ * Users/ldap bugfixes
+ * Merge fixes
+ * Documentation updates
+ * Vpn key creation fix
+ * Multiple shelves for volumes
+
+ -- Vishvananda Ishaya <vishvananda@gmail.com> Wed, 07 Jul 2010 18:45:00 -0700
+
nova (0.2.2-6) UNRELEASED; urgency=low
* Fix to make Key Injection work again
diff --git a/debian/control b/debian/control
index bb2071234..342dfb185 100644
--- a/debian/control
+++ b/debian/control
@@ -3,36 +3,112 @@ Section: net
Priority: extra
Maintainer: Jesse Andrews <jesse@ansolabs.com>
Build-Depends: debhelper (>= 7)
-Build-Depends-Indep: python-support
+Build-Depends-Indep: python-support, python-setuptools
Standards-Version: 3.8.4
XS-Python-Version: 2.6
Package: nova-common
Architecture: all
-Depends: ${python:Depends}, aoetools, vlan, python-ipy, python-boto, python-m2crypto, python-pycurl, python-twisted (>= 10.0.0-2ubuntu2nebula1), python-daemon, python-redis, python-carrot, python-lockfile, python-gflags, python-tornado, ${misc:Depends}
+Depends: ${python:Depends}, aoetools, vlan, python-ipy, python-boto, python-m2crypto, python-pycurl, python-twisted-core, python-daemon, python-redis, python-carrot, python-lockfile, python-gflags, python-tornado, ${misc:Depends}
Provides: ${python:Provides}
-Conflicts: nova
-Description: Nova is a cloud
+Description: Nova Cloud Computing - common files
+ Nova is a cloud computing fabric controller (the main part of an IaaS
+ system) built to match the popular AWS EC2 and S3 APIs. It is written in
+ Python, using the Tornado and Twisted frameworks, and relies on the
+ standard AMQP messaging protocol, and the Redis distributed KVS.
+ .
+ Nova is intended to be easy to extend, and adapt. For example, it
+ currently uses an LDAP server for users and groups, but also includes a
+ fake LDAP server, that stores data in Redis. It has extensive test
+ coverage, and uses the Sphinx toolkit (the same as Python itself) for code
+ and user documentation.
+ .
+ While Nova is currently in Beta use within several organizations, the
+ codebase is very much under active development.
+ .
+ This package contains things that are needed by all parts of Nova.
Package: nova-compute
Architecture: all
-Depends: nova-common (= ${binary:Version}), kpartx, kvm, python-libvirt, libvirt-bin (>= 0.8.1), ${python:Depends}, ${misc:Depends}
-Description: Nova compute
+Depends: nova-common (= ${binary:Version}), kpartx, kvm, python-libvirt, libvirt-bin (>= 0.7.5), curl, ${python:Depends}, ${misc:Depends}
+Description: Nova Cloud Computing - compute node
+ Nova is a cloud computing fabric controller (the main part of an IaaS
+ system) built to match the popular AWS EC2 and S3 APIs. It is written in
+ Python, using the Tornado and Twisted frameworks, and relies on the
+ standard AMQP messaging protocol, and the Redis distributed KVS.
+ .
+ Nova is intended to be easy to extend, and adapt. For example, it
+ currently uses an LDAP server for users and groups, but also includes a
+ fake LDAP server, that stores data in Redis. It has extensive test
+ coverage, and uses the Sphinx toolkit (the same as Python itself) for code
+ and user documentation.
+ .
+ While Nova is currently in Beta use within several organizations, the
+ codebase is very much under active development.
+ .
+ This is the package you will install on the nodes that will run your
+ virtual machines.
Package: nova-volume
Architecture: all
Depends: nova-common (= ${binary:Version}), vblade, vblade-persist, ${python:Depends}, ${misc:Depends}
-Description: Nova volume
+Description: Nova Cloud Computing - storage
+ Nova is a cloud computing fabric controller (the main part of an IaaS
+ system) built to match the popular AWS EC2 and S3 APIs. It is written in
+ Python, using the Tornado and Twisted frameworks, and relies on the
+ standard AMQP messaging protocol, and the Redis distributed KVS.
+ .
+ Nova is intended to be easy to extend, and adapt. For example, it
+ currently uses an LDAP server for users and groups, but also includes a
+ fake LDAP server, that stores data in Redis. It has extensive test
+ coverage, and uses the Sphinx toolkit (the same as Python itself) for code
+ and user documentation.
+ .
+ While Nova is currently in Beta use within several organizations, the
+ codebase is very much under active development.
+ .
+ This is the package you will install on your storage nodes.
Package: nova-api
Architecture: all
Depends: nova-common (= ${binary:Version}), ${python:Depends}, ${misc:Depends}
-Description: Nova api
+Description: Nova Cloud Computing - API frontend
+ Nova is a cloud computing fabric controller (the main part of an IaaS
+ system) built to match the popular AWS EC2 and S3 APIs. It is written in
+ Python, using the Tornado and Twisted frameworks, and relies on the
+ standard AMQP messaging protocol, and the Redis distributed KVS.
+ .
+ Nova is intended to be easy to extend, and adapt. For example, it
+ currently uses an LDAP server for users and groups, but also includes a
+ fake LDAP server, that stores data in Redis. It has extensive test
+ coverage, and uses the Sphinx toolkit (the same as Python itself) for code
+ and user documentation.
+ .
+ While Nova is currently in Beta use within several organizations, the
+ codebase is very much under active development.
+ .
+ This package provides the API frontend.
Package: nova-objectstore
Architecture: all
-Depends: nova-common (= ${binary:Version}), ${python:Depends}, ${misc:Depends}
-Description: Nova object store
+Depends: nova-common (= ${binary:Version}), nginx, ${python:Depends}, ${misc:Depends}
+Description: Nova Cloud Computing - object store
+ Nova is a cloud computing fabric controller (the main part of an IaaS
+ system) built to match the popular AWS EC2 and S3 APIs. It is written in
+ Python, using the Tornado and Twisted frameworks, and relies on the
+ standard AMQP messaging protocol, and the Redis distributed KVS.
+ .
+ Nova is intended to be easy to extend, and adapt. For example, it
+ currently uses an LDAP server for users and groups, but also includes a
+ fake LDAP server, that stores data in Redis. It has extensive test
+ coverage, and uses the Sphinx toolkit (the same as Python itself) for code
+ and user documentation.
+ .
+ While Nova is currently in Beta use within several organizations, the
+ codebase is very much under active development.
+ .
+ This is the package you will install on the nodes that will contain your
+ object store.
Package: nova-instancemonitor
Architecture: all
@@ -42,4 +118,19 @@ Description: Nova instance monitor
Package: nova-tools
Architecture: all
Depends: python-boto, ${python:Depends}, ${misc:Depends}
-Description: CLI tools to access nova
+Description: Nova Cloud Computing - management tools
+ Nova is a cloud computing fabric controller (the main part of an IaaS
+ system) built to match the popular AWS EC2 and S3 APIs. It is written in
+ Python, using the Tornado and Twisted frameworks, and relies on the
+ standard AMQP messaging protocol, and the Redis distributed KVS.
+ .
+ Nova is intended to be easy to extend, and adapt. For example, it
+ currently uses an LDAP server for users and groups, but also includes a
+ fake LDAP server, that stores data in Redis. It has extensive test
+ coverage, and uses the Sphinx toolkit (the same as Python itself) for code
+ and user documentation.
+ .
+ While Nova is currently in Beta use within several organizations, the
+ codebase is very much under active development.
+ .
+ This package contains admin tools for Nova.
diff --git a/debian/nova-api.conf b/debian/nova-api.conf
new file mode 100644
index 000000000..9cd4051b1
--- /dev/null
+++ b/debian/nova-api.conf
@@ -0,0 +1,5 @@
+--daemonize=1
+--ca_path=/var/lib/nova/CA
+--keys_path=/var/lib/nova/keys
+--fake_users=1
+--datastore_path=/var/lib/nova/keeper
diff --git a/debian/nova-api.init b/debian/nova-api.init
index 925c92c5e..597fbef95 100644
--- a/debian/nova-api.init
+++ b/debian/nova-api.init
@@ -13,10 +13,10 @@
set -e
DAEMON=/usr/bin/nova-api
-DAEMON_ARGS="--flagfile=/etc/nova.conf"
+DAEMON_ARGS="--flagfile=/etc/nova/nova-api.conf"
PIDFILE=/var/run/nova-api.pid
-ENABLED=false
+ENABLED=true
if test -f /etc/default/nova-api; then
. /etc/default/nova-api
diff --git a/debian/nova-api.install b/debian/nova-api.install
index 757235b11..02dbda02d 100644
--- a/debian/nova-api.install
+++ b/debian/nova-api.install
@@ -1 +1,2 @@
bin/nova-api usr/bin
+debian/nova-api.conf etc/nova
diff --git a/debian/nova-common.dirs b/debian/nova-common.dirs
new file mode 100644
index 000000000..b58fe8b7f
--- /dev/null
+++ b/debian/nova-common.dirs
@@ -0,0 +1,11 @@
+etc/nova
+var/lib/nova/buckets
+var/lib/nova/CA
+var/lib/nova/CA/INTER
+var/lib/nova/CA/newcerts
+var/lib/nova/CA/private
+var/lib/nova/CA/reqs
+var/lib/nova/images
+var/lib/nova/instances
+var/lib/nova/keys
+var/lib/nova/networks
diff --git a/debian/nova-common.install b/debian/nova-common.install
index ab7455314..9b1bbf147 100644
--- a/debian/nova-common.install
+++ b/debian/nova-common.install
@@ -1,5 +1,10 @@
bin/nova-manage usr/bin
-nova/auth/novarc.template usr/lib/pymodules/python2.6/nova/auth
-nova/cloudpipe/client.ovpn.template usr/lib/pymodules/python2.6/nova/cloudpipe
-nova/compute/libvirt.xml.template usr/lib/pymodules/python2.6/nova/compute
+debian/nova-manage.conf etc/nova
+nova/auth/novarc.template usr/share/nova
+nova/cloudpipe/client.ovpn.template usr/share/nova
+nova/compute/libvirt.xml.template usr/share/nova
+nova/compute/interfaces.template usr/share/nova
usr/lib/python*/*-packages/nova/*
+CA/openssl.cnf.tmpl var/lib/nova/CA
+CA/geninter.sh var/lib/nova/CA
+CA/genrootca.sh var/lib/nova/CA
diff --git a/debian/nova-compute.conf b/debian/nova-compute.conf
new file mode 100644
index 000000000..e4ca3fe95
--- /dev/null
+++ b/debian/nova-compute.conf
@@ -0,0 +1,10 @@
+--ca_path=/var/lib/nova/CA
+--keys_path=/var/lib/nova/keys
+--datastore_path=/var/lib/nova/keeper
+--instances_path=/var/lib/nova/instances
+--networks_path=/var/lib/nova/networks
+--simple_network_template=/usr/share/nova/interfaces.template
+--libvirt_xml_template=/usr/share/nova/libvirt.xml.template
+--vpn_client_template=/usr/share/nova/client.ovpn.template
+--credentials_template=/usr/share/nova/novarc.template
+--fake_users=1
diff --git a/debian/nova-compute.init b/debian/nova-compute.init
index 89d0e5fce..d0f093a7a 100644
--- a/debian/nova-compute.init
+++ b/debian/nova-compute.init
@@ -13,10 +13,10 @@
set -e
DAEMON=/usr/bin/nova-compute
-DAEMON_ARGS="--flagfile=/etc/nova.conf"
+DAEMON_ARGS="--flagfile=/etc/nova/nova-compute.conf"
PIDFILE=/var/run/nova-compute.pid
-ENABLED=false
+ENABLED=true
if test -f /etc/default/nova-compute; then
. /etc/default/nova-compute
diff --git a/debian/nova-compute.install b/debian/nova-compute.install
index 6387cef07..5f9df46a8 100644
--- a/debian/nova-compute.install
+++ b/debian/nova-compute.install
@@ -1 +1,2 @@
bin/nova-compute usr/bin
+debian/nova-compute.conf etc/nova
diff --git a/debian/nova-manage.conf b/debian/nova-manage.conf
new file mode 100644
index 000000000..5ccda7ecf
--- /dev/null
+++ b/debian/nova-manage.conf
@@ -0,0 +1,4 @@
+--ca_path=/var/lib/nova/CA
+--credentials_template=/usr/share/nova/novarc.template
+--keys_path=/var/lib/nova/keys
+--vpn_client_template=/usr/share/nova/client.ovpn.template
diff --git a/debian/nova-objectstore.conf b/debian/nova-objectstore.conf
new file mode 100644
index 000000000..af3271d3b
--- /dev/null
+++ b/debian/nova-objectstore.conf
@@ -0,0 +1,7 @@
+--daemonize=1
+--ca_path=/var/lib/nova/CA
+--keys_path=/var/lib/nova/keys
+--datastore_path=/var/lib/nova/keeper
+--fake_users=1
+--images_path=/var/lib/nova/images
+--buckets_path=/var/lib/nova/buckets
diff --git a/debian/nova-objectstore.init b/debian/nova-objectstore.init
index be7d32d8e..9676345ad 100644
--- a/debian/nova-objectstore.init
+++ b/debian/nova-objectstore.init
@@ -13,10 +13,10 @@
set -e
DAEMON=/usr/bin/nova-objectstore
-DAEMON_ARGS="--flagfile=/etc/nova.conf"
+DAEMON_ARGS="--flagfile=/etc/nova/nova-objectstore.conf"
PIDFILE=/var/run/nova-objectstore.pid
-ENABLED=false
+ENABLED=true
if test -f /etc/default/nova-objectstore; then
. /etc/default/nova-objectstore
diff --git a/debian/nova-objectstore.install b/debian/nova-objectstore.install
index ccc60fccc..3ed93ff37 100644
--- a/debian/nova-objectstore.install
+++ b/debian/nova-objectstore.install
@@ -1 +1,3 @@
bin/nova-objectstore usr/bin
+debian/nova-objectstore.conf etc/nova
+debian/nova-objectstore.nginx.conf etc/nginx/sites-available
diff --git a/debian/nova-objectstore.links b/debian/nova-objectstore.links
new file mode 100644
index 000000000..38e33948e
--- /dev/null
+++ b/debian/nova-objectstore.links
@@ -0,0 +1 @@
+/etc/nginx/sites-available/nova-objectstore.nginx.conf /etc/nginx/sites-enabled/nova-objectstore.nginx.conf
diff --git a/debian/nova-objectstore.nginx.conf b/debian/nova-objectstore.nginx.conf
new file mode 100644
index 000000000..b63424150
--- /dev/null
+++ b/debian/nova-objectstore.nginx.conf
@@ -0,0 +1,17 @@
+server {
+ listen 3333 default;
+ server_name localhost;
+ client_max_body_size 10m;
+
+ access_log /var/log/nginx/localhost.access.log;
+
+ location ~ /_images/.+ {
+ root /var/lib/nova/images;
+ rewrite ^/_images/(.*)$ /$1 break;
+ }
+
+ location / {
+ proxy_pass http://localhost:3334/;
+ }
+}
+
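With this site config, nginx serves image files straight from disk and proxies everything else through to the objectstore daemon on port 3334. A quick sketch of the two request paths (hostname and image name are illustrative):

    import urllib2

    # /_images/... is rewritten and served directly from /var/lib/nova/images
    urllib2.urlopen('http://localhost:3333/_images/ami-tiny/image')
    # any other path is proxied to nova-objectstore on port 3334
    urllib2.urlopen('http://localhost:3333/')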
diff --git a/debian/nova-volume.conf b/debian/nova-volume.conf
new file mode 100644
index 000000000..af3271d3b
--- /dev/null
+++ b/debian/nova-volume.conf
@@ -0,0 +1,7 @@
+--daemonize=1
+--ca_path=/var/lib/nova/CA
+--keys_path=/var/lib/nova/keys
+--datastore_path=/var/lib/nova/keeper
+--fake_users=1
+--images_path=/var/lib/nova/images
+--buckets_path=/var/lib/nova/buckets
diff --git a/debian/nova-volume.init b/debian/nova-volume.init
index 80da3f70c..d5c2dddf8 100644
--- a/debian/nova-volume.init
+++ b/debian/nova-volume.init
@@ -13,10 +13,10 @@
set -e
DAEMON=/usr/bin/nova-volume
-DAEMON_ARGS="--flagfile=/etc/nova.conf"
+DAEMON_ARGS="--flagfile=/etc/nova/nova-volume.conf"
PIDFILE=/var/run/nova-volume.pid
-ENABLED=false
+ENABLED=true
if test -f /etc/default/nova-volume; then
. /etc/default/nova-volume
diff --git a/debian/nova-volume.install b/debian/nova-volume.install
index 37b535c03..9a840c78e 100644
--- a/debian/nova-volume.install
+++ b/debian/nova-volume.install
@@ -1 +1,2 @@
bin/nova-volume usr/bin
+debian/nova-volume.conf etc/nova
diff --git a/docs/conf.py b/docs/conf.py
index 9dfdfc8be..bc61f438c 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -16,7 +16,7 @@ import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.append(os.path.abspath('.'))
+sys.path.append(os.path.abspath('/Users/jmckenty/Projects/cc'))
sys.path.append([os.path.abspath('../nova'),os.path.abspath('../'),os.path.abspath('../vendor')])
from nova import vendor
diff --git a/docs/getting.started.rst b/docs/getting.started.rst
index ffa396e08..55a73dd00 100644
--- a/docs/getting.started.rst
+++ b/docs/getting.started.rst
@@ -1,22 +1,3 @@
-..
- Copyright 2010 United States Government as represented by the
- Administrator of the National Aeronautics and Space Administration.
- All Rights Reserved.
-
- Copyright 2010 Anso Labs, LLC
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may
- not use this file except in compliance with the License. You may obtain
- a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- License for the specific language governing permissions and limitations
- under the License.
-
Getting Started with Nova
=========================
@@ -62,7 +43,7 @@ Installation
::
# system libraries and tools
- apt-get install -y aoetools vlan
+ apt-get install -y aoetools vlan curl
modprobe aoe
# python libraries
@@ -81,6 +62,7 @@ Installation
# ON THE COMPUTE NODE:
apt-get install -y python-libvirt
apt-get install -y kpartx kvm libvirt-bin
+ modprobe kvm
# optional packages
apt-get install -y euca2ools
@@ -111,7 +93,7 @@ ON CLOUD CONTROLLER
location ~ /_images/.+ {
root NOVA_PATH/images;
- rewrite ^/_images/(.*)\$ /\$1 break;
+ rewrite ^/_images/(.*)$ /$1 break;
}
location / {
@@ -128,6 +110,7 @@ ON VOLUME NODE
# This creates a 1GB file to create volumes out of
dd if=/dev/zero of=MY_FILE_PATH bs=100M count=10
losetup --show -f MY_FILE_PATH
+ # replace loop0 below with whatever losetup returns
echo "--storage_dev=/dev/loop0" >> NOVA_PATH/bin/nova.conf
Running
diff --git a/nova/adminclient.py b/nova/adminclient.py
index 480e907c9..3d239fb1d 100644
--- a/nova/adminclient.py
+++ b/nova/adminclient.py
@@ -28,7 +28,8 @@ import boto
from boto.ec2.regioninfo import RegionInfo
class UserInfo(object):
- """ Information about a Nova user
+ """
+ Information about a Nova user, as parsed through SAX
fields include:
username
accesskey
@@ -46,11 +47,9 @@ class UserInfo(object):
def __repr__(self):
return 'UserInfo:%s' % self.username
- # this is needed by the sax parser, so ignore the ugly name
def startElement(self, name, attrs, connection):
return None
- # this is needed by the sax parser, so ignore the ugly name
def endElement(self, name, value, connection):
if name == 'username':
self.username = str(value)
@@ -63,7 +62,7 @@ class UserInfo(object):
class HostInfo(object):
"""
- Information about a Nova Host:
+ Information about a Nova Host, as parsed through SAX:
Disk stats
Running Instances
Memory stats
diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py
index e27ac57bb..116fcbb78 100644
--- a/nova/auth/fakeldap.py
+++ b/nova/auth/fakeldap.py
@@ -34,15 +34,19 @@ SCOPE_SUBTREE = 2
MOD_ADD = 0
MOD_DELETE = 1
+
class NO_SUCH_OBJECT(Exception):
pass
+
class OBJECT_CLASS_VIOLATION(Exception):
pass
+
def initialize(uri):
return FakeLDAP()
+
def _match_query(query, attrs):
"""Match an ldap query to an attribute dictionary.
@@ -67,6 +71,7 @@ def _match_query(query, attrs):
(k, sep, v) = inner.partition('=')
return _match(k, v, attrs)
+
def _paren_groups(source):
"""Split a string into parenthesized groups."""
count = 0
@@ -83,6 +88,7 @@ def _paren_groups(source):
result.append(source[start:pos+1])
return result
+
def _match(k, v, attrs):
"""Match a given key and value against an attribute list."""
if k not in attrs:
@@ -96,6 +102,7 @@ def _match(k, v, attrs):
return True
return False
+
def _subs(value):
"""Returns a list of subclass strings.
@@ -109,6 +116,32 @@ def _subs(value):
return [value] + subs[value]
return [value]
+
+def _from_json(encoded):
+ """Convert attribute values from json representation.
+
+ Args:
+ encoded -- a json encoded string
+
+ Returns a list of strings
+
+ """
+ return [str(x) for x in json.loads(encoded)]
+
+
+def _to_json(unencoded):
+ """Convert attribute values into json representation.
+
+ Args:
+ unencoded -- an unencoded string or list of strings. If it
+ is a single string, it will be converted into a list.
+
+ Returns a json string
+
+ """
+ return json.dumps(list(unencoded))
+
+
class FakeLDAP(object):
#TODO(vish): refactor this class to use a wrapper instead of accessing
# redis directly
@@ -125,7 +158,7 @@ class FakeLDAP(object):
"""Add an object with the specified attributes at dn."""
key = "%s%s" % (self.__redis_prefix, dn)
- value_dict = dict([(k, self.__to_json(v)) for k, v in attr])
+ value_dict = dict([(k, _to_json(v)) for k, v in attr])
datastore.Redis.instance().hmset(key, value_dict)
def delete_s(self, dn):
@@ -145,12 +178,12 @@ class FakeLDAP(object):
key = "%s%s" % (self.__redis_prefix, dn)
for cmd, k, v in attrs:
- values = self.__from_json(redis.hget(key, k))
+ values = _from_json(redis.hget(key, k))
if cmd == MOD_ADD:
values.append(v)
else:
values.remove(v)
- values = redis.hset(key, k, self.__to_json(values))
+ values = redis.hset(key, k, _to_json(values))
def search_s(self, dn, scope, query=None, fields=None):
"""Search for all matching objects under dn using the query.
@@ -171,7 +204,7 @@ class FakeLDAP(object):
# get the attributes from redis
attrs = redis.hgetall(key)
# turn the values from redis into lists
- attrs = dict([(k, self.__from_json(v))
+ attrs = dict([(k, _from_json(v))
for k, v in attrs.iteritems()])
# filter the objects by query
if not query or _match_query(query, attrs):
@@ -188,25 +221,4 @@ class FakeLDAP(object):
def __redis_prefix(self):
return 'ldap:'
- def __from_json(self, encoded):
- """Convert attribute values from json representation.
-
- Args:
- encoded -- a json encoded string
-
- Returns a list of strings
- """
- return [str(x) for x in json.loads(encoded)]
-
- def __to_json(self, unencoded):
- """Convert attribute values into json representation.
-
- Args:
- unencoded -- an unencoded string or list of strings. If it
- is a single string, it will be converted into a list.
-
- Returns a json string
-
- """
- return json.dumps(list(unencoded))
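The module-level helpers above implement a small subset of LDAP filter matching against plain attribute dictionaries (values are lists of strings, as stored in redis). A sketch of what _match_query accepts, assuming the behavior described in its docstring:

    from nova.auth import fakeldap

    attrs = {'objectclass': ['novaUser'], 'uid': ['todd']}
    # equality filters and '&'/'|' compound filters are handled
    fakeldap._match_query('(&(objectclass=novaUser)(uid=todd))', attrs)  # True
    fakeldap._match_query('(uid=someoneelse)', attrs)                    # False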
diff --git a/nova/auth/users.py b/nova/auth/users.py
index bb78dad99..0ee2d4441 100644
--- a/nova/auth/users.py
+++ b/nova/auth/users.py
@@ -27,6 +27,7 @@ import datetime
import logging
import os
import shutil
+import signer
import string
from string import Template
import tempfile
@@ -39,15 +40,14 @@ except Exception, e:
import fakeldap as ldap
import fakeldap
-from nova import datastore
# TODO(termie): clean up these imports
-import signer
+from nova import datastore
from nova import exception
from nova import flags
from nova import crypto
from nova import utils
-from nova.compute import model
+
from nova import objectstore # for flags
@@ -101,10 +101,17 @@ flags.DEFINE_string('credential_cert_file', 'cert.pem',
'Filename of certificate in credentials zip')
flags.DEFINE_string('credential_rc_file', 'novarc',
'Filename of rc in credentials zip')
-flags.DEFINE_integer('vpn_start_port', 8000,
+
+flags.DEFINE_integer('vpn_start_port', 1000,
'Start port for the cloudpipe VPN servers')
-flags.DEFINE_integer('vpn_end_port', 9999,
+flags.DEFINE_integer('vpn_end_port', 2000,
'End port for the cloudpipe VPN servers')
+
+flags.DEFINE_string('credential_cert_subject',
+ '/C=US/ST=California/L=MountainView/O=AnsoLabs/'
+ 'OU=NovaDev/CN=%s-%s',
+ 'Subject for certificate for users')
+
flags.DEFINE_string('vpn_ip', '127.0.0.1',
'Public IP for the cloudpipe VPN servers')
@@ -306,7 +313,7 @@ class NoMorePorts(exception.Error):
pass
-class Vpn(model.BasicModel):
+class Vpn(datastore.BasicModel):
def __init__(self, project_id):
self.project_id = project_id
super(Vpn, self).__init__()
@@ -317,27 +324,25 @@ class Vpn(model.BasicModel):
@classmethod
def create(cls, project_id):
- # TODO (vish): get list of vpn ips from redis
- for ip in [FLAGS.vpn_ip]:
- try:
- port = cls.find_free_port_for_ip(ip)
- vpn = cls(project_id)
- # save ip for project
- vpn['project'] = project_id
- vpn['ip'] = ip
- vpn['port'] = port
- vpn.save()
- return vpn
- except NoMorePorts:
- pass
- raise NoMorePorts()
+ # TODO(vish): get list of vpn ips from redis
+ port = cls.find_free_port_for_ip(FLAGS.vpn_ip)
+ vpn = cls(project_id)
+ # save ip for project
+ vpn['project'] = project_id
+ vpn['ip'] = FLAGS.vpn_ip
+ vpn['port'] = port
+ vpn.save()
+ return vpn
@classmethod
def find_free_port_for_ip(cls, ip):
- # TODO(vish): the redis access should be refactored into a
- # base class
+ # TODO(vish): these redis commands should be generalized and
+ # placed into a base class. Conceptually, it is
+ # similar to an association, but we are just
+ # storing a set of values instead of keys that
+ # should be turned into objects.
redis = datastore.Redis.instance()
- key = 'ip:%s:ports'
+ key = 'ip:%s:ports' % ip
# TODO(vish): these ports should be allocated through an admin
# command instead of a flag
if (not redis.exists(key) and
@@ -345,14 +350,14 @@ class Vpn(model.BasicModel):
for i in range(FLAGS.vpn_start_port, FLAGS.vpn_end_port + 1):
redis.sadd(key, i)
- port = datastore.Redis.instance().spop(key)
+ port = redis.spop(key)
if not port:
raise NoMorePorts()
return port
@classmethod
def num_ports_for_ip(cls, ip):
- return datastore.Redis.instance().scard('ip:%s:ports')
+ return datastore.Redis.instance().scard('ip:%s:ports' % ip)
@property
def ip(self):
@@ -466,7 +471,9 @@ class UserManager(object):
# create and destroy a project
Vpn.create(name)
return conn.create_project(name,
- User.safe_id(manager_user), description, member_users)
+ User.safe_id(manager_user),
+ description,
+ member_users)
def get_projects(self):
@@ -584,7 +591,7 @@ class UserManager(object):
def __cert_subject(self, uid):
# FIXME(ja) - this should be pulled from a global configuration
- return "/C=US/ST=California/L=MountainView/O=AnsoLabs/OU=NovaDev/CN=%s-%s" % (uid, str(datetime.datetime.utcnow().isoformat()))
+ return FLAGS.credential_cert_subject % (uid, utils.isotime())
class LDAPWrapper(object):
@@ -773,7 +780,7 @@ class LDAPWrapper(object):
def __create_group(self, group_dn, name, uid,
description, member_uids = None):
- if self.group_exists(name):
+ if self.group_exists(group_dn):
raise exception.Duplicate("Group can't be created because "
"group %s already exists" % name)
members = []
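The reworked find_free_port_for_ip keeps one redis set of free ports per VPN ip and pops members atomically, so concurrent allocations cannot hand out the same port. In outline (a sketch using the key name and the new 1000-2000 port range from the flags above, not the class itself):

    import redis

    r = redis.Redis()
    key = 'ip:%s:ports' % '127.0.0.1'      # one free-port set per vpn_ip
    if not r.exists(key):
        for port in range(1000, 2001):     # vpn_start_port..vpn_end_port
            r.sadd(key, port)
    port = r.spop(key)                     # atomically claim a free port
    if not port:
        raise Exception('NoMorePorts')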
diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py
index 09da71c64..5f6ccf82e 100644
--- a/nova/cloudpipe/pipelib.py
+++ b/nova/cloudpipe/pipelib.py
@@ -76,12 +76,13 @@ class CloudPipe(object):
zippy.close()
def setup_keypair(self, user_id, project_id):
- key_name = '%s-key' % project_id
+ key_name = '%s%s' % (project_id, FLAGS.vpn_key_suffix)
try:
private_key, fingerprint = self.manager.generate_key_pair(user_id, key_name)
try:
key_dir = os.path.join(FLAGS.keys_path, user_id)
- os.makedirs(key_dir)
+ if not os.path.exists(key_dir):
+ os.makedirs(key_dir)
with open(os.path.join(key_dir, '%s.pem' % key_name),'w') as f:
f.write(private_key)
except:
diff --git a/nova/compute/disk.py b/nova/compute/disk.py
index e7090dad3..b6398f41e 100644
--- a/nova/compute/disk.py
+++ b/nova/compute/disk.py
@@ -36,7 +36,7 @@ from nova import exception
def partition(infile, outfile, local_bytes=0, local_type='ext2', execute=None):
"""Takes a single partition represented by infile and writes a bootable
drive image into outfile.
-
+
The first 63 sectors (0-62) of the resulting image is a master boot record.
Infile becomes the first primary partition.
If local bytes is specified, a second primary partition is created and
@@ -87,12 +87,14 @@ def partition(infile, outfile, local_bytes=0, local_type='ext2', execute=None):
% (infile, outfile, sector_size, primary_first))
@defer.inlineCallbacks
-def inject_key(key, image, partition=None, execute=None):
- """Injects a ssh key into a disk image.
- It adds the specified key to /root/.ssh/authorized_keys
+def inject_data(image, key=None, net=None, partition=None, execute=None):
+ """Injects a ssh key and optionally net data into a disk image.
+
it will mount the image as a fully partitioned disk and attempt to inject
into the specified partition number.
+
If partition is not specified it mounts the image as a single partition.
+
"""
out, err = yield execute('sudo losetup -f --show %s' % image)
if err:
@@ -119,15 +121,17 @@ def inject_key(key, image, partition=None, execute=None):
raise exception.Error('Failed to mount filesystem: %s' % err)
try:
- # inject key file
- yield _inject_into_fs(key, tmpdir, execute=execute)
+ if key:
+ # inject key file
+ yield _inject_key_into_fs(key, tmpdir, execute=execute)
+ if net:
+ yield _inject_net_into_fs(net, tmpdir, execute=execute)
finally:
# unmount device
yield execute('sudo umount %s' % mapped_device)
finally:
# remove temporary directory
- # TODO(termie): scary, is there any thing we can check here?
- yield execute('rm -rf %s' % tmpdir)
+ yield execute('rmdir %s' % tmpdir)
if not partition is None:
# remove partitions
yield execute('sudo kpartx -d %s' % device)
@@ -136,11 +140,17 @@ def inject_key(key, image, partition=None, execute=None):
yield execute('sudo losetup -d %s' % device)
@defer.inlineCallbacks
-def _inject_into_fs(key, fs, execute=None):
+def _inject_key_into_fs(key, fs, execute=None):
sshdir = os.path.join(os.path.join(fs, 'root'), '.ssh')
yield execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter
yield execute('sudo chown root %s' % sshdir)
yield execute('sudo chmod 700 %s' % sshdir)
keyfile = os.path.join(sshdir, 'authorized_keys')
- yield execute('sudo bash -c "cat >> %s"' % keyfile, '\n' + key + '\n')
+ yield execute('sudo tee -a %s' % keyfile, '\n' + key.strip() + '\n')
+
+@defer.inlineCallbacks
+def _inject_net_into_fs(net, fs, execute=None):
+ netfile = os.path.join(os.path.join(os.path.join(
+ fs, 'etc'), 'network'), 'interfaces')
+ yield execute('sudo tee %s' % netfile, net)
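inject_data generalizes the old inject_key: it mounts the image through losetup (and kpartx, when a partition number is given) and can write both an authorized_keys entry and an /etc/network/interfaces file. A hedged usage sketch; the image path, key material, and execute helper here are assumptions:

    from twisted.internet import defer
    from nova.compute import disk

    @defer.inlineCallbacks
    def prepare(execute):
        # injects the ssh key and the rendered interfaces file in one mount
        yield disk.inject_data('/var/lib/nova/instances/i-1/disk',
                               key='ssh-rsa AAAA... user@host',
                               net=open('interfaces.rendered').read(),
                               partition=1,
                               execute=execute)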
diff --git a/nova/compute/interfaces.template b/nova/compute/interfaces.template
new file mode 100644
index 000000000..11df301f6
--- /dev/null
+++ b/nova/compute/interfaces.template
@@ -0,0 +1,18 @@
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+# The primary network interface
+auto eth0
+iface eth0 inet static
+ address %(address)s
+ netmask %(netmask)s
+ network %(network)s
+ broadcast %(broadcast)s
+ gateway %(gateway)s
+ dns-nameservers %(dns)s
+
+
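The template is plain Python %-formatting, and its keys line up with the simple_network_* flags added to nova/compute/network.py below. Rendering it looks roughly like this, using the flag defaults as values:

    params = {'address':   '192.168.0.2',
              'netmask':   '255.255.255.0',
              'network':   '192.168.0.0',
              'broadcast': '192.168.0.255',
              'gateway':   '192.168.0.1',
              'dns':       '8.8.4.4'}
    with open('nova/compute/interfaces.template') as f:
        print f.read() % params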
diff --git a/nova/compute/linux_net.py b/nova/compute/linux_net.py
index 0bd5ce007..c9e5bb1a7 100644
--- a/nova/compute/linux_net.py
+++ b/nova/compute/linux_net.py
@@ -62,6 +62,9 @@ def remove_rule(cmd):
def bind_public_ip(ip, interface):
runthis("Binding IP to interface: %s", "sudo ip addr add %s dev %s" % (ip, interface))
+
+def unbind_public_ip(ip, interface):
+ runthis("Binding IP to interface: %s", "sudo ip addr del %s dev %s" % (ip, interface))
def vlan_create(net):
""" create a vlan on on a bridge device unless vlan already exists """
@@ -95,10 +98,10 @@ def dnsmasq_cmd(net):
' --pid-file=%s' % dhcp_file(net['vlan'], 'pid'),
' --listen-address=%s' % net.dhcp_listen_address,
' --except-interface=lo',
- ' --dhcp-range=%s,static,120s' % (net.dhcp_range_start),
- ' --dhcp-lease-max=61',
+ ' --dhcp-range=%s,static,600s' % (net.dhcp_range_start),
' --dhcp-hostsfile=%s' % dhcp_file(net['vlan'], 'conf'),
- ' --dhcp-leasefile=%s' % dhcp_file(net['vlan'], 'leases')]
+ ' --dhcp-script=%s' % bin_file('dhcpleasor.py'),
+ ' --leasefile-ro']
return ''.join(cmd)
def hostDHCP(network, host, mac):
@@ -154,6 +157,9 @@ def dhcp_file(vlan, kind):
return os.path.abspath("%s/nova-%s.%s" % (FLAGS.networks_path, vlan, kind))
+def bin_file(script):
+ return os.path.abspath(os.path.join(__file__, "../../../bin", script))
+
def dnsmasq_pid_for(network):
""" the pid for prior dnsmasq instance for a vlan,
returns None if no pid file exists
diff --git a/nova/compute/model.py b/nova/compute/model.py
index ad1f97a0a..cc5f74b3d 100644
--- a/nova/compute/model.py
+++ b/nova/compute/model.py
@@ -57,19 +57,6 @@ from nova import utils
FLAGS = flags.FLAGS
-class ConnectionError(exception.Error):
- pass
-
-
-def absorb_connection_error(fn):
- def _wrapper(*args, **kwargs):
- try:
- return fn(*args, **kwargs)
- except redis.exceptions.ConnectionError, ce:
- raise ConnectionError(str(ce))
- return _wrapper
-
-
# TODO(todd): Implement this at the class level for Instance
class InstanceDirectory(object):
"""an api for interacting with the global state of instances"""
@@ -81,7 +68,7 @@ class InstanceDirectory(object):
def __getitem__(self, item):
return self.get(item)
- @absorb_connection_error
+ @datastore.absorb_connection_error
def by_project(self, project):
"""returns a list of instance objects for a project"""
for instance_id in datastore.Redis.instance().smembers('project:%s:instances' % project):
@@ -105,12 +92,12 @@ class InstanceDirectory(object):
"""returns the instance a volume is attached to"""
pass
- @absorb_connection_error
+ @datastore.absorb_connection_error
def exists(self, instance_id):
return datastore.Redis.instance().sismember('instances', instance_id)
@property
- @absorb_connection_error
+ @datastore.absorb_connection_error
def all(self):
"""returns a list of all instances"""
for instance_id in datastore.Redis.instance().smembers('instances'):
@@ -121,196 +108,8 @@ class InstanceDirectory(object):
instance_id = utils.generate_uid('i')
return self.get(instance_id)
-class BasicModel(object):
- """
- All Redis-backed data derives from this class.
-
- You MUST specify an identifier() property that returns a unique string
- per instance.
-
- You MUST have an initializer that takes a single argument that is a value
- returned by identifier() to load a new class with.
-
- You may want to specify a dictionary for default_state().
-
- You may also specify override_type at the class left to use a key other
- than __class__.__name__.
-
- You override save and destroy calls to automatically build and destroy
- associations.
- """
-
- override_type = None
-
- @absorb_connection_error
- def __init__(self):
- self.initial_state = {}
- self.state = datastore.Redis.instance().hgetall(self.__redis_key)
- if self.state:
- self.initial_state = self.state
- else:
- self.state = self.default_state()
-
- def default_state(self):
- """You probably want to define this in your subclass"""
- return {}
-
- @classmethod
- def _redis_name(cls):
- return self.override_type or cls.__name__
-
- @classmethod
- def lookup(cls, identifier):
- rv = cls(identifier)
- if rv.is_new_record():
- return None
- else:
- return rv
-
- @classmethod
- @absorb_connection_error
- def all(cls):
- """yields all objects in the store"""
- redis_set = cls._redis_set_name(cls.__name__)
- for identifier in datastore.Redis.instance().smembers(redis_set):
- yield cls(identifier)
-
- @classmethod
- @absorb_connection_error
- def associated_to(cls, foreign_type, foreign_id):
- redis_set = cls._redis_association_name(foreign_type, foreign_id)
- for identifier in datastore.Redis.instance().smembers(redis_set):
- yield cls(identifier)
-
- @classmethod
- def _redis_set_name(cls, kls_name):
- # stupidly pluralize (for compatiblity with previous codebase)
- return kls_name.lower() + "s"
- @classmethod
- def _redis_association_name(cls, foreign_type, foreign_id):
- return cls._redis_set_name("%s:%s:%s" %
- (foreign_type, foreign_id, cls.__name__))
-
- @property
- def identifier(self):
- """You DEFINITELY want to define this in your subclass"""
- raise NotImplementedError("Your sublcass should define identifier")
-
- @property
- def __redis_key(self):
- return '%s:%s' % (self.__class__.__name__.lower(), self.identifier)
-
- def __repr__(self):
- return "<%s:%s>" % (self.__class__.__name__, self.identifier)
-
- def keys(self):
- return self.state.keys()
-
- def copy(self):
- copyDict = {}
- for item in self.keys():
- copyDict[item] = self[item]
- return copyDict
-
- def get(self, item, default):
- return self.state.get(item, default)
-
- def update(self, update_dict):
- return self.state.update(update_dict)
-
- def setdefault(self, item, default):
- return self.state.setdefault(item, default)
-
- def __getitem__(self, item):
- return self.state[item]
-
- def __setitem__(self, item, val):
- self.state[item] = val
- return self.state[item]
-
- def __delitem__(self, item):
- """We don't support this"""
- raise Exception("Silly monkey, models NEED all their properties.")
-
- def is_new_record(self):
- return self.initial_state == {}
-
- @absorb_connection_error
- def add_to_index(self):
- set_name = self.__class__._redis_set_name(self.__class__.__name__)
- datastore.Redis.instance().sadd(set_name, self.identifier)
-
- @absorb_connection_error
- def remove_from_index(self):
- set_name = self.__class__._redis_set_name(self.__class__.__name__)
- datastore.Redis.instance().srem(set_name, self.identifier)
-
- @absorb_connection_error
- def remove_from_index(self):
- set_name = self.__class__._redis_set_name(self.__class__.__name__)
- datastore.Redis.instance().srem(set_name, self.identifier)
-
- @absorb_connection_error
- def associate_with(self, foreign_type, foreign_id):
- # note the extra 's' on the end is for plurality
- # to match the old data without requiring a migration of any sort
- self.add_associated_model_to_its_set(foreign_type, foreign_id)
- redis_set = self.__class__._redis_association_name(foreign_type,
- foreign_id)
- datastore.Redis.instance().sadd(redis_set, self.identifier)
-
- @absorb_connection_error
- def unassociate_with(self, foreign_type, foreign_id):
- redis_set = self.__class__._redis_association_name(foreign_type,
- foreign_id)
- datastore.Redis.instance().srem(redis_set, self.identifier)
-
- def add_associated_model_to_its_set(self, my_type, my_id):
- table = globals()
- klsname = my_type.capitalize()
- if table.has_key(klsname):
- my_class = table[klsname]
- my_inst = my_class(my_id)
- my_inst.save()
- else:
- logging.warning("no model class for %s when building"
- " association from %s",
- klsname, self)
-
- @absorb_connection_error
- def save(self):
- """
- update the directory with the state from this model
- also add it to the index of items of the same type
- then set the initial_state = state so new changes are tracked
- """
- # TODO(ja): implement hmset in redis-py and use it
- # instead of multiple calls to hset
- if self.is_new_record():
- self["create_time"] = utils.isotime()
- for key, val in self.state.iteritems():
- # if (not self.initial_state.has_key(key)
- # or self.initial_state[key] != val):
- datastore.Redis.instance().hset(self.__redis_key, key, val)
- self.add_to_index()
- self.initial_state = self.state
- return True
-
- @absorb_connection_error
- def destroy(self):
- """
- deletes all related records from datastore.
- does NOT do anything to running libvirt state.
- """
- logging.info("Destroying datamodel for %s %s",
- self.__class__.__name__, self.identifier)
- datastore.Redis.instance().delete(self.__redis_key)
- self.remove_from_index()
- return True
-
-
-class Instance(BasicModel):
+class Instance(datastore.BasicModel):
"""Wrapper around stored properties of an instance"""
def __init__(self, instance_id):
@@ -363,12 +162,11 @@ class Instance(BasicModel):
def destroy(self):
"""Destroy associations, then destroy the object"""
self.unassociate_with("project", self.project)
+ self.unassociate_with("node", self['node_name'])
return super(Instance, self).destroy()
-class Host(BasicModel):
- """
- A Host is the machine where a Daemon is running.
- """
+class Host(datastore.BasicModel):
+ """A Host is the machine where a Daemon is running."""
def __init__(self, hostname):
"""loads an instance from the datastore if exists"""
@@ -384,10 +182,8 @@ class Host(BasicModel):
return self.hostname
-class Daemon(BasicModel):
- """
- A Daemon is a job (compute, api, network, ...) that runs on a host.
- """
+class Daemon(datastore.BasicModel):
+ """A Daemon is a job (compute, api, network, ...) that runs on a host."""
def __init__(self, host_or_combined, binpath=None):
"""loads an instance from the datastore if exists"""
@@ -404,9 +200,9 @@ class Daemon(BasicModel):
super(Daemon, self).__init__()
def default_state(self):
- return {"hostname": self.hostname,
- "binary": self.binary,
- "updated_at": utils.isotime()
+ return {"hostname": self.hostname,
+ "binary": self.binary,
+ "updated_at": utils.isotime()
}
@property
@@ -429,8 +225,7 @@ class Daemon(BasicModel):
def heartbeat(self):
self['updated_at'] = utils.isotime()
- self.save()
- return True
+ return self.save()
@classmethod
def by_host(cls, hostname):
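The BasicModel contract removed here (and now living in nova/datastore.py) is unchanged for the subclasses that follow: set your identifying attribute before calling the base initializer, expose it through identifier, and optionally supply default_state. A minimal sketch of a conforming subclass:

    from nova import datastore

    class Widget(datastore.BasicModel):
        def __init__(self, widget_id):
            self.widget_id = widget_id     # set before super(), which loads state
            super(Widget, self).__init__()

        @property
        def identifier(self):              # required: unique string per instance
            return self.widget_id

        def default_state(self):           # optional: state for new records
            return {'state': 'pending'}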
diff --git a/nova/compute/network.py b/nova/compute/network.py
index 7b37cde6d..e5d3d18df 100644
--- a/nova/compute/network.py
+++ b/nova/compute/network.py
@@ -31,11 +31,10 @@ from nova import vendor
import IPy
from nova import datastore
-import nova.exception
-from nova.compute import exception
from nova import flags
-from nova.compute import model
from nova import utils
+from nova import exception
+from nova.compute import exception as compute_exception
from nova.auth import users
import linux_net
@@ -59,10 +58,30 @@ flags.DEFINE_integer('cnt_vpn_clients', 5,
flags.DEFINE_integer('cloudpipe_start_port', 12000,
'Starting port for mapped CloudPipe external ports')
+flags.DEFINE_boolean('simple_network', False,
+ 'Use simple networking instead of vlans')
+flags.DEFINE_string('simple_network_bridge', 'br100',
+ 'Bridge for simple network instances')
+flags.DEFINE_list('simple_network_ips', ['192.168.0.2'],
+ 'Available ips for simple network')
+flags.DEFINE_string('simple_network_template',
+ utils.abspath('compute/interfaces.template'),
+ 'Template file for simple network')
+flags.DEFINE_string('simple_network_netmask', '255.255.255.0',
+ 'Netmask for simple network')
+flags.DEFINE_string('simple_network_network', '192.168.0.0',
+ 'Network for simple network')
+flags.DEFINE_string('simple_network_gateway', '192.168.0.1',
+ 'Gateway for simple network')
+flags.DEFINE_string('simple_network_broadcast', '192.168.0.255',
+ 'Broadcast for simple network')
+flags.DEFINE_string('simple_network_dns', '8.8.4.4',
+ 'Dns for simple network')
+
logging.getLogger().setLevel(logging.DEBUG)
-class Vlan(model.BasicModel):
+class Vlan(datastore.BasicModel):
def __init__(self, project, vlan):
"""
Since we don't want to try and find a vlan by its identifier,
@@ -82,7 +101,7 @@ class Vlan(model.BasicModel):
return instance
@classmethod
- @model.absorb_connection_error
+ @datastore.absorb_connection_error
def lookup(cls, project):
set_name = cls._redis_set_name(cls.__name__)
vlan = datastore.Redis.instance().hget(set_name, project)
@@ -92,14 +111,14 @@ class Vlan(model.BasicModel):
return None
@classmethod
- @model.absorb_connection_error
+ @datastore.absorb_connection_error
def dict_by_project(cls):
"""a hash of project:vlan"""
set_name = cls._redis_set_name(cls.__name__)
return datastore.Redis.instance().hgetall(set_name)
@classmethod
- @model.absorb_connection_error
+ @datastore.absorb_connection_error
def dict_by_vlan(cls):
"""a hash of vlan:project"""
set_name = cls._redis_set_name(cls.__name__)
@@ -110,23 +129,23 @@ class Vlan(model.BasicModel):
return rv
@classmethod
- @model.absorb_connection_error
+ @datastore.absorb_connection_error
def all(cls):
set_name = cls._redis_set_name(cls.__name__)
for project,vlan in datastore.Redis.instance().hgetall(set_name):
yield cls(project, vlan)
- @model.absorb_connection_error
+ @datastore.absorb_connection_error
def save(self):
"""
Vlan saves state into a giant hash named "vlans", with keys of
- proejct_id and value of valn number. Therefore, we skip the
+ project_id and value of vlan number. Therefore, we skip the
default way of saving into "vlan:ID" and adding to a set of "vlans".
"""
set_name = self._redis_set_name(self.__class__.__name__)
datastore.Redis.instance().hset(set_name, self.project_id, self.vlan_id)
- @model.absorb_connection_error
+ @datastore.absorb_connection_error
def destroy(self):
set_name = self._redis_set_name(self.__class__.__name__)
datastore.Redis.instance().hdel(set_name, self.project)
@@ -135,6 +154,7 @@ class Vlan(model.BasicModel):
vlan = int(self.vlan_id)
network = IPy.IP(FLAGS.private_range)
start = (vlan-FLAGS.vlan_start) * FLAGS.network_size
+ # minus one for the gateway.
return "%s-%s" % (network[start],
network[start + FLAGS.network_size - 1])
@@ -144,7 +164,7 @@ class Vlan(model.BasicModel):
# TODO(ja): does vlanpool "keeper" need to know the min/max - shouldn't FLAGS always win?
# TODO(joshua): Save the IPs at the top of each subnet for cloudpipe vpn clients
-class BaseNetwork(model.BasicModel):
+class BaseNetwork(datastore.BasicModel):
override_type = 'network'
@property
@@ -188,18 +208,9 @@ class BaseNetwork(model.BasicModel):
return self.network.broadcast()
@property
- def gateway(self):
- return self.network[1]
-
- @property
def bridge_name(self):
return "br%s" % (self["vlan"])
- def range(self):
- # the .2 address is always CloudPipe
- for idx in range(3, len(self.network)-2):
- yield self.network[idx]
-
@property
def user(self):
return users.UserManager.instance().get_user(self['user_id'])
@@ -214,7 +225,7 @@ class BaseNetwork(model.BasicModel):
@property
def hosts(self):
- return datastore.Redis.instance().hgetall(self._hosts_key)
+ return datastore.Redis.instance().hgetall(self._hosts_key) or {}
def _add_host(self, _user_id, _project_id, host, target):
datastore.Redis.instance().hset(self._hosts_key, host, target)
@@ -241,14 +252,22 @@ class BaseNetwork(model.BasicModel):
self._add_host(user_id, project_id, address, mac)
self.express(address=address)
return address
- raise exception.NoMoreAddresses()
+ raise compute_exception.NoMoreAddresses("Project %s with network %s" %
+ (project_id, str(self.network)))
- def deallocate_ip(self, ip_str):
+ def lease_ip(self, ip_str):
+ logging.debug("Leasing allocated IP %s" % (ip_str))
+
+ def release_ip(self, ip_str):
if not ip_str in self.assigned:
- raise exception.AddressNotAllocated()
+ raise compute_exception.AddressNotAllocated()
self.deexpress(address=ip_str)
self._rem_host(ip_str)
+ def deallocate_ip(self, ip_str):
+ # Do nothing for now, cleanup on ip release
+ pass
+
def list_addresses(self):
for address in self.hosts:
yield address
@@ -280,8 +299,6 @@ class BridgedNetwork(BaseNetwork):
def get_network_for_project(cls, user_id, project_id, security_group):
vlan = get_vlan_for_project(project_id)
network_str = vlan.subnet()
- logging.debug("creating network on vlan %s with network string %s",
- vlan.vlan_id, network_str)
return cls.create(user_id, project_id, security_group, vlan.vlan_id,
network_str)
@@ -307,7 +324,7 @@ class DHCPNetwork(BridgedNetwork):
def __init__(self, *args, **kwargs):
super(DHCPNetwork, self).__init__(*args, **kwargs)
- logging.debug("Initing DHCPNetwork object...")
+ # logging.debug("Initing DHCPNetwork object...")
self.dhcp_listen_address = self.network[1]
self.dhcp_range_start = self.network[3]
self.dhcp_range_end = self.network[-(1 + FLAGS.cnt_vpn_clients)]
@@ -351,7 +368,7 @@ class DHCPNetwork(BridgedNetwork):
else:
linux_net.start_dnsmasq(self)
-class PublicAddress(model.BasicModel):
+class PublicAddress(datastore.BasicModel):
override_type = "address"
def __init__(self, address):
@@ -422,14 +439,14 @@ class PublicNetworkController(BaseNetwork):
def associate_address(self, public_ip, private_ip, instance_id):
if not public_ip in self.assigned:
- raise exception.AddressNotAllocated()
+ raise compute_exception.AddressNotAllocated()
# TODO(joshua): Keep an index going both ways
for addr in self.host_objs:
if addr.get('private_ip', None) == private_ip:
- raise exception.AddressAlreadyAssociated()
+ raise compute_exception.AddressAlreadyAssociated()
addr = self.get_host(public_ip)
if addr.get('private_ip', 'available') != 'available':
- raise exception.AddressAlreadyAssociated()
+ raise compute_exception.AddressAlreadyAssociated()
addr['private_ip'] = private_ip
addr['instance_id'] = instance_id
addr.save()
@@ -437,10 +454,10 @@ class PublicNetworkController(BaseNetwork):
def disassociate_address(self, public_ip):
if not public_ip in self.assigned:
- raise exception.AddressNotAllocated()
+ raise compute_exception.AddressNotAllocated()
addr = self.get_host(public_ip)
if addr.get('private_ip', 'available') == 'available':
- raise exception.AddressNotAssociated()
+ raise compute_exception.AddressNotAssociated()
self.deexpress(address=public_ip)
addr['private_ip'] = 'available'
addr['instance_id'] = 'available'
@@ -470,6 +487,7 @@ class PublicNetworkController(BaseNetwork):
def deexpress(self, address=None):
addr = self.get_host(address)
private_ip = addr['private_ip']
+ linux_net.unbind_public_ip(address, FLAGS.public_interface)
linux_net.remove_rule("PREROUTING -t nat -d %s -j DNAT --to %s"
% (address, private_ip))
linux_net.remove_rule("POSTROUTING -t nat -s %s -j SNAT --to %s"
@@ -502,22 +520,42 @@ def get_vlan_for_project(project_id):
# NOTE(todd): This doesn't check for vlan id match, because
# it seems to be assumed that vlan<=>project is
# always a 1:1 mapping. It could be made way
- # sexier if it didn't fight agains the way
+ # sexier if it didn't fight against the way
# BasicModel worked and used associate_with
# to build connections to projects.
+ # NOTE(josh): This is here because we want to make sure we
+ # don't orphan any VLANs. It is basically
+ # garbage collection for after projects abandoned
+ # their reference.
vlan.project_id = project_id
vlan.save()
return vlan
else:
return Vlan.create(project_id, vnum)
- raise exception.AddressNotAllocated("Out of VLANs")
+ raise compute_exception.AddressNotAllocated("Out of VLANs")
def get_network_by_address(address):
+ logging.debug("Get Network By Address: %s" % address)
for project in users.UserManager.instance().get_projects():
net = get_project_network(project.id)
if address in net.assigned:
+ logging.debug("Found %s in %s" % (address, project.id))
return net
- raise exception.AddressNotAllocated()
+ raise compute_exception.AddressNotAllocated()
+
+def allocate_simple_ip():
+ redis = datastore.Redis.instance()
+ if not redis.exists('ips') and not len(redis.keys('instances:*')):
+ for address in FLAGS.simple_network_ips:
+ redis.sadd('ips', address)
+ address = redis.spop('ips')
+ if not address:
+ raise exception.NoMoreAddresses()
+ return address
+
+def deallocate_simple_ip(address):
+ datastore.Redis.instance().sadd('ips', address)
+
def allocate_vpn_ip(user_id, project_id, mac):
return get_project_network(project_id).allocate_vpn_ip(mac)
@@ -527,6 +565,12 @@ def allocate_ip(user_id, project_id, mac):
def deallocate_ip(address):
return get_network_by_address(address).deallocate_ip(address)
+
+def release_ip(address):
+ return get_network_by_address(address).release_ip(address)
+
+def lease_ip(address):
+ return get_network_by_address(address).lease_ip(address)
def get_project_network(project_id, security_group='default'):
""" get a project's private network, allocating one if needed """
@@ -534,7 +578,7 @@ def get_project_network(project_id, security_group='default'):
# Refactor to still use the LDAP backend, but not User specific.
project = users.UserManager.instance().get_project(project_id)
if not project:
- raise nova.exception.Error("Project %s doesn't exist, uhoh." %
+ raise exception.Error("Project %s doesn't exist, uhoh." %
project_id)
return DHCPNetwork.get_network_for_project(project.project_manager_id,
project.id, security_group)
diff --git a/nova/compute/node.py b/nova/compute/node.py
index 7859d71c0..f41bc34ea 100644
--- a/nova/compute/node.py
+++ b/nova/compute/node.py
@@ -30,7 +30,6 @@ import base64
import json
import logging
import os
-import random
import shutil
import sys
@@ -58,7 +57,7 @@ from nova.objectstore import image # for image_path flag
FLAGS = flags.FLAGS
flags.DEFINE_string('libvirt_xml_template',
utils.abspath('compute/libvirt.xml.template'),
- 'Network XML Template')
+ 'Libvirt XML Template')
flags.DEFINE_bool('use_s3', True,
'whether to get images from s3 or use local copy')
flags.DEFINE_string('instances_path', utils.abspath('../instances'),
@@ -144,8 +143,7 @@ class Node(object, service.Service):
@defer.inlineCallbacks
def report_state(self, nodename, daemon):
- # TODO(termie): Termie has an idea for wrapping this connection failure
- # pattern to be more elegant. -todd
+ # TODO(termie): make this pattern be more elegant. -todd
try:
record = model.Daemon(nodename, daemon)
record.heartbeat()
@@ -164,9 +162,10 @@ class Node(object, service.Service):
""" launch a new instance with specified options """
logging.debug("Starting instance %s..." % (instance_id))
inst = self.instdir.get(instance_id)
- # TODO: Get the real security group of launch in here
- security_group = "default"
- net = network.BridgedNetwork.get_network_for_project(inst['user_id'],
+ if not FLAGS.simple_network:
+ # TODO: Get the real security group of launch in here
+ security_group = "default"
+ net = network.BridgedNetwork.get_network_for_project(inst['user_id'],
inst['project_id'],
security_group).express()
inst['node_name'] = FLAGS.node_name
@@ -470,7 +469,7 @@ class Instance(object):
# ensure directories exist and are writable
yield self._pool.simpleExecute('mkdir -p %s' % basepath())
yield self._pool.simpleExecute('chmod 0777 %s' % basepath())
-
+
# TODO(termie): these are blocking calls, it would be great
# if they weren't.
@@ -478,11 +477,11 @@ class Instance(object):
f = open(basepath('libvirt.xml'), 'w')
f.write(libvirt_xml)
f.close()
-
+
if FLAGS.fake_libvirt:
logging.info('fake_libvirt, nothing to do for create_image')
raise defer.returnValue(None);
-
+
if FLAGS.use_s3:
_fetch_file = self._fetch_s3_image
else:
@@ -495,12 +494,23 @@ class Instance(object):
if not os.path.exists(basepath('ramdisk')):
yield _fetch_file(data['ramdisk_id'], basepath('ramdisk'))
- execute = lambda cmd, input=None: self._pool.simpleExecute(cmd=cmd, input=input, error_ok=1)
-
- if data['key_data']:
- logging.info('Injecting key data into image %s', data['image_id'])
- yield disk.inject_key(
- data['key_data'], basepath('disk-raw'), execute=execute)
+ execute = lambda cmd, input=None: self._pool.simpleExecute(cmd=cmd,
+ input=input,
+ error_ok=1)
+
+ key = data['key_data']
+ net = None
+ if FLAGS.simple_network:
+ with open(FLAGS.simple_network_template) as f:
+ net = f.read() % {'address': data['private_dns_name'],
+ 'network': FLAGS.simple_network_network,
+ 'netmask': FLAGS.simple_network_netmask,
+ 'gateway': FLAGS.simple_network_gateway,
+ 'broadcast': FLAGS.simple_network_broadcast,
+ 'dns': FLAGS.simple_network_dns}
+ if key or net:
+ logging.info('Injecting data into image %s', data['image_id'])
+ yield disk.inject_data(basepath('disk-raw'), key, net, execute=execute)
if os.path.exists(basepath('disk')):
yield self._pool.simpleExecute('rm -f %s' % basepath('disk'))
@@ -509,7 +519,7 @@ class Instance(object):
* 1024 * 1024 * 1024)
yield disk.partition(
basepath('disk-raw'), basepath('disk'), bytes, execute=execute)
-
+
@defer.inlineCallbacks
@exception.wrap_exception
def spawn(self):
@@ -520,7 +530,7 @@ class Instance(object):
self.set_state(Instance.NOSTATE, 'launching')
logging.info('self %s', self)
try:
- yield self._create_image(xml)
+ yield self._create_image(xml)
self._conn.createXML(xml, 0)
# TODO(termie): this should actually register
# a callback to check for successful boot
@@ -543,8 +553,6 @@ class Instance(object):
timer.f = _wait_for_boot
timer.start(interval=0.5, now=True)
except Exception, ex:
- # FIXME(todd): this is just for debugging during testing
- print "FUUUUUUUUUUUUUUUUUUUUUU: %s" % ex
logging.debug(ex)
self.set_state(Instance.SHUTDOWN)
diff --git a/nova/crypto.py b/nova/crypto.py
index 413796ccc..fc6ed714f 100644
--- a/nova/crypto.py
+++ b/nova/crypto.py
@@ -23,10 +23,12 @@ Wrappers around standard crypto, including root and intermediate CAs,
SSH keypairs and x509 certificates.
"""
+import base64
import hashlib
import logging
import os
import shutil
+import struct
import tempfile
import time
import utils
@@ -86,14 +88,17 @@ def generate_key_pair(bits=1024):
def ssl_pub_to_ssh_pub(ssl_public_key, name='root', suffix='nova'):
- """requires lsh-utils"""
- convert="sed -e'1d' -e'$d' | pkcs1-conv --public-key-info --base-64 |" \
- + " sexp-conv | sed -e'1s/(rsa-pkcs1/(rsa-pkcs1-sha1/' | sexp-conv -s" \
- + " transport | lsh-export-key --openssh"
- (out, err) = utils.execute(convert, ssl_public_key)
- if err:
- raise exception.Error("Failed to generate key: %s", err)
- return '%s %s@%s\n' %(out.strip(), name, suffix)
+ rsa_key = M2Crypto.RSA.load_pub_key_bio(M2Crypto.BIO.MemoryBuffer(ssl_public_key))
+ e, n = rsa_key.pub()
+
+ key_type = 'ssh-rsa'
+
+ key_data = struct.pack('>I', len(key_type))
+ key_data += key_type
+ key_data += '%s%s' % (e,n)
+
+ b64_blob = base64.b64encode(key_data)
+ return '%s %s %s@%s\n' %(key_type, b64_blob, name, suffix)
def generate_x509_cert(subject, bits=1024):
diff --git a/nova/datastore.py b/nova/datastore.py
index 922432a23..9dbae08a4 100644
--- a/nova/datastore.py
+++ b/nova/datastore.py
@@ -28,6 +28,7 @@ before trying to run this.
from nova import vendor
import redis
+from nova import exception
from nova import flags
from nova import utils
@@ -54,3 +55,200 @@ class Redis(object):
cls._instance = inst
return cls._instance
+
+class ConnectionError(exception.Error):
+ pass
+
+
+def absorb_connection_error(fn):
+ def _wrapper(*args, **kwargs):
+ try:
+ return fn(*args, **kwargs)
+ except redis.exceptions.ConnectionError, ce:
+ raise ConnectionError(str(ce))
+ return _wrapper
+
+
+class BasicModel(object):
+ """
+ All Redis-backed data derives from this class.
+
+ You MUST specify an identifier() property that returns a unique string
+ per instance.
+
+ You MUST have an initializer that takes a single argument that is a value
+ returned by identifier() to load a new class with.
+
+ You may want to specify a dictionary for default_state().
+
+ You may also specify override_type at the class left to use a key other
+ than __class__.__name__.
+
+ You override save and destroy calls to automatically build and destroy
+ associations.
+ """
+
+ override_type = None
+
+ @absorb_connection_error
+ def __init__(self):
+ self.initial_state = {}
+ self.state = Redis.instance().hgetall(self.__redis_key)
+ if self.state:
+ self.initial_state = self.state
+ else:
+ self.state = self.default_state()
+
+ def default_state(self):
+ """You probably want to define this in your subclass"""
+ return {}
+
+ @classmethod
+ def _redis_name(cls):
+ return self.override_type or cls.__name__
+
+ @classmethod
+ def lookup(cls, identifier):
+ rv = cls(identifier)
+ if rv.is_new_record():
+ return None
+ else:
+ return rv
+
+ @classmethod
+ @absorb_connection_error
+ def all(cls):
+ """yields all objects in the store"""
+ redis_set = cls._redis_set_name(cls.__name__)
+ for identifier in Redis.instance().smembers(redis_set):
+ yield cls(identifier)
+
+ @classmethod
+ @absorb_connection_error
+ def associated_to(cls, foreign_type, foreign_id):
+ redis_set = cls._redis_association_name(foreign_type, foreign_id)
+ for identifier in Redis.instance().smembers(redis_set):
+ yield cls(identifier)
+
+ @classmethod
+ def _redis_set_name(cls, kls_name):
+ # stupidly pluralize (for compatiblity with previous codebase)
+ return kls_name.lower() + "s"
+
+ @classmethod
+ def _redis_association_name(cls, foreign_type, foreign_id):
+ return cls._redis_set_name("%s:%s:%s" %
+ (foreign_type, foreign_id, cls.__name__))
+
+ @property
+ def identifier(self):
+ """You DEFINITELY want to define this in your subclass"""
+ raise NotImplementedError("Your subclass should define identifier")
+
+ @property
+ def __redis_key(self):
+ return '%s:%s' % (self.__class__.__name__.lower(), self.identifier)
+
+ def __repr__(self):
+ return "<%s:%s>" % (self.__class__.__name__, self.identifier)
+
+ def keys(self):
+ return self.state.keys()
+
+ def copy(self):
+ copyDict = {}
+ for item in self.keys():
+ copyDict[item] = self[item]
+ return copyDict
+
+ def get(self, item, default):
+ return self.state.get(item, default)
+
+ def update(self, update_dict):
+ return self.state.update(update_dict)
+
+ def setdefault(self, item, default):
+ return self.state.setdefault(item, default)
+
+ def __getitem__(self, item):
+ return self.state[item]
+
+ def __setitem__(self, item, val):
+ self.state[item] = val
+ return self.state[item]
+
+ def __delitem__(self, item):
+ """We don't support this"""
+ raise Exception("Silly monkey, models NEED all their properties.")
+
+ def is_new_record(self):
+ return self.initial_state == {}
+
+ @absorb_connection_error
+ def add_to_index(self):
+ set_name = self.__class__._redis_set_name(self.__class__.__name__)
+ Redis.instance().sadd(set_name, self.identifier)
+
+ @absorb_connection_error
+ def remove_from_index(self):
+ set_name = self.__class__._redis_set_name(self.__class__.__name__)
+ Redis.instance().srem(set_name, self.identifier)
+
+ @absorb_connection_error
+ def remove_from_index(self):
+ set_name = self.__class__._redis_set_name(self.__class__.__name__)
+ Redis.instance().srem(set_name, self.identifier)
+
+ @absorb_connection_error
+ def associate_with(self, foreign_type, foreign_id):
+ # note the extra 's' on the end is for plurality
+ # to match the old data without requiring a migration of any sort
+ self.add_associated_model_to_its_set(foreign_type, foreign_id)
+ redis_set = self.__class__._redis_association_name(foreign_type,
+ foreign_id)
+ Redis.instance().sadd(redis_set, self.identifier)
+
+ @absorb_connection_error
+ def unassociate_with(self, foreign_type, foreign_id):
+ redis_set = self.__class__._redis_association_name(foreign_type,
+ foreign_id)
+ Redis.instance().srem(redis_set, self.identifier)
+
+ def add_associated_model_to_its_set(self, my_type, my_id):
+ table = globals()
+ klsname = my_type.capitalize()
+ if table.has_key(klsname):
+ my_class = table[klsname]
+ my_inst = my_class(my_id)
+ my_inst.save()
+ else:
+ logging.warning("no model class for %s when building"
+ " association from %s",
+ klsname, self)
+
+ @absorb_connection_error
+ def save(self):
+ """
+ update the directory with the state from this model
+ also add it to the index of items of the same type
+ then set the initial_state = state so new changes are tracked
+ """
+ # TODO(ja): implement hmset in redis-py and use it
+ # instead of multiple calls to hset
+ if self.is_new_record():
+ self["create_time"] = utils.isotime()
+ for key, val in self.state.iteritems():
+ Redis.instance().hset(self.__redis_key, key, val)
+ self.add_to_index()
+ self.initial_state = self.state
+ return True
+
+ @absorb_connection_error
+ def destroy(self):
+ """deletes all related records from datastore."""
+ logging.info("Destroying datamodel for %s %s",
+ self.__class__.__name__, self.identifier)
+ Redis.instance().delete(self.__redis_key)
+ self.remove_from_index()
+ return True
+
diff --git a/nova/endpoint/admin.py b/nova/endpoint/admin.py
index 4471a4ef4..ceab7d1f9 100644
--- a/nova/endpoint/admin.py
+++ b/nova/endpoint/admin.py
@@ -24,6 +24,9 @@ Admin API controller, exposed through http via the api worker.
import base64
+from nova.auth import users
+from nova.compute import model
+
def user_dict(user, base64_file=None):
"""Convert the user object to a result dict"""
if user:
@@ -62,28 +65,24 @@ class AdminController(object):
allowing project managers to administer project users.
"""
- def __init__(self, user_manager, host_manager):
- self.user_manager = user_manager
- self.host_manager = host_manager
-
def __str__(self):
return 'AdminController'
@admin_only
def describe_user(self, _context, name, **_kwargs):
"""Returns user data, including access and secret keys."""
- return user_dict(self.user_manager.get_user(name))
+ return user_dict(users.UserManager.instance().get_user(name))
@admin_only
def describe_users(self, _context, **_kwargs):
"""Returns all users - should be changed to deal with a list."""
return {'userSet':
- [user_dict(u) for u in self.user_manager.get_users()] }
+ [user_dict(u) for u in users.UserManager.instance().get_users()] }
@admin_only
def register_user(self, _context, name, **_kwargs):
"""Creates a new user, and returns generated credentials."""
- return user_dict(self.user_manager.create_user(name))
+ return user_dict(users.UserManager.instance().create_user(name))
@admin_only
def deregister_user(self, _context, name, **_kwargs):
@@ -91,7 +90,7 @@ class AdminController(object):
Should throw an exception if the user has instances,
volumes, or buckets remaining.
"""
- self.user_manager.delete_user(name)
+ users.UserManager.instance().delete_user(name)
return True
@@ -103,8 +102,8 @@ class AdminController(object):
"""
if project is None:
project = name
- project = self.user_manager.get_project(project)
- user = self.user_manager.get_user(name)
+ project = users.UserManager.instance().get_project(project)
+ user = users.UserManager.instance().get_user(name)
return user_dict(user, base64.b64encode(project.get_credentials(user)))
@admin_only
@@ -117,9 +116,9 @@ class AdminController(object):
* DHCP servers running
* Iptables / bridges
"""
- return {'hostSet': [host_dict(h) for h in self.host_manager.all()]}
+ return {'hostSet': [host_dict(h) for h in model.Host.all()]}
@admin_only
def describe_host(self, _context, name, **_kwargs):
"""Returns status info for single node."""
- return host_dict(self.host_manager.lookup(name))
+ return host_dict(model.Host.lookup(name))
diff --git a/nova/endpoint/api.py b/nova/endpoint/api.py
index ece487b7b..86a4551ad 100755
--- a/nova/endpoint/api.py
+++ b/nova/endpoint/api.py
@@ -324,7 +324,7 @@ class APIRequestHandler(tornado.web.RequestHandler):
class APIServerApplication(tornado.web.Application):
- def __init__(self, user_manager, controllers):
+ def __init__(self, controllers):
tornado.web.Application.__init__(self, [
(r'/', RootRequestHandler),
(r'/cloudpipe/(.*)', nova.cloudpipe.api.CloudPipeRequestHandler),
@@ -341,5 +341,4 @@ class APIServerApplication(tornado.web.Application):
(r'/2007-01-19/([-A-Za-z0-9/]*)', MetadataRequestHandler),
(r'/1.0/([-A-Za-z0-9/]*)', MetadataRequestHandler),
], pool=multiprocessing.Pool(4))
- self.user_manager = user_manager
self.controllers = controllers
diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py
index d6c164163..9dccc24dc 100644
--- a/nova/endpoint/cloud.py
+++ b/nova/endpoint/cloud.py
@@ -115,9 +115,9 @@ class CloudController(object):
def get_metadata(self, ip):
i = self.get_instance_by_ip(ip)
- mpi = self._get_mpi_data(i['project_id'])
if i is None:
return None
+ mpi = self._get_mpi_data(i['project_id'])
if i['key_name']:
keys = {
'0': {
@@ -170,6 +170,28 @@ class CloudController(object):
'zoneState': 'available'}]}
@rbac.allow('all')
+ def describe_regions(self, context, region_name=None, **kwargs):
+ # TODO(vish): region_name is an array. Support filtering
+ return {'regionInfo': [{'regionName': 'nova',
+ 'regionUrl': FLAGS.ec2_url}]}
+
+ @rbac.allow('all')
+ def describe_snapshots(self,
+ context,
+ snapshot_id=None,
+ owner=None,
+ restorable_by=None,
+ **kwargs):
+ return {'snapshotSet': [{'snapshotId': 'fixme',
+ 'volumeId': 'fixme',
+ 'status': 'fixme',
+ 'startTime': 'fixme',
+ 'progress': 'fixme',
+ 'ownerId': 'fixme',
+ 'volumeSize': 0,
+ 'description': 'fixme'}]}
+
+ @rbac.allow('all')
def describe_key_pairs(self, context, key_name=None, **kwargs):
key_pairs = context.user.get_key_pairs()
if not key_name is None:
@@ -178,7 +200,8 @@ class CloudController(object):
result = []
for key_pair in key_pairs:
# filter out the vpn keys
- if context.user.is_admin() or not key_pair.name.endswith('-key'):
+ suffix = FLAGS.vpn_key_suffix
+ if context.user.is_admin() or not key_pair.name.endswith(suffix):
result.append({
'keyName': key_pair.name,
'keyFingerprint': key_pair.fingerprint,
@@ -380,15 +403,17 @@ class CloudController(object):
def _format_instances(self, context, reservation_id = None):
reservations = {}
- for instance in self.instdir.all:
+ if context.user.is_admin():
+ instgenerator = self.instdir.all
+ else:
+ instgenerator = self.instdir.by_project(context.project.id)
+ for instance in instgenerator:
res_id = instance.get('reservation_id', 'Unknown')
if reservation_id != None and reservation_id != res_id:
continue
if not context.user.is_admin():
if instance['image_id'] == FLAGS.vpn_image_id:
continue
- if context.project.id != instance['project_id']:
- continue
i = {}
i['instance_id'] = instance.get('instance_id', None)
i['image_id'] = instance.get('image_id', None)
@@ -475,6 +500,14 @@ class CloudController(object):
# TODO - Strip the IP from the instance
return defer.succeed({'disassociateResponse': ["Address disassociated."]})
+ def release_ip(self, context, private_ip, **kwargs):
+ self.network.release_ip(private_ip)
+ return defer.succeed({'releaseResponse': ["Address released."]})
+
+ def lease_ip(self, context, private_ip, **kwargs):
+ self.network.lease_ip(private_ip)
+ return defer.succeed({'leaseResponse': ["Address leased."]})
+
@rbac.allow('projectmanager', 'sysadmin')
def run_instances(self, context, **kwargs):
# make sure user can access the image
@@ -493,11 +526,20 @@ class CloudController(object):
key_data = key_pair.public_key
# TODO: Get the real security group of launch in here
security_group = "default"
- bridge_name = network.BridgedNetwork.get_network_for_project(context.user.id, context.project.id, security_group)['bridge_name']
+ if FLAGS.simple_network:
+ bridge_name = FLAGS.simple_network_bridge
+ else:
+ net = network.BridgedNetwork.get_network_for_project(
+ context.user.id, context.project.id, security_group)
+ bridge_name = net['bridge_name']
for num in range(int(kwargs['max_count'])):
inst = self.instdir.new()
# TODO(ja): add ari, aki
inst['image_id'] = kwargs['image_id']
+ if 'kernel_id' in kwargs:
+ inst['kernel_id'] = kwargs['kernel_id']
+ if 'ramdisk_id' in kwargs:
+ inst['ramdisk_id'] = kwargs['ramdisk_id']
inst['user_data'] = kwargs.get('user_data', '')
inst['instance_type'] = kwargs.get('instance_type', 'm1.small')
inst['reservation_id'] = reservation_id
@@ -509,12 +551,19 @@ class CloudController(object):
inst['mac_address'] = utils.generate_mac()
inst['ami_launch_index'] = num
inst['bridge_name'] = bridge_name
- if inst['image_id'] == FLAGS.vpn_image_id:
- address = network.allocate_vpn_ip(
- inst['user_id'], inst['project_id'], mac=inst['mac_address'])
+ if FLAGS.simple_network:
+ address = network.allocate_simple_ip()
else:
- address = network.allocate_ip(
- inst['user_id'], inst['project_id'], mac=inst['mac_address'])
+ if inst['image_id'] == FLAGS.vpn_image_id:
+ address = network.allocate_vpn_ip(
+ inst['user_id'],
+ inst['project_id'],
+ mac=inst['mac_address'])
+ else:
+ address = network.allocate_ip(
+ inst['user_id'],
+ inst['project_id'],
+ mac=inst['mac_address'])
inst['private_dns_name'] = str(address)
# TODO: allocate expresses on the router node
inst.save()
@@ -544,10 +593,13 @@ class CloudController(object):
pass
if instance.get('private_dns_name', None):
logging.debug("Deallocating address %s" % instance.get('private_dns_name', None))
- try:
- self.network.deallocate_ip(instance.get('private_dns_name', None))
- except Exception, _err:
- pass
+ if FLAGS.simple_network:
+ network.deallocate_simple_ip(instance.get('private_dns_name', None))
+ else:
+ try:
+ self.network.deallocate_ip(instance.get('private_dns_name', None))
+ except Exception, _err:
+ pass
if instance.get('node_name', 'unassigned') != 'unassigned': #It's also internal default
rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
{"method": "terminate_instance",
@@ -609,9 +661,8 @@ class CloudController(object):
result = { 'image_id': image_id, 'launchPermission': [] }
if image['isPublic']:
result['launchPermission'].append({ 'group': 'all' })
-
return defer.succeed(result)
-
+
@rbac.allow('projectmanager', 'sysadmin')
def modify_image_attribute(self, context, image_id, attribute, operation_type, **kwargs):
# TODO(devcamcar): Support users and groups other than 'all'.
diff --git a/nova/flags.py b/nova/flags.py
index bf7b6e3a3..985f9ba04 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -74,6 +74,9 @@ DEFINE_string('default_instance_type',
'default instance type to use, testing only')
DEFINE_string('vpn_image_id', 'ami-CLOUDPIPE', 'AMI for cloudpipe vpn server')
+DEFINE_string('vpn_key_suffix',
+ '-key',
+ 'Suffix to add to project name for vpn key')
# UNUSED
DEFINE_string('node_availability_zone',
diff --git a/nova/rpc.py b/nova/rpc.py
index 54843973a..b0f6ef7f3 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -63,6 +63,10 @@ class Connection(connection.BrokerConnection):
cls._instance = cls(**params)
return cls._instance
+ @classmethod
+ def recreate(cls):
+ del cls._instance
+ return cls.instance()
class Consumer(messaging.Consumer):
# TODO(termie): it would be nice to give these some way of automatically
@@ -79,9 +83,22 @@ class Consumer(messaging.Consumer):
attachToTornado = attach_to_tornado
- @exception.wrap_exception
def fetch(self, *args, **kwargs):
- super(Consumer, self).fetch(*args, **kwargs)
+ # TODO(vish): the logic for failed connections and logging should be
+ # refactored into some sort of connection manager object
+ try:
+ if getattr(self, 'failed_connection', False):
+ # attempt to reconnect
+ self.conn = Connection.recreate()
+ self.backend = self.conn.create_backend()
+ super(Consumer, self).fetch(*args, **kwargs)
+ if getattr(self, 'failed_connection', False):
+ logging.error("Reconnected to queue")
+ self.failed_connection = False
+ except Exception, ex:
+ if not getattr(self, 'failed_connection', False):
+ logging.exception("Failed to fetch message from queue")
+ self.failed_connection = True
def attach_to_twisted(self):
loop = task.LoopingCall(self.fetch, enable_callbacks=True)
@@ -115,9 +132,10 @@ class AdapterConsumer(TopicConsumer):
args = message_data.get('args', {})
message.ack()
if not method:
- # vish: we may not want to ack here, but that means that bad messages
- # stay in the queue indefinitely, so for now we just log the
- # message and send an error string back to the caller
+ # NOTE(vish): we may not want to ack here, but that means that bad
+ # messages stay in the queue indefinitely, so for now
+ # we just log the message and send an error string
+ # back to the caller
_log.warn('no method for message: %s' % (message_data))
msg_reply(msg_id, 'No method for message: %s' % message_data)
return
diff --git a/nova/tests/api_unittest.py b/nova/tests/api_unittest.py
index a796ab41e..d82089e6f 100644
--- a/nova/tests/api_unittest.py
+++ b/nova/tests/api_unittest.py
@@ -159,7 +159,7 @@ class ApiEc2TestCase(test.BaseTestCase):
self.host = '127.0.0.1'
- self.app = api.APIServerApplication(self.users, {'Cloud': self.cloud})
+ self.app = api.APIServerApplication({'Cloud': self.cloud})
self.ec2 = boto.connect_ec2(
aws_access_key_id='fake',
aws_secret_access_key='fake',
diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py
index f215c0b3f..bccaacfa7 100644
--- a/nova/tests/network_unittest.py
+++ b/nova/tests/network_unittest.py
@@ -18,6 +18,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import os
import logging
import unittest
@@ -26,6 +27,8 @@ import IPy
from nova import flags
from nova import test
+from nova import exception
+from nova.compute.exception import NoMoreAddresses
from nova.compute import network
from nova.auth import users
from nova import utils
@@ -40,6 +43,7 @@ class NetworkTestCase(test.TrialTestCase):
network_size=32)
logging.getLogger().setLevel(logging.DEBUG)
self.manager = users.UserManager.instance()
+ self.dnsmasq = FakeDNSMasq()
try:
self.manager.create_user('netuser', 'netuser', 'netuser')
except: pass
@@ -66,59 +70,128 @@ class NetworkTestCase(test.TrialTestCase):
address = network.allocate_ip(
"netuser", "project0", utils.generate_mac())
logging.debug("Was allocated %s" % (address))
- self.assertEqual(True, address in self._get_project_addresses("project0"))
+ net = network.get_project_network("project0", "default")
+ self.assertEqual(True, is_in_project(address, "project0"))
+ mac = utils.generate_mac()
+ hostname = "test-host"
+ self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name)
rv = network.deallocate_ip(address)
- self.assertEqual(False, address in self._get_project_addresses("project0"))
+
+ # Doesn't go away until it's dhcp released
+ self.assertEqual(True, is_in_project(address, "project0"))
+
+ self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
+ self.assertEqual(False, is_in_project(address, "project0"))
def test_range_allocation(self):
+ mac = utils.generate_mac()
+ secondmac = utils.generate_mac()
+ hostname = "test-host"
address = network.allocate_ip(
- "netuser", "project0", utils.generate_mac())
+ "netuser", "project0", mac)
secondaddress = network.allocate_ip(
- "netuser", "project1", utils.generate_mac())
- self.assertEqual(True,
- address in self._get_project_addresses("project0"))
- self.assertEqual(True,
- secondaddress in self._get_project_addresses("project1"))
- self.assertEqual(False, address in self._get_project_addresses("project1"))
+ "netuser", "project1", secondmac)
+ net = network.get_project_network("project0", "default")
+ secondnet = network.get_project_network("project1", "default")
+
+ self.assertEqual(True, is_in_project(address, "project0"))
+ self.assertEqual(True, is_in_project(secondaddress, "project1"))
+ self.assertEqual(False, is_in_project(address, "project1"))
+
+ # Addresses are allocated before they're issued
+ self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name)
+ self.dnsmasq.issue_ip(secondmac, secondaddress,
+ hostname, secondnet.bridge_name)
+
rv = network.deallocate_ip(address)
- self.assertEqual(False, address in self._get_project_addresses("project0"))
+ self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
+ self.assertEqual(False, is_in_project(address, "project0"))
+
+ # First address release shouldn't affect the second
+ self.assertEqual(True, is_in_project(secondaddress, "project1"))
+
rv = network.deallocate_ip(secondaddress)
- self.assertEqual(False,
- secondaddress in self._get_project_addresses("project1"))
+ self.dnsmasq.release_ip(secondmac, secondaddress,
+ hostname, secondnet.bridge_name)
+ self.assertEqual(False, is_in_project(secondaddress, "project1"))
def test_subnet_edge(self):
secondaddress = network.allocate_ip("netuser", "project0",
utils.generate_mac())
+ hostname = "toomany-hosts"
for project in range(1,5):
project_id = "project%s" % (project)
+ mac = utils.generate_mac()
+ mac2 = utils.generate_mac()
+ mac3 = utils.generate_mac()
address = network.allocate_ip(
- "netuser", project_id, utils.generate_mac())
+ "netuser", project_id, mac)
address2 = network.allocate_ip(
- "netuser", project_id, utils.generate_mac())
+ "netuser", project_id, mac2)
address3 = network.allocate_ip(
- "netuser", project_id, utils.generate_mac())
- self.assertEqual(False,
- address in self._get_project_addresses("project0"))
- self.assertEqual(False,
- address2 in self._get_project_addresses("project0"))
- self.assertEqual(False,
- address3 in self._get_project_addresses("project0"))
+ "netuser", project_id, mac3)
+ self.assertEqual(False, is_in_project(address, "project0"))
+ self.assertEqual(False, is_in_project(address2, "project0"))
+ self.assertEqual(False, is_in_project(address3, "project0"))
rv = network.deallocate_ip(address)
rv = network.deallocate_ip(address2)
rv = network.deallocate_ip(address3)
+ net = network.get_project_network(project_id, "default")
+ self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
+ self.dnsmasq.release_ip(mac2, address2, hostname, net.bridge_name)
+ self.dnsmasq.release_ip(mac3, address3, hostname, net.bridge_name)
+ net = network.get_project_network("project0", "default")
rv = network.deallocate_ip(secondaddress)
+ self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
- def test_too_many_projects(self):
- for i in range(0, 30):
- name = 'toomany-project%s' % i
- self.manager.create_project(name, 'netuser', name)
- address = network.allocate_ip(
- "netuser", name, utils.generate_mac())
- rv = network.deallocate_ip(address)
- self.manager.delete_project(name)
+ def test_release_before_deallocate(self):
+ pass
+
+ def test_deallocate_before_issued(self):
+ pass
+
+ def test_too_many_addresses(self):
+ """
+ Network size is 32, there are 5 addresses reserved for VPN.
+ So we should get 23 usable addresses
+ """
+ net = network.get_project_network("project0", "default")
+ hostname = "toomany-hosts"
+ macs = {}
+ addresses = {}
+ for i in range(0, 22):
+ macs[i] = utils.generate_mac()
+ addresses[i] = network.allocate_ip("netuser", "project0", macs[i])
+ self.dnsmasq.issue_ip(macs[i], addresses[i], hostname, net.bridge_name)
+
+ self.assertRaises(NoMoreAddresses, network.allocate_ip, "netuser", "project0", utils.generate_mac())
+
+ for i in range(0, 22):
+ rv = network.deallocate_ip(addresses[i])
+ self.dnsmasq.release_ip(macs[i], addresses[i], hostname, net.bridge_name)
+
+def is_in_project(address, project_id):
+ return address in network.get_project_network(project_id).list_addresses()
+
+def _get_project_addresses(project_id):
+ project_addresses = []
+ for addr in network.get_project_network(project_id).list_addresses():
+ project_addresses.append(addr)
+ return project_addresses
+
+def binpath(script):
+ return os.path.abspath(os.path.join(__file__, "../../../bin", script))
+
+class FakeDNSMasq(object):
+ def issue_ip(self, mac, ip, hostname, interface):
+ cmd = "%s add %s %s %s" % (binpath('dhcpleasor.py'), mac, ip, hostname)
+ env = {'DNSMASQ_INTERFACE': interface, 'TESTING' : '1'}
+ (out, err) = utils.execute(cmd, addl_env=env)
+ logging.debug("ISSUE_IP: %s, %s " % (out, err))
+
+ def release_ip(self, mac, ip, hostname, interface):
+ cmd = "%s del %s %s %s" % (binpath('dhcpleasor.py'), mac, ip, hostname)
+ env = {'DNSMASQ_INTERFACE': interface, 'TESTING' : '1'}
+ (out, err) = utils.execute(cmd, addl_env=env)
+ logging.debug("RELEASE_IP: %s, %s " % (out, err))
- def _get_project_addresses(self, project_id):
- project_addresses = []
- for addr in network.get_project_network(project_id).list_addresses():
- project_addresses.append(addr)
- return project_addresses
diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py
index 89c1d59c5..cee567c8b 100644
--- a/nova/tests/objectstore_unittest.py
+++ b/nova/tests/objectstore_unittest.py
@@ -28,7 +28,6 @@ import tempfile
from nova import vendor
from nova import flags
-from nova import rpc
from nova import objectstore
from nova import test
from nova.auth import users
@@ -57,7 +56,6 @@ class ObjectStoreTestCase(test.BaseTestCase):
buckets_path=os.path.join(oss_tempdir, 'buckets'),
images_path=os.path.join(oss_tempdir, 'images'),
ca_path=os.path.join(os.path.dirname(__file__), 'CA'))
- self.conn = rpc.Connection.instance()
logging.getLogger().setLevel(logging.DEBUG)
self.um = users.UserManager.instance()
diff --git a/nova/tests/storage_unittest.py b/nova/tests/storage_unittest.py
index 73215c5ca..36fcc6f19 100644
--- a/nova/tests/storage_unittest.py
+++ b/nova/tests/storage_unittest.py
@@ -38,10 +38,7 @@ class StorageTestCase(test.TrialTestCase):
self.mystorage = None
self.flags(fake_libvirt=True,
fake_storage=True)
- if FLAGS.fake_storage:
- self.mystorage = storage.FakeBlockStore()
- else:
- self.mystorage = storage.BlockStore()
+ self.mystorage = storage.BlockStore()
def test_run_create_volume(self):
vol_size = '0'
@@ -65,6 +62,18 @@ class StorageTestCase(test.TrialTestCase):
self.mystorage.create_volume,
vol_size, user_id, project_id)
+ def test_too_many_volumes(self):
+ vol_size = '1'
+ user_id = 'fake'
+ project_id = 'fake'
+ num_shelves = FLAGS.last_shelf_id - FLAGS.first_shelf_id + 1
+ total_slots = FLAGS.slots_per_shelf * num_shelves
+ for i in xrange(total_slots):
+ self.mystorage.create_volume(vol_size, user_id, project_id)
+ self.assertRaises(storage.NoMoreVolumes,
+ self.mystorage.create_volume,
+ vol_size, user_id, project_id)
+
def test_run_attach_detach_volume(self):
# Create one volume and one node to test with
instance_id = "storage-test"
diff --git a/nova/tests/validator_unittest.py b/nova/tests/validator_unittest.py
index 6ebce0994..eea1beccb 100644
--- a/nova/tests/validator_unittest.py
+++ b/nova/tests/validator_unittest.py
@@ -42,5 +42,4 @@ class ValidationTestCase(test.TrialTestCase):
@validate.typetest(instanceid=str, size=int, number_of_instances=int)
def type_case(instanceid, size, number_of_instances):
- print ("type_case was successfully executed")
- return True \ No newline at end of file
+ return True
diff --git a/nova/utils.py b/nova/utils.py
index 094de5d74..2982b5480 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -22,13 +22,13 @@
System-level utilities and helper functions.
"""
+import inspect
import logging
+import os
+import random
+import subprocess
import socket
import sys
-import os.path
-import inspect
-import subprocess
-import random
from datetime import datetime
from nova import flags
@@ -47,11 +47,12 @@ def fetchfile(url, target):
# fp.close()
execute("curl %s -o %s" % (url, target))
-
-def execute(cmd, input=None):
- #logging.debug("Running %s" % (cmd))
+def execute(cmd, input=None, addl_env=None):
+ env = os.environ.copy()
+ if addl_env:
+ env.update(addl_env)
obj = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
result = None
if input != None:
result = obj.communicate(input)
diff --git a/nova/volume/storage.py b/nova/volume/storage.py
index 273a6afd1..288ab76ba 100644
--- a/nova/volume/storage.py
+++ b/nova/volume/storage.py
@@ -26,9 +26,10 @@ Currently uses Ata-over-Ethernet.
import glob
import logging
-import random
+import os
import socket
-import subprocess
+import shutil
+import tempfile
import time
from nova import vendor
@@ -38,10 +39,8 @@ from twisted.internet import defer
from nova import datastore
from nova import exception
from nova import flags
-from nova import rpc
from nova import utils
from nova import validate
-from nova.compute import model
FLAGS = flags.FLAGS
@@ -54,16 +53,27 @@ flags.DEFINE_string('aoe_eth_dev', 'eth0',
flags.DEFINE_string('storage_name',
socket.gethostname(),
'name of this node')
-flags.DEFINE_integer('shelf_id',
- utils.last_octet(utils.get_my_ip()),
- 'AoE shelf_id for this node')
+flags.DEFINE_integer('first_shelf_id',
+ utils.last_octet(utils.get_my_ip()) * 10,
+ 'AoE starting shelf_id for this node')
+flags.DEFINE_integer('last_shelf_id',
+ utils.last_octet(utils.get_my_ip()) * 10 + 9,
+ 'AoE starting shelf_id for this node')
+flags.DEFINE_string('aoe_export_dir',
+ '/var/lib/vblade-persist/vblades',
+ 'AoE directory where exports are created')
+flags.DEFINE_integer('slots_per_shelf',
+ 16,
+ 'Number of AoE slots per shelf')
flags.DEFINE_string('storage_availability_zone',
'nova',
'availability zone of this node')
flags.DEFINE_boolean('fake_storage', False,
'Should we make real storage volumes to attach?')
-# TODO(joshua) Index of volumes by project
+
+class NoMoreVolumes(exception.Error):
+ pass
def get_volume(volume_id):
""" Returns a redis-backed volume object """
@@ -84,9 +94,14 @@ class BlockStore(object):
super(BlockStore, self).__init__()
self.volume_class = Volume
if FLAGS.fake_storage:
+ FLAGS.aoe_export_dir = tempfile.mkdtemp()
self.volume_class = FakeVolume
self._init_volume_group()
+ def __del__(self):
+ if FLAGS.fake_storage:
+ shutil.rmtree(FLAGS.aoe_export_dir)
+
def report_state(self):
#TODO: aggregate the state of the system
pass
@@ -140,19 +155,7 @@ class BlockStore(object):
utils.runthis("PVCreate returned: %s", "sudo pvcreate %s" % (FLAGS.storage_dev))
utils.runthis("VGCreate returned: %s", "sudo vgcreate %s %s" % (FLAGS.volume_group, FLAGS.storage_dev))
-
-class FakeBlockStore(BlockStore):
- def __init__(self):
- super(FakeBlockStore, self).__init__()
-
- def _init_volume_group(self):
- pass
-
- def _restart_exports(self):
- pass
-
-
-class Volume(model.BasicModel):
+class Volume(datastore.BasicModel):
def __init__(self, volume_id=None):
self.volume_id = volume_id
@@ -182,7 +185,7 @@ class Volume(model.BasicModel):
vol['delete_on_termination'] = 'False'
vol.save()
vol.create_lv()
- vol.setup_export()
+ vol._setup_export()
# TODO(joshua) - We need to trigger a fanout message for aoe-discover on all the nodes
# TODO(joshua
vol['status'] = "available"
@@ -195,7 +198,7 @@ class Volume(model.BasicModel):
self['mountpoint'] = mountpoint
self['status'] = "in-use"
self['attach_status'] = "attaching"
- self['attach_time'] = utils.utctime()
+ self['attach_time'] = utils.isotime()
self['delete_on_termination'] = 'False'
self.save()
@@ -234,15 +237,22 @@ class Volume(model.BasicModel):
def _delete_lv(self):
utils.runthis("Removing LV: %s", "sudo lvremove -f %s/%s" % (FLAGS.volume_group, self['volume_id']))
- def setup_export(self):
+ def _setup_export(self):
(shelf_id, blade_id) = get_next_aoe_numbers()
self['aoe_device'] = "e%s.%s" % (shelf_id, blade_id)
self['shelf_id'] = shelf_id
self['blade_id'] = blade_id
self.save()
+ self._exec_export()
+
+ def _exec_export(self):
utils.runthis("Creating AOE export: %s",
"sudo vblade-persist setup %s %s %s /dev/%s/%s" %
- (shelf_id, blade_id, FLAGS.aoe_eth_dev, FLAGS.volume_group, self['volume_id']))
+ (self['shelf_id'],
+ self['blade_id'],
+ FLAGS.aoe_eth_dev,
+ FLAGS.volume_group,
+ self['volume_id']))
def _remove_export(self):
utils.runthis("Stopped AOE export: %s", "sudo vblade-persist stop %s %s" % (self['shelf_id'], self['blade_id']))
@@ -253,13 +263,10 @@ class FakeVolume(Volume):
def create_lv(self):
pass
- def setup_export(self):
- # TODO(???): This may not be good enough?
- blade_id = ''.join([random.choice('0123456789') for x in xrange(3)])
- self['shelf_id'] = FLAGS.shelf_id
- self['blade_id'] = blade_id
- self['aoe_device'] = "e%s.%s" % (FLAGS.shelf_id, blade_id)
- self.save()
+ def _exec_export(self):
+ fname = os.path.join(FLAGS.aoe_export_dir, self['aoe_device'])
+ f = file(fname, "w")
+ f.close()
def _remove_export(self):
pass
@@ -268,9 +275,13 @@ class FakeVolume(Volume):
pass
def get_next_aoe_numbers():
- aoes = glob.glob("/var/lib/vblade-persist/vblades/e*")
- aoes.extend(['e0.0'])
- blade_id = int(max([int(a.split('.')[1]) for a in aoes])) + 1
- logging.debug("Next blade_id is %s" % (blade_id))
- shelf_id = FLAGS.shelf_id
- return (shelf_id, blade_id)
+ for shelf_id in xrange(FLAGS.first_shelf_id, FLAGS.last_shelf_id + 1):
+ aoes = glob.glob("%s/e%s.*" % (FLAGS.aoe_export_dir, shelf_id))
+ if not aoes:
+ blade_id = 0
+ else:
+ blade_id = int(max([int(a.rpartition('.')[2]) for a in aoes])) + 1
+ if blade_id < FLAGS.slots_per_shelf:
+ logging.debug("Next shelf.blade is %s.%s", shelf_id, blade_id)
+ return (shelf_id, blade_id)
+ raise NoMoreVolumes()
diff --git a/run_tests.py b/run_tests.py
index 91e886c76..d6f68f830 100644
--- a/run_tests.py
+++ b/run_tests.py
@@ -46,13 +46,13 @@ import sys
from nova import vendor
from twisted.scripts import trial as trial_script
+from nova import datastore
from nova import flags
from nova import twistd
from nova.tests.access_unittest import *
from nova.tests.api_unittest import *
from nova.tests.cloud_unittest import *
-from nova.tests.keeper_unittest import *
from nova.tests.model_unittest import *
from nova.tests.network_unittest import *
from nova.tests.node_unittest import *
@@ -60,7 +60,6 @@ from nova.tests.objectstore_unittest import *
from nova.tests.process_unittest import *
from nova.tests.storage_unittest import *
from nova.tests.users_unittest import *
-from nova.tests.datastore_unittest import *
from nova.tests.validator_unittest import *
diff --git a/smoketests/flags.py b/smoketests/flags.py
index 6c2131d70..f239c5f40 100644
--- a/smoketests/flags.py
+++ b/smoketests/flags.py
@@ -1,7 +1,7 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
+# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2010 Anso Labs, LLC
@@ -23,8 +23,6 @@ Package-level global flags are defined here, the rest are defined
where they're used.
"""
-import socket
-
from nova import vendor
from gflags import *
@@ -37,9 +35,13 @@ DEFINE_bool = DEFINE_bool
# __GLOBAL FLAGS ONLY__
# Define any app-specific flags in their own files, docs at:
# http://code.google.com/p/python-gflags/source/browse/trunk/gflags.py#39
-
-DEFINE_bool('verbose', False, 'show debug output')
DEFINE_string('admin_access_key', 'admin', 'Access key for admin user')
DEFINE_string('admin_secret_key', 'admin', 'Secret key for admin user')
DEFINE_string('clc_ip', '127.0.0.1', 'IP of cloud controller API')
-DEFINE_string('vpn_image_id', 'ami-CLOUDPIPE', 'AMI for cloudpipe vpn server')
+DEFINE_string('bundle_kernel', 'openwrt-x86-vmlinuz',
+ 'Local kernel file to use for bundling tests')
+DEFINE_string('bundle_image', 'openwrt-x86-ext2.image',
+ 'Local image file to use for bundling tests')
+#DEFINE_string('vpn_image_id', 'ami-CLOUDPIPE',
+# 'AMI for cloudpipe vpn server')
+
diff --git a/smoketests/novatestcase.py b/smoketests/novatestcase.py
index 869e31946..c19ef781b 100644
--- a/smoketests/novatestcase.py
+++ b/smoketests/novatestcase.py
@@ -24,17 +24,21 @@ import random
import sys
import unittest
-from nova.adminclient import NovaAdminClient
-from nova.smoketests import flags
-
from nova import vendor
import paramiko
-nova_admin = NovaAdminClient(access_key=flags.admin_access_key, secret_key=flags.admin_secret_key, clc_ip=host)
+from nova import adminclient
+from smoketests import flags
+
+FLAGS = flags.FLAGS
+
class NovaTestCase(unittest.TestCase):
def setUp(self):
- pass
+ self.nova_admin = adminclient.NovaAdminClient(
+ access_key=FLAGS.admin_access_key,
+ secret_key=FLAGS.admin_secret_key,
+ clc_ip=FLAGS.clc_ip)
def tearDown(self):
pass
@@ -55,22 +59,22 @@ class NovaTestCase(unittest.TestCase):
@property
def admin(self):
- return nova_admin.connection_for('admin')
+ return self.nova_admin.connection_for('admin')
def connection_for(self, username):
- return nova_admin.connection_for(username)
+ return self.nova_admin.connection_for(username)
def create_user(self, username):
- return nova_admin.create_user(username)
+ return self.nova_admin.create_user(username)
def get_user(self, username):
- return nova_admin.get_user(username)
+ return self.nova_admin.get_user(username)
def delete_user(self, username):
- return nova_admin.delete_user(username)
+ return self.nova_admin.delete_user(username)
def get_signed_zip(self, username):
- return nova_admin.get_zip(username)
+ return self.nova_admin.get_zip(username)
def create_key_pair(self, conn, key_name):
try:
diff --git a/smoketests/smoketest.py b/smoketests/smoketest.py
index f7794a0ff..b752d814a 100644
--- a/smoketests/smoketest.py
+++ b/smoketests/smoketest.py
@@ -27,20 +27,19 @@ import time
import unittest
import zipfile
-from nova.smoketests import flags
-from nova.smoketests.novatestcase import NovaTestCase
-
from nova import vendor
import paramiko
+from smoketests import flags
+from smoketests import novatestcase
+
+SUITE_NAMES = '[user, image, security, public_network, volume]'
+
FLAGS = flags.FLAGS
-flags.DEFINE_string('bundle_kernel', 'openwrt-x86-vmlinuz',
- 'Local kernel file to use for bundling tests')
-flags.DEFINE_string('bundle_image', 'openwrt-x86-ext2.image',
- 'Local image file to use for bundling tests')
+flags.DEFINE_string('suite', None, 'Specific test suite to run ' + SUITE_NAMES)
# TODO(devamcar): Use random tempfile
-ZIP_FILENAME = '/tmp/euca-me-x509.zip'
+ZIP_FILENAME = '/tmp/nova-me-x509.zip'
data = {}
@@ -50,7 +49,7 @@ test_bucket = '%s_bucket' % test_prefix
test_key = '%s_key' % test_prefix
# Test admin credentials and user creation
-class UserTests(NovaTestCase):
+class UserTests(novatestcase.NovaTestCase):
def test_001_admin_can_connect(self):
conn = self.connection_for('admin')
self.assert_(conn)
@@ -81,30 +80,31 @@ class UserTests(NovaTestCase):
pass
# Test image bundling, registration, and launching
-class ImageTests(NovaTestCase):
+class ImageTests(novatestcase.NovaTestCase):
def test_000_setUp(self):
self.create_user(test_username)
def test_001_admin_can_bundle_image(self):
- self.assertTrue(self.bundle_image(IMAGE_FILENAME))
+ self.assertTrue(self.bundle_image(FLAGS.bundle_image))
def test_002_admin_can_upload_image(self):
- self.assertTrue(self.upload_image(test_bucket, IMAGE_FILENAME))
+ self.assertTrue(self.upload_image(test_bucket, FLAGS.bundle_image))
def test_003_admin_can_register_image(self):
- image_id = self.register_image(test_bucket, IMAGE_FILENAME)
+ image_id = self.register_image(test_bucket, FLAGS.bundle_image)
self.assert_(image_id is not None)
data['image_id'] = image_id
def test_004_admin_can_bundle_kernel(self):
- self.assertTrue(self.bundle_image(flags.bundle_kernel, kernel=True))
+ self.assertTrue(self.bundle_image(FLAGS.bundle_kernel, kernel=True))
def test_005_admin_can_upload_kernel(self):
- self.assertTrue(self.upload_image(test_bucket, flags.bundle_kernel))
+ self.assertTrue(self.upload_image(test_bucket, FLAGS.bundle_kernel))
def test_006_admin_can_register_kernel(self):
- # FIXME: registration should verify that bucket/manifest exists before returning successfully!
- kernel_id = self.register_image(test_bucket, flags.bundle_kernel)
+ # FIXME(devcamcar): registration should verify that bucket/manifest
+ # exists before returning successfully.
+ kernel_id = self.register_image(test_bucket, FLAGS.bundle_kernel)
self.assert_(kernel_id is not None)
data['kernel_id'] = kernel_id
@@ -129,7 +129,8 @@ class ImageTests(NovaTestCase):
self.assert_(kernel.type == 'kernel')
def test_008_admin_can_describe_image_attribute(self):
- attrs = self.admin.get_image_attribute(data['image_id'], 'launchPermission')
+ attrs = self.admin.get_image_attribute(data['image_id'],
+ 'launchPermission')
self.assert_(attrs.name, 'launch_permission')
def test_009_me_cannot_see_non_public_images(self):
@@ -155,9 +156,10 @@ class ImageTests(NovaTestCase):
pass
def test_012_me_can_see_launch_permission(self):
- attrs = self.admin.get_image_attribute(data['image_id'], 'launchPermission')
- self.assert(_attrs.name, 'launch_permission')
- self.assert(_attrs.groups[0], 'all')
+ attrs = self.admin.get_image_attribute(data['image_id'],
+ 'launchPermission')
+ self.assert_(attrs.name, 'launch_permission')
+ self.assert_(attrs.groups[0], 'all')
# FIXME: add tests that user can launch image
@@ -173,7 +175,8 @@ class ImageTests(NovaTestCase):
# def test_015_user_can_terminate(self):
# conn = self.connection_for(test_username)
-# terminated = conn.terminate_instances(instance_ids=[data['my_instance_id']])
+# terminated = conn.terminate_instances(
+# instance_ids=[data['my_instance_id']])
# self.assertEqual(len(terminated), 1)
def test_016_admin_can_deregister_kernel(self):
@@ -191,7 +194,7 @@ class ImageTests(NovaTestCase):
# Test key pairs and security groups
-class SecurityTests(NovaTestCase):
+class SecurityTests(novatestcase.NovaTestCase):
def test_000_setUp(self):
self.create_user(test_username + '_me')
self.create_user(test_username + '_you')
@@ -234,13 +237,15 @@ class SecurityTests(NovaTestCase):
def test_005_can_ping_private_ip(self):
for x in xrange(120):
# ping waits for 1 second
- status, output = commands.getstatusoutput("ping -c1 -w1 %s" % data['my_private_ip'])
+ status, output = commands.getstatusoutput(
+ 'ping -c1 -w1 %s' % data['my_private_ip'])
if status == 0:
break
else:
- self.assert_("could not ping instance")
+ self.assert_('could not ping instance')
#def test_005_me_cannot_ssh_when_unauthorized(self):
- # self.assertRaises(paramiko.SSHException, self.connect_ssh, data['my_private_ip'], 'mykey')
+ # self.assertRaises(paramiko.SSHException, self.connect_ssh,
+ # data['my_private_ip'], 'mykey')
#def test_006_me_can_authorize_ssh(self):
# conn = self.connection_for(test_username + '_me')
@@ -271,12 +276,13 @@ class SecurityTests(NovaTestCase):
# )
#def test_009_you_cannot_ping_my_instance(self):
- # TODO: should ping my_private_ip from with an instance started by "you"
+ # TODO: should ping my_private_ip from with an instance started by you.
#self.assertFalse(self.can_ping(data['my_private_ip']))
def test_010_you_cannot_ssh_to_my_instance(self):
try:
- conn = self.connect_ssh(data['my_private_ip'], test_key + 'yourkey')
+ conn = self.connect_ssh(data['my_private_ip'],
+ test_key + 'yourkey')
conn.close()
except paramiko.SSHException:
pass
@@ -313,7 +319,7 @@ class SecurityTests(NovaTestCase):
# print output
# Testing rebundling
-class RebundlingTests(NovaTestCase):
+class RebundlingTests(novatestcase.NovaTestCase):
def test_000_setUp(self):
self.create_user('me')
self.create_user('you')
@@ -323,25 +329,29 @@ class RebundlingTests(NovaTestCase):
def test_001_me_can_download_credentials_within_instance(self):
conn = self.connect_ssh(data['my_private_ip'], 'mykey')
- stdin, stdout = conn.exec_command('python ~/smoketests/install-credentials.py')
+ stdin, stdout = conn.exec_command(
+ 'python ~/smoketests/install-credentials.py')
conn.close()
self.assertEqual(stdout, 'ok')
def test_002_me_can_rebundle_within_instance(self):
conn = self.connect_ssh(data['my_private_ip'], 'mykey')
- stdin, stdout = conn.exec_command('python ~/smoketests/rebundle-instance.py')
+ stdin, stdout = conn.exec_command(
+ 'python ~/smoketests/rebundle-instance.py')
conn.close()
self.assertEqual(stdout, 'ok')
def test_003_me_can_upload_image_within_instance(self):
conn = self.connect_ssh(data['my_private_ip'], 'mykey')
- stdin, stdout = conn.exec_command('python ~/smoketests/upload-bundle.py')
+ stdin, stdout = conn.exec_command(
+ 'python ~/smoketests/upload-bundle.py')
conn.close()
self.assertEqual(stdout, 'ok')
def test_004_me_can_register_image_within_instance(self):
conn = self.connect_ssh(data['my_private_ip'], 'mykey')
- stdin, stdout = conn.exec_command('python ~/smoketests/register-image.py')
+ stdin, stdout = conn.exec_command(
+ 'python ~/smoketests/register-image.py')
conn.close()
if re.matches('ami-{\w+}', stdout):
data['my_image_id'] = stdout.strip()
@@ -356,9 +366,9 @@ class RebundlingTests(NovaTestCase):
def test_006_me_can_make_image_public(self):
conn = self.connection_for(test_username)
conn.modify_image_attribute(image_id=data['my_image_id'],
- operation='add',
- attribute='launchPermission',
- groups='all')
+ operation='add',
+ attribute='launchPermission',
+ groups='all')
def test_007_you_can_see_my_public_image(self):
conn = self.connection_for('you')
@@ -377,7 +387,7 @@ class RebundlingTests(NovaTestCase):
data = {}
# Test elastic IPs
-class ElasticIPTests(NovaTestCase):
+class ElasticIPTests(novatestcase.NovaTestCase):
def test_000_setUp(self):
data['image_id'] = 'ami-tiny'
@@ -386,7 +396,7 @@ class ElasticIPTests(NovaTestCase):
self.create_key_pair(conn, 'mykey')
conn = self.connection_for('admin')
- #data['image_id'] = self.setUp_test_image(IMAGE_FILENAME)
+ #data['image_id'] = self.setUp_test_image(FLAGS.bundle_image)
def test_001_me_can_launch_image_with_keypair(self):
conn = self.connection_for('me')
@@ -423,20 +433,23 @@ class ElasticIPTests(NovaTestCase):
ZONE = 'nova'
DEVICE = 'vdb'
# Test iscsi volumes
-class VolumeTests(NovaTestCase):
+class VolumeTests(novatestcase.NovaTestCase):
def test_000_setUp(self):
self.create_user(test_username)
data['image_id'] = 'ami-tiny' # A7370FE3
conn = self.connection_for(test_username)
self.create_key_pair(conn, test_key)
- reservation = conn.run_instances(data['image_id'], instance_type='m1.tiny', key_name=test_key)
+ reservation = conn.run_instances(data['image_id'],
+ instance_type='m1.tiny',
+ key_name=test_key)
data['instance_id'] = reservation.instances[0].id
data['private_ip'] = reservation.instances[0].private_dns_name
# wait for instance to show up
for x in xrange(120):
# ping waits for 1 second
- status, output = commands.getstatusoutput("ping -c1 -w1 %s" % data['private_ip'])
+ status, output = commands.getstatusoutput(
+ 'ping -c1 -w1 %s' % data['private_ip'])
if status == 0:
break
else:
@@ -462,9 +475,11 @@ class VolumeTests(NovaTestCase):
def test_003_me_can_mount_volume(self):
conn = self.connect_ssh(data['private_ip'], test_key)
- # HACK: the tiny image doesn't create the node properly
- # this will make /dev/vd* if it doesn't exist
- stdin, stdout, stderr = conn.exec_command('grep %s /proc/partitions | `awk \'{print "mknod /dev/"$4" b "$1" "$2}\'`' % DEVICE)
+ # FIXME(devcamcar): the tiny image doesn't create the node properly
+ # this will make /dev/vd* if it doesn't exist
+ stdin, stdout, stderr = conn.exec_command(
+ 'grep %s /proc/partitions |' + \
+ '`awk \'{print "mknod /dev/"$4" b "$1" "$2}\'`' % DEVICE)
commands = []
commands.append('mkdir -p /mnt/vol')
commands.append('mkfs.ext2 /dev/%s' % DEVICE)
@@ -478,8 +493,9 @@ class VolumeTests(NovaTestCase):
def test_004_me_can_write_to_volume(self):
conn = self.connect_ssh(data['private_ip'], test_key)
- # FIXME: This doesn't fail if the volume hasn't been mounted
- stdin, stdout, stderr = conn.exec_command('echo hello > /mnt/vol/test.txt')
+ # FIXME(devcamcar): This doesn't fail if the volume hasn't been mounted
+ stdin, stdout, stderr = conn.exec_command(
+ 'echo hello > /mnt/vol/test.txt')
err = stderr.read()
conn.close()
if len(err) > 0:
@@ -487,7 +503,8 @@ class VolumeTests(NovaTestCase):
def test_005_volume_is_correct_size(self):
conn = self.connect_ssh(data['private_ip'], test_key)
- stdin, stdout, stderr = conn.exec_command("df -h | grep %s | awk {'print $2'}" % DEVICE)
+ stdin, stdout, stderr = conn.exec_command(
+ "df -h | grep %s | awk {'print $2'}" % DEVICE)
out = stdout.read()
conn.close()
if not out.strip() == '1007.9M':
@@ -531,19 +548,21 @@ def build_suites():
'volume': unittest.makeSuite(VolumeTests),
}
-def main(argv=None):
- if len(argv) == 1:
- unittest.main()
- else:
- suites = build_suites()
+def main():
+ argv = FLAGS(sys.argv)
+ suites = build_suites()
+ if FLAGS.suite:
try:
- suite = suites[argv[1]]
+ suite = suites[FLAGS.suite]
except KeyError:
- print >> sys.stderr, 'Available test suites: [user, image, security, public_network, volume]'
- return
+ print >> sys.stderr, 'Available test suites:', SUITE_NAMES
+ return 1
unittest.TextTestRunner(verbosity=2).run(suite)
+ else:
+ for suite in suites.itervalues():
+ unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == "__main__":
- sys.exit(main(sys.argv))
+ sys.exit(main())