summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorjaypipes@gmail.com <>2010-10-27 10:51:37 -0400
committerjaypipes@gmail.com <>2010-10-27 10:51:37 -0400
commit24e19b43af5efe193bf28bed468e85ee57ce76df (patch)
tree76b9f1e5ed0453e10e2e2d1a4784afd56fd0b035
parent198af0ef9e65bc4c2efe74b9d93cf40210eb77bc (diff)
parenteb82a8a7d8220adf31db3afb46849f24924ec973 (diff)
downloadnova-24e19b43af5efe193bf28bed468e85ee57ce76df.tar.gz
nova-24e19b43af5efe193bf28bed468e85ee57ce76df.tar.xz
nova-24e19b43af5efe193bf28bed468e85ee57ce76df.zip
Merge trunk and resolve conflicts
-rw-r--r--Authors21
-rw-r--r--MANIFEST.in2
-rwxr-xr-xbin/nova-api1
-rwxr-xr-xbin/nova-manage47
-rw-r--r--bzrplugins/novalog/__init__.py19
-rw-r--r--doc/source/_templates/layout.html17
-rw-r--r--doc/source/getting.started.rst63
-rw-r--r--doc/source/index.rst22
-rw-r--r--nova/adminclient.py97
-rw-r--r--nova/api/__init__.py15
-rw-r--r--nova/api/cloud.py20
-rw-r--r--nova/api/ec2/__init__.py13
-rw-r--r--nova/api/ec2/admin.py11
-rw-r--r--nova/api/ec2/apirequest.py8
-rw-r--r--nova/api/ec2/cloud.py128
-rw-r--r--nova/api/ec2/images.py6
-rw-r--r--nova/api/ec2/metadatarequesthandler.py7
-rw-r--r--nova/api/openstack/__init__.py41
-rw-r--r--nova/api/openstack/auth.py9
-rw-r--r--nova/api/openstack/backup_schedules.py1
-rw-r--r--nova/api/openstack/faults.py2
-rw-r--r--nova/api/openstack/flavors.py6
-rw-r--r--nova/api/openstack/images.py10
-rw-r--r--nova/api/openstack/ratelimiting/__init__.py11
-rw-r--r--nova/api/openstack/servers.py29
-rw-r--r--nova/api/openstack/sharedipgroups.py4
-rw-r--r--nova/auth/dbdriver.py51
-rw-r--r--nova/auth/fakeldap.py11
-rw-r--r--nova/auth/ldapdriver.py12
-rw-r--r--nova/auth/manager.py2
-rw-r--r--nova/cloudpipe/pipelib.py30
-rw-r--r--nova/compute/disk.py32
-rw-r--r--nova/compute/manager.py53
-rw-r--r--nova/compute/monitor.py111
-rw-r--r--nova/compute/power_state.py13
-rw-r--r--nova/context.py11
-rw-r--r--nova/crypto.py45
-rw-r--r--nova/db/api.py12
-rw-r--r--nova/db/sqlalchemy/api.py739
-rw-r--r--nova/db/sqlalchemy/models.py69
-rw-r--r--nova/db/sqlalchemy/session.py3
-rw-r--r--nova/exception.py5
-rw-r--r--nova/fakerabbit.py1
-rw-r--r--nova/flags.py3
-rw-r--r--nova/image/service.py17
-rw-r--r--nova/image/services/glance/__init__.py8
-rw-r--r--nova/manager.py8
-rw-r--r--nova/network/linux_net.py32
-rw-r--r--nova/network/manager.py7
-rw-r--r--nova/objectstore/bucket.py20
-rw-r--r--nova/objectstore/handler.py57
-rw-r--r--nova/objectstore/image.py41
-rw-r--r--nova/objectstore/stored.py4
-rw-r--r--nova/process.py23
-rw-r--r--nova/quota.py3
-rw-r--r--nova/rpc.py2
-rw-r--r--nova/scheduler/driver.py2
-rw-r--r--nova/scheduler/manager.py3
-rw-r--r--nova/scheduler/simple.py1
-rw-r--r--nova/server.py7
-rw-r--r--nova/service.py53
-rw-r--r--nova/test.py163
-rw-r--r--nova/tests/access_unittest.py7
-rw-r--r--nova/tests/api/__init__.py8
-rw-r--r--nova/tests/api/fakes.py18
-rw-r--r--nova/tests/api/openstack/__init__.py14
-rw-r--r--nova/tests/api/openstack/fakes.py26
-rw-r--r--nova/tests/api/openstack/test_api.py74
-rw-r--r--nova/tests/api/openstack/test_auth.py25
-rw-r--r--nova/tests/api/openstack/test_faults.py20
-rw-r--r--nova/tests/api/openstack/test_images.py23
-rw-r--r--nova/tests/api/openstack/test_ratelimiting.py11
-rw-r--r--nova/tests/api/openstack/test_servers.py52
-rw-r--r--nova/tests/api/test_wsgi.py6
-rw-r--r--nova/tests/api_integration.py8
-rw-r--r--nova/tests/api_unittest.py32
-rw-r--r--nova/tests/auth_unittest.py10
-rw-r--r--nova/tests/cloud_unittest.py36
-rw-r--r--nova/tests/compute_unittest.py4
-rw-r--r--nova/tests/flags_unittest.py5
-rw-r--r--nova/tests/network_unittest.py55
-rw-r--r--nova/tests/objectstore_unittest.py15
-rw-r--r--nova/tests/process_unittest.py3
-rw-r--r--nova/tests/quota_unittest.py11
-rw-r--r--nova/tests/rpc_unittest.py11
-rw-r--r--nova/tests/scheduler_unittest.py8
-rw-r--r--nova/tests/service_unittest.py175
-rw-r--r--nova/tests/validator_unittest.py3
-rw-r--r--nova/tests/virt_unittest.py100
-rw-r--r--nova/tests/volume_unittest.py11
-rw-r--r--nova/twistd.py19
-rw-r--r--nova/utils.py55
-rw-r--r--nova/validate.py41
-rw-r--r--nova/virt/fake.py21
-rw-r--r--nova/virt/images.py4
-rw-r--r--nova/virt/libvirt.rescue.qemu.xml.template37
-rw-r--r--nova/virt/libvirt.rescue.uml.xml.template26
-rw-r--r--nova/virt/libvirt.rescue.xen.xml.template34
-rw-r--r--nova/virt/libvirt_conn.py207
-rw-r--r--nova/virt/xenapi.py28
-rw-r--r--nova/volume/driver.py1
-rw-r--r--nova/wsgi.py22
-rw-r--r--pylintrc2
-rw-r--r--tools/pip-requires1
104 files changed, 1992 insertions, 1541 deletions
diff --git a/Authors b/Authors
new file mode 100644
index 000000000..ec3a1cbd8
--- /dev/null
+++ b/Authors
@@ -0,0 +1,21 @@
+Andy Smith <code@term.ie>
+Anne Gentle <anne@openstack.org>
+Chris Behrens <cbehrens@codestud.com>
+Devin Carlen <devin.carlen@gmail.com>
+Eric Day <eday@oddments.org>
+Ewan Mellor <ewan.mellor@citrix.com>
+Hisaki Ohara <hisaki.ohara@intel.com>
+Jay Pipes <jaypipes@gmail.com>
+Jesse Andrews <anotherjesse@gmail.com>
+Joe Heck <heckj@mac.com>
+Joel Moore <joelbm24@gmail.com>
+Joshua McKenty <jmckenty@gmail.com>
+Justin Santa Barbara <justin@fathomdb.com>
+Matt Dietz <matt.dietz@rackspace.com>
+Michael Gundlach <michael.gundlach@rackspace.com>
+Monty Taylor <mordred@inaugust.com>
+Paul Voccio <paul@openstack.org>
+Rick Clark <rick@openstack.org>
+Soren Hansen <soren.hansen@rackspace.com>
+Todd Willey <todd@ansolabs.com>
+Vishvananda Ishaya <vishvananda@gmail.com>
diff --git a/MANIFEST.in b/MANIFEST.in
index 562aa3f7b..4fe5f0b34 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,6 +1,6 @@
include HACKING LICENSE run_tests.py run_tests.sh
include README builddeb.sh exercise_rsapi.py
-include ChangeLog MANIFEST.in pylintrc
+include ChangeLog MANIFEST.in pylintrc Authors
graft CA
graft doc
graft smoketests
diff --git a/bin/nova-api b/bin/nova-api
index a5027700b..20f1bd74f 100755
--- a/bin/nova-api
+++ b/bin/nova-api
@@ -39,6 +39,7 @@ from nova import server
FLAGS = flags.FLAGS
flags.DEFINE_integer('api_port', 8773, 'API port')
+
def main(_args):
from nova import api
from nova import wsgi
diff --git a/bin/nova-manage b/bin/nova-manage
index 219f32250..08b3da123 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -22,8 +22,8 @@
# Copyright (c) 2005, the Lawrence Journal-World
# All rights reserved.
#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
@@ -32,20 +32,21 @@
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
-# 3. Neither the name of Django nor the names of its contributors may be used
-# to endorse or promote products derived from this software without
+# 3. Neither the name of Django nor the names of its contributors may be
+# used to endorse or promote products derived from this software without
# specific prior written permission.
#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
-# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
@@ -181,8 +182,8 @@ class ShellCommands(object):
if shell == 'ipython':
try:
import IPython
- # Explicitly pass an empty list as arguments, because otherwise IPython
- # would use sys.argv from this script.
+ # Explicitly pass an empty list as arguments, because
+ # otherwise IPython would use sys.argv from this script.
shell = IPython.Shell.IPShell(argv=[])
shell.mainloop()
except ImportError:
@@ -190,13 +191,14 @@ class ShellCommands(object):
if shell == 'python':
import code
- try: # Try activating rlcompleter, because it's handy.
+ try:
+ # Try activating rlcompleter, because it's handy.
import readline
except ImportError:
pass
else:
- # We don't have to wrap the following import in a 'try', because
- # we already know 'readline' was imported successfully.
+ # We don't have to wrap the following import in a 'try',
+ # because we already know 'readline' was imported successfully.
import rlcompleter
readline.parse_and_bind("tab:complete")
code.interact()
@@ -242,7 +244,6 @@ class UserCommands(object):
print 'export EC2_ACCESS_KEY=%s' % user.access
print 'export EC2_SECRET_KEY=%s' % user.secret
-
def __init__(self):
self.manager = manager.AuthManager()
@@ -291,6 +292,7 @@ class UserCommands(object):
is_admin = False
self.manager.modify_user(name, access_key, secret_key, is_admin)
+
class ProjectCommands(object):
"""Class for managing projects."""
@@ -380,7 +382,6 @@ class FloatingIpCommands(object):
db.floating_ip_destroy(context.get_admin_context(),
str(address))
-
def list(self, host=None):
"""Lists all floating ips (optionally by host)
arguments: [host]"""
@@ -397,6 +398,7 @@ class FloatingIpCommands(object):
floating_ip['address'],
instance)
+
class NetworkCommands(object):
"""Class for managing networks."""
@@ -429,8 +431,7 @@ CATEGORIES = [
('shell', ShellCommands),
('vpn', VpnCommands),
('floating', FloatingIpCommands),
- ('network', NetworkCommands)
-]
+ ('network', NetworkCommands)]
def lazy_match(name, key_value_tuples):
diff --git a/bzrplugins/novalog/__init__.py b/bzrplugins/novalog/__init__.py
index e16b2e00f..9817dc886 100644
--- a/bzrplugins/novalog/__init__.py
+++ b/bzrplugins/novalog/__init__.py
@@ -17,13 +17,14 @@
import bzrlib.log
from bzrlib.osutils import format_date
-#
-# This is mostly stolen from bzrlib.log.GnuChangelogLogFormatter
-# The difference is that it logs the author rather than the committer
-# which for Nova always is Tarmac.
-#
+
class NovaLogFormat(bzrlib.log.GnuChangelogLogFormatter):
+ """This is mostly stolen from bzrlib.log.GnuChangelogLogFormatter
+ The difference is that it logs the author rather than the committer
+ which for Nova always is Tarmac."""
+
preferred_levels = 1
+
def log_revision(self, revision):
"""Log a revision, either merged or not."""
to_file = self.to_file
@@ -38,13 +39,14 @@ class NovaLogFormat(bzrlib.log.GnuChangelogLogFormatter):
to_file.write('%s %s\n\n' % (date_str, ", ".join(authors)))
if revision.delta is not None and revision.delta.has_changed():
- for c in revision.delta.added + revision.delta.removed + revision.delta.modified:
+ for c in revision.delta.added + revision.delta.removed + \
+ revision.delta.modified:
path, = c[:1]
to_file.write('\t* %s:\n' % (path,))
for c in revision.delta.renamed:
- oldpath,newpath = c[:2]
+ oldpath, newpath = c[:2]
# For renamed files, show both the old and the new path
- to_file.write('\t* %s:\n\t* %s:\n' % (oldpath,newpath))
+ to_file.write('\t* %s:\n\t* %s:\n' % (oldpath, newpath))
to_file.write('\n')
if not revision.rev.message:
@@ -56,4 +58,3 @@ class NovaLogFormat(bzrlib.log.GnuChangelogLogFormatter):
to_file.write('\n')
bzrlib.log.register_formatter('novalog', NovaLogFormat)
-
diff --git a/doc/source/_templates/layout.html b/doc/source/_templates/layout.html
new file mode 100644
index 000000000..0b72a77ac
--- /dev/null
+++ b/doc/source/_templates/layout.html
@@ -0,0 +1,17 @@
+{% extends "!layout.html" %}
+
+{% block footer %}
+{{ super() }}
+<script type="text/javascript">
+var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www.");
+document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E"));
+</script>
+<script type="text/javascript">
+try {
+var pageTracker = _gat._getTracker("UA-17511903-1");
+pageTracker._setDomainName("none");
+pageTracker._setAllowLinker(true);
+pageTracker._trackPageview();
+} catch(err) {}</script>
+{% endblock %}
+
diff --git a/doc/source/getting.started.rst b/doc/source/getting.started.rst
index f683bb256..2df4a45ea 100644
--- a/doc/source/getting.started.rst
+++ b/doc/source/getting.started.rst
@@ -18,34 +18,30 @@
Getting Started with Nova
=========================
+This code base is continually changing so dependencies also change.
-GOTTA HAVE A nova.pth file added or it WONT WORK (will write setup.py file soon)
-
-Create a file named nova.pth in your python libraries directory
-(usually /usr/local/lib/python2.6/dist-packages) with a single line that points
-to the directory where you checked out the source (that contains the nova/
-directory).
-
-DEPENDENCIES
+Dependencies
------------
Related servers we rely on
* RabbitMQ: messaging queue, used for all communication between components
-* OpenLDAP: users, groups (maybe cut)
-* ReDIS: Remote Dictionary Store (for fast, shared state data)
-* nginx: HTTP server to handle serving large files (because Tornado can't)
+
+Optional servers
+
+* OpenLDAP: By default, the auth server uses the RDBMS-backed datastore by setting FLAGS.auth_driver to 'nova.auth.dbdriver.DbDriver'. But OpenLDAP (or LDAP) could be configured.
+* ReDIS: By default, this is not enabled as the auth driver.
Python libraries we don't vendor
* M2Crypto: python library interface for openssl
* curl
-* XenAPI: Needed only for Xen Cloud Platform or XenServer support. Available from http://wiki.xensource.com/xenwiki/XCP_SDK or http://community.citrix.com/cdn/xs/sdks.
+* XenAPI: Needed only for Xen Cloud Platform or XenServer support. Available from http://wiki.xensource.com/xenwiki/XCP_SDK or http://community.citrix.com/cdn/xs/sdks.
Vendored python libaries (don't require any installation)
-* Tornado: scalable non blocking web server for api requests
* Twisted: just for the twisted.internet.defer package
+* Tornado: scalable non blocking web server for api requests
* boto: python api for aws api
* IPy: library for managing ip addresses
@@ -58,40 +54,19 @@ Recommended
Installation
--------------
-::
- # system libraries and tools
- apt-get install -y aoetools vlan curl
- modprobe aoe
-
- # python libraries
- apt-get install -y python-setuptools python-dev python-pycurl python-m2crypto
-
- # ON THE CLOUD CONTROLLER
- apt-get install -y rabbitmq-server dnsmasq nginx
- # build redis from 2.0.0-rc1 source
- # setup ldap (slap.sh as root will remove ldap and reinstall it)
- NOVA_PATH/nova/auth/slap.sh
- /etc/init.d/rabbitmq-server start
-
- # ON VOLUME NODE:
- apt-get install -y vblade-persist
-
- # ON THE COMPUTE NODE:
- apt-get install -y python-libvirt
- apt-get install -y kpartx kvm libvirt-bin
- modprobe kvm
-
- # optional packages
- apt-get install -y euca2ools
+ Due to many changes it's best to rely on the `OpenStack wiki <http://wiki.openstack.org>`_ for installation instructions.
Configuration
---------------
-ON CLOUD CONTROLLER
+These instructions are incomplete, but we are actively updating the `OpenStack wiki <http://wiki.openstack.org>`_ with more configuration information.
+
+On the cloud controller
* Add yourself to the libvirtd group, log out, and log back in
-* fix hardcoded ec2 metadata/userdata uri ($IP is the IP of the cloud), and masqurade all traffic from launched instances
+* Fix hardcoded ec2 metadata/userdata uri ($IP is the IP of the cloud), and masquerade all traffic from launched instances
+
::
iptables -t nat -A PREROUTING -s 0.0.0.0/0 -d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j DNAT --to-destination $IP:8773
@@ -119,9 +94,9 @@ ON CLOUD CONTROLLER
}
}
-ON VOLUME NODE
+On the volume node
-* create a filesystem (you can use an actual disk if you have one spare, default is /dev/sdb)
+* Create a filesystem (you can use an actual disk if you have one spare, default is /dev/sdb)
::
@@ -137,9 +112,7 @@ Running
Launch servers
* rabbitmq
-* redis
-* slapd
-* nginx
+* redis (optional)
Launch nova components
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 6627fe066..1109e9011 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -15,18 +15,22 @@
License for the specific language governing permissions and limitations
under the License.
-Welcome to nova's documentation!
+Welcome to Nova's documentation!
================================
-Nova is a cloud computing fabric controller (the main part of an IaaS system) built to match the popular AWS EC2 and S3 APIs.
-It is written in Python, using the Tornado and Twisted frameworks, and relies on the standard AMQP messaging protocol,
-and the Redis distributed KVS.
-Nova is intended to be easy to extend, and adapt. For example, it currently uses
-an LDAP server for users and groups, but also includes a fake LDAP server,
-that stores data in Redis. It has extensive test coverage, and uses the
-Sphinx toolkit (the same as Python itself) for code and user documentation.
+Nova is a cloud computing fabric controller (the main part of an IaaS system).
+It is written in Python and relies on the standard AMQP messaging protocol, uses the Twisted framework,
+and optionally uses the Redis distributed key value store for authorization.
+
+Nova is intended to be easy to extend and adapt. For example, authentication and authorization
+requests by default use an RDBMS-backed datastore driver. However, there is already support
+for using LDAP backing authentication (slapd) and if you wish to "fake" LDAP, there is a module
+available that uses ReDIS to store authentication information in an LDAP-like backing datastore.
+It has extensive test coverage, and uses the Sphinx toolkit (the same as Python itself) for code
+and developer documentation. Additional documentation is available on the
+`OpenStack wiki <http://wiki.openstack.org>`_.
While Nova is currently in Beta use within several organizations, the codebase
-is very much under active development - there are bugs!
+is very much under active development - please test it and log bugs!
Contents:
diff --git a/nova/adminclient.py b/nova/adminclient.py
index fc9fcfde0..b7a3d2c32 100644
--- a/nova/adminclient.py
+++ b/nova/adminclient.py
@@ -25,10 +25,10 @@ import httplib
from boto.ec2.regioninfo import RegionInfo
-DEFAULT_CLC_URL='http://127.0.0.1:8773'
-DEFAULT_REGION='nova'
-DEFAULT_ACCESS_KEY='admin'
-DEFAULT_SECRET_KEY='admin'
+DEFAULT_CLC_URL = 'http://127.0.0.1:8773'
+DEFAULT_REGION = 'nova'
+DEFAULT_ACCESS_KEY = 'admin'
+DEFAULT_SECRET_KEY = 'admin'
class UserInfo(object):
@@ -199,9 +199,7 @@ class NovaAdminClient(object):
def connection_for(self, username, project, clc_url=None, region=None,
**kwargs):
- """
- Returns a boto ec2 connection for the given username.
- """
+ """Returns a boto ec2 connection for the given username."""
if not clc_url:
clc_url = self.clc_url
if not region:
@@ -220,36 +218,37 @@ class NovaAdminClient(object):
**kwargs)
def split_clc_url(self, clc_url):
- """
- Splits a cloud controller endpoint url.
- """
+ """Splits a cloud controller endpoint url."""
parts = httplib.urlsplit(clc_url)
is_secure = parts.scheme == 'https'
ip, port = parts.netloc.split(':')
return {'ip': ip, 'port': int(port), 'is_secure': is_secure}
def get_users(self):
- """ grabs the list of all users """
+ """Grabs the list of all users."""
return self.apiconn.get_list('DescribeUsers', {}, [('item', UserInfo)])
def get_user(self, name):
- """ grab a single user by name """
- user = self.apiconn.get_object('DescribeUser', {'Name': name}, UserInfo)
-
+ """Grab a single user by name."""
+ user = self.apiconn.get_object('DescribeUser', {'Name': name},
+ UserInfo)
if user.username != None:
return user
def has_user(self, username):
- """ determine if user exists """
+ """Determine if user exists."""
return self.get_user(username) != None
def create_user(self, username):
- """ creates a new user, returning the userinfo object with access/secret """
- return self.apiconn.get_object('RegisterUser', {'Name': username}, UserInfo)
+ """Creates a new user, returning the userinfo object with
+ access/secret."""
+ return self.apiconn.get_object('RegisterUser', {'Name': username},
+ UserInfo)
def delete_user(self, username):
- """ deletes a user """
- return self.apiconn.get_object('DeregisterUser', {'Name': username}, UserInfo)
+ """Deletes a user."""
+ return self.apiconn.get_object('DeregisterUser', {'Name': username},
+ UserInfo)
def get_roles(self, project_roles=True):
"""Returns a list of available roles."""
@@ -258,11 +257,10 @@ class NovaAdminClient(object):
[('item', UserRole)])
def get_user_roles(self, user, project=None):
- """Returns a list of roles for the given user.
- Omitting project will return any global roles that the user has.
- Specifying project will return only project specific roles.
- """
- params = {'User':user}
+ """Returns a list of roles for the given user. Omitting project will
+ return any global roles that the user has. Specifying project will
+ return only project specific roles."""
+ params = {'User': user}
if project:
params['Project'] = project
return self.apiconn.get_list('DescribeUserRoles',
@@ -270,24 +268,19 @@ class NovaAdminClient(object):
[('item', UserRole)])
def add_user_role(self, user, role, project=None):
- """
- Add a role to a user either globally or for a specific project.
- """
+ """Add a role to a user either globally or for a specific project."""
return self.modify_user_role(user, role, project=project,
operation='add')
def remove_user_role(self, user, role, project=None):
- """
- Remove a role from a user either globally or for a specific project.
- """
+ """Remove a role from a user either globally or for a specific
+ project."""
return self.modify_user_role(user, role, project=project,
operation='remove')
def modify_user_role(self, user, role, project=None, operation='add',
**kwargs):
- """
- Add or remove a role for a user and project.
- """
+ """Add or remove a role for a user and project."""
params = {'User': user,
'Role': role,
'Project': project,
@@ -295,9 +288,7 @@ class NovaAdminClient(object):
return self.apiconn.get_status('ModifyUserRole', params)
def get_projects(self, user=None):
- """
- Returns a list of all projects.
- """
+ """Returns a list of all projects."""
if user:
params = {'User': user}
else:
@@ -307,9 +298,7 @@ class NovaAdminClient(object):
[('item', ProjectInfo)])
def get_project(self, name):
- """
- Returns a single project with the specified name.
- """
+ """Returns a single project with the specified name."""
project = self.apiconn.get_object('DescribeProject',
{'Name': name},
ProjectInfo)
@@ -319,9 +308,7 @@ class NovaAdminClient(object):
def create_project(self, projectname, manager_user, description=None,
member_users=None):
- """
- Creates a new project.
- """
+ """Creates a new project."""
params = {'Name': projectname,
'ManagerUser': manager_user,
'Description': description,
@@ -329,50 +316,38 @@ class NovaAdminClient(object):
return self.apiconn.get_object('RegisterProject', params, ProjectInfo)
def delete_project(self, projectname):
- """
- Permanently deletes the specified project.
- """
+ """Permanently deletes the specified project."""
return self.apiconn.get_object('DeregisterProject',
{'Name': projectname},
ProjectInfo)
def get_project_members(self, name):
- """
- Returns a list of members of a project.
- """
+ """Returns a list of members of a project."""
return self.apiconn.get_list('DescribeProjectMembers',
{'Name': name},
[('item', ProjectMember)])
def add_project_member(self, user, project):
- """
- Adds a user to a project.
- """
+ """Adds a user to a project."""
return self.modify_project_member(user, project, operation='add')
def remove_project_member(self, user, project):
- """
- Removes a user from a project.
- """
+ """Removes a user from a project."""
return self.modify_project_member(user, project, operation='remove')
def modify_project_member(self, user, project, operation='add'):
- """
- Adds or removes a user from a project.
- """
+ """Adds or removes a user from a project."""
params = {'User': user,
'Project': project,
'Operation': operation}
return self.apiconn.get_status('ModifyProjectMember', params)
def get_zip(self, user, project):
- """
- Returns the content of a zip file containing novarc and access credentials.
- """
+ """Returns the content of a zip file containing novarc and access
+ credentials."""
params = {'Name': user, 'Project': project}
zip = self.apiconn.get_object('GenerateX509ForUser', params, UserInfo)
return zip.file
def get_hosts(self):
return self.apiconn.get_list('DescribeHosts', {}, [('item', HostInfo)])
-
diff --git a/nova/api/__init__.py b/nova/api/__init__.py
index 8ec7094d7..8a1d9fe32 100644
--- a/nova/api/__init__.py
+++ b/nova/api/__init__.py
@@ -31,12 +31,13 @@ from nova.api import openstack
from nova.api.ec2 import metadatarequesthandler
-flags.DEFINE_string('osapi_subdomain', 'api',
+flags.DEFINE_string('osapi_subdomain', 'api',
'subdomain running the OpenStack API')
-flags.DEFINE_string('ec2api_subdomain', 'ec2',
+flags.DEFINE_string('ec2api_subdomain', 'ec2',
'subdomain running the EC2 API')
-flags.DEFINE_string('FAKE_subdomain', None,
- 'set to api or ec2 to fake the subdomain of the host for testing')
+flags.DEFINE_string('FAKE_subdomain', None,
+ 'set to api or ec2 to fake the subdomain of the host '
+ 'for testing')
FLAGS = flags.FLAGS
@@ -44,7 +45,7 @@ class API(wsgi.Router):
"""Routes top-level requests to the appropriate controller."""
def __init__(self):
- osapidomain = {'sub_domain': [FLAGS.osapi_subdomain]}
+ osapidomain = {'sub_domain': [FLAGS.osapi_subdomain]}
ec2domain = {'sub_domain': [FLAGS.ec2api_subdomain]}
# If someone wants to pretend they're hitting the OSAPI subdomain
# on their local box, they can set FAKE_subdomain to 'api', which
@@ -55,7 +56,7 @@ class API(wsgi.Router):
ec2domain = {}
mapper = routes.Mapper()
mapper.sub_domains = True
- mapper.connect("/", controller=self.osapi_versions,
+ mapper.connect("/", controller=self.osapi_versions,
conditions=osapidomain)
mapper.connect("/v1.0/{path_info:.*}", controller=openstack.API(),
conditions=osapidomain)
@@ -107,5 +108,3 @@ class API(wsgi.Router):
'2009-04-04',
]
return ''.join('%s\n' % v for v in versions)
-
-
diff --git a/nova/api/cloud.py b/nova/api/cloud.py
index aa84075dc..b8f15019f 100644
--- a/nova/api/cloud.py
+++ b/nova/api/cloud.py
@@ -36,3 +36,23 @@ def reboot(instance_id, context=None):
db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "reboot_instance",
"args": {"instance_id": instance_ref['id']}})
+
+
+def rescue(instance_id, context):
+ """Rescue the given instance."""
+ instance_ref = db.instance_get_by_internal_id(context, instance_id)
+ host = instance_ref['host']
+ rpc.cast(context,
+ db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "rescue_instance",
+ "args": {"instance_id": instance_ref['id']}})
+
+
+def unrescue(instance_id, context):
+ """Unrescue the given instance."""
+ instance_ref = db.instance_get_by_internal_id(context, instance_id)
+ host = instance_ref['host']
+ rpc.cast(context,
+ db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "unrescue_instance",
+ "args": {"instance_id": instance_ref['id']}})
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index 5735eb956..0df4d3710 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -62,7 +62,8 @@ class Authenticate(wsgi.Middleware):
# Make a copy of args for authentication and signature verification.
auth_params = dict(req.params)
- auth_params.pop('Signature') # not part of authentication args
+ # Not part of authentication args
+ auth_params.pop('Signature')
# Authenticate the request.
try:
@@ -109,9 +110,11 @@ class Router(wsgi.Middleware):
'SignatureVersion', 'Version', 'Timestamp']
args = dict(req.params)
try:
- action = req.params['Action'] # raise KeyError if omitted
+ # Raise KeyError if omitted
+ action = req.params['Action']
for non_arg in non_args:
- args.pop(non_arg) # remove, but raise KeyError if omitted
+ # Remove, but raise KeyError if omitted
+ args.pop(non_arg)
except:
raise webob.exc.HTTPBadRequest()
@@ -184,7 +187,8 @@ class Authorizer(wsgi.Middleware):
context = req.environ['ec2.context']
controller_name = req.environ['ec2.controller'].__class__.__name__
action = req.environ['ec2.action']
- allowed_roles = self.action_roles[controller_name].get(action, ['none'])
+ allowed_roles = self.action_roles[controller_name].get(action,
+ ['none'])
if self._matches_any_role(context, allowed_roles):
return self.application
else:
@@ -242,4 +246,3 @@ class Executor(wsgi.Application):
'<Message>%s</Message></Error></Errors>'
'<RequestID>?</RequestID></Response>') % (code, message)
return resp
-
diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py
index 36feae451..1c6ab688d 100644
--- a/nova/api/ec2/admin.py
+++ b/nova/api/ec2/admin.py
@@ -73,7 +73,7 @@ class AdminController(object):
def describe_users(self, _context, **_kwargs):
"""Returns all users - should be changed to deal with a list."""
return {'userSet':
- [user_dict(u) for u in manager.AuthManager().get_users()] }
+ [user_dict(u) for u in manager.AuthManager().get_users()]}
def register_user(self, _context, name, **_kwargs):
"""Creates a new user, and returns generated credentials."""
@@ -91,7 +91,7 @@ class AdminController(object):
def describe_roles(self, context, project_roles=True, **kwargs):
"""Returns a list of allowed roles."""
roles = manager.AuthManager().get_roles(project_roles)
- return { 'roles': [{'role': r} for r in roles]}
+ return {'roles': [{'role': r} for r in roles]}
def describe_user_roles(self, context, user, project=None, **kwargs):
"""Returns a list of roles for the given user.
@@ -99,7 +99,7 @@ class AdminController(object):
Specifying project will return only project specific roles.
"""
roles = manager.AuthManager().get_user_roles(user, project=project)
- return { 'roles': [{'role': r} for r in roles]}
+ return {'roles': [{'role': r} for r in roles]}
def modify_user_role(self, context, user, role, project=None,
operation='add', **kwargs):
@@ -155,9 +155,10 @@ class AdminController(object):
'members': [{'member': m} for m in project.member_ids]}
return result
- def modify_project_member(self, context, user, project, operation, **kwargs):
+ def modify_project_member(self, context, user, project, operation,
+ **kwargs):
"""Add or remove a user from a project."""
- if operation =='add':
+ if operation == 'add':
manager.AuthManager().add_to_project(user, project)
elif operation == 'remove':
manager.AuthManager().remove_from_project(user, project)
diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py
index 3c8651582..5758781b6 100644
--- a/nova/api/ec2/apirequest.py
+++ b/nova/api/ec2/apirequest.py
@@ -44,6 +44,7 @@ def _underscore_to_xmlcase(str):
res = _underscore_to_camelcase(str)
return res[:1].lower() + res[1:]
+
def _try_convert(value):
"""Return a non-string if possible"""
if value == 'None':
@@ -59,12 +60,12 @@ def _try_convert(value):
return value
if valueneg[0] == '0':
if valueneg[1] in 'xX':
- return int(value,16)
+ return int(value, 16)
elif valueneg[1] in 'bB':
- return int(value,2)
+ return int(value, 2)
else:
try:
- return int(value,8)
+ return int(value, 8)
except ValueError:
pass
try:
@@ -80,6 +81,7 @@ def _try_convert(value):
except ValueError:
return value
+
class APIRequest(object):
def __init__(self, controller, action):
self.controller = controller
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 096ddf668..51e972aa7 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -25,6 +25,7 @@ datastore.
import base64
import datetime
import logging
+import re
import os
import time
@@ -48,6 +49,7 @@ flags.DECLARE('storage_availability_zone', 'nova.volume.manager')
InvalidInputException = exception.InvalidInputException
+
class QuotaError(exception.ApiError):
"""Quota Exceeeded"""
pass
@@ -138,8 +140,8 @@ class CloudController(object):
for node in nodes:
rpc.cast(context,
'%s.%s' % (FLAGS.compute_topic, node),
- { "method": "refresh_security_group",
- "args": {"security_group_id": security_group.id}})
+ {"method": "refresh_security_group",
+ "args": {"security_group_id": security_group.id}})
def get_metadata(self, address):
ctxt = context.get_admin_context()
@@ -148,48 +150,42 @@ class CloudController(object):
return None
mpi = self._get_mpi_data(ctxt, instance_ref['project_id'])
if instance_ref['key_name']:
- keys = {
- '0': {
- '_name': instance_ref['key_name'],
- 'openssh-key': instance_ref['key_data']
- }
- }
+ keys = {'0': {'_name': instance_ref['key_name'],
+ 'openssh-key': instance_ref['key_data']}}
else:
keys = ''
hostname = instance_ref['hostname']
floating_ip = db.instance_get_floating_address(ctxt,
instance_ref['id'])
+ ec2_id = internal_id_to_ec2_id(instance_ref['internal_id'])
data = {
'user-data': base64.b64decode(instance_ref['user_data']),
'meta-data': {
'ami-id': instance_ref['image_id'],
'ami-launch-index': instance_ref['launch_index'],
'ami-manifest-path': 'FIXME',
- 'block-device-mapping': { # TODO(vish): replace with real data
+ 'block-device-mapping': {
+ # TODO(vish): replace with real data
'ami': 'sda1',
'ephemeral0': 'sda2',
'root': '/dev/sda1',
- 'swap': 'sda3'
- },
+ 'swap': 'sda3'},
'hostname': hostname,
'instance-action': 'none',
- 'instance-id': internal_id_to_ec2_id(instance_ref['internal_id']),
+ 'instance-id': ec2_id,
'instance-type': instance_ref['instance_type'],
'local-hostname': hostname,
'local-ipv4': address,
'kernel-id': instance_ref['kernel_id'],
- 'placement': {
- 'availability-zone': 'nova' # TODO(vish): real zone
- },
+ # TODO(vish): real zone
+ 'placement': {'availability-zone': 'nova'},
'public-hostname': hostname,
'public-ipv4': floating_ip or '',
'public-keys': keys,
'ramdisk-id': instance_ref['ramdisk_id'],
'reservation-id': instance_ref['reservation_id'],
'security-groups': '',
- 'mpi': mpi
- }
- }
+ 'mpi': mpi}}
if False: # TODO(vish): store ancestor ids
data['ancestor-ami-ids'] = []
if False: # TODO(vish): store product codes
@@ -212,7 +208,7 @@ class CloudController(object):
'regionEndpoint': FLAGS.ec2_url}]
if region_name:
regions = [r for r in regions if r['regionName'] in region_name]
- return {'regionInfo': regions }
+ return {'regionInfo': regions}
def describe_snapshots(self,
context,
@@ -238,7 +234,8 @@ class CloudController(object):
for key_pair in key_pairs:
# filter out the vpn keys
suffix = FLAGS.vpn_key_suffix
- if context.user.is_admin() or not key_pair['name'].endswith(suffix):
+ if context.user.is_admin() or \
+ not key_pair['name'].endswith(suffix):
result.append({
'keyName': key_pair['name'],
'keyFingerprint': key_pair['fingerprint'],
@@ -272,7 +269,7 @@ class CloudController(object):
if not group_name is None:
groups = [g for g in groups if g.name in group_name]
- return {'securityGroupInfo': groups }
+ return {'securityGroupInfo': groups}
def _format_security_group(self, context, group):
g = {}
@@ -296,13 +293,10 @@ class CloudController(object):
g['ipPermissions'] += [r]
return g
-
- def _authorize_revoke_rule_args_to_dict(self, context,
- to_port=None, from_port=None,
- ip_protocol=None, cidr_ip=None,
- user_id=None,
- source_security_group_name=None,
- source_security_group_owner_id=None):
+ def _revoke_rule_args_to_dict(self, context, to_port=None, from_port=None,
+ ip_protocol=None, cidr_ip=None, user_id=None,
+ source_security_group_name=None,
+ source_security_group_owner_id=None):
values = {}
@@ -323,16 +317,16 @@ class CloudController(object):
values['cidr'] = '0.0.0.0/0'
if ip_protocol and from_port and to_port:
- from_port = int(from_port)
- to_port = int(to_port)
+ from_port = int(from_port)
+ to_port = int(to_port)
ip_protocol = str(ip_protocol)
- if ip_protocol.upper() not in ['TCP','UDP','ICMP']:
- raise InvalidInputException('%s is not a valid ipProtocol' %
- (ip_protocol,))
+ if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']:
+ raise InvalidInputException('%s is not a valid ipProtocol' %
+ (ip_protocol,))
if ((min(from_port, to_port) < -1) or
(max(from_port, to_port) > 65535)):
- raise InvalidInputException('Invalid port range')
+ raise InvalidInputException('Invalid port range')
values['protocol'] = ip_protocol
values['from_port'] = from_port
@@ -344,7 +338,6 @@ class CloudController(object):
return values
-
def _security_group_rule_exists(self, security_group, values):
"""Indicates whether the specified rule values are already
defined in the given security group.
@@ -363,20 +356,19 @@ class CloudController(object):
return True
return False
-
def revoke_security_group_ingress(self, context, group_name, **kwargs):
self._ensure_default_security_group(context)
security_group = db.security_group_get_by_name(context,
context.project_id,
group_name)
- criteria = self._authorize_revoke_rule_args_to_dict(context, **kwargs)
+ criteria = self._revoke_rule_args_to_dict(context, **kwargs)
if criteria == None:
raise exception.ApiError("No rule for the specified parameters.")
for rule in security_group.rules:
match = True
- for (k,v) in criteria.iteritems():
+ for (k, v) in criteria.iteritems():
if getattr(rule, k, False) != v:
match = False
if match:
@@ -395,7 +387,7 @@ class CloudController(object):
context.project_id,
group_name)
- values = self._authorize_revoke_rule_args_to_dict(context, **kwargs)
+ values = self._revoke_rule_args_to_dict(context, **kwargs)
values['parent_group_id'] = security_group.id
if self._security_group_rule_exists(security_group, values):
@@ -408,7 +400,6 @@ class CloudController(object):
return True
-
def _get_source_project_id(self, context, source_security_group_owner_id):
if source_security_group_owner_id:
# Parse user:project for source group.
@@ -426,13 +417,12 @@ class CloudController(object):
return source_project_id
-
def create_security_group(self, context, group_name, group_description):
self._ensure_default_security_group(context)
if db.security_group_exists(context, context.project_id, group_name):
raise exception.ApiError('group %s already exists' % group_name)
- group = {'user_id' : context.user.id,
+ group = {'user_id': context.user.id,
'project_id': context.project_id,
'name': group_name,
'description': group_description}
@@ -441,7 +431,6 @@ class CloudController(object):
return {'securityGroupSet': [self._format_security_group(context,
group_ref)]}
-
def delete_security_group(self, context, group_name, **kwargs):
security_group = db.security_group_get_by_name(context,
context.project_id,
@@ -449,7 +438,6 @@ class CloudController(object):
db.security_group_destroy(context, security_group.id)
return True
-
def get_console_output(self, context, instance_id, **kwargs):
# instance_id is passed in as a list of instances
ec2_id = instance_id[0]
@@ -458,13 +446,13 @@ class CloudController(object):
output = rpc.call(context,
'%s.%s' % (FLAGS.compute_topic,
instance_ref['host']),
- {"method" : "get_console_output",
- "args" : {"instance_id": instance_ref['id']}})
+ {"method": "get_console_output",
+ "args": {"instance_id": instance_ref['id']}})
now = datetime.datetime.utcnow()
- return { "InstanceId" : ec2_id,
- "Timestamp" : now,
- "output" : base64.b64encode(output) }
+ return {"InstanceId": ec2_id,
+ "Timestamp": now,
+ "output": base64.b64encode(output)}
def describe_volumes(self, context, **kwargs):
if context.user.is_admin():
@@ -531,9 +519,11 @@ class CloudController(object):
return {'volumeSet': [self._format_volume(context, volume_ref)]}
-
def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
volume_ref = db.volume_get_by_ec2_id(context, volume_id)
+ if not re.match("^/dev/[a-z]d[a-z]+$", device):
+ raise exception.ApiError("Invalid device specified: %s. "
+ "Example device: /dev/vdb" % device)
# TODO(vish): abstract status checking?
if volume_ref['status'] != "available":
raise exception.ApiError("Volume status must be available")
@@ -634,8 +624,7 @@ class CloudController(object):
i['imageId'] = instance['image_id']
i['instanceState'] = {
'code': instance['state'],
- 'name': instance['state_description']
- }
+ 'name': instance['state_description']}
fixed_addr = None
floating_addr = None
if instance['fixed_ip']:
@@ -657,7 +646,7 @@ class CloudController(object):
i['amiLaunchIndex'] = instance['launch_index']
i['displayName'] = instance['display_name']
i['displayDescription'] = instance['display_description']
- if not reservations.has_key(instance['reservation_id']):
+ if instance['reservation_id'] not in reservations:
r = {}
r['reservationId'] = instance['reservation_id']
r['ownerId'] = instance['project_id']
@@ -758,10 +747,10 @@ class CloudController(object):
context.project_id,
'default')
except exception.NotFound:
- values = { 'name' : 'default',
- 'description' : 'default',
- 'user_id' : context.user.id,
- 'project_id' : context.project_id }
+ values = {'name': 'default',
+ 'description': 'default',
+ 'user_id': context.user.id,
+ 'project_id': context.project_id}
group = db.security_group_create(context, values)
def run_instances(self, context, **kwargs):
@@ -805,7 +794,7 @@ class CloudController(object):
logging.debug("Going to run %s instances...", num_instances)
launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
key_data = None
- if kwargs.has_key('key_name'):
+ if 'key_name' in kwargs:
key_pair_ref = db.key_pair_get(context,
context.user.id,
kwargs['key_name'])
@@ -886,7 +875,6 @@ class CloudController(object):
(context.project.name, context.user.name, instance_ref['id']))
return self._format_run_instances(context, reservation_id)
-
def terminate_instances(self, context, instance_id, **kwargs):
"""Terminate each instance in instance_id, which is a list of ec2 ids.
@@ -953,8 +941,21 @@ class CloudController(object):
def reboot_instances(self, context, instance_id, **kwargs):
"""instance_id is a list of instance ids"""
- for id_str in instance_id:
- cloud.reboot(id_str, context=context)
+ for ec2_id in instance_id:
+ internal_id = ec2_id_to_internal_id(ec2_id)
+ cloud.reboot(internal_id, context=context)
+ return True
+
+ def rescue_instance(self, context, instance_id, **kwargs):
+ """This is an extension to the normal ec2_api"""
+ internal_id = ec2_id_to_internal_id(instance_id)
+ cloud.rescue(internal_id, context=context)
+ return True
+
+ def unrescue_instance(self, context, instance_id, **kwargs):
+ """This is an extension to the normal ec2_api"""
+ internal_id = ec2_id_to_internal_id(instance_id)
+ cloud.unrescue(internal_id, context=context)
return True
def update_instance(self, context, ec2_id, **kwargs):
@@ -996,7 +997,7 @@ class CloudController(object):
def register_image(self, context, image_location=None, **kwargs):
# FIXME: should the objectstore be doing these authorization checks?
- if image_location is None and kwargs.has_key('name'):
+ if image_location is None and 'name' in kwargs:
image_location = kwargs['name']
image_id = images.register(context, image_location)
logging.debug("Registered %s as %s" % (image_location, image_id))
@@ -1014,7 +1015,8 @@ class CloudController(object):
result['launchPermission'].append({'group': 'all'})
return result
- def modify_image_attribute(self, context, image_id, attribute, operation_type, **kwargs):
+ def modify_image_attribute(self, context, image_id, attribute,
+ operation_type, **kwargs):
# TODO(devcamcar): Support users and groups other than 'all'.
if attribute != 'launchPermission':
raise exception.ApiError('attribute not supported: %s' % attribute)
diff --git a/nova/api/ec2/images.py b/nova/api/ec2/images.py
index f0a43dad6..60f9008e9 100644
--- a/nova/api/ec2/images.py
+++ b/nova/api/ec2/images.py
@@ -43,6 +43,7 @@ def modify(context, image_id, operation):
return True
+
def update(context, image_id, attributes):
"""update an image's attributes / info.json"""
attributes.update({"image_id": image_id})
@@ -52,6 +53,7 @@ def update(context, image_id, attributes):
query_args=qs(attributes))
return True
+
def register(context, image_location):
""" rpc call to register a new image based from a manifest """
@@ -64,13 +66,14 @@ def register(context, image_location):
return image_id
+
def list(context, filter_list=[]):
""" return a list of all images that a user can see
optionally filtered by a list of image_id """
if FLAGS.connection_type == 'fake':
- return [{ 'imageId' : 'bar'}]
+ return [{'imageId': 'bar'}]
# FIXME: send along the list of only_images to check for
response = conn(context).make_request(
@@ -82,6 +85,7 @@ def list(context, filter_list=[]):
return [i for i in result if i['imageId'] in filter_list]
return result
+
def get(context, image_id):
"""return a image object if the context has permissions"""
result = list(context, [image_id])
diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py
index 08a8040ca..2f4f414cc 100644
--- a/nova/api/ec2/metadatarequesthandler.py
+++ b/nova/api/ec2/metadatarequesthandler.py
@@ -27,7 +27,6 @@ from nova.api.ec2 import cloud
class MetadataRequestHandler(object):
-
"""Serve metadata from the EC2 API."""
def print_data(self, data):
@@ -43,7 +42,8 @@ class MetadataRequestHandler(object):
else:
output += '/'
output += '\n'
- return output[:-1] # cut off last \n
+ # Cut off last \n
+ return output[:-1]
elif isinstance(data, list):
return '\n'.join(data)
else:
@@ -65,7 +65,8 @@ class MetadataRequestHandler(object):
cc = cloud.CloudController()
meta_data = cc.get_metadata(req.remote_addr)
if meta_data is None:
- logging.error('Failed to get metadata for ip: %s' % req.remote_addr)
+ logging.error('Failed to get metadata for ip: %s' %
+ req.remote_addr)
raise webob.exc.HTTPNotFound()
data = self.lookup(req.path_info, meta_data)
if data is None:
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index 5706dbc09..1dd3ba770 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -23,6 +23,7 @@ WSGI middleware for OpenStack API controllers.
import json
import time
+import logging
import routes
import webob.dec
import webob.exc
@@ -43,9 +44,10 @@ from nova.auth import manager
FLAGS = flags.FLAGS
flags.DEFINE_string('nova_api_auth',
- 'nova.api.openstack.auth.BasicApiAuthManager',
+ 'nova.api.openstack.auth.BasicApiAuthManager',
'The auth mechanism to use for the OpenStack API implemenation')
+
class API(wsgi.Middleware):
"""WSGI entry point for all OpenStack API requests."""
@@ -53,6 +55,16 @@ class API(wsgi.Middleware):
app = AuthMiddleware(RateLimitingMiddleware(APIRouter()))
super(API, self).__init__(app)
+ @webob.dec.wsgify
+ def __call__(self, req):
+ try:
+ return req.get_response(self.application)
+ except Exception as ex:
+ logging.warn("Caught error: %s" % str(ex))
+ exc = webob.exc.HTTPInternalServerError(explanation=str(ex))
+ return faults.Fault(exc)
+
+
class AuthMiddleware(wsgi.Middleware):
"""Authorize the openstack API request or return an HTTP Forbidden."""
@@ -62,7 +74,7 @@ class AuthMiddleware(wsgi.Middleware):
@webob.dec.wsgify
def __call__(self, req):
- if not req.headers.has_key("X-Auth-Token"):
+ if 'X-Auth-Token' not in req.headers:
return self.auth_driver.authenticate(req)
user = self.auth_driver.authorize_token(req.headers["X-Auth-Token"])
@@ -70,11 +82,12 @@ class AuthMiddleware(wsgi.Middleware):
if not user:
return faults.Fault(webob.exc.HTTPUnauthorized())
- if not req.environ.has_key('nova.context'):
+ if 'nova.context' not in req.environ:
req.environ['nova.context'] = {}
req.environ['nova.context']['user'] = user
return self.application
+
class RateLimitingMiddleware(wsgi.Middleware):
"""Rate limit incoming requests according to the OpenStack rate limits."""
@@ -87,7 +100,7 @@ class RateLimitingMiddleware(wsgi.Middleware):
"""
super(RateLimitingMiddleware, self).__init__(application)
if not service_host:
- #TODO(gundlach): These limits were based on limitations of Cloud
+ #TODO(gundlach): These limits were based on limitations of Cloud
#Servers. We should revisit them in Nova.
self.limiter = ratelimiting.Limiter(limits={
'DELETE': (100, ratelimiting.PER_MINUTE),
@@ -102,13 +115,14 @@ class RateLimitingMiddleware(wsgi.Middleware):
@webob.dec.wsgify
def __call__(self, req):
"""Rate limit the request.
-
- If the request should be rate limited, return a 413 status with a
+
+ If the request should be rate limited, return a 413 status with a
Retry-After header giving the time when the request would succeed.
"""
user_id = req.environ['nova.context']['user']['id']
action_name = self.get_action_name(req)
- if not action_name: # not rate limited
+ if not action_name:
+ # Not rate limited
return self.application
delay = self.get_delay(action_name, user_id)
if delay:
@@ -152,13 +166,13 @@ class APIRouter(wsgi.Router):
def __init__(self):
mapper = routes.Mapper()
mapper.resource("server", "servers", controller=servers.Controller(),
- collection={ 'detail': 'GET'},
- member={'action':'POST'})
+ collection={'detail': 'GET'},
+ member={'action': 'POST'})
- mapper.resource("backup_schedule", "backup_schedules",
+ mapper.resource("backup_schedule", "backup_schedules",
controller=backup_schedules.Controller(),
- parent_resource=dict(member_name='server',
- collection_name = 'servers'))
+ parent_resource=dict(member_name='server',
+ collection_name='servers'))
mapper.resource("image", "images", controller=images.Controller(),
collection={'detail': 'GET'})
@@ -172,7 +186,7 @@ class APIRouter(wsgi.Router):
def limited(items, req):
"""Return a slice of items according to requested offset and limit.
-
+
items - a sliceable
req - wobob.Request possibly containing offset and limit GET variables.
offset is where to start in the list, and limit is the maximum number
@@ -187,4 +201,3 @@ def limited(items, req):
limit = min(1000, limit)
range_end = offset + limit
return items[offset:range_end]
-
diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py
index 7aba55728..ff428ff70 100644
--- a/nova/api/openstack/auth.py
+++ b/nova/api/openstack/auth.py
@@ -15,9 +15,11 @@ from nova.api.openstack import faults
FLAGS = flags.FLAGS
+
class Context(object):
pass
+
class BasicApiAuthManager(object):
""" Implements a somewhat rudimentary version of OpenStack Auth"""
@@ -61,7 +63,7 @@ class BasicApiAuthManager(object):
def authorize_token(self, token_hash):
""" retrieves user information from the datastore given a token
-
+
If the token has expired, returns None
If the token is not found, returns None
Otherwise returns dict(id=(the authorized user's id))
@@ -69,7 +71,7 @@ class BasicApiAuthManager(object):
This method will also remove the token if the timestamp is older than
2 days ago.
"""
- token = self.db.auth_get_token(self.context, token_hash)
+ token = self.db.auth_get_token(self.context, token_hash)
if token:
delta = datetime.datetime.now() - token.created_at
if delta.days >= 2:
@@ -94,8 +96,7 @@ class BasicApiAuthManager(object):
token_dict['user_id'] = user.id
token = self.db.auth_create_token(self.context, token_dict)
return token, user
- return None, None
+ return None, None
def _get_server_mgmt_url(self):
return 'https://%s/v1.0/' % self.host
-
diff --git a/nova/api/openstack/backup_schedules.py b/nova/api/openstack/backup_schedules.py
index db240c65a..3ed691d7b 100644
--- a/nova/api/openstack/backup_schedules.py
+++ b/nova/api/openstack/backup_schedules.py
@@ -22,6 +22,7 @@ from nova import wsgi
from nova.api.openstack import faults
import nova.image.service
+
class Controller(wsgi.Controller):
def __init__(self):
pass
diff --git a/nova/api/openstack/faults.py b/nova/api/openstack/faults.py
index 32e5c866f..e69e51439 100644
--- a/nova/api/openstack/faults.py
+++ b/nova/api/openstack/faults.py
@@ -55,7 +55,7 @@ class Fault(webob.exc.HTTPException):
if code == 413:
retry = self.wrapped_exc.headers['Retry-After']
fault_data[fault_name]['retryAfter'] = retry
- # 'code' is an attribute on the fault tag itself
+ # 'code' is an attribute on the fault tag itself
metadata = {'application/xml': {'attributes': {fault_name: 'code'}}}
serializer = wsgi.Serializer(req.environ, metadata)
self.wrapped_exc.body = serializer.to_content_type(fault_data)
diff --git a/nova/api/openstack/flavors.py b/nova/api/openstack/flavors.py
index 793984a5d..f23f74fd1 100644
--- a/nova/api/openstack/flavors.py
+++ b/nova/api/openstack/flavors.py
@@ -22,16 +22,14 @@ from nova.compute import instance_types
from nova import wsgi
import nova.api.openstack
+
class Controller(wsgi.Controller):
"""Flavor controller for the OpenStack API."""
_serialization_metadata = {
'application/xml': {
"attributes": {
- "flavor": [ "id", "name", "ram", "disk" ]
- }
- }
- }
+ "flavor": ["id", "name", "ram", "disk"]}}}
def index(self, req):
"""Return all flavors in brief."""
diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py
index 5ccf659f7..5bc915e63 100644
--- a/nova/api/openstack/images.py
+++ b/nova/api/openstack/images.py
@@ -27,16 +27,14 @@ from nova.api.openstack import faults
FLAGS = flags.FLAGS
+
class Controller(wsgi.Controller):
_serialization_metadata = {
'application/xml': {
"attributes": {
- "image": [ "id", "name", "updated", "created", "status",
- "serverId", "progress" ]
- }
- }
- }
+ "image": ["id", "name", "updated", "created", "status",
+ "serverId", "progress"]}}}
def __init__(self):
self._service = utils.import_object(FLAGS.image_service)
@@ -72,6 +70,6 @@ class Controller(wsgi.Controller):
raise faults.Fault(exc.HTTPNotFound())
def update(self, req, id):
- # Users may not modify public images, and that's all that
+ # Users may not modify public images, and that's all that
# we support for now.
raise faults.Fault(exc.HTTPNotFound())
diff --git a/nova/api/openstack/ratelimiting/__init__.py b/nova/api/openstack/ratelimiting/__init__.py
index f843bac0f..918caf055 100644
--- a/nova/api/openstack/ratelimiting/__init__.py
+++ b/nova/api/openstack/ratelimiting/__init__.py
@@ -13,6 +13,7 @@ PER_MINUTE = 60
PER_HOUR = 60 * 60
PER_DAY = 60 * 60 * 24
+
class Limiter(object):
"""Class providing rate limiting of arbitrary actions."""
@@ -67,10 +68,10 @@ class Limiter(object):
self._levels[key] = (now, new_level)
return None
-
# If one instance of this WSGIApps is unable to handle your load, put a
# sharding app in front that shards by username to one of many backends.
+
class WSGIApp(object):
"""Application that tracks rate limits in memory. Send requests to it of
@@ -101,7 +102,8 @@ class WSGIApp(object):
return webob.exc.HTTPForbidden(
headers={'X-Wait-Seconds': "%.2f" % delay})
else:
- return '' # 200 OK
+ # 200 OK
+ return ''
class WSGIAppProxy(object):
@@ -109,7 +111,7 @@ class WSGIAppProxy(object):
"""Limiter lookalike that proxies to a ratelimiting.WSGIApp."""
def __init__(self, service_host):
- """Creates a proxy pointing to a ratelimiting.WSGIApp at the given
+ """Creates a proxy pointing to a ratelimiting.WSGIApp at the given
host."""
self.service_host = service_host
@@ -118,5 +120,6 @@ class WSGIAppProxy(object):
conn.request('POST', '/limiter/%s/%s' % (username, action))
resp = conn.getresponse()
if resp.status == 200:
- return None # no delay
+ # No delay
+ return None
return float(resp.getheader('X-Wait-Seconds'))
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 6ce364eb7..e1a254d4e 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -34,30 +34,32 @@ import nova.image.service
FLAGS = flags.FLAGS
+
def _filter_params(inst_dict):
""" Extracts all updatable parameters for a server update request """
keys = dict(name='name', admin_pass='adminPass')
new_attrs = {}
for k, v in keys.items():
- if inst_dict.has_key(v):
+ if v in inst_dict:
new_attrs[k] = inst_dict[v]
return new_attrs
+
def _entity_list(entities):
""" Coerces a list of servers into proper dictionary format """
return dict(servers=entities)
+
def _entity_detail(inst):
""" Maps everything to Rackspace-like attributes for return"""
power_mapping = {
- power_state.NOSTATE: 'build',
- power_state.RUNNING: 'active',
- power_state.BLOCKED: 'active',
- power_state.PAUSED: 'suspended',
+ power_state.NOSTATE: 'build',
+ power_state.RUNNING: 'active',
+ power_state.BLOCKED: 'active',
+ power_state.PAUSED: 'suspended',
power_state.SHUTDOWN: 'active',
- power_state.SHUTOFF: 'active',
- power_state.CRASHED: 'error'
- }
+ power_state.SHUTOFF: 'active',
+ power_state.CRASHED: 'error'}
inst_dict = {}
mapped_keys = dict(status='state', imageId='image_id',
@@ -73,21 +75,20 @@ def _entity_detail(inst):
return dict(server=inst_dict)
+
def _entity_inst(inst):
""" Filters all model attributes save for id and name """
return dict(server=dict(id=inst['id'], name=inst['server_name']))
+
class Controller(wsgi.Controller):
""" The Server API controller for the OpenStack API """
_serialization_metadata = {
'application/xml': {
"attributes": {
- "server": [ "id", "imageId", "name", "flavorId", "hostId",
- "status", "progress", "progress" ]
- }
- }
- }
+ "server": ["id", "imageId", "name", "flavorId", "hostId",
+ "status", "progress", "progress"]}}}
def __init__(self, db_driver=None):
if not db_driver:
@@ -210,7 +211,7 @@ class Controller(wsgi.Controller):
image = img_service.show(image_id)
if not image:
- raise Exception, "Image not found"
+ raise Exception("Image not found")
inst['server_name'] = env['server']['name']
inst['image_id'] = image_id
diff --git a/nova/api/openstack/sharedipgroups.py b/nova/api/openstack/sharedipgroups.py
index 4d2d0ede1..e805ca9f7 100644
--- a/nova/api/openstack/sharedipgroups.py
+++ b/nova/api/openstack/sharedipgroups.py
@@ -17,4 +17,6 @@
from nova import wsgi
-class Controller(wsgi.Controller): pass
+
+class Controller(wsgi.Controller):
+ pass
diff --git a/nova/auth/dbdriver.py b/nova/auth/dbdriver.py
index 648d6e828..a1584322b 100644
--- a/nova/auth/dbdriver.py
+++ b/nova/auth/dbdriver.py
@@ -47,19 +47,23 @@ class DbDriver(object):
def get_user(self, uid):
"""Retrieve user by id"""
- return self._db_user_to_auth_user(db.user_get(context.get_admin_context(), uid))
+ user = db.user_get(context.get_admin_context(), uid)
+ return self._db_user_to_auth_user(user)
def get_user_from_access_key(self, access):
"""Retrieve user by access key"""
- return self._db_user_to_auth_user(db.user_get_by_access_key(context.get_admin_context(), access))
+ user = db.user_get_by_access_key(context.get_admin_context(), access)
+ return self._db_user_to_auth_user(user)
def get_project(self, pid):
"""Retrieve project by id"""
- return self._db_project_to_auth_projectuser(db.project_get(context.get_admin_context(), pid))
+ project = db.project_get(context.get_admin_context(), pid)
+ return self._db_project_to_auth_projectuser(project)
def get_users(self):
"""Retrieve list of users"""
- return [self._db_user_to_auth_user(user) for user in db.user_get_all(context.get_admin_context())]
+ return [self._db_user_to_auth_user(user)
+ for user in db.user_get_all(context.get_admin_context())]
def get_projects(self, uid=None):
"""Retrieve list of projects"""
@@ -71,11 +75,10 @@ class DbDriver(object):
def create_user(self, name, access_key, secret_key, is_admin):
"""Create a user"""
- values = { 'id' : name,
- 'access_key' : access_key,
- 'secret_key' : secret_key,
- 'is_admin' : is_admin
- }
+ values = {'id': name,
+ 'access_key': access_key,
+ 'secret_key': secret_key,
+ 'is_admin': is_admin}
try:
user_ref = db.user_create(context.get_admin_context(), values)
return self._db_user_to_auth_user(user_ref)
@@ -83,18 +86,19 @@ class DbDriver(object):
raise exception.Duplicate('User %s already exists' % name)
def _db_user_to_auth_user(self, user_ref):
- return { 'id' : user_ref['id'],
- 'name' : user_ref['id'],
- 'access' : user_ref['access_key'],
- 'secret' : user_ref['secret_key'],
- 'admin' : user_ref['is_admin'] }
+ return {'id': user_ref['id'],
+ 'name': user_ref['id'],
+ 'access': user_ref['access_key'],
+ 'secret': user_ref['secret_key'],
+ 'admin': user_ref['is_admin']}
def _db_project_to_auth_projectuser(self, project_ref):
- return { 'id' : project_ref['id'],
- 'name' : project_ref['name'],
- 'project_manager_id' : project_ref['project_manager'],
- 'description' : project_ref['description'],
- 'member_ids' : [member['id'] for member in project_ref['members']] }
+ member_ids = [member['id'] for member in project_ref['members']]
+ return {'id': project_ref['id'],
+ 'name': project_ref['name'],
+ 'project_manager_id': project_ref['project_manager'],
+ 'description': project_ref['description'],
+ 'member_ids': member_ids}
def create_project(self, name, manager_uid,
description=None, member_uids=None):
@@ -121,10 +125,10 @@ class DbDriver(object):
% member_uid)
members.add(member)
- values = { 'id' : name,
- 'name' : name,
- 'project_manager' : manager['id'],
- 'description': description }
+ values = {'id': name,
+ 'name': name,
+ 'project_manager': manager['id'],
+ 'description': description}
try:
project = db.project_create(context.get_admin_context(), values)
@@ -244,4 +248,3 @@ class DbDriver(object):
if not project:
raise exception.NotFound('Project "%s" not found' % project_id)
return user, project
-
diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py
index 3e92c38f6..cf3a84a5d 100644
--- a/nova/auth/fakeldap.py
+++ b/nova/auth/fakeldap.py
@@ -35,6 +35,7 @@ flags.DEFINE_integer('redis_port', 6379,
'Port that redis is running on.')
flags.DEFINE_integer('redis_db', 0, 'Multiple DB keeps tests away')
+
class Redis(object):
def __init__(self):
if hasattr(self.__class__, '_instance'):
@@ -51,19 +52,19 @@ class Redis(object):
SCOPE_BASE = 0
-SCOPE_ONELEVEL = 1 # not implemented
+SCOPE_ONELEVEL = 1 # Not implemented
SCOPE_SUBTREE = 2
MOD_ADD = 0
MOD_DELETE = 1
MOD_REPLACE = 2
-class NO_SUCH_OBJECT(Exception): # pylint: disable-msg=C0103
+class NO_SUCH_OBJECT(Exception): # pylint: disable-msg=C0103
"""Duplicate exception class from real LDAP module."""
pass
-class OBJECT_CLASS_VIOLATION(Exception): # pylint: disable-msg=C0103
+class OBJECT_CLASS_VIOLATION(Exception): # pylint: disable-msg=C0103
"""Duplicate exception class from real LDAP module."""
pass
@@ -251,8 +252,6 @@ class FakeLDAP(object):
return objects
@property
- def __redis_prefix(self): # pylint: disable-msg=R0201
+ def __redis_prefix(self): # pylint: disable-msg=R0201
"""Get the prefix to use for all redis keys."""
return 'ldap:'
-
-
diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py
index 640ea169e..ceade1d65 100644
--- a/nova/auth/ldapdriver.py
+++ b/nova/auth/ldapdriver.py
@@ -294,24 +294,26 @@ class LdapDriver(object):
def __find_dns(self, dn, query=None, scope=None):
"""Find dns by query"""
- if scope is None: # one of the flags is 0!!
+ if scope is None:
+ # One of the flags is 0!
scope = self.ldap.SCOPE_SUBTREE
try:
res = self.conn.search_s(dn, scope, query)
except self.ldap.NO_SUCH_OBJECT:
return []
- # just return the DNs
+ # Just return the DNs
return [dn for dn, _attributes in res]
def __find_objects(self, dn, query=None, scope=None):
"""Find objects by query"""
- if scope is None: # one of the flags is 0!!
+ if scope is None:
+ # One of the flags is 0!
scope = self.ldap.SCOPE_SUBTREE
try:
res = self.conn.search_s(dn, scope, query)
except self.ldap.NO_SUCH_OBJECT:
return []
- # just return the attributes
+ # Just return the attributes
return [attributes for dn, attributes in res]
def __find_role_dns(self, tree):
@@ -480,6 +482,6 @@ class LdapDriver(object):
class FakeLdapDriver(LdapDriver):
"""Fake Ldap Auth driver"""
- def __init__(self): # pylint: disable-msg=W0231
+ def __init__(self): # pylint: disable-msg=W0231
__import__('nova.auth.fakeldap')
self.ldap = sys.modules['nova.auth.fakeldap']
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index bf7ca8a95..001a96875 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -23,7 +23,7 @@ Nova authentication management
import logging
import os
import shutil
-import string # pylint: disable-msg=W0402
+import string # pylint: disable-msg=W0402
import tempfile
import uuid
import zipfile
diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py
index 4fc2c85cb..3472201cd 100644
--- a/nova/cloudpipe/pipelib.py
+++ b/nova/cloudpipe/pipelib.py
@@ -49,7 +49,7 @@ class CloudPipe(object):
self.manager = manager.AuthManager()
def launch_vpn_instance(self, project_id):
- logging.debug( "Launching VPN for %s" % (project_id))
+ logging.debug("Launching VPN for %s" % (project_id))
project = self.manager.get_project(project_id)
# Make a payload.zip
tmpfolder = tempfile.mkdtemp()
@@ -57,16 +57,18 @@ class CloudPipe(object):
zippath = os.path.join(tmpfolder, filename)
z = zipfile.ZipFile(zippath, "w", zipfile.ZIP_DEFLATED)
- z.write(FLAGS.boot_script_template,'autorun.sh')
+ z.write(FLAGS.boot_script_template, 'autorun.sh')
z.close()
key_name = self.setup_key_pair(project.project_manager_id, project_id)
zippy = open(zippath, "r")
- context = context.RequestContext(user=project.project_manager, project=project)
+ context = context.RequestContext(user=project.project_manager,
+ project=project)
reservation = self.controller.run_instances(context,
- # run instances expects encoded userdata, it is decoded in the get_metadata_call
- # autorun.sh also decodes the zip file, hence the double encoding
+ # Run instances expects encoded userdata, it is decoded in the
+ # get_metadata_call. autorun.sh also decodes the zip file, hence
+ # the double encoding.
user_data=zippy.read().encode("base64").encode("base64"),
max_count=1,
min_count=1,
@@ -79,12 +81,14 @@ class CloudPipe(object):
def setup_key_pair(self, user_id, project_id):
key_name = '%s%s' % (project_id, FLAGS.vpn_key_suffix)
try:
- private_key, fingerprint = self.manager.generate_key_pair(user_id, key_name)
+ private_key, fingerprint = self.manager.generate_key_pair(user_id,
+ key_name)
try:
key_dir = os.path.join(FLAGS.keys_path, user_id)
if not os.path.exists(key_dir):
os.makedirs(key_dir)
- with open(os.path.join(key_dir, '%s.pem' % key_name),'w') as f:
+ file_name = os.path.join(key_dir, '%s.pem' % key_name)
+ with open(file_name, 'w') as f:
f.write(private_key)
except:
pass
@@ -95,9 +99,13 @@ class CloudPipe(object):
# def setup_secgroups(self, username):
# conn = self.euca.connection_for(username)
# try:
- # secgroup = conn.create_security_group("vpn-secgroup", "vpn-secgroup")
- # secgroup.authorize(ip_protocol = "udp", from_port = "1194", to_port = "1194", cidr_ip = "0.0.0.0/0")
- # secgroup.authorize(ip_protocol = "tcp", from_port = "80", to_port = "80", cidr_ip = "0.0.0.0/0")
- # secgroup.authorize(ip_protocol = "tcp", from_port = "22", to_port = "22", cidr_ip = "0.0.0.0/0")
+ # secgroup = conn.create_security_group("vpn-secgroup",
+ # "vpn-secgroup")
+ # secgroup.authorize(ip_protocol = "udp", from_port = "1194",
+ # to_port = "1194", cidr_ip = "0.0.0.0/0")
+ # secgroup.authorize(ip_protocol = "tcp", from_port = "80",
+ # to_port = "80", cidr_ip = "0.0.0.0/0")
+ # secgroup.authorize(ip_protocol = "tcp", from_port = "22",
+ # to_port = "22", cidr_ip = "0.0.0.0/0")
# except:
# pass
diff --git a/nova/compute/disk.py b/nova/compute/disk.py
index 447a29004..e362b4507 100644
--- a/nova/compute/disk.py
+++ b/nova/compute/disk.py
@@ -34,6 +34,8 @@ from nova import flags
FLAGS = flags.FLAGS
flags.DEFINE_integer('minimum_root_size', 1024 * 1024 * 1024 * 10,
'minimum size in bytes of root partition')
+flags.DEFINE_integer('block_size', 1024 * 1024 * 256,
+ 'block_size to use for dd')
@defer.inlineCallbacks
@@ -72,33 +74,36 @@ def partition(infile, outfile, local_bytes=0, resize=True,
" by sector size: %d / %d", local_bytes, sector_size)
local_sectors = local_bytes / sector_size
- mbr_last = 62 # a
- primary_first = mbr_last + 1 # b
- primary_last = primary_first + primary_sectors - 1 # c
- local_first = primary_last + 1 # d
- local_last = local_first + local_sectors - 1 # e
- last_sector = local_last # e
+ mbr_last = 62 # a
+ primary_first = mbr_last + 1 # b
+ primary_last = primary_first + primary_sectors - 1 # c
+ local_first = primary_last + 1 # d
+ local_last = local_first + local_sectors - 1 # e
+ last_sector = local_last # e
# create an empty file
yield execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
- % (outfile, last_sector, sector_size))
+ % (outfile, mbr_last, sector_size))
# make mbr partition
yield execute('parted --script %s mklabel msdos' % outfile)
+ # append primary file
+ yield execute('dd if=%s of=%s bs=%s conv=notrunc,fsync oflag=append'
+ % (infile, outfile, FLAGS.block_size))
+
# make primary partition
yield execute('parted --script %s mkpart primary %ds %ds'
% (outfile, primary_first, primary_last))
- # make local partition
if local_bytes > 0:
+ # make the file bigger
+ yield execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
+ % (outfile, last_sector, sector_size))
+ # make and format local partition
yield execute('parted --script %s mkpartfs primary %s %ds %ds'
% (outfile, local_type, local_first, local_last))
- # copy file into partition
- yield execute('dd if=%s of=%s bs=%d seek=%d conv=notrunc,fsync'
- % (infile, outfile, sector_size, primary_first))
-
@defer.inlineCallbacks
def inject_data(image, key=None, net=None, partition=None, execute=None):
@@ -157,7 +162,7 @@ def inject_data(image, key=None, net=None, partition=None, execute=None):
@defer.inlineCallbacks
def _inject_key_into_fs(key, fs, execute=None):
sshdir = os.path.join(os.path.join(fs, 'root'), '.ssh')
- yield execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter
+ yield execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter
yield execute('sudo chown root %s' % sshdir)
yield execute('sudo chmod 700 %s' % sshdir)
keyfile = os.path.join(sshdir, 'authorized_keys')
@@ -169,4 +174,3 @@ def _inject_net_into_fs(net, fs, execute=None):
netfile = os.path.join(os.path.join(os.path.join(
fs, 'etc'), 'network'), 'interfaces')
yield execute('sudo tee %s' % netfile, net)
-
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index c752d954b..d99d938af 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -20,10 +20,8 @@
Handles all code relating to instances (guest vms)
"""
-import base64
import datetime
import logging
-import os
from twisted.internet import defer
@@ -59,7 +57,11 @@ class ComputeManager(manager.Manager):
"""Update the state of an instance from the driver info"""
# FIXME(ja): include other fields from state?
instance_ref = self.db.instance_get(context, instance_id)
- state = self.driver.get_info(instance_ref.name)['state']
+ try:
+ info = self.driver.get_info(instance_ref['name'])
+ state = info['state']
+ except exception.NotFound:
+ state = power_state.NOSTATE
self.db.instance_set_state(context, instance_id, state)
@defer.inlineCallbacks
@@ -161,16 +163,15 @@ class ComputeManager(manager.Manager):
def reboot_instance(self, context, instance_id):
"""Reboot an instance on this server."""
context = context.elevated()
- self._update_state(context, instance_id)
instance_ref = self.db.instance_get(context, instance_id)
+ self._update_state(context, instance_id)
if instance_ref['state'] != power_state.RUNNING:
- raise exception.Error(
- 'trying to reboot a non-running'
- 'instance: %s (state: %s excepted: %s)' %
- (instance_ref['internal_id'],
- instance_ref['state'],
- power_state.RUNNING))
+ logging.warn('trying to reboot a non-running '
+ 'instance: %s (state: %s expected: %s)',
+ instance_ref['internal_id'],
+ instance_ref['state'],
+ power_state.RUNNING)
logging.debug('instance %s: rebooting', instance_ref['name'])
self.db.instance_set_state(context,
@@ -180,6 +181,38 @@ class ComputeManager(manager.Manager):
yield self.driver.reboot(instance_ref)
self._update_state(context, instance_id)
+ @defer.inlineCallbacks
+ @exception.wrap_exception
+ def rescue_instance(self, context, instance_id):
+ """Rescue an instance on this server."""
+ context = context.elevated()
+ instance_ref = self.db.instance_get(context, instance_id)
+
+ logging.debug('instance %s: rescuing',
+ instance_ref['internal_id'])
+ self.db.instance_set_state(context,
+ instance_id,
+ power_state.NOSTATE,
+ 'rescuing')
+ yield self.driver.rescue(instance_ref)
+ self._update_state(context, instance_id)
+
+ @defer.inlineCallbacks
+ @exception.wrap_exception
+ def unrescue_instance(self, context, instance_id):
+ """Unrescue an instance on this server."""
+ context = context.elevated()
+ instance_ref = self.db.instance_get(context, instance_id)
+
+ logging.debug('instance %s: unrescuing',
+ instance_ref['internal_id'])
+ self.db.instance_set_state(context,
+ instance_id,
+ power_state.NOSTATE,
+ 'unrescuing')
+ yield self.driver.unrescue(instance_ref)
+ self._update_state(context, instance_id)
+
@exception.wrap_exception
def get_console_output(self, context, instance_id):
"""Send the console output for an instance."""
diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py
index 268864900..d0154600f 100644
--- a/nova/compute/monitor.py
+++ b/nova/compute/monitor.py
@@ -85,8 +85,7 @@ RRD_VALUES = {
'RRA:MAX:0.5:6:800',
'RRA:MAX:0.5:24:800',
'RRA:MAX:0.5:444:800',
- ]
- }
+ ]}
utcnow = datetime.datetime.utcnow
@@ -97,15 +96,12 @@ def update_rrd(instance, name, data):
Updates the specified RRD file.
"""
filename = os.path.join(instance.get_rrd_path(), '%s.rrd' % name)
-
+
if not os.path.exists(filename):
init_rrd(instance, name)
-
+
timestamp = int(time.mktime(utcnow().timetuple()))
- rrdtool.update (
- filename,
- '%d:%s' % (timestamp, data)
- )
+ rrdtool.update(filename, '%d:%s' % (timestamp, data))
def init_rrd(instance, name):
@@ -113,29 +109,28 @@ def init_rrd(instance, name):
Initializes the specified RRD file.
"""
path = os.path.join(FLAGS.monitoring_rrd_path, instance.instance_id)
-
+
if not os.path.exists(path):
os.makedirs(path)
-
+
filename = os.path.join(path, '%s.rrd' % name)
-
+
if not os.path.exists(filename):
- rrdtool.create (
+ rrdtool.create(
filename,
'--step', '%d' % FLAGS.monitoring_instances_step,
'--start', '0',
- *RRD_VALUES[name]
- )
+ *RRD_VALUES[name])
+
-
def graph_cpu(instance, duration):
"""
Creates a graph of cpu usage for the specified instance and duration.
"""
path = instance.get_rrd_path()
filename = os.path.join(path, 'cpu-%s.png' % duration)
-
- rrdtool.graph (
+
+ rrdtool.graph(
filename,
'--disable-rrdtool-tag',
'--imgformat', 'PNG',
@@ -146,9 +141,8 @@ def graph_cpu(instance, duration):
'-l', '0',
'-u', '100',
'DEF:cpu=%s:cpu:AVERAGE' % os.path.join(path, 'cpu.rrd'),
- 'AREA:cpu#eacc00:% CPU',
- )
-
+ 'AREA:cpu#eacc00:% CPU',)
+
store_graph(instance.instance_id, filename)
@@ -158,8 +152,8 @@ def graph_net(instance, duration):
"""
path = instance.get_rrd_path()
filename = os.path.join(path, 'net-%s.png' % duration)
-
- rrdtool.graph (
+
+ rrdtool.graph(
filename,
'--disable-rrdtool-tag',
'--imgformat', 'PNG',
@@ -174,20 +168,19 @@ def graph_net(instance, duration):
'DEF:rx=%s:rx:AVERAGE' % os.path.join(path, 'net.rrd'),
'DEF:tx=%s:tx:AVERAGE' % os.path.join(path, 'net.rrd'),
'AREA:rx#00FF00:In traffic',
- 'LINE1:tx#0000FF:Out traffic',
- )
-
+ 'LINE1:tx#0000FF:Out traffic',)
+
store_graph(instance.instance_id, filename)
-
+
def graph_disk(instance, duration):
"""
Creates a graph of disk usage for the specified duration.
- """
+ """
path = instance.get_rrd_path()
filename = os.path.join(path, 'disk-%s.png' % duration)
-
- rrdtool.graph (
+
+ rrdtool.graph(
filename,
'--disable-rrdtool-tag',
'--imgformat', 'PNG',
@@ -202,9 +195,8 @@ def graph_disk(instance, duration):
'DEF:rd=%s:rd:AVERAGE' % os.path.join(path, 'disk.rrd'),
'DEF:wr=%s:wr:AVERAGE' % os.path.join(path, 'disk.rrd'),
'AREA:rd#00FF00:Read',
- 'LINE1:wr#0000FF:Write',
- )
-
+ 'LINE1:wr#0000FF:Write',)
+
store_graph(instance.instance_id, filename)
@@ -224,17 +216,16 @@ def store_graph(instance_id, filename):
is_secure=False,
calling_format=boto.s3.connection.OrdinaryCallingFormat(),
port=FLAGS.s3_port,
- host=FLAGS.s3_host
- )
+ host=FLAGS.s3_host)
bucket_name = '_%s.monitor' % instance_id
-
+
# Object store isn't creating the bucket like it should currently
# when it is first requested, so have to catch and create manually.
try:
bucket = s3.get_bucket(bucket_name)
except Exception:
bucket = s3.create_bucket(bucket_name)
-
+
key = boto.s3.Key(bucket)
key.key = os.path.basename(filename)
key.set_contents_from_filename(filename)
@@ -247,18 +238,18 @@ class Instance(object):
self.last_updated = datetime.datetime.min
self.cputime = 0
self.cputime_last_updated = None
-
+
init_rrd(self, 'cpu')
init_rrd(self, 'net')
init_rrd(self, 'disk')
-
+
def needs_update(self):
"""
Indicates whether this instance is due to have its statistics updated.
"""
delta = utcnow() - self.last_updated
return delta.seconds >= FLAGS.monitoring_instances_step
-
+
def update(self):
"""
Updates the instances statistics and stores the resulting graphs
@@ -271,7 +262,7 @@ class Instance(object):
if data != None:
logging.debug('CPU: %s', data)
update_rrd(self, 'cpu', data)
-
+
data = self.fetch_net_stats()
logging.debug('NET: %s', data)
update_rrd(self, 'net', data)
@@ -279,7 +270,7 @@ class Instance(object):
data = self.fetch_disk_stats()
logging.debug('DISK: %s', data)
update_rrd(self, 'disk', data)
-
+
# TODO(devcamcar): Turn these into pool.ProcessPool.execute() calls
# and make the methods @defer.inlineCallbacks.
graph_cpu(self, '1d')
@@ -297,13 +288,13 @@ class Instance(object):
logging.exception('unexpected error during update')
self.last_updated = utcnow()
-
+
def get_rrd_path(self):
"""
Returns the path to where RRD files are stored.
"""
return os.path.join(FLAGS.monitoring_rrd_path, self.instance_id)
-
+
def fetch_cpu_stats(self):
"""
Returns cpu usage statistics for this instance.
@@ -327,17 +318,17 @@ class Instance(object):
# Calculate the number of seconds between samples.
d = self.cputime_last_updated - cputime_last_updated
t = d.days * 86400 + d.seconds
-
+
logging.debug('t = %d', t)
# Calculate change over time in number of nanoseconds of CPU time used.
cputime_delta = self.cputime - cputime_last
-
+
logging.debug('cputime_delta = %s', cputime_delta)
# Get the number of virtual cpus in this domain.
vcpus = int(info['num_cpu'])
-
+
logging.debug('vcpus = %d', vcpus)
# Calculate CPU % used and cap at 100.
@@ -349,9 +340,9 @@ class Instance(object):
"""
rd = 0
wr = 0
-
+
disks = self.conn.get_disks(self.instance_id)
-
+
# Aggregate the read and write totals.
for disk in disks:
try:
@@ -363,7 +354,7 @@ class Instance(object):
logging.error('Cannot get blockstats for "%s" on "%s"',
disk, self.instance_id)
raise
-
+
return '%d:%d' % (rd, wr)
def fetch_net_stats(self):
@@ -372,9 +363,9 @@ class Instance(object):
"""
rx = 0
tx = 0
-
+
interfaces = self.conn.get_interfaces(self.instance_id)
-
+
# Aggregate the in and out totals.
for interface in interfaces:
try:
@@ -385,7 +376,7 @@ class Instance(object):
logging.error('Cannot get ifstats for "%s" on "%s"',
interface, self.instance_id)
raise
-
+
return '%d:%d' % (rx, tx)
@@ -400,16 +391,16 @@ class InstanceMonitor(object, service.Service):
"""
self._instances = {}
self._loop = task.LoopingCall(self.updateInstances)
-
+
def startService(self):
self._instances = {}
self._loop.start(interval=FLAGS.monitoring_instances_delay)
service.Service.startService(self)
-
+
def stopService(self):
self._loop.stop()
service.Service.stopService(self)
-
+
def updateInstances(self):
"""
Update resource usage for all running instances.
@@ -420,20 +411,20 @@ class InstanceMonitor(object, service.Service):
logging.exception('unexpected exception getting connection')
time.sleep(FLAGS.monitoring_instances_delay)
return
-
+
domain_ids = conn.list_instances()
try:
- self.updateInstances_(conn, domain_ids)
+ self.updateInstances_(conn, domain_ids)
except Exception, exn:
- logging.exception('updateInstances_')
+ logging.exception('updateInstances_')
def updateInstances_(self, conn, domain_ids):
for domain_id in domain_ids:
- if not domain_id in self._instances:
+ if not domain_id in self._instances:
instance = Instance(conn, domain_id)
self._instances[domain_id] = instance
logging.debug('Found instance: %s', domain_id)
-
+
for key in self._instances.keys():
instance = self._instances[key]
if instance.needs_update():
diff --git a/nova/compute/power_state.py b/nova/compute/power_state.py
index b27aa4677..cefdf2d9e 100644
--- a/nova/compute/power_state.py
+++ b/nova/compute/power_state.py
@@ -30,12 +30,11 @@ CRASHED = 0x06
def name(code):
d = {
- NOSTATE : 'pending',
- RUNNING : 'running',
- BLOCKED : 'blocked',
- PAUSED : 'paused',
+ NOSTATE: 'pending',
+ RUNNING: 'running',
+ BLOCKED: 'blocked',
+ PAUSED: 'paused',
SHUTDOWN: 'shutdown',
- SHUTOFF : 'shutdown',
- CRASHED : 'crashed',
- }
+ SHUTOFF: 'shutdown',
+ CRASHED: 'crashed'}
return d[code]
diff --git a/nova/context.py b/nova/context.py
index f5d3fed08..f2669c9f1 100644
--- a/nova/context.py
+++ b/nova/context.py
@@ -26,7 +26,9 @@ import random
from nova import exception
from nova import utils
+
class RequestContext(object):
+
def __init__(self, user, project, is_admin=None, read_deleted=False,
remote_address=None, timestamp=None, request_id=None):
if hasattr(user, 'id'):
@@ -56,10 +58,8 @@ class RequestContext(object):
timestamp = utils.parse_isotime(timestamp)
self.timestamp = timestamp
if not request_id:
- request_id = ''.join(
- [random.choice('ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890-')
- for x in xrange(20)]
- )
+ chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890-'
+ request_id = ''.join([random.choice(chars) for x in xrange(20)])
self.request_id = request_id
@property
@@ -81,7 +81,8 @@ class RequestContext(object):
from nova.auth import manager
if not self._project:
try:
- self._project = manager.AuthManager().get_project(self.project_id)
+ auth_manager = manager.AuthManager()
+ self._project = auth_manager.get_project(self.project_id)
except exception.NotFound:
pass
return self._project
diff --git a/nova/crypto.py b/nova/crypto.py
index 1c6fe57ad..16b4f5e1f 100644
--- a/nova/crypto.py
+++ b/nova/crypto.py
@@ -39,9 +39,12 @@ from nova import flags
FLAGS = flags.FLAGS
flags.DEFINE_string('ca_file', 'cacert.pem', 'Filename of root CA')
-flags.DEFINE_string('keys_path', utils.abspath('../keys'), 'Where we keep our keys')
-flags.DEFINE_string('ca_path', utils.abspath('../CA'), 'Where we keep our root CA')
-flags.DEFINE_boolean('use_intermediate_ca', False, 'Should we use intermediate CAs for each project?')
+flags.DEFINE_string('keys_path', utils.abspath('../keys'),
+ 'Where we keep our keys')
+flags.DEFINE_string('ca_path', utils.abspath('../CA'),
+ 'Where we keep our root CA')
+flags.DEFINE_boolean('use_intermediate_ca', False,
+ 'Should we use intermediate CAs for each project?')
def ca_path(project_id):
@@ -55,11 +58,11 @@ def fetch_ca(project_id=None, chain=True):
project_id = None
buffer = ""
if project_id:
- with open(ca_path(project_id),"r") as cafile:
+ with open(ca_path(project_id), "r") as cafile:
buffer += cafile.read()
if not chain:
return buffer
- with open(ca_path(None),"r") as cafile:
+ with open(ca_path(None), "r") as cafile:
buffer += cafile.read()
return buffer
@@ -88,17 +91,18 @@ def generate_key_pair(bits=1024):
def ssl_pub_to_ssh_pub(ssl_public_key, name='root', suffix='nova'):
- rsa_key = M2Crypto.RSA.load_pub_key_bio(M2Crypto.BIO.MemoryBuffer(ssl_public_key))
+ pub_key_buffer = M2Crypto.BIO.MemoryBuffer(ssl_public_key)
+ rsa_key = M2Crypto.RSA.load_pub_key_bio(pub_key_buffer)
e, n = rsa_key.pub()
key_type = 'ssh-rsa'
key_data = struct.pack('>I', len(key_type))
key_data += key_type
- key_data += '%s%s' % (e,n)
+ key_data += '%s%s' % (e, n)
b64_blob = base64.b64encode(key_data)
- return '%s %s %s@%s\n' %(key_type, b64_blob, name, suffix)
+ return '%s %s %s@%s\n' % (key_type, b64_blob, name, suffix)
def generate_x509_cert(subject, bits=1024):
@@ -106,8 +110,11 @@ def generate_x509_cert(subject, bits=1024):
keyfile = os.path.abspath(os.path.join(tmpdir, 'temp.key'))
csrfile = os.path.join(tmpdir, 'temp.csr')
logging.debug("openssl genrsa -out %s %s" % (keyfile, bits))
- utils.runthis("Generating private key: %s", "openssl genrsa -out %s %s" % (keyfile, bits))
- utils.runthis("Generating CSR: %s", "openssl req -new -key %s -out %s -batch -subj %s" % (keyfile, csrfile, subject))
+ utils.runthis("Generating private key: %s",
+ "openssl genrsa -out %s %s" % (keyfile, bits))
+ utils.runthis("Generating CSR: %s",
+ "openssl req -new -key %s -out %s -batch -subj %s" %
+ (keyfile, csrfile, subject))
private_key = open(keyfile).read()
csr = open(csrfile).read()
shutil.rmtree(tmpdir)
@@ -123,7 +130,8 @@ def sign_csr(csr_text, intermediate=None):
if not os.path.exists(user_ca):
start = os.getcwd()
os.chdir(FLAGS.ca_path)
- utils.runthis("Generating intermediate CA: %s", "sh geninter.sh %s" % (intermediate))
+ utils.runthis("Generating intermediate CA: %s",
+ "sh geninter.sh %s" % (intermediate))
os.chdir(start)
return _sign_csr(csr_text, user_ca)
@@ -137,7 +145,10 @@ def _sign_csr(csr_text, ca_folder):
start = os.getcwd()
# Change working dir to CA
os.chdir(ca_folder)
- utils.runthis("Signing cert: %s", "openssl ca -batch -out %s/outbound.crt -config ./openssl.cnf -infiles %s/inbound.csr" % (tmpfolder, tmpfolder))
+ utils.runthis("Signing cert: %s",
+ "openssl ca -batch -out %s/outbound.crt "
+ "-config ./openssl.cnf -infiles %s/inbound.csr" %
+ (tmpfolder, tmpfolder))
os.chdir(start)
with open("%s/outbound.crt" % (tmpfolder), "r") as crtfile:
return crtfile.read()
@@ -148,10 +159,11 @@ def mkreq(bits, subject="foo", ca=0):
req = M2Crypto.X509.Request()
rsa = M2Crypto.RSA.gen_key(bits, 65537, callback=lambda: None)
pk.assign_rsa(rsa)
- rsa = None # should not be freed here
+ # Should not be freed here
+ rsa = None
req.set_pubkey(pk)
req.set_subject(subject)
- req.sign(pk,'sha512')
+ req.sign(pk, 'sha512')
assert req.verify(pk)
pk2 = req.get_pubkey()
assert req.verify(pk2)
@@ -165,7 +177,8 @@ def mkcacert(subject='nova', years=1):
cert = M2Crypto.X509.X509()
cert.set_serial_number(1)
cert.set_version(2)
- cert.set_subject(sub) # FIXME subject is not set in mkreq yet
+ # FIXME subject is not set in mkreq yet
+ cert.set_subject(sub)
t = long(time.time()) + time.timezone
now = M2Crypto.ASN1.ASN1_UTCTIME()
now.set_time(t)
@@ -189,7 +202,6 @@ def mkcacert(subject='nova', years=1):
return cert, pk, pkey
-
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
@@ -212,6 +224,7 @@ def mkcacert(subject='nova', years=1):
# IN THE SOFTWARE.
# http://code.google.com/p/boto
+
def compute_md5(fp):
"""
@type fp: file
diff --git a/nova/db/api.py b/nova/db/api.py
index 6dbf3b809..0731e2e05 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -256,10 +256,12 @@ def instance_get_all(context):
"""Get all instances."""
return IMPL.instance_get_all(context)
+
def instance_get_all_by_user(context, user_id):
"""Get all instances."""
return IMPL.instance_get_all_by_user(context, user_id)
+
def instance_get_all_by_project(context, project_id):
"""Get all instance belonging to a project."""
return IMPL.instance_get_all_by_project(context, project_id)
@@ -306,7 +308,8 @@ def instance_update(context, instance_id, values):
def instance_add_security_group(context, instance_id, security_group_id):
"""Associate the given security group with the given instance"""
- return IMPL.instance_add_security_group(context, instance_id, security_group_id)
+ return IMPL.instance_add_security_group(context, instance_id,
+ security_group_id)
###################
@@ -482,10 +485,12 @@ def auth_destroy_token(context, token):
"""Destroy an auth token"""
return IMPL.auth_destroy_token(context, token)
+
def auth_get_token(context, token_hash):
"""Retrieves a token given the hash representing it"""
return IMPL.auth_get_token(context, token_hash)
+
def auth_create_token(context, token):
"""Creates a new token"""
return IMPL.auth_create_token(context, token)
@@ -644,7 +649,9 @@ def security_group_rule_create(context, values):
def security_group_rule_get_by_security_group(context, security_group_id):
"""Get all rules for a a given security group"""
- return IMPL.security_group_rule_get_by_security_group(context, security_group_id)
+ return IMPL.security_group_rule_get_by_security_group(context,
+ security_group_id)
+
def security_group_rule_destroy(context, security_group_rule_id):
"""Deletes a security group rule"""
@@ -767,4 +774,3 @@ def host_get_networks(context, host):
network host
"""
return IMPL.host_get_networks(context, host)
-
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 74fd0fdc8..b52c0b69a 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -19,6 +19,7 @@
Implementation of SQLAlchemy backend
"""
+import random
import warnings
from nova import db
@@ -124,10 +125,10 @@ def service_get(context, service_id, session=None):
if not session:
session = get_session()
- result = session.query(models.Service
- ).filter_by(id=service_id
- ).filter_by(deleted=can_read_deleted(context)
- ).first()
+ result = session.query(models.Service).\
+ filter_by(id=service_id).\
+ filter_by(deleted=can_read_deleted(context)).\
+ first()
if not result:
raise exception.NotFound('No service for id %s' % service_id)
@@ -138,23 +139,23 @@ def service_get(context, service_id, session=None):
@require_admin_context
def service_get_all_by_topic(context, topic):
session = get_session()
- return session.query(models.Service
- ).filter_by(deleted=False
- ).filter_by(disabled=False
- ).filter_by(topic=topic
- ).all()
+ return session.query(models.Service).\
+ filter_by(deleted=False).\
+ filter_by(disabled=False).\
+ filter_by(topic=topic).\
+ all()
@require_admin_context
def _service_get_all_topic_subquery(context, session, topic, subq, label):
sort_value = getattr(subq.c, label)
- return session.query(models.Service, func.coalesce(sort_value, 0)
- ).filter_by(topic=topic
- ).filter_by(deleted=False
- ).filter_by(disabled=False
- ).outerjoin((subq, models.Service.host == subq.c.host)
- ).order_by(sort_value
- ).all()
+ return session.query(models.Service, func.coalesce(sort_value, 0)).\
+ filter_by(topic=topic).\
+ filter_by(deleted=False).\
+ filter_by(disabled=False).\
+ outerjoin((subq, models.Service.host == subq.c.host)).\
+ order_by(sort_value).\
+ all()
@require_admin_context
@@ -171,10 +172,10 @@ def service_get_all_compute_sorted(context):
topic = 'compute'
label = 'instance_cores'
subq = session.query(models.Instance.host,
- func.sum(models.Instance.vcpus).label(label)
- ).filter_by(deleted=False
- ).group_by(models.Instance.host
- ).subquery()
+ func.sum(models.Instance.vcpus).label(label)).\
+ filter_by(deleted=False).\
+ group_by(models.Instance.host).\
+ subquery()
return _service_get_all_topic_subquery(context,
session,
topic,
@@ -189,10 +190,10 @@ def service_get_all_network_sorted(context):
topic = 'network'
label = 'network_count'
subq = session.query(models.Network.host,
- func.count(models.Network.id).label(label)
- ).filter_by(deleted=False
- ).group_by(models.Network.host
- ).subquery()
+ func.count(models.Network.id).label(label)).\
+ filter_by(deleted=False).\
+ group_by(models.Network.host).\
+ subquery()
return _service_get_all_topic_subquery(context,
session,
topic,
@@ -207,10 +208,10 @@ def service_get_all_volume_sorted(context):
topic = 'volume'
label = 'volume_gigabytes'
subq = session.query(models.Volume.host,
- func.sum(models.Volume.size).label(label)
- ).filter_by(deleted=False
- ).group_by(models.Volume.host
- ).subquery()
+ func.sum(models.Volume.size).label(label)).\
+ filter_by(deleted=False).\
+ group_by(models.Volume.host).\
+ subquery()
return _service_get_all_topic_subquery(context,
session,
topic,
@@ -221,11 +222,11 @@ def service_get_all_volume_sorted(context):
@require_admin_context
def service_get_by_args(context, host, binary):
session = get_session()
- result = session.query(models.Service
- ).filter_by(host=host
- ).filter_by(binary=binary
- ).filter_by(deleted=can_read_deleted(context)
- ).first()
+ result = session.query(models.Service).\
+ filter_by(host=host).\
+ filter_by(binary=binary).\
+ filter_by(deleted=can_read_deleted(context)).\
+ first()
if not result:
raise exception.NotFound('No service for %s, %s' % (host, binary))
@@ -257,13 +258,13 @@ def floating_ip_allocate_address(context, host, project_id):
authorize_project_context(context, project_id)
session = get_session()
with session.begin():
- floating_ip_ref = session.query(models.FloatingIp
- ).filter_by(host=host
- ).filter_by(fixed_ip_id=None
- ).filter_by(project_id=None
- ).filter_by(deleted=False
- ).with_lockmode('update'
- ).first()
+ floating_ip_ref = session.query(models.FloatingIp).\
+ filter_by(host=host).\
+ filter_by(fixed_ip_id=None).\
+ filter_by(project_id=None).\
+ filter_by(deleted=False).\
+ with_lockmode('update').\
+ first()
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not floating_ip_ref:
@@ -285,10 +286,10 @@ def floating_ip_create(context, values):
def floating_ip_count_by_project(context, project_id):
authorize_project_context(context, project_id)
session = get_session()
- return session.query(models.FloatingIp
- ).filter_by(project_id=project_id
- ).filter_by(deleted=False
- ).count()
+ return session.query(models.FloatingIp).\
+ filter_by(project_id=project_id).\
+ filter_by(deleted=False).\
+ count()
@require_context
@@ -351,31 +352,31 @@ def floating_ip_disassociate(context, address):
@require_admin_context
def floating_ip_get_all(context):
session = get_session()
- return session.query(models.FloatingIp
- ).options(joinedload_all('fixed_ip.instance')
- ).filter_by(deleted=False
- ).all()
+ return session.query(models.FloatingIp).\
+ options(joinedload_all('fixed_ip.instance')).\
+ filter_by(deleted=False).\
+ all()
@require_admin_context
def floating_ip_get_all_by_host(context, host):
session = get_session()
- return session.query(models.FloatingIp
- ).options(joinedload_all('fixed_ip.instance')
- ).filter_by(host=host
- ).filter_by(deleted=False
- ).all()
+ return session.query(models.FloatingIp).\
+ options(joinedload_all('fixed_ip.instance')).\
+ filter_by(host=host).\
+ filter_by(deleted=False).\
+ all()
@require_context
def floating_ip_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
session = get_session()
- return session.query(models.FloatingIp
- ).options(joinedload_all('fixed_ip.instance')
- ).filter_by(project_id=project_id
- ).filter_by(deleted=False
- ).all()
+ return session.query(models.FloatingIp).\
+ options(joinedload_all('fixed_ip.instance')).\
+ filter_by(project_id=project_id).\
+ filter_by(deleted=False).\
+ all()
@require_context
@@ -384,10 +385,10 @@ def floating_ip_get_by_address(context, address, session=None):
if not session:
session = get_session()
- result = session.query(models.FloatingIp
- ).filter_by(address=address
- ).filter_by(deleted=can_read_deleted(context)
- ).first()
+ result = session.query(models.FloatingIp).\
+ filter_by(address=address).\
+ filter_by(deleted=can_read_deleted(context)).\
+ first()
if not result:
raise exception.NotFound('No fixed ip for address %s' % address)
@@ -402,12 +403,12 @@ def fixed_ip_associate(context, address, instance_id):
session = get_session()
with session.begin():
instance = instance_get(context, instance_id, session=session)
- fixed_ip_ref = session.query(models.FixedIp
- ).filter_by(address=address
- ).filter_by(deleted=False
- ).filter_by(instance=None
- ).with_lockmode('update'
- ).first()
+ fixed_ip_ref = session.query(models.FixedIp).\
+ filter_by(address=address).\
+ filter_by(deleted=False).\
+ filter_by(instance=None).\
+ with_lockmode('update').\
+ first()
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not fixed_ip_ref:
@@ -422,13 +423,13 @@ def fixed_ip_associate_pool(context, network_id, instance_id):
with session.begin():
network_or_none = or_(models.FixedIp.network_id == network_id,
models.FixedIp.network_id == None)
- fixed_ip_ref = session.query(models.FixedIp
- ).filter(network_or_none
- ).filter_by(reserved=False
- ).filter_by(deleted=False
- ).filter_by(instance=None
- ).with_lockmode('update'
- ).first()
+ fixed_ip_ref = session.query(models.FixedIp).\
+ filter(network_or_none).\
+ filter_by(reserved=False).\
+ filter_by(deleted=False).\
+ filter_by(instance=None).\
+ with_lockmode('update').\
+ first()
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not fixed_ip_ref:
@@ -451,6 +452,7 @@ def fixed_ip_create(_context, values):
fixed_ip_ref.save()
return fixed_ip_ref['address']
+
@require_context
def fixed_ip_disassociate(context, address):
session = get_session()
@@ -461,6 +463,7 @@ def fixed_ip_disassociate(context, address):
fixed_ip_ref.instance = None
fixed_ip_ref.save(session=session)
+
@require_admin_context
def fixed_ip_disassociate_all_by_timeout(_context, host, time):
session = get_session()
@@ -482,12 +485,12 @@ def fixed_ip_disassociate_all_by_timeout(_context, host, time):
def fixed_ip_get_by_address(context, address, session=None):
if not session:
session = get_session()
- result = session.query(models.FixedIp
- ).filter_by(address=address
- ).filter_by(deleted=can_read_deleted(context)
- ).options(joinedload('network')
- ).options(joinedload('instance')
- ).first()
+ result = session.query(models.FixedIp).\
+ filter_by(address=address).\
+ filter_by(deleted=can_read_deleted(context)).\
+ options(joinedload('network')).\
+ options(joinedload('instance')).\
+ first()
if not result:
raise exception.NotFound('No floating ip for address %s' % address)
@@ -534,7 +537,8 @@ def instance_create(context, values):
session = get_session()
with session.begin():
while instance_ref.internal_id == None:
- internal_id = utils.generate_uid(instance_ref.__prefix__)
+ # Instances have integer internal ids.
+ internal_id = random.randint(0, 2 ** 32 - 1)
if not instance_internal_id_exists(context, internal_id,
session=session):
instance_ref.internal_id = internal_id
@@ -546,10 +550,10 @@ def instance_create(context, values):
def instance_data_get_for_project(context, project_id):
session = get_session()
result = session.query(func.count(models.Instance.id),
- func.sum(models.Instance.vcpus)
- ).filter_by(project_id=project_id
- ).filter_by(deleted=False
- ).first()
+ func.sum(models.Instance.vcpus)).\
+ filter_by(project_id=project_id).\
+ filter_by(deleted=False).\
+ first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0)
@@ -569,18 +573,18 @@ def instance_get(context, instance_id, session=None):
result = None
if is_admin_context(context):
- result = session.query(models.Instance
- ).options(joinedload('security_groups')
- ).filter_by(id=instance_id
- ).filter_by(deleted=can_read_deleted(context)
- ).first()
+ result = session.query(models.Instance).\
+ options(joinedload('security_groups')).\
+ filter_by(id=instance_id).\
+ filter_by(deleted=can_read_deleted(context)).\
+ first()
elif is_user_context(context):
- result = session.query(models.Instance
- ).options(joinedload('security_groups')
- ).filter_by(project_id=context.project_id
- ).filter_by(id=instance_id
- ).filter_by(deleted=False
- ).first()
+ result = session.query(models.Instance).\
+ options(joinedload('security_groups')).\
+ filter_by(project_id=context.project_id).\
+ filter_by(id=instance_id).\
+ filter_by(deleted=False).\
+ first()
if not result:
raise exception.NotFound('No instance for id %s' % instance_id)
@@ -590,22 +594,22 @@ def instance_get(context, instance_id, session=None):
@require_admin_context
def instance_get_all(context):
session = get_session()
- return session.query(models.Instance
- ).options(joinedload_all('fixed_ip.floating_ips')
- ).options(joinedload('security_groups')
- ).filter_by(deleted=can_read_deleted(context)
- ).all()
+ return session.query(models.Instance).\
+ options(joinedload_all('fixed_ip.floating_ips')).\
+ options(joinedload('security_groups')).\
+ filter_by(deleted=can_read_deleted(context)).\
+ all()
@require_admin_context
def instance_get_all_by_user(context, user_id):
session = get_session()
- return session.query(models.Instance
- ).options(joinedload_all('fixed_ip.floating_ips')
- ).options(joinedload('security_groups')
- ).filter_by(deleted=can_read_deleted(context)
- ).filter_by(user_id=user_id
- ).all()
+ return session.query(models.Instance).\
+ options(joinedload_all('fixed_ip.floating_ips')).\
+ options(joinedload('security_groups')).\
+ filter_by(deleted=can_read_deleted(context)).\
+ filter_by(user_id=user_id).\
+ all()
@require_context
@@ -613,12 +617,12 @@ def instance_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
session = get_session()
- return session.query(models.Instance
- ).options(joinedload_all('fixed_ip.floating_ips')
- ).options(joinedload('security_groups')
- ).filter_by(project_id=project_id
- ).filter_by(deleted=can_read_deleted(context)
- ).all()
+ return session.query(models.Instance).\
+ options(joinedload_all('fixed_ip.floating_ips')).\
+ options(joinedload('security_groups')).\
+ filter_by(project_id=project_id).\
+ filter_by(deleted=can_read_deleted(context)).\
+ all()
@require_context
@@ -626,20 +630,20 @@ def instance_get_all_by_reservation(context, reservation_id):
session = get_session()
if is_admin_context(context):
- return session.query(models.Instance
- ).options(joinedload_all('fixed_ip.floating_ips')
- ).options(joinedload('security_groups')
- ).filter_by(reservation_id=reservation_id
- ).filter_by(deleted=can_read_deleted(context)
- ).all()
+ return session.query(models.Instance).\
+ options(joinedload_all('fixed_ip.floating_ips')).\
+ options(joinedload('security_groups')).\
+ filter_by(reservation_id=reservation_id).\
+ filter_by(deleted=can_read_deleted(context)).\
+ all()
elif is_user_context(context):
- return session.query(models.Instance
- ).options(joinedload_all('fixed_ip.floating_ips')
- ).options(joinedload('security_groups')
- ).filter_by(project_id=context.project_id
- ).filter_by(reservation_id=reservation_id
- ).filter_by(deleted=False
- ).all()
+ return session.query(models.Instance).\
+ options(joinedload_all('fixed_ip.floating_ips')).\
+ options(joinedload('security_groups')).\
+ filter_by(project_id=context.project_id).\
+ filter_by(reservation_id=reservation_id).\
+ filter_by(deleted=False).\
+ all()
@require_context
@@ -647,18 +651,18 @@ def instance_get_by_internal_id(context, internal_id):
session = get_session()
if is_admin_context(context):
- result = session.query(models.Instance
- ).options(joinedload('security_groups')
- ).filter_by(internal_id=internal_id
- ).filter_by(deleted=can_read_deleted(context)
- ).first()
+ result = session.query(models.Instance).\
+ options(joinedload('security_groups')).\
+ filter_by(internal_id=internal_id).\
+ filter_by(deleted=can_read_deleted(context)).\
+ first()
elif is_user_context(context):
- result = session.query(models.Instance
- ).options(joinedload('security_groups')
- ).filter_by(project_id=context.project_id
- ).filter_by(internal_id=internal_id
- ).filter_by(deleted=False
- ).first()
+ result = session.query(models.Instance).\
+ options(joinedload('security_groups')).\
+ filter_by(project_id=context.project_id).\
+ filter_by(internal_id=internal_id).\
+ filter_by(deleted=False).\
+ first()
if not result:
raise exception.NotFound('Instance %s not found' % (internal_id))
@@ -669,9 +673,9 @@ def instance_get_by_internal_id(context, internal_id):
def instance_internal_id_exists(context, internal_id, session=None):
if not session:
session = get_session()
- return session.query(
- exists().where(models.Instance.internal_id==internal_id)
- ).one()[0]
+ return session.query(exists().\
+ where(models.Instance.internal_id == internal_id)).\
+ one()[0]
@require_context
@@ -774,11 +778,11 @@ def key_pair_get(context, user_id, name, session=None):
if not session:
session = get_session()
- result = session.query(models.KeyPair
- ).filter_by(user_id=user_id
- ).filter_by(name=name
- ).filter_by(deleted=can_read_deleted(context)
- ).first()
+ result = session.query(models.KeyPair).\
+ filter_by(user_id=user_id).\
+ filter_by(name=name).\
+ filter_by(deleted=can_read_deleted(context)).\
+ first()
if not result:
raise exception.NotFound('no keypair for user %s, name %s' %
(user_id, name))
@@ -789,10 +793,10 @@ def key_pair_get(context, user_id, name, session=None):
def key_pair_get_all_by_user(context, user_id):
authorize_user_context(context, user_id)
session = get_session()
- return session.query(models.KeyPair
- ).filter_by(user_id=user_id
- ).filter_by(deleted=False
- ).all()
+ return session.query(models.KeyPair).\
+ filter_by(user_id=user_id).\
+ filter_by(deleted=False).\
+ all()
###################
@@ -802,11 +806,11 @@ def key_pair_get_all_by_user(context, user_id):
def network_associate(context, project_id):
session = get_session()
with session.begin():
- network_ref = session.query(models.Network
- ).filter_by(deleted=False
- ).filter_by(project_id=None
- ).with_lockmode('update'
- ).first()
+ network_ref = session.query(models.Network).\
+ filter_by(deleted=False).\
+ filter_by(project_id=None).\
+ with_lockmode('update').\
+ first()
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not network_ref:
@@ -819,40 +823,40 @@ def network_associate(context, project_id):
@require_admin_context
def network_count(context):
session = get_session()
- return session.query(models.Network
- ).filter_by(deleted=can_read_deleted(context)
- ).count()
+ return session.query(models.Network).\
+ filter_by(deleted=can_read_deleted(context)).\
+ count()
@require_admin_context
def network_count_allocated_ips(context, network_id):
session = get_session()
- return session.query(models.FixedIp
- ).filter_by(network_id=network_id
- ).filter_by(allocated=True
- ).filter_by(deleted=False
- ).count()
+ return session.query(models.FixedIp).\
+ filter_by(network_id=network_id).\
+ filter_by(allocated=True).\
+ filter_by(deleted=False).\
+ count()
@require_admin_context
def network_count_available_ips(context, network_id):
session = get_session()
- return session.query(models.FixedIp
- ).filter_by(network_id=network_id
- ).filter_by(allocated=False
- ).filter_by(reserved=False
- ).filter_by(deleted=False
- ).count()
+ return session.query(models.FixedIp).\
+ filter_by(network_id=network_id).\
+ filter_by(allocated=False).\
+ filter_by(reserved=False).\
+ filter_by(deleted=False).\
+ count()
@require_admin_context
def network_count_reserved_ips(context, network_id):
session = get_session()
- return session.query(models.FixedIp
- ).filter_by(network_id=network_id
- ).filter_by(reserved=True
- ).filter_by(deleted=False
- ).count()
+ return session.query(models.FixedIp).\
+ filter_by(network_id=network_id).\
+ filter_by(reserved=True).\
+ filter_by(deleted=False).\
+ count()
@require_admin_context
@@ -884,16 +888,16 @@ def network_get(context, network_id, session=None):
result = None
if is_admin_context(context):
- result = session.query(models.Network
- ).filter_by(id=network_id
- ).filter_by(deleted=can_read_deleted(context)
- ).first()
+ result = session.query(models.Network).\
+ filter_by(id=network_id).\
+ filter_by(deleted=can_read_deleted(context)).\
+ first()
elif is_user_context(context):
- result = session.query(models.Network
- ).filter_by(project_id=context.project_id
- ).filter_by(id=network_id
- ).filter_by(deleted=False
- ).first()
+ result = session.query(models.Network).\
+ filter_by(project_id=context.project_id).\
+ filter_by(id=network_id).\
+ filter_by(deleted=False).\
+ first()
if not result:
raise exception.NotFound('No network for id %s' % network_id)
@@ -906,21 +910,21 @@ def network_get(context, network_id, session=None):
@require_admin_context
def network_get_associated_fixed_ips(context, network_id):
session = get_session()
- return session.query(models.FixedIp
- ).options(joinedload_all('instance')
- ).filter_by(network_id=network_id
- ).filter(models.FixedIp.instance_id != None
- ).filter_by(deleted=False
- ).all()
+ return session.query(models.FixedIp).\
+ options(joinedload_all('instance')).\
+ filter_by(network_id=network_id).\
+ filter(models.FixedIp.instance_id != None).\
+ filter_by(deleted=False).\
+ all()
@require_admin_context
def network_get_by_bridge(context, bridge):
session = get_session()
- result = session.query(models.Network
- ).filter_by(bridge=bridge
- ).filter_by(deleted=False
- ).first()
+ result = session.query(models.Network).\
+ filter_by(bridge=bridge).\
+ filter_by(deleted=False).\
+ first()
if not result:
raise exception.NotFound('No network for bridge %s' % bridge)
@@ -930,12 +934,12 @@ def network_get_by_bridge(context, bridge):
@require_admin_context
def network_get_by_instance(_context, instance_id):
session = get_session()
- rv = session.query(models.Network
- ).filter_by(deleted=False
- ).join(models.Network.fixed_ips
- ).filter_by(instance_id=instance_id
- ).filter_by(deleted=False
- ).first()
+ rv = session.query(models.Network).\
+ filter_by(deleted=False).\
+ join(models.Network.fixed_ips).\
+ filter_by(instance_id=instance_id).\
+ filter_by(deleted=False).\
+ first()
if not rv:
raise exception.NotFound('No network for instance %s' % instance_id)
return rv
@@ -945,11 +949,11 @@ def network_get_by_instance(_context, instance_id):
def network_set_host(context, network_id, host_id):
session = get_session()
with session.begin():
- network_ref = session.query(models.Network
- ).filter_by(id=network_id
- ).filter_by(deleted=False
- ).with_lockmode('update'
- ).first()
+ network_ref = session.query(models.Network).\
+ filter_by(id=network_id).\
+ filter_by(deleted=False).\
+ with_lockmode('update').\
+ first()
if not network_ref:
raise exception.NotFound('No network for id %s' % network_id)
@@ -977,10 +981,10 @@ def network_update(context, network_id, values):
@require_context
def project_get_network(context, project_id):
session = get_session()
- rv = session.query(models.Network
- ).filter_by(project_id=project_id
- ).filter_by(deleted=False
- ).first()
+ rv = session.query(models.Network).\
+ filter_by(project_id=project_id).\
+ filter_by(deleted=False).\
+ first()
if not rv:
try:
return network_associate(context, project_id)
@@ -988,10 +992,10 @@ def project_get_network(context, project_id):
# NOTE(vish): We hit this if there is a race and two
# processes are attempting to allocate the
# network at the same time
- rv = session.query(models.Network
- ).filter_by(project_id=project_id
- ).filter_by(deleted=False
- ).first()
+ rv = session.query(models.Network).\
+ filter_by(project_id=project_id).\
+ filter_by(deleted=False).\
+ first()
return rv
@@ -1009,9 +1013,9 @@ def queue_get_for(_context, topic, physical_node_id):
@require_admin_context
def export_device_count(context):
session = get_session()
- return session.query(models.ExportDevice
- ).filter_by(deleted=can_read_deleted(context)
- ).count()
+ return session.query(models.ExportDevice).\
+ filter_by(deleted=can_read_deleted(context)).\
+ count()
@require_admin_context
@@ -1032,15 +1036,17 @@ def auth_destroy_token(_context, token):
session = get_session()
session.delete(token)
+
def auth_get_token(_context, token_hash):
session = get_session()
- tk = session.query(models.AuthToken
- ).filter_by(token_hash=token_hash
- ).first()
+ tk = session.query(models.AuthToken).\
+ filter_by(token_hash=token_hash).\
+ first()
if not tk:
raise exception.NotFound('Token %s does not exist' % token_hash)
return tk
+
def auth_create_token(_context, token):
tk = models.AuthToken()
tk.update(token)
@@ -1056,10 +1062,10 @@ def quota_get(context, project_id, session=None):
if not session:
session = get_session()
- result = session.query(models.Quota
- ).filter_by(project_id=project_id
- ).filter_by(deleted=can_read_deleted(context)
- ).first()
+ result = session.query(models.Quota).\
+ filter_by(project_id=project_id).\
+ filter_by(deleted=can_read_deleted(context)).\
+ first()
if not result:
raise exception.NotFound('No quota for project_id %s' % project_id)
@@ -1098,11 +1104,11 @@ def quota_destroy(context, project_id):
def volume_allocate_shelf_and_blade(context, volume_id):
session = get_session()
with session.begin():
- export_device = session.query(models.ExportDevice
- ).filter_by(volume=None
- ).filter_by(deleted=False
- ).with_lockmode('update'
- ).first()
+ export_device = session.query(models.ExportDevice).\
+ filter_by(volume=None).\
+ filter_by(deleted=False).\
+ with_lockmode('update').\
+ first()
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not export_device:
@@ -1120,7 +1126,8 @@ def volume_attached(context, volume_id, instance_id, mountpoint):
volume_ref['status'] = 'in-use'
volume_ref['mountpoint'] = mountpoint
volume_ref['attach_status'] = 'attached'
- volume_ref.instance = instance_get(context, instance_id, session=session)
+ volume_ref.instance = instance_get(context, instance_id,
+ session=session)
volume_ref.save(session=session)
@@ -1132,7 +1139,7 @@ def volume_create(context, values):
session = get_session()
with session.begin():
while volume_ref.ec2_id == None:
- ec2_id = utils.generate_uid(volume_ref.__prefix__)
+ ec2_id = utils.generate_uid('vol')
if not volume_ec2_id_exists(context, ec2_id, session=session):
volume_ref.ec2_id = ec2_id
volume_ref.save(session=session)
@@ -1143,10 +1150,10 @@ def volume_create(context, values):
def volume_data_get_for_project(context, project_id):
session = get_session()
result = session.query(func.count(models.Volume.id),
- func.sum(models.Volume.size)
- ).filter_by(project_id=project_id
- ).filter_by(deleted=False
- ).first()
+ func.sum(models.Volume.size)).\
+ filter_by(project_id=project_id).\
+ filter_by(deleted=False).\
+ first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0)
@@ -1182,16 +1189,16 @@ def volume_get(context, volume_id, session=None):
result = None
if is_admin_context(context):
- result = session.query(models.Volume
- ).filter_by(id=volume_id
- ).filter_by(deleted=can_read_deleted(context)
- ).first()
+ result = session.query(models.Volume).\
+ filter_by(id=volume_id).\
+ filter_by(deleted=can_read_deleted(context)).\
+ first()
elif is_user_context(context):
- result = session.query(models.Volume
- ).filter_by(project_id=context.project_id
- ).filter_by(id=volume_id
- ).filter_by(deleted=False
- ).first()
+ result = session.query(models.Volume).\
+ filter_by(project_id=context.project_id).\
+ filter_by(id=volume_id).\
+ filter_by(deleted=False).\
+ first()
if not result:
raise exception.NotFound('No volume for id %s' % volume_id)
@@ -1201,19 +1208,20 @@ def volume_get(context, volume_id, session=None):
@require_admin_context
def volume_get_all(context):
session = get_session()
- return session.query(models.Volume
- ).filter_by(deleted=can_read_deleted(context)
- ).all()
+ return session.query(models.Volume).\
+ filter_by(deleted=can_read_deleted(context)).\
+ all()
+
@require_context
def volume_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
session = get_session()
- return session.query(models.Volume
- ).filter_by(project_id=project_id
- ).filter_by(deleted=can_read_deleted(context)
- ).all()
+ return session.query(models.Volume).\
+ filter_by(project_id=project_id).\
+ filter_by(deleted=can_read_deleted(context)).\
+ all()
@require_context
@@ -1222,16 +1230,16 @@ def volume_get_by_ec2_id(context, ec2_id):
result = None
if is_admin_context(context):
- result = session.query(models.Volume
- ).filter_by(ec2_id=ec2_id
- ).filter_by(deleted=can_read_deleted(context)
- ).first()
+ result = session.query(models.Volume).\
+ filter_by(ec2_id=ec2_id).\
+ filter_by(deleted=can_read_deleted(context)).\
+ first()
elif is_user_context(context):
- result = session.query(models.Volume
- ).filter_by(project_id=context.project_id
- ).filter_by(ec2_id=ec2_id
- ).filter_by(deleted=False
- ).first()
+ result = session.query(models.Volume).\
+ filter_by(project_id=context.project_id).\
+ filter_by(ec2_id=ec2_id).\
+ filter_by(deleted=False).\
+ first()
else:
raise exception.NotAuthorized()
@@ -1246,19 +1254,19 @@ def volume_ec2_id_exists(context, ec2_id, session=None):
if not session:
session = get_session()
- return session.query(exists(
- ).where(models.Volume.id==ec2_id)
- ).one()[0]
+ return session.query(exists().\
+ where(models.Volume.id == ec2_id)).\
+ one()[0]
@require_admin_context
def volume_get_instance(context, volume_id):
session = get_session()
- result = session.query(models.Volume
- ).filter_by(id=volume_id
- ).filter_by(deleted=can_read_deleted(context)
- ).options(joinedload('instance')
- ).first()
+ result = session.query(models.Volume).\
+ filter_by(id=volume_id).\
+ filter_by(deleted=can_read_deleted(context)).\
+ options(joinedload('instance')).\
+ first()
if not result:
raise exception.NotFound('Volume %s not found' % ec2_id)
@@ -1268,9 +1276,9 @@ def volume_get_instance(context, volume_id):
@require_admin_context
def volume_get_shelf_and_blade(context, volume_id):
session = get_session()
- result = session.query(models.ExportDevice
- ).filter_by(volume_id=volume_id
- ).first()
+ result = session.query(models.ExportDevice).\
+ filter_by(volume_id=volume_id).\
+ first()
if not result:
raise exception.NotFound('No export device found for volume %s' %
volume_id)
@@ -1293,10 +1301,10 @@ def volume_update(context, volume_id, values):
@require_context
def security_group_get_all(context):
session = get_session()
- return session.query(models.SecurityGroup
- ).filter_by(deleted=can_read_deleted(context)
- ).options(joinedload_all('rules')
- ).all()
+ return session.query(models.SecurityGroup).\
+ filter_by(deleted=can_read_deleted(context)).\
+ options(joinedload_all('rules')).\
+ all()
@require_context
@@ -1304,18 +1312,18 @@ def security_group_get(context, security_group_id, session=None):
if not session:
session = get_session()
if is_admin_context(context):
- result = session.query(models.SecurityGroup
- ).filter_by(deleted=can_read_deleted(context),
- ).filter_by(id=security_group_id
- ).options(joinedload_all('rules')
- ).first()
+ result = session.query(models.SecurityGroup).\
+ filter_by(deleted=can_read_deleted(context),).\
+ filter_by(id=security_group_id).\
+ options(joinedload_all('rules')).\
+ first()
else:
- result = session.query(models.SecurityGroup
- ).filter_by(deleted=False
- ).filter_by(id=security_group_id
- ).filter_by(project_id=context.project_id
- ).options(joinedload_all('rules')
- ).first()
+ result = session.query(models.SecurityGroup).\
+ filter_by(deleted=False).\
+ filter_by(id=security_group_id).\
+ filter_by(project_id=context.project_id).\
+ options(joinedload_all('rules')).\
+ first()
if not result:
raise exception.NotFound("No secuity group with id %s" %
security_group_id)
@@ -1325,13 +1333,13 @@ def security_group_get(context, security_group_id, session=None):
@require_context
def security_group_get_by_name(context, project_id, group_name):
session = get_session()
- result = session.query(models.SecurityGroup
- ).filter_by(project_id=project_id
- ).filter_by(name=group_name
- ).filter_by(deleted=False
- ).options(joinedload_all('rules')
- ).options(joinedload_all('instances')
- ).first()
+ result = session.query(models.SecurityGroup).\
+ filter_by(project_id=project_id).\
+ filter_by(name=group_name).\
+ filter_by(deleted=False).\
+ options(joinedload_all('rules')).\
+ options(joinedload_all('instances')).\
+ first()
if not result:
raise exception.NotFound(
'No security group named %s for project: %s' \
@@ -1342,23 +1350,23 @@ def security_group_get_by_name(context, project_id, group_name):
@require_context
def security_group_get_by_project(context, project_id):
session = get_session()
- return session.query(models.SecurityGroup
- ).filter_by(project_id=project_id
- ).filter_by(deleted=False
- ).options(joinedload_all('rules')
- ).all()
+ return session.query(models.SecurityGroup).\
+ filter_by(project_id=project_id).\
+ filter_by(deleted=False).\
+ options(joinedload_all('rules')).\
+ all()
@require_context
def security_group_get_by_instance(context, instance_id):
session = get_session()
- return session.query(models.SecurityGroup
- ).filter_by(deleted=False
- ).options(joinedload_all('rules')
- ).join(models.SecurityGroup.instances
- ).filter_by(id=instance_id
- ).filter_by(deleted=False
- ).all()
+ return session.query(models.SecurityGroup).\
+ filter_by(deleted=False).\
+ options(joinedload_all('rules')).\
+ join(models.SecurityGroup.instances).\
+ filter_by(id=instance_id).\
+ filter_by(deleted=False).\
+ all()
@require_context
@@ -1392,6 +1400,7 @@ def security_group_destroy(context, security_group_id):
'where group_id=:id',
{'id': security_group_id})
+
@require_context
def security_group_destroy_all(context, session=None):
if not session:
@@ -1410,16 +1419,16 @@ def security_group_rule_get(context, security_group_rule_id, session=None):
if not session:
session = get_session()
if is_admin_context(context):
- result = session.query(models.SecurityGroupIngressRule
- ).filter_by(deleted=can_read_deleted(context)
- ).filter_by(id=security_group_rule_id
- ).first()
+ result = session.query(models.SecurityGroupIngressRule).\
+ filter_by(deleted=can_read_deleted(context)).\
+ filter_by(id=security_group_rule_id).\
+ first()
else:
# TODO(vish): Join to group and check for project_id
- result = session.query(models.SecurityGroupIngressRule
- ).filter_by(deleted=False
- ).filter_by(id=security_group_rule_id
- ).first()
+ result = session.query(models.SecurityGroupIngressRule).\
+ filter_by(deleted=False).\
+ filter_by(id=security_group_rule_id).\
+ first()
if not result:
raise exception.NotFound("No secuity group rule with id %s" %
security_group_rule_id)
@@ -1433,6 +1442,7 @@ def security_group_rule_create(context, values):
security_group_rule_ref.save()
return security_group_rule_ref
+
@require_context
def security_group_rule_destroy(context, security_group_rule_id):
session = get_session()
@@ -1450,10 +1460,10 @@ def user_get(context, id, session=None):
if not session:
session = get_session()
- result = session.query(models.User
- ).filter_by(id=id
- ).filter_by(deleted=can_read_deleted(context)
- ).first()
+ result = session.query(models.User).\
+ filter_by(id=id).\
+ filter_by(deleted=can_read_deleted(context)).\
+ first()
if not result:
raise exception.NotFound('No user for id %s' % id)
@@ -1466,10 +1476,10 @@ def user_get_by_access_key(context, access_key, session=None):
if not session:
session = get_session()
- result = session.query(models.User
- ).filter_by(access_key=access_key
- ).filter_by(deleted=can_read_deleted(context)
- ).first()
+ result = session.query(models.User).\
+ filter_by(access_key=access_key).\
+ filter_by(deleted=can_read_deleted(context)).\
+ first()
if not result:
raise exception.NotFound('No user for access key %s' % access_key)
@@ -1489,21 +1499,21 @@ def user_create(_context, values):
def user_delete(context, id):
session = get_session()
with session.begin():
- session.execute('delete from user_project_association where user_id=:id',
- {'id': id})
- session.execute('delete from user_role_association where user_id=:id',
- {'id': id})
- session.execute('delete from user_project_role_association where user_id=:id',
- {'id': id})
+ session.execute('delete from user_project_association '
+ 'where user_id=:id', {'id': id})
+ session.execute('delete from user_role_association '
+ 'where user_id=:id', {'id': id})
+ session.execute('delete from user_project_role_association '
+ 'where user_id=:id', {'id': id})
user_ref = user_get(context, id, session=session)
session.delete(user_ref)
def user_get_all(context):
session = get_session()
- return session.query(models.User
- ).filter_by(deleted=can_read_deleted(context)
- ).all()
+ return session.query(models.User).\
+ filter_by(deleted=can_read_deleted(context)).\
+ all()
def project_create(_context, values):
@@ -1527,11 +1537,11 @@ def project_get(context, id, session=None):
if not session:
session = get_session()
- result = session.query(models.Project
- ).filter_by(deleted=False
- ).filter_by(id=id
- ).options(joinedload_all('members')
- ).first()
+ result = session.query(models.Project).\
+ filter_by(deleted=False).\
+ filter_by(id=id).\
+ options(joinedload_all('members')).\
+ first()
if not result:
raise exception.NotFound("No project with id %s" % id)
@@ -1541,18 +1551,18 @@ def project_get(context, id, session=None):
def project_get_all(context):
session = get_session()
- return session.query(models.Project
- ).filter_by(deleted=can_read_deleted(context)
- ).options(joinedload_all('members')
- ).all()
+ return session.query(models.Project).\
+ filter_by(deleted=can_read_deleted(context)).\
+ options(joinedload_all('members')).\
+ all()
def project_get_by_user(context, user_id):
session = get_session()
- user = session.query(models.User
- ).filter_by(deleted=can_read_deleted(context)
- ).options(joinedload_all('projects')
- ).first()
+ user = session.query(models.User).\
+ filter_by(deleted=can_read_deleted(context)).\
+ options(joinedload_all('projects')).\
+ first()
return user.projects
@@ -1585,10 +1595,10 @@ def project_update(context, project_id, values):
def project_delete(context, id):
session = get_session()
with session.begin():
- session.execute('delete from user_project_association where project_id=:id',
- {'id': id})
- session.execute('delete from user_project_role_association where project_id=:id',
- {'id': id})
+ session.execute('delete from user_project_association '
+ 'where project_id=:id', {'id': id})
+ session.execute('delete from user_project_role_association '
+ 'where project_id=:id', {'id': id})
project_ref = project_get(context, id, session=session)
session.delete(project_ref)
@@ -1603,29 +1613,30 @@ def user_get_roles(context, user_id):
def user_get_roles_for_project(context, user_id, project_id):
session = get_session()
with session.begin():
- res = session.query(models.UserProjectRoleAssociation
- ).filter_by(user_id=user_id
- ).filter_by(project_id=project_id
- ).all()
+ res = session.query(models.UserProjectRoleAssociation).\
+ filter_by(user_id=user_id).\
+ filter_by(project_id=project_id).\
+ all()
return [association.role for association in res]
+
def user_remove_project_role(context, user_id, project_id, role):
session = get_session()
with session.begin():
- session.execute('delete from user_project_role_association where ' + \
- 'user_id=:user_id and project_id=:project_id and ' + \
- 'role=:role', { 'user_id' : user_id,
- 'project_id' : project_id,
- 'role' : role })
+ session.execute('delete from user_project_role_association where '
+ 'user_id=:user_id and project_id=:project_id and '
+ 'role=:role', {'user_id': user_id,
+ 'project_id': project_id,
+ 'role': role})
def user_remove_role(context, user_id, role):
session = get_session()
with session.begin():
- res = session.query(models.UserRoleAssociation
- ).filter_by(user_id=user_id
- ).filter_by(role=role
- ).all()
+ res = session.query(models.UserRoleAssociation).\
+ filter_by(user_id=user_id).\
+ filter_by(role=role).\
+ all()
for role in res:
session.delete(role)
@@ -1634,7 +1645,8 @@ def user_add_role(context, user_id, role):
session = get_session()
with session.begin():
user_ref = user_get(context, user_id, session=session)
- models.UserRoleAssociation(user=user_ref, role=role).save(session=session)
+ models.UserRoleAssociation(user=user_ref, role=role).\
+ save(session=session)
def user_add_project_role(context, user_id, project_id, role):
@@ -1650,12 +1662,11 @@ def user_add_project_role(context, user_id, project_id, role):
###################
-
@require_admin_context
def host_get_networks(context, host):
session = get_session()
with session.begin():
- return session.query(models.Network
- ).filter_by(deleted=False
- ).filter_by(host=host
- ).all()
+ return session.query(models.Network).\
+ filter_by(deleted=False).\
+ filter_by(host=host).\
+ all()
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 853c320e4..a367cf770 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -20,11 +20,9 @@
SQLAlchemy models for nova data
"""
-import sys
import datetime
-# TODO(vish): clean up these imports
-from sqlalchemy.orm import relationship, backref, exc, object_mapper
+from sqlalchemy.orm import relationship, backref, object_mapper
from sqlalchemy import Column, Integer, String, schema
from sqlalchemy import ForeignKey, DateTime, Boolean, Text
from sqlalchemy.exc import IntegrityError
@@ -46,17 +44,11 @@ class NovaBase(object):
"""Base class for Nova Models"""
__table_args__ = {'mysql_engine': 'InnoDB'}
__table_initialized__ = False
- __prefix__ = 'none'
created_at = Column(DateTime, default=datetime.datetime.utcnow)
updated_at = Column(DateTime, onupdate=datetime.datetime.utcnow)
deleted_at = Column(DateTime)
deleted = Column(Boolean, default=False)
- @property
- def str_id(self):
- """Get string id of object (generally prefix + '-' + id)"""
- return "%s-%s" % (self.__prefix__, self.id)
-
def save(self, session=None):
"""Save this object"""
if not session:
@@ -104,7 +96,6 @@ class NovaBase(object):
#class Image(BASE, NovaBase):
# """Represents an image in the datastore"""
# __tablename__ = 'images'
-# __prefix__ = 'ami'
# id = Column(Integer, primary_key=True)
# ec2_id = Column(String(12), unique=True)
# user_id = Column(String(255))
@@ -144,8 +135,8 @@ class NovaBase(object):
# """Represents a host where services are running"""
# __tablename__ = 'hosts'
# id = Column(String(255), primary_key=True)
-#
-#
+
+
class Service(BASE, NovaBase):
"""Represents a running service on a host"""
__tablename__ = 'services'
@@ -160,7 +151,6 @@ class Service(BASE, NovaBase):
class Instance(BASE, NovaBase):
"""Represents a guest vm"""
__tablename__ = 'instances'
- __prefix__ = 'i'
id = Column(Integer, primary_key=True)
internal_id = Column(Integer, unique=True)
@@ -237,7 +227,6 @@ class Instance(BASE, NovaBase):
class Volume(BASE, NovaBase):
"""Represents a block storage device that can be attached to a vm"""
__tablename__ = 'volumes'
- __prefix__ = 'vol'
id = Column(Integer, primary_key=True)
ec2_id = Column(String(12), unique=True)
@@ -279,15 +268,12 @@ class Quota(BASE, NovaBase):
gigabytes = Column(Integer)
floating_ips = Column(Integer)
- @property
- def str_id(self):
- return self.project_id
-
class ExportDevice(BASE, NovaBase):
"""Represates a shelf and blade that a volume can be exported on"""
__tablename__ = 'export_devices'
- __table_args__ = (schema.UniqueConstraint("shelf_id", "blade_id"), {'mysql_engine': 'InnoDB'})
+ __table_args__ = (schema.UniqueConstraint("shelf_id", "blade_id"),
+ {'mysql_engine': 'InnoDB'})
id = Column(Integer, primary_key=True)
shelf_id = Column(Integer)
blade_id = Column(Integer)
@@ -318,10 +304,13 @@ class SecurityGroup(BASE, NovaBase):
instances = relationship(Instance,
secondary="security_group_instance_association",
- primaryjoin="and_(SecurityGroup.id == SecurityGroupInstanceAssociation.security_group_id,"
- "SecurityGroup.deleted == False)",
- secondaryjoin="and_(SecurityGroupInstanceAssociation.instance_id == Instance.id,"
- "Instance.deleted == False)",
+ primaryjoin='and_('
+ 'SecurityGroup.id == '
+ 'SecurityGroupInstanceAssociation.security_group_id,'
+ 'SecurityGroup.deleted == False)',
+ secondaryjoin='and_('
+ 'SecurityGroupInstanceAssociation.instance_id == Instance.id,'
+ 'Instance.deleted == False)',
backref='security_groups')
@property
@@ -340,11 +329,12 @@ class SecurityGroupIngressRule(BASE, NovaBase):
parent_group_id = Column(Integer, ForeignKey('security_groups.id'))
parent_group = relationship("SecurityGroup", backref="rules",
- foreign_keys=parent_group_id,
- primaryjoin="and_(SecurityGroupIngressRule.parent_group_id == SecurityGroup.id,"
- "SecurityGroupIngressRule.deleted == False)")
+ foreign_keys=parent_group_id,
+ primaryjoin='and_('
+ 'SecurityGroupIngressRule.parent_group_id == SecurityGroup.id,'
+ 'SecurityGroupIngressRule.deleted == False)')
- protocol = Column(String(5)) # "tcp", "udp", or "icmp"
+ protocol = Column(String(5)) # "tcp", "udp", or "icmp"
from_port = Column(Integer)
to_port = Column(Integer)
cidr = Column(String(255))
@@ -366,10 +356,6 @@ class KeyPair(BASE, NovaBase):
fingerprint = Column(String(255))
public_key = Column(Text)
- @property
- def str_id(self):
- return '%s.%s' % (self.user_id, self.name)
-
class Network(BASE, NovaBase):
"""Represents a network"""
@@ -424,16 +410,13 @@ class FixedIp(BASE, NovaBase):
instance = relationship(Instance,
backref=backref('fixed_ip', uselist=False),
foreign_keys=instance_id,
- primaryjoin='and_(FixedIp.instance_id==Instance.id,'
- 'FixedIp.deleted==False)')
+ primaryjoin='and_('
+ 'FixedIp.instance_id == Instance.id,'
+ 'FixedIp.deleted == False)')
allocated = Column(Boolean, default=False)
leased = Column(Boolean, default=False)
reserved = Column(Boolean, default=False)
- @property
- def str_id(self):
- return self.address
-
class User(BASE, NovaBase):
"""Represents a user"""
@@ -465,13 +448,13 @@ class UserProjectRoleAssociation(BASE, NovaBase):
__tablename__ = 'user_project_role_association'
user_id = Column(String(255), primary_key=True)
user = relationship(User,
- primaryjoin=user_id==User.id,
+ primaryjoin=user_id == User.id,
foreign_keys=[User.id],
uselist=False)
project_id = Column(String(255), primary_key=True)
project = relationship(Project,
- primaryjoin=project_id==Project.id,
+ primaryjoin=project_id == Project.id,
foreign_keys=[Project.id],
uselist=False)
@@ -495,7 +478,6 @@ class UserProjectAssociation(BASE, NovaBase):
project_id = Column(String(255), ForeignKey(Project.id), primary_key=True)
-
class FloatingIp(BASE, NovaBase):
"""Represents a floating ip that dynamically forwards to a fixed ip"""
__tablename__ = 'floating_ips'
@@ -505,8 +487,9 @@ class FloatingIp(BASE, NovaBase):
fixed_ip = relationship(FixedIp,
backref=backref('floating_ips'),
foreign_keys=fixed_ip_id,
- primaryjoin='and_(FloatingIp.fixed_ip_id==FixedIp.id,'
- 'FloatingIp.deleted==False)')
+ primaryjoin='and_('
+ 'FloatingIp.fixed_ip_id == FixedIp.id,'
+ 'FloatingIp.deleted == False)')
project_id = Column(String(255))
host = Column(String(255)) # , ForeignKey('hosts.id'))
@@ -517,7 +500,7 @@ def register_models():
models = (Service, Instance, Volume, ExportDevice, FixedIp,
FloatingIp, Network, SecurityGroup,
SecurityGroupIngressRule, SecurityGroupInstanceAssociation,
- AuthToken, User, Project) # , Image, Host
+ AuthToken, User, Project) # , Image, Host
engine = create_engine(FLAGS.sql_connection, echo=False)
for model in models:
model.metadata.create_all(engine)
diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py
index 826754f6a..e0d84c107 100644
--- a/nova/db/sqlalchemy/session.py
+++ b/nova/db/sqlalchemy/session.py
@@ -29,6 +29,7 @@ FLAGS = flags.FLAGS
_ENGINE = None
_MAKER = None
+
def get_session(autocommit=True, expire_on_commit=False):
"""Helper method to grab session"""
global _ENGINE
@@ -39,5 +40,5 @@ def get_session(autocommit=True, expire_on_commit=False):
_MAKER = (sessionmaker(bind=_ENGINE,
autocommit=autocommit,
expire_on_commit=expire_on_commit))
- session = _MAKER()
+ session = _MAKER()
return session
diff --git a/nova/exception.py b/nova/exception.py
index f157fab2d..6d6c37338 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -47,7 +47,7 @@ class ApiError(Error):
def __init__(self, message='Unknown', code='Unknown'):
self.message = message
self.code = code
- super(ApiError, self).__init__('%s: %s'% (code, message))
+ super(ApiError, self).__init__('%s: %s' % (code, message))
class NotFound(Error):
@@ -69,6 +69,7 @@ class NotEmpty(Error):
class Invalid(Error):
pass
+
class InvalidInputException(Error):
pass
@@ -86,5 +87,3 @@ def wrap_exception(f):
raise
_wrap.func_name = f.func_name
return _wrap
-
-
diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py
index df5e61e6e..c64617931 100644
--- a/nova/fakerabbit.py
+++ b/nova/fakerabbit.py
@@ -130,7 +130,6 @@ class Backend(object):
self._exchanges[exchange].publish(
message, routing_key=routing_key)
-
__instance = None
def __init__(self, *args, **kwargs):
diff --git a/nova/flags.py b/nova/flags.py
index 3b473488f..f3b0384ad 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -180,7 +180,8 @@ DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake')
DEFINE_integer('s3_port', 3333, 's3 port')
DEFINE_string('s3_host', '127.0.0.1', 's3 host')
DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on')
-DEFINE_string('scheduler_topic', 'scheduler', 'the topic scheduler nodes listen on')
+DEFINE_string('scheduler_topic', 'scheduler',
+ 'the topic scheduler nodes listen on')
DEFINE_string('volume_topic', 'volume', 'the topic volume nodes listen on')
DEFINE_string('network_topic', 'network', 'the topic network nodes listen on')
diff --git a/nova/image/service.py b/nova/image/service.py
index ebdc65fef..37cadddcc 100644
--- a/nova/image/service.py
+++ b/nova/image/service.py
@@ -30,7 +30,8 @@ flags.DEFINE_string('glance_teller_address', 'http://127.0.0.1',
flags.DEFINE_string('glance_teller_port', '9191',
'Port for Glance\'s Teller service')
flags.DEFINE_string('glance_parallax_address', 'http://127.0.0.1',
- 'IP address or URL where Glance\'s Parallax service resides')
+ 'IP address or URL where Glance\'s Parallax service '
+ 'resides')
flags.DEFINE_string('glance_parallax_port', '9292',
'Port for Glance\'s Parallax service')
@@ -120,10 +121,10 @@ class BaseImageService(object):
def delete(self, image_id):
"""
- Delete the given image.
-
+ Delete the given image.
+
:raises NotFound if the image does not exist.
-
+
"""
raise NotImplementedError
@@ -131,14 +132,14 @@ class BaseImageService(object):
class LocalImageService(BaseImageService):
"""Image service storing images to local disk.
-
+
It assumes that image_ids are integers."""
def __init__(self):
self._path = "/tmp/nova/images"
try:
os.makedirs(self._path)
- except OSError: # exists
+ except OSError: # Exists
pass
def _path_to(self, image_id):
@@ -156,7 +157,7 @@ class LocalImageService(BaseImageService):
def show(self, id):
try:
- return pickle.load(open(self._path_to(id)))
+ return pickle.load(open(self._path_to(id)))
except IOError:
raise exception.NotFound
@@ -164,7 +165,7 @@ class LocalImageService(BaseImageService):
"""
Store the image data and return the new image id.
"""
- id = random.randint(0, 2**32-1)
+ id = random.randint(0, 2 ** 32 - 1)
data['id'] = id
self.update(id, data)
return id
diff --git a/nova/image/services/glance/__init__.py b/nova/image/services/glance/__init__.py
index 6df6f2dcf..f1d05f0bc 100644
--- a/nova/image/services/glance/__init__.py
+++ b/nova/image/services/glance/__init__.py
@@ -30,6 +30,7 @@ import nova.image.service
FLAGS = flags.FLAGS
+
class TellerClient(object):
def __init__(self):
@@ -153,7 +154,6 @@ class ParallaxClient(object):
class GlanceImageService(nova.image.service.BaseImageService):
-
"""Provides storage and retrieval of disk image objects within Glance."""
def __init__(self):
@@ -202,10 +202,10 @@ class GlanceImageService(nova.image.service.BaseImageService):
def delete(self, image_id):
"""
- Delete the given image.
-
+ Delete the given image.
+
:raises NotFound if the image does not exist.
-
+
"""
self.parallax.delete_image_metadata(image_id)
diff --git a/nova/manager.py b/nova/manager.py
index 56ba7d3f6..4244b2db4 100644
--- a/nova/manager.py
+++ b/nova/manager.py
@@ -45,8 +45,6 @@ class Manager(object):
yield
def init_host(self):
- """Do any initialization that needs to be run if this is a standalone service.
-
- Child classes should override this method.
- """
- pass
+ """Do any initialization that needs to be run if this is a standalone
+ service. Child classes should override this method."""
+ pass
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index c0be0e8cc..7b323efa1 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -53,6 +53,7 @@ flags.DEFINE_bool('use_nova_chains', False,
DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)]
+
def init_host():
"""Basic networking setup goes here"""
# NOTE(devcamcar): Cloud public DNAT entries, CloudPipe port
@@ -72,6 +73,7 @@ def init_host():
_confirm_rule("POSTROUTING", "-t nat -s %(range)s -d %(range)s -j ACCEPT" %
{'range': FLAGS.fixed_range})
+
def bind_floating_ip(floating_ip):
"""Bind ip to public interface"""
_execute("sudo ip addr add %s dev %s" % (floating_ip,
@@ -103,7 +105,7 @@ def ensure_floating_forward(floating_ip, fixed_ip):
_confirm_rule("FORWARD", "-d %s -p icmp -j ACCEPT"
% (fixed_ip))
for (protocol, port) in DEFAULT_PORTS:
- _confirm_rule("FORWARD","-d %s -p %s --dport %s -j ACCEPT"
+ _confirm_rule("FORWARD", "-d %s -p %s --dport %s -j ACCEPT"
% (fixed_ip, protocol, port))
@@ -167,11 +169,9 @@ def get_dhcp_hosts(context, network_id):
return '\n'.join(hosts)
-# TODO(ja): if the system has restarted or pid numbers have wrapped
-# then you cannot be certain that the pid refers to the
-# dnsmasq. As well, sending a HUP only reloads the hostfile,
-# so any configuration options (like dchp-range, vlan, ...)
-# aren't reloaded
+# NOTE(ja): Sending a HUP only reloads the hostfile, so any
+# configuration options (like dchp-range, vlan, ...)
+# aren't reloaded.
def update_dhcp(context, network_id):
"""(Re)starts a dnsmasq server for a given network
@@ -191,13 +191,16 @@ def update_dhcp(context, network_id):
# if dnsmasq is already running, then tell it to reload
if pid:
- # TODO(ja): use "/proc/%d/cmdline" % (pid) to determine if pid refers
- # correct dnsmasq process
- try:
- _execute('sudo kill -HUP %d' % pid)
- return
- except Exception as exc: # pylint: disable-msg=W0703
- logging.debug("Hupping dnsmasq threw %s", exc)
+ out, _err = _execute('cat /proc/%d/cmdline' % pid,
+ check_exit_code=False)
+ if conffile in out:
+ try:
+ _execute('sudo kill -HUP %d' % pid)
+ return
+ except Exception as exc: # pylint: disable-msg=W0703
+ logging.debug("Hupping dnsmasq threw %s", exc)
+ else:
+ logging.debug("Pid %d is stale, relaunching dnsmasq", pid)
# FLAGFILE and DNSMASQ_INTERFACE in env
env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile,
@@ -233,7 +236,8 @@ def _confirm_rule(chain, cmd):
"""Delete and re-add iptables rule"""
if FLAGS.use_nova_chains:
chain = "nova_%s" % chain.lower()
- _execute("sudo iptables --delete %s %s" % (chain, cmd), check_exit_code=False)
+ _execute("sudo iptables --delete %s %s" % (chain, cmd),
+ check_exit_code=False)
_execute("sudo iptables -I %s %s" % (chain, cmd))
diff --git a/nova/network/manager.py b/nova/network/manager.py
index c7080ccd8..8a20cb491 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -49,7 +49,8 @@ flags.DEFINE_string('vpn_ip', utils.get_my_ip(),
flags.DEFINE_integer('vpn_start', 1000, 'First Vpn port for private networks')
flags.DEFINE_integer('network_size', 256,
'Number of addresses in each private subnet')
-flags.DEFINE_string('floating_range', '4.4.4.0/24', 'Floating IP address block')
+flags.DEFINE_string('floating_range', '4.4.4.0/24',
+ 'Floating IP address block')
flags.DEFINE_string('fixed_range', '10.0.0.0/8', 'Fixed IP address block')
flags.DEFINE_integer('cnt_vpn_clients', 5,
'Number of addresses reserved for vpn clients')
@@ -170,7 +171,7 @@ class NetworkManager(manager.Manager):
if not fixed_ip_ref['leased']:
logging.warn("IP %s released that was not leased", address)
self.db.fixed_ip_update(context,
- fixed_ip_ref['str_id'],
+ fixed_ip_ref['address'],
{'leased': False})
if not fixed_ip_ref['allocated']:
self.db.fixed_ip_disassociate(context, address)
@@ -287,7 +288,6 @@ class FlatManager(NetworkManager):
self.db.network_update(context, network_id, net)
-
class FlatDHCPManager(NetworkManager):
"""Flat networking with dhcp"""
@@ -432,4 +432,3 @@ class VlanManager(NetworkManager):
"""Number of reserved ips at the top of the range"""
parent_reserved = super(VlanManager, self)._top_reserved_ips
return parent_reserved + FLAGS.cnt_vpn_clients
-
diff --git a/nova/objectstore/bucket.py b/nova/objectstore/bucket.py
index cfe5b14d8..0ba4934d1 100644
--- a/nova/objectstore/bucket.py
+++ b/nova/objectstore/bucket.py
@@ -69,7 +69,8 @@ class Bucket(object):
"""Create a new bucket owned by a project.
@bucket_name: a string representing the name of the bucket to create
- @context: a nova.auth.api.ApiContext object representing who owns the bucket.
+ @context: a nova.auth.api.ApiContext object representing who owns the
+ bucket.
Raises:
NotAuthorized: if the bucket is already exists or has invalid name
@@ -77,12 +78,12 @@ class Bucket(object):
path = os.path.abspath(os.path.join(
FLAGS.buckets_path, bucket_name))
if not path.startswith(os.path.abspath(FLAGS.buckets_path)) or \
- os.path.exists(path):
- raise exception.NotAuthorized()
+ os.path.exists(path):
+ raise exception.NotAuthorized()
os.makedirs(path)
- with open(path+'.json', 'w') as f:
+ with open(path + '.json', 'w') as f:
json.dump({'ownerId': context.project_id}, f)
@property
@@ -99,22 +100,25 @@ class Bucket(object):
@property
def owner_id(self):
try:
- with open(self.path+'.json') as f:
+ with open(self.path + '.json') as f:
return json.load(f)['ownerId']
except:
return None
def is_authorized(self, context):
try:
- return context.user.is_admin() or self.owner_id == context.project_id
+ return context.user.is_admin() or \
+ self.owner_id == context.project_id
except Exception, e:
return False
def list_keys(self, prefix='', marker=None, max_keys=1000, terse=False):
object_names = []
+ path_length = len(self.path)
for root, dirs, files in os.walk(self.path):
for file_name in files:
- object_names.append(os.path.join(root, file_name)[len(self.path)+1:])
+ object_name = os.path.join(root, file_name)[path_length + 1:]
+ object_names.append(object_name)
object_names.sort()
contents = []
@@ -164,7 +168,7 @@ class Bucket(object):
if len(os.listdir(self.path)) > 0:
raise exception.NotEmpty()
os.rmdir(self.path)
- os.remove(self.path+'.json')
+ os.remove(self.path + '.json')
def __getitem__(self, key):
return stored.Object(self, key)
diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py
index b93e92fe6..b26906001 100644
--- a/nova/objectstore/handler.py
+++ b/nova/objectstore/handler.py
@@ -44,7 +44,6 @@ import multiprocessing
import os
import urllib
-from tornado import escape
from twisted.application import internet
from twisted.application import service
from twisted.web import error
@@ -55,6 +54,7 @@ from twisted.web import static
from nova import context
from nova import exception
from nova import flags
+from nova import utils
from nova.auth import manager
from nova.objectstore import bucket
from nova.objectstore import image
@@ -70,10 +70,10 @@ def render_xml(request, value):
name = value.keys()[0]
request.write('<?xml version="1.0" encoding="UTF-8"?>\n')
- request.write('<' + escape.utf8(name) +
+ request.write('<' + utils.utf8(name) +
' xmlns="http://doc.s3.amazonaws.com/2006-03-01">')
_render_parts(value.values()[0], request.write)
- request.write('</' + escape.utf8(name) + '>')
+ request.write('</' + utils.utf8(name) + '>')
request.finish()
@@ -87,7 +87,7 @@ def finish(request, content=None):
def _render_parts(value, write_cb):
"""Helper method to render different Python objects to XML"""
if isinstance(value, basestring):
- write_cb(escape.xhtml_escape(value))
+ write_cb(utils.xhtml_escape(value))
elif isinstance(value, int) or isinstance(value, long):
write_cb(str(value))
elif isinstance(value, datetime.datetime):
@@ -97,9 +97,9 @@ def _render_parts(value, write_cb):
if not isinstance(subvalue, list):
subvalue = [subvalue]
for subsubvalue in subvalue:
- write_cb('<' + escape.utf8(name) + '>')
+ write_cb('<' + utils.utf8(name) + '>')
_render_parts(subsubvalue, write_cb)
- write_cb('</' + escape.utf8(name) + '>')
+ write_cb('</' + utils.utf8(name) + '>')
else:
raise Exception("Unknown S3 value type %r", value)
@@ -119,7 +119,7 @@ def get_context(request):
# Authorization Header format: 'AWS <access>:<secret>'
authorization_header = request.getHeader('Authorization')
if not authorization_header:
- raise exception.NotAuthorized
+ raise exception.NotAuthorized()
auth_header_value = authorization_header.split(' ')[1]
access, _ignored, secret = auth_header_value.rpartition(':')
am = manager.AuthManager()
@@ -134,7 +134,8 @@ def get_context(request):
return context.RequestContext(user, project)
except exception.Error as ex:
logging.debug("Authentication Failure: %s", ex)
- raise exception.NotAuthorized
+ raise exception.NotAuthorized()
+
class ErrorHandlingResource(resource.Resource):
"""Maps exceptions to 404 / 401 codes. Won't work for
@@ -162,7 +163,7 @@ class S3(ErrorHandlingResource):
def __init__(self):
ErrorHandlingResource.__init__(self)
- def getChild(self, name, request): # pylint: disable-msg=C0103
+ def getChild(self, name, request): # pylint: disable-msg=C0103
"""Returns either the image or bucket resource"""
request.context = get_context(request)
if name == '':
@@ -172,7 +173,7 @@ class S3(ErrorHandlingResource):
else:
return BucketResource(name)
- def render_GET(self, request): # pylint: disable-msg=R0201
+ def render_GET(self, request): # pylint: disable-msg=R0201
"""Renders the GET request for a list of buckets as XML"""
logging.debug('List of buckets requested')
buckets = [b for b in bucket.Bucket.all() \
@@ -209,7 +210,7 @@ class BucketResource(ErrorHandlingResource):
return error.NoResource(message="No such bucket").render(request)
if not bucket_object.is_authorized(request.context):
- raise exception.NotAuthorized
+ raise exception.NotAuthorized()
prefix = get_argument(request, "prefix", u"")
marker = get_argument(request, "marker", u"")
@@ -239,7 +240,7 @@ class BucketResource(ErrorHandlingResource):
bucket_object = bucket.Bucket(self.name)
if not bucket_object.is_authorized(request.context):
- raise exception.NotAuthorized
+ raise exception.NotAuthorized()
bucket_object.delete()
request.setResponseCode(204)
@@ -262,7 +263,7 @@ class ObjectResource(ErrorHandlingResource):
logging.debug("Getting object: %s / %s", self.bucket.name, self.name)
if not self.bucket.is_authorized(request.context):
- raise exception.NotAuthorized
+ raise exception.NotAuthorized()
obj = self.bucket[urllib.unquote(self.name)]
request.setHeader("Content-Type", "application/unknown")
@@ -280,7 +281,7 @@ class ObjectResource(ErrorHandlingResource):
logging.debug("Putting object: %s / %s", self.bucket.name, self.name)
if not self.bucket.is_authorized(request.context):
- raise exception.NotAuthorized
+ raise exception.NotAuthorized()
key = urllib.unquote(self.name)
request.content.seek(0, 0)
@@ -301,7 +302,7 @@ class ObjectResource(ErrorHandlingResource):
self.name)
if not self.bucket.is_authorized(request.context):
- raise exception.NotAuthorized
+ raise exception.NotAuthorized()
del self.bucket[urllib.unquote(self.name)]
request.setResponseCode(204)
@@ -318,12 +319,16 @@ class ImageResource(ErrorHandlingResource):
def render_GET(self, request):
"""Returns the image file"""
+ if not self.img.is_authorized(request.context, True):
+ raise exception.NotAuthorized()
return static.File(self.img.image_path,
- defaultType='application/octet-stream'
- ).render_GET(request)
+ defaultType='application/octet-stream').\
+ render_GET(request)
+
class ImagesResource(resource.Resource):
"""A web resource representing a list of images"""
+
def getChild(self, name, _request):
"""Returns itself or an ImageResource if no name given"""
if name == '':
@@ -331,7 +336,7 @@ class ImagesResource(resource.Resource):
else:
return ImageResource(name)
- def render_GET(self, request): # pylint: disable-msg=R0201
+ def render_GET(self, request): # pylint: disable-msg=R0201
""" returns a json listing of all images
that a user has permissions to see """
@@ -360,7 +365,7 @@ class ImagesResource(resource.Resource):
request.finish()
return server.NOT_DONE_YET
- def render_PUT(self, request): # pylint: disable-msg=R0201
+ def render_PUT(self, request): # pylint: disable-msg=R0201
""" create a new registered image """
image_id = get_argument(request, 'image_id', u'')
@@ -369,19 +374,19 @@ class ImagesResource(resource.Resource):
image_path = os.path.join(FLAGS.images_path, image_id)
if not image_path.startswith(FLAGS.images_path) or \
os.path.exists(image_path):
- raise exception.NotAuthorized
+ raise exception.NotAuthorized()
bucket_object = bucket.Bucket(image_location.split("/")[0])
if not bucket_object.is_authorized(request.context):
- raise exception.NotAuthorized
+ raise exception.NotAuthorized()
p = multiprocessing.Process(target=image.Image.register_aws_image,
args=(image_id, image_location, request.context))
p.start()
return ''
- def render_POST(self, request): # pylint: disable-msg=R0201
+ def render_POST(self, request): # pylint: disable-msg=R0201
"""Update image attributes: public/private"""
# image_id required for all requests
@@ -389,13 +394,13 @@ class ImagesResource(resource.Resource):
image_object = image.Image(image_id)
if not image_object.is_authorized(request.context):
logging.debug("not authorized for render_POST in images")
- raise exception.NotAuthorized
+ raise exception.NotAuthorized()
operation = get_argument(request, 'operation', u'')
if operation:
# operation implies publicity toggle
logging.debug("handling publicity toggle")
- image_object.set_public(operation=='add')
+ image_object.set_public(operation == 'add')
else:
# other attributes imply update
logging.debug("update user fields")
@@ -405,13 +410,13 @@ class ImagesResource(resource.Resource):
image_object.update_user_editable_fields(clean_args)
return ''
- def render_DELETE(self, request): # pylint: disable-msg=R0201
+ def render_DELETE(self, request): # pylint: disable-msg=R0201
"""Delete a registered image"""
image_id = get_argument(request, "image_id", u"")
image_object = image.Image(image_id)
if not image_object.is_authorized(request.context):
- raise exception.NotAuthorized
+ raise exception.NotAuthorized()
image_object.delete()
diff --git a/nova/objectstore/image.py b/nova/objectstore/image.py
index 413b269b7..b7b2ec6ab 100644
--- a/nova/objectstore/image.py
+++ b/nova/objectstore/image.py
@@ -48,8 +48,8 @@ class Image(object):
self.image_id = image_id
self.path = os.path.abspath(os.path.join(FLAGS.images_path, image_id))
if not self.path.startswith(os.path.abspath(FLAGS.images_path)) or \
- not os.path.isdir(self.path):
- raise exception.NotFound
+ not os.path.isdir(self.path):
+ raise exception.NotFound
@property
def image_path(self):
@@ -127,8 +127,8 @@ class Image(object):
a string of the image id for the kernel
@type ramdisk: bool or str
- @param ramdisk: either TRUE meaning this partition is a ramdisk image or
- a string of the image id for the ramdisk
+ @param ramdisk: either TRUE meaning this partition is a ramdisk image
+ or a string of the image id for the ramdisk
@type public: bool
@@ -160,8 +160,7 @@ class Image(object):
'isPublic': public,
'architecture': 'x86_64',
'imageType': image_type,
- 'state': 'available'
- }
+ 'state': 'available'}
if type(kernel) is str and len(kernel) > 0:
info['kernelId'] = kernel
@@ -180,7 +179,7 @@ class Image(object):
os.makedirs(image_path)
bucket_name = image_location.split("/")[0]
- manifest_path = image_location[len(bucket_name)+1:]
+ manifest_path = image_location[len(bucket_name) + 1:]
bucket_object = bucket.Bucket(bucket_name)
manifest = ElementTree.fromstring(bucket_object[manifest_path].read())
@@ -204,10 +203,9 @@ class Image(object):
'imageId': image_id,
'imageLocation': image_location,
'imageOwnerId': context.project_id,
- 'isPublic': False, # FIXME: grab public from manifest
- 'architecture': 'x86_64', # FIXME: grab architecture from manifest
- 'imageType' : image_type
- }
+ 'isPublic': False, # FIXME: grab public from manifest
+ 'architecture': 'x86_64', # FIXME: grab architecture from manifest
+ 'imageType': image_type}
if kernel_id:
info['kernelId'] = kernel_id
@@ -230,24 +228,29 @@ class Image(object):
write_state('decrypting')
# FIXME: grab kernelId and ramdiskId from bundle manifest
- encrypted_key = binascii.a2b_hex(manifest.find("image/ec2_encrypted_key").text)
- encrypted_iv = binascii.a2b_hex(manifest.find("image/ec2_encrypted_iv").text)
+ hex_key = manifest.find("image/ec2_encrypted_key").text
+ encrypted_key = binascii.a2b_hex(hex_key)
+ hex_iv = manifest.find("image/ec2_encrypted_iv").text
+ encrypted_iv = binascii.a2b_hex(hex_iv)
cloud_private_key = os.path.join(FLAGS.ca_path, "private/cakey.pem")
decrypted_filename = os.path.join(image_path, 'image.tar.gz')
- Image.decrypt_image(encrypted_filename, encrypted_key, encrypted_iv, cloud_private_key, decrypted_filename)
+ Image.decrypt_image(encrypted_filename, encrypted_key, encrypted_iv,
+ cloud_private_key, decrypted_filename)
write_state('untarring')
image_file = Image.untarzip_image(image_path, decrypted_filename)
- shutil.move(os.path.join(image_path, image_file), os.path.join(image_path, 'image'))
+ shutil.move(os.path.join(image_path, image_file),
+ os.path.join(image_path, 'image'))
write_state('available')
os.unlink(decrypted_filename)
os.unlink(encrypted_filename)
@staticmethod
- def decrypt_image(encrypted_filename, encrypted_key, encrypted_iv, cloud_private_key, decrypted_filename):
+ def decrypt_image(encrypted_filename, encrypted_key, encrypted_iv,
+ cloud_private_key, decrypted_filename):
key, err = utils.execute(
'openssl rsautl -decrypt -inkey %s' % cloud_private_key,
process_input=encrypted_key,
@@ -259,13 +262,15 @@ class Image(object):
process_input=encrypted_iv,
check_exit_code=False)
if err:
- raise exception.Error("Failed to decrypt initialization vector: %s" % err)
+ raise exception.Error("Failed to decrypt initialization "
+ "vector: %s" % err)
_out, err = utils.execute(
'openssl enc -d -aes-128-cbc -in %s -K %s -iv %s -out %s'
% (encrypted_filename, key, iv, decrypted_filename),
check_exit_code=False)
if err:
- raise exception.Error("Failed to decrypt image file %s : %s" % (encrypted_filename, err))
+ raise exception.Error("Failed to decrypt image file %s : %s" %
+ (encrypted_filename, err))
@staticmethod
def untarzip_image(path, filename):
diff --git a/nova/objectstore/stored.py b/nova/objectstore/stored.py
index 9829194cb..a3f6e9c0b 100644
--- a/nova/objectstore/stored.py
+++ b/nova/objectstore/stored.py
@@ -50,8 +50,8 @@ class Object(object):
return os.path.getmtime(self.path)
def read(self):
- """ read all contents of key into memory and return """
- return self.file.read()
+ """ read all contents of key into memory and return """
+ return self.file.read()
@property
def file(self):
diff --git a/nova/process.py b/nova/process.py
index 13cb90e82..b33df048b 100644
--- a/nova/process.py
+++ b/nova/process.py
@@ -36,6 +36,7 @@ FLAGS = flags.FLAGS
flags.DEFINE_integer('process_pool_size', 4,
'Number of processes to use in the process pool')
+
# This is based on _BackRelay from twister.internal.utils, but modified to
# capture both stdout and stderr, without odd stderr handling, and also to
# handle stdin
@@ -55,8 +56,8 @@ class BackRelayWithInput(protocol.ProcessProtocol):
will be called back when the process ends. This C{Deferred} is also
associated with the L{_ProcessExecutionError} which C{deferred} fires
with earlier in this case so that users can determine when the process
- has actually ended, in addition to knowing when bytes have been received
- via stderr.
+ has actually ended, in addition to knowing when bytes have been
+ received via stderr.
"""
def __init__(self, deferred, cmd, started_deferred=None,
@@ -93,7 +94,7 @@ class BackRelayWithInput(protocol.ProcessProtocol):
if self.deferred is not None:
stdout, stderr = self.stdout.getvalue(), self.stderr.getvalue()
exit_code = reason.value.exitCode
- if self.check_exit_code and exit_code <> 0:
+ if self.check_exit_code and exit_code != 0:
self.deferred.errback(self._build_execution_error(exit_code))
else:
try:
@@ -101,14 +102,15 @@ class BackRelayWithInput(protocol.ProcessProtocol):
reason.trap(error.ProcessDone)
self.deferred.callback((stdout, stderr))
except:
- # NOTE(justinsb): This logic is a little suspicious to me...
- # If the callback throws an exception, then errback will be
- # called also. However, this is what the unit tests test for...
- self.deferred.errback(self._build_execution_error(exit_code))
+ # NOTE(justinsb): This logic is a little suspicious to me.
+ # If the callback throws an exception, then errback will
+ # be called also. However, this is what the unit tests
+ # test for.
+ exec_error = self._build_execution_error(exit_code)
+ self.deferred.errback(exec_error)
elif self.on_process_ended is not None:
self.on_process_ended.errback(reason)
-
def connectionMade(self):
if self.started_deferred:
self.started_deferred.callback(self)
@@ -116,6 +118,7 @@ class BackRelayWithInput(protocol.ProcessProtocol):
self.transport.write(str(self.process_input))
self.transport.closeStdin()
+
def get_process_output(executable, args=None, env=None, path=None,
process_reactor=None, check_exit_code=True,
process_input=None, started_deferred=None,
@@ -142,7 +145,7 @@ def get_process_output(executable, args=None, env=None, path=None,
if not args is None:
args = [str(x) for x in args]
process_reactor.spawnProcess(process_handler, executable,
- (executable,)+tuple(args), env, path)
+ (executable,) + tuple(args), env, path)
return deferred
@@ -193,9 +196,11 @@ class ProcessPool(object):
class SharedPool(object):
_instance = None
+
def __init__(self):
if SharedPool._instance is None:
self.__class__._instance = ProcessPool()
+
def __getattr__(self, key):
return getattr(self._instance, key)
diff --git a/nova/quota.py b/nova/quota.py
index 045051207..01dd0ecd4 100644
--- a/nova/quota.py
+++ b/nova/quota.py
@@ -37,6 +37,7 @@ flags.DEFINE_integer('quota_gigabytes', 1000,
flags.DEFINE_integer('quota_floating_ips', 10,
'number of floating ips allowed per project')
+
def get_quota(context, project_id):
rval = {'instances': FLAGS.quota_instances,
'cores': FLAGS.quota_cores,
@@ -52,6 +53,7 @@ def get_quota(context, project_id):
pass
return rval
+
def allowed_instances(context, num_instances, instance_type):
"""Check quota and return min(num_instances, allowed_instances)"""
project_id = context.project_id
@@ -92,4 +94,3 @@ def allowed_floating_ips(context, num_floating_ips):
quota = get_quota(context, project_id)
allowed_floating_ips = quota['floating_ips'] - used_floating_ips
return min(num_floating_ips, allowed_floating_ips)
-
diff --git a/nova/rpc.py b/nova/rpc.py
index 965934205..895820cd0 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -268,6 +268,7 @@ def _unpack_context(msg):
LOG.debug('unpacked context: %s', context_dict)
return context.RequestContext.from_dict(context_dict)
+
def _pack_context(msg, context):
"""Pack context into msg.
@@ -280,6 +281,7 @@ def _pack_context(msg, context):
for (key, value) in context.to_dict().iteritems()])
msg.update(context)
+
def call(context, topic, msg):
"""Sends a message on a topic and wait for a response"""
LOG.debug("Making asynchronous call...")
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index c89d25a47..f271d573f 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -31,10 +31,12 @@ FLAGS = flags.FLAGS
flags.DEFINE_integer('service_down_time', 60,
'maximum time since last checkin for up service')
+
class NoValidHost(exception.Error):
"""There is no valid host for the command."""
pass
+
class Scheduler(object):
"""The base class that all Scheduler clases should inherit from."""
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index b3b2b4dce..60a3d2b4b 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -56,7 +56,8 @@ class SchedulerManager(manager.Manager):
driver_method = 'schedule_%s' % method
elevated = context.elevated()
try:
- host = getattr(self.driver, driver_method)(elevated, *args, **kwargs)
+ host = getattr(self.driver, driver_method)(elevated, *args,
+ **kwargs)
except AttributeError:
host = self.driver.schedule(elevated, topic, *args, **kwargs)
diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py
index fdaff74d8..7f5093656 100644
--- a/nova/scheduler/simple.py
+++ b/nova/scheduler/simple.py
@@ -36,6 +36,7 @@ flags.DEFINE_integer("max_gigabytes", 10000,
flags.DEFINE_integer("max_networks", 1000,
"maximum number of networks to allow per host")
+
class SimpleScheduler(chance.ChanceScheduler):
"""Implements Naive Scheduler that tries to find least loaded host."""
diff --git a/nova/server.py b/nova/server.py
index c58a15041..cb424caa1 100644
--- a/nova/server.py
+++ b/nova/server.py
@@ -54,11 +54,11 @@ def stop(pidfile):
"""
# Get the pid from the pidfile
try:
- pid = int(open(pidfile,'r').read().strip())
+ pid = int(open(pidfile, 'r').read().strip())
except IOError:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % pidfile)
- return # not an error in a restart
+ return
# Try killing the daemon process
try:
@@ -143,6 +143,5 @@ def daemonize(args, name, main):
stderr=stderr,
uid=FLAGS.uid,
gid=FLAGS.gid,
- files_preserve=files_to_keep
- ):
+ files_preserve=files_to_keep):
main(args)
diff --git a/nova/service.py b/nova/service.py
index 2d7961ab9..d53d92b65 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -49,15 +49,17 @@ flags.DEFINE_integer('periodic_interval', 60,
class Service(object, service.Service):
"""Base class for workers that run on hosts."""
- def __init__(self, host, binary, topic, manager, *args, **kwargs):
+ def __init__(self, host, binary, topic, manager, report_interval=None,
+ periodic_interval=None, *args, **kwargs):
self.host = host
self.binary = binary
self.topic = topic
self.manager_class_name = manager
+ self.report_interval = report_interval
+ self.periodic_interval = periodic_interval
super(Service, self).__init__(*args, **kwargs)
self.saved_args, self.saved_kwargs = args, kwargs
-
def startService(self): # pylint: disable-msg C0103
manager_class = utils.import_class(self.manager_class_name)
self.manager = manager_class(host=self.host, *self.saved_args,
@@ -73,6 +75,26 @@ class Service(object, service.Service):
except exception.NotFound:
self._create_service_ref(ctxt)
+ conn = rpc.Connection.instance()
+ if self.report_interval:
+ consumer_all = rpc.AdapterConsumer(
+ connection=conn,
+ topic=self.topic,
+ proxy=self)
+ consumer_node = rpc.AdapterConsumer(
+ connection=conn,
+ topic='%s.%s' % (self.topic, self.host),
+ proxy=self)
+
+ consumer_all.attach_to_twisted()
+ consumer_node.attach_to_twisted()
+
+ pulse = task.LoopingCall(self.report_state)
+ pulse.start(interval=self.report_interval, now=False)
+
+ if self.periodic_interval:
+ pulse = task.LoopingCall(self.periodic_tasks)
+ pulse.start(interval=self.periodic_interval, now=False)
def _create_service_ref(self, context):
service_ref = db.service_create(context,
@@ -83,10 +105,8 @@ class Service(object, service.Service):
self.service_id = service_ref['id']
def __getattr__(self, key):
- try:
- return super(Service, self).__getattr__(key)
- except AttributeError:
- return getattr(self.manager, key)
+ manager = self.__dict__.get('manager', None)
+ return getattr(manager, key)
@classmethod
def create(cls,
@@ -119,25 +139,8 @@ class Service(object, service.Service):
if not periodic_interval:
periodic_interval = FLAGS.periodic_interval
logging.warn("Starting %s node", topic)
- service_obj = cls(host, binary, topic, manager)
- conn = rpc.Connection.instance()
- consumer_all = rpc.AdapterConsumer(
- connection=conn,
- topic=topic,
- proxy=service_obj)
- consumer_node = rpc.AdapterConsumer(
- connection=conn,
- topic='%s.%s' % (topic, host),
- proxy=service_obj)
-
- consumer_all.attach_to_twisted()
- consumer_node.attach_to_twisted()
-
- pulse = task.LoopingCall(service_obj.report_state)
- pulse.start(interval=report_interval, now=False)
-
- pulse = task.LoopingCall(service_obj.periodic_tasks)
- pulse.start(interval=periodic_interval, now=False)
+ service_obj = cls(host, binary, topic, manager,
+ report_interval, periodic_interval)
# This is the parent service that twistd will be looking for when it
# parses this file, return it so that we can get it into globals.
diff --git a/nova/test.py b/nova/test.py
index 8e89eb43e..5c2a72819 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -28,7 +28,6 @@ import time
import mox
import stubout
-from tornado import ioloop
from twisted.internet import defer
from twisted.trial import unittest
@@ -58,7 +57,7 @@ def skip_if_fake(func):
class TrialTestCase(unittest.TestCase):
"""Test case base class for all unit tests"""
- def setUp(self): # pylint: disable-msg=C0103
+ def setUp(self):
"""Run before each test method to initialize test environment"""
super(TrialTestCase, self).setUp()
# NOTE(vish): We need a better method for creating fixtures for tests
@@ -82,8 +81,9 @@ class TrialTestCase(unittest.TestCase):
self._monkey_patch_attach()
self._original_flags = FLAGS.FlagValuesDict()
- def tearDown(self): # pylint: disable-msg=C0103
- """Runs after each test method to finalize/tear down test environment"""
+ def tearDown(self):
+ """Runs after each test method to finalize/tear down test
+ environment."""
try:
self.mox.UnsetStubs()
self.stubs.UnsetAll()
@@ -91,7 +91,8 @@ class TrialTestCase(unittest.TestCase):
self.mox.VerifyAll()
# NOTE(vish): Clean up any ips associated during the test.
ctxt = context.get_admin_context()
- db.fixed_ip_disassociate_all_by_timeout(ctxt, FLAGS.host, self.start)
+ db.fixed_ip_disassociate_all_by_timeout(ctxt, FLAGS.host,
+ self.start)
db.network_disassociate_all(ctxt)
rpc.Consumer.attach_to_twisted = self.originalAttach
for x in self.injected:
@@ -149,6 +150,7 @@ class TrialTestCase(unittest.TestCase):
def _monkey_patch_attach(self):
self.originalAttach = rpc.Consumer.attach_to_twisted
+
def _wrapped(innerSelf):
rv = self.originalAttach(innerSelf)
self.injected.append(rv)
@@ -156,154 +158,3 @@ class TrialTestCase(unittest.TestCase):
_wrapped.func_name = self.originalAttach.func_name
rpc.Consumer.attach_to_twisted = _wrapped
-
-
-class BaseTestCase(TrialTestCase):
- # TODO(jaypipes): Can this be moved into the TrialTestCase class?
- """Base test case class for all unit tests.
-
- DEPRECATED: This is being removed once Tornado is gone, use TrialTestCase.
- """
- def setUp(self): # pylint: disable-msg=C0103
- """Run before each test method to initialize test environment"""
- super(BaseTestCase, self).setUp()
- # TODO(termie): we could possibly keep a more global registry of
- # the injected listeners... this is fine for now though
- self.ioloop = ioloop.IOLoop.instance()
-
- self._waiting = None
- self._done_waiting = False
- self._timed_out = False
-
- def _wait_for_test(self, timeout=60):
- """ Push the ioloop along to wait for our test to complete. """
- self._waiting = self.ioloop.add_timeout(time.time() + timeout,
- self._timeout)
- def _wait():
- """Wrapped wait function. Called on timeout."""
- if self._timed_out:
- self.fail('test timed out')
- self._done()
- if self._done_waiting:
- self.ioloop.stop()
- return
- # we can use add_callback here but this uses less cpu when testing
- self.ioloop.add_timeout(time.time() + 0.01, _wait)
-
- self.ioloop.add_callback(_wait)
- self.ioloop.start()
-
- def _done(self):
- """Callback used for cleaning up deferred test methods."""
- if self._waiting:
- try:
- self.ioloop.remove_timeout(self._waiting)
- except Exception: # pylint: disable-msg=W0703
- # TODO(jaypipes): This produces a pylint warning. Should
- # we really be catching Exception and then passing here?
- pass
- self._waiting = None
- self._done_waiting = True
-
- def _maybe_inline_callbacks(self, func):
- """ If we're doing async calls in our tests, wait on them.
-
- This is probably the most complicated hunk of code we have so far.
-
- First up, if the function is normal (not async) we just act normal
- and return.
-
- Async tests will use the "Inline Callbacks" pattern, which means
- you yield Deferreds at every "waiting" step of your code instead
- of making epic callback chains.
-
- Example (callback chain, ugly):
-
- d = self.compute.terminate_instance(instance_id) # a Deferred instance
- def _describe(_):
- d_desc = self.compute.describe_instances() # another Deferred instance
- return d_desc
- def _checkDescribe(rv):
- self.assertEqual(rv, [])
- d.addCallback(_describe)
- d.addCallback(_checkDescribe)
- d.addCallback(lambda x: self._done())
- self._wait_for_test()
-
- Example (inline callbacks! yay!):
-
- yield self.compute.terminate_instance(instance_id)
- rv = yield self.compute.describe_instances()
- self.assertEqual(rv, [])
-
- If the test fits the Inline Callbacks pattern we will automatically
- handle calling wait and done.
- """
- # TODO(termie): this can be a wrapper function instead and
- # and we can make a metaclass so that we don't
- # have to copy all that "run" code below.
- g = func()
- if not hasattr(g, 'send'):
- self._done()
- return defer.succeed(g)
-
- inlined = defer.inlineCallbacks(func)
- d = inlined()
- return d
-
- def _catch_exceptions(self, result, failure):
- """Catches all exceptions and handles keyboard interrupts."""
- exc = (failure.type, failure.value, failure.getTracebackObject())
- if isinstance(failure.value, self.failureException):
- result.addFailure(self, exc)
- elif isinstance(failure.value, KeyboardInterrupt):
- raise
- else:
- result.addError(self, exc)
-
- self._done()
-
- def _timeout(self):
- """Helper method which trips the timeouts"""
- self._waiting = False
- self._timed_out = True
-
- def run(self, result=None):
- """Runs the test case"""
-
- result.startTest(self)
- test_method = getattr(self, self._testMethodName)
- try:
- try:
- self.setUp()
- except KeyboardInterrupt:
- raise
- except:
- result.addError(self, sys.exc_info())
- return
-
- ok = False
- try:
- d = self._maybe_inline_callbacks(test_method)
- d.addErrback(lambda x: self._catch_exceptions(result, x))
- d.addBoth(lambda x: self._done() and x)
- self._wait_for_test()
- ok = True
- except self.failureException:
- result.addFailure(self, sys.exc_info())
- except KeyboardInterrupt:
- raise
- except:
- result.addError(self, sys.exc_info())
-
- try:
- self.tearDown()
- except KeyboardInterrupt:
- raise
- except:
- result.addError(self, sys.exc_info())
- ok = False
- if ok:
- result.addSuccess(self)
- finally:
- result.stopTest(self)
diff --git a/nova/tests/access_unittest.py b/nova/tests/access_unittest.py
index 8167259c4..0f66c0a26 100644
--- a/nova/tests/access_unittest.py
+++ b/nova/tests/access_unittest.py
@@ -29,9 +29,12 @@ from nova.auth import manager
FLAGS = flags.FLAGS
+
+
class Context(object):
pass
+
class AccessTestCase(test.TrialTestCase):
def setUp(self):
super(AccessTestCase, self).setUp()
@@ -56,9 +59,11 @@ class AccessTestCase(test.TrialTestCase):
self.project.add_role(self.testnet, 'netadmin')
self.project.add_role(self.testsys, 'sysadmin')
#user is set in each test
+
def noopWSGIApp(environ, start_response):
start_response('200 OK', [])
return ['']
+
self.mw = ec2.Authorizer(noopWSGIApp)
self.mw.action_roles = {'str': {
'_allow_all': ['all'],
@@ -80,7 +85,7 @@ class AccessTestCase(test.TrialTestCase):
def response_status(self, user, methodName):
ctxt = context.RequestContext(user, self.project)
- environ = {'ec2.context' : ctxt,
+ environ = {'ec2.context': ctxt,
'ec2.controller': 'some string',
'ec2.action': methodName}
req = webob.Request.blank('/', environ)
diff --git a/nova/tests/api/__init__.py b/nova/tests/api/__init__.py
index f051e2390..46f09e906 100644
--- a/nova/tests/api/__init__.py
+++ b/nova/tests/api/__init__.py
@@ -32,10 +32,10 @@ from nova.tests.api.fakes import APIStub
class Test(unittest.TestCase):
- def setUp(self): # pylint: disable-msg=C0103
+ def setUp(self):
self.stubs = stubout.StubOutForTesting()
- def tearDown(self): # pylint: disable-msg=C0103
+ def tearDown(self):
self.stubs.UnsetAll()
def _request(self, url, subdomain, **kwargs):
@@ -66,8 +66,7 @@ class Test(unittest.TestCase):
def test_metadata(self):
def go(url):
- result = self._request(url, 'ec2',
- REMOTE_ADDR='128.192.151.2')
+ result = self._request(url, 'ec2', REMOTE_ADDR='128.192.151.2')
# Each should get to the ORM layer and fail to find the IP
self.assertRaises(nova.exception.NotFound, go, '/latest/')
self.assertRaises(nova.exception.NotFound, go, '/2009-04-04/')
@@ -78,6 +77,5 @@ class Test(unittest.TestCase):
self.assertTrue('2007-12-15\n' in result.body)
-
if __name__ == '__main__':
unittest.main()
diff --git a/nova/tests/api/fakes.py b/nova/tests/api/fakes.py
index d0a2cc027..0aedcaff0 100644
--- a/nova/tests/api/fakes.py
+++ b/nova/tests/api/fakes.py
@@ -1,6 +1,24 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
import webob.dec
from nova import wsgi
+
class APIStub(object):
"""Class to verify request and mark it was called."""
@webob.dec.wsgify
diff --git a/nova/tests/api/openstack/__init__.py b/nova/tests/api/openstack/__init__.py
index b534897f5..2e357febe 100644
--- a/nova/tests/api/openstack/__init__.py
+++ b/nova/tests/api/openstack/__init__.py
@@ -27,11 +27,13 @@ class RateLimitingMiddlewareTest(unittest.TestCase):
def test_get_action_name(self):
middleware = RateLimitingMiddleware(APIStub())
+
def verify(method, url, action_name):
req = Request.blank(url)
req.method = method
action = middleware.get_action_name(req)
self.assertEqual(action, action_name)
+
verify('PUT', '/servers/4', 'PUT')
verify('DELETE', '/servers/4', 'DELETE')
verify('POST', '/images/4', 'POST')
@@ -60,7 +62,7 @@ class RateLimitingMiddlewareTest(unittest.TestCase):
middleware = RateLimitingMiddleware(APIStub())
self.exhaust(middleware, 'POST', '/servers/4', 'usr1', 10)
self.exhaust(middleware, 'POST', '/images/4', 'usr2', 10)
- self.assertTrue(set(middleware.limiter._levels) ==
+ self.assertTrue(set(middleware.limiter._levels) ==
set(['usr1:POST', 'usr1:POST servers', 'usr2:POST']))
def test_POST_servers_action_correctly_ratelimited(self):
@@ -85,19 +87,19 @@ class LimiterTest(unittest.TestCase):
def test_limiter(self):
items = range(2000)
req = Request.blank('/')
- self.assertEqual(limited(items, req), items[ :1000])
+ self.assertEqual(limited(items, req), items[:1000])
req = Request.blank('/?offset=0')
- self.assertEqual(limited(items, req), items[ :1000])
+ self.assertEqual(limited(items, req), items[:1000])
req = Request.blank('/?offset=3')
self.assertEqual(limited(items, req), items[3:1003])
req = Request.blank('/?offset=2005')
self.assertEqual(limited(items, req), [])
req = Request.blank('/?limit=10')
- self.assertEqual(limited(items, req), items[ :10])
+ self.assertEqual(limited(items, req), items[:10])
req = Request.blank('/?limit=0')
- self.assertEqual(limited(items, req), items[ :1000])
+ self.assertEqual(limited(items, req), items[:1000])
req = Request.blank('/?limit=3000')
- self.assertEqual(limited(items, req), items[ :1000])
+ self.assertEqual(limited(items, req), items[:1000])
req = Request.blank('/?offset=1&limit=3')
self.assertEqual(limited(items, req), items[1:4])
req = Request.blank('/?offset=3&limit=0')
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index f12c7b610..1b8c18974 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -37,7 +37,7 @@ from nova.wsgi import Router
FLAGS = flags.FLAGS
-class Context(object):
+class Context(object):
pass
@@ -85,11 +85,11 @@ def stub_out_image_service(stubs):
def stub_out_auth(stubs):
def fake_auth_init(self, app):
self.application = app
-
- stubs.Set(nova.api.openstack.AuthMiddleware,
- '__init__', fake_auth_init)
- stubs.Set(nova.api.openstack.AuthMiddleware,
- '__call__', fake_wsgi)
+
+ stubs.Set(nova.api.openstack.AuthMiddleware,
+ '__init__', fake_auth_init)
+ stubs.Set(nova.api.openstack.AuthMiddleware,
+ '__call__', fake_wsgi)
def stub_out_rate_limiting(stubs):
@@ -106,7 +106,7 @@ def stub_out_rate_limiting(stubs):
def stub_out_networking(stubs):
def get_my_ip():
- return '127.0.0.1'
+ return '127.0.0.1'
stubs.Set(nova.utils, 'get_my_ip', get_my_ip)
FLAGS.FAKE_subdomain = 'api'
@@ -138,7 +138,6 @@ def stub_out_glance(stubs, initial_fixtures=[]):
return id
def fake_update_image_metadata(self, image_id, image_data):
-
f = self.fake_get_image_metadata(image_id)
if not f:
raise exc.NotFound
@@ -146,7 +145,6 @@ def stub_out_glance(stubs, initial_fixtures=[]):
f.update(image_data)
def fake_delete_image_metadata(self, image_id):
-
f = self.fake_get_image_metadata(image_id)
if not f:
raise exc.NotFound
@@ -165,9 +163,11 @@ def stub_out_glance(stubs, initial_fixtures=[]):
fake_parallax_client.fake_get_image_metadata)
stubs.Set(nova.image.services.glance.ParallaxClient, 'add_image_metadata',
fake_parallax_client.fake_add_image_metadata)
- stubs.Set(nova.image.services.glance.ParallaxClient, 'update_image_metadata',
+ stubs.Set(nova.image.services.glance.ParallaxClient,
+ 'update_image_metadata',
fake_parallax_client.fake_update_image_metadata)
- stubs.Set(nova.image.services.glance.ParallaxClient, 'delete_image_metadata',
+ stubs.Set(nova.image.services.glance.ParallaxClient,
+ 'delete_image_metadata',
fake_parallax_client.fake_delete_image_metadata)
stubs.Set(nova.image.services.glance.GlanceImageService, 'delete_all',
fake_parallax_client.fake_delete_all)
@@ -175,7 +175,7 @@ def stub_out_glance(stubs, initial_fixtures=[]):
class FakeToken(object):
def __init__(self, **kwargs):
- for k,v in kwargs.iteritems():
+ for k, v in kwargs.iteritems():
setattr(self, k, v)
@@ -201,7 +201,7 @@ class FakeAuthDatabase(object):
class FakeAuthManager(object):
auth_data = {}
- def add_user(self, key, user):
+ def add_user(self, key, user):
FakeAuthManager.auth_data[key] = user
def get_user(self, uid):
diff --git a/nova/tests/api/openstack/test_api.py b/nova/tests/api/openstack/test_api.py
new file mode 100644
index 000000000..dd83991b9
--- /dev/null
+++ b/nova/tests/api/openstack/test_api.py
@@ -0,0 +1,74 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import unittest
+import webob.exc
+import webob.dec
+
+import nova.api.openstack
+from nova.api.openstack import API
+from nova.api.openstack import faults
+from webob import Request
+
+
+class APITest(unittest.TestCase):
+
+ def test_exceptions_are_converted_to_faults(self):
+
+ @webob.dec.wsgify
+ def succeed(req):
+ return 'Succeeded'
+
+ @webob.dec.wsgify
+ def raise_webob_exc(req):
+ raise webob.exc.HTTPNotFound(explanation='Raised a webob.exc')
+
+ @webob.dec.wsgify
+ def fail(req):
+ raise Exception("Threw an exception")
+
+ @webob.dec.wsgify
+ def raise_api_fault(req):
+ exc = webob.exc.HTTPNotFound(explanation='Raised a webob.exc')
+ return faults.Fault(exc)
+
+ api = API()
+
+ api.application = succeed
+ resp = Request.blank('/').get_response(api)
+ self.assertFalse('cloudServersFault' in resp.body, resp.body)
+ self.assertEqual(resp.status_int, 200, resp.body)
+
+ api.application = raise_webob_exc
+ resp = Request.blank('/').get_response(api)
+ self.assertFalse('cloudServersFault' in resp.body, resp.body)
+ self.assertEqual(resp.status_int, 404, resp.body)
+
+ api.application = raise_api_fault
+ resp = Request.blank('/').get_response(api)
+ self.assertTrue('itemNotFound' in resp.body, resp.body)
+ self.assertEqual(resp.status_int, 404, resp.body)
+
+ api.application = fail
+ resp = Request.blank('/').get_response(api)
+ self.assertTrue('{"cloudServersFault' in resp.body, resp.body)
+ self.assertEqual(resp.status_int, 500, resp.body)
+
+ api.application = fail
+ resp = Request.blank('/.xml').get_response(api)
+ self.assertTrue('<cloudServersFault' in resp.body, resp.body)
+ self.assertEqual(resp.status_int, 500, resp.body)
diff --git a/nova/tests/api/openstack/test_auth.py b/nova/tests/api/openstack/test_auth.py
index 61d17d7e8..b63da187f 100644
--- a/nova/tests/api/openstack/test_auth.py
+++ b/nova/tests/api/openstack/test_auth.py
@@ -1,3 +1,20 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
import datetime
import unittest
@@ -11,7 +28,9 @@ import nova.auth.manager
from nova import auth
from nova.tests.api.openstack import fakes
+
class Test(unittest.TestCase):
+
def setUp(self):
self.stubs = stubout.StubOutForTesting()
self.stubs.Set(nova.api.openstack.auth.BasicApiAuthManager,
@@ -42,7 +61,7 @@ class Test(unittest.TestCase):
def test_authorize_token(self):
f = fakes.FakeAuthManager()
f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None))
-
+
req = webob.Request.blank('/v1.0/')
req.headers['X-Auth-User'] = 'herp'
req.headers['X-Auth-Key'] = 'derp'
@@ -63,14 +82,14 @@ class Test(unittest.TestCase):
result = req.get_response(nova.api.API())
self.assertEqual(result.status, '200 OK')
self.assertEqual(result.headers['X-Test-Success'], 'True')
-
+
def test_token_expiry(self):
self.destroy_called = False
token_hash = 'bacon'
def destroy_token_mock(meh, context, token):
self.destroy_called = True
-
+
def bad_token(meh, context, token_hash):
return fakes.FakeToken(
token_hash=token_hash,
diff --git a/nova/tests/api/openstack/test_faults.py b/nova/tests/api/openstack/test_faults.py
index 70a811469..fda2b5ede 100644
--- a/nova/tests/api/openstack/test_faults.py
+++ b/nova/tests/api/openstack/test_faults.py
@@ -1,3 +1,20 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
import unittest
import webob
import webob.dec
@@ -5,6 +22,7 @@ import webob.exc
from nova.api.openstack import faults
+
class TestFaults(unittest.TestCase):
def test_fault_parts(self):
@@ -19,7 +37,7 @@ class TestFaults(unittest.TestCase):
def test_retry_header(self):
req = webob.Request.blank('/.xml')
- exc = webob.exc.HTTPRequestEntityTooLarge(explanation='sorry',
+ exc = webob.exc.HTTPRequestEntityTooLarge(explanation='sorry',
headers={'Retry-After': 4})
f = faults.Fault(exc)
resp = req.get_response(f)
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py
index e156c0957..d61c3a99b 100644
--- a/nova/tests/api/openstack/test_images.py
+++ b/nova/tests/api/openstack/test_images.py
@@ -90,7 +90,7 @@ class BaseImageServiceTests(object):
id = self.service.create(fixture)
fixture['status'] = 'in progress'
-
+
self.service.update(id, fixture)
new_image_data = self.service.show(id)
self.assertEquals('in progress', new_image_data['status'])
@@ -121,7 +121,7 @@ class BaseImageServiceTests(object):
num_images = len(self.service.index())
self.assertEquals(2, num_images, str(self.service.index()))
-
+
self.service.delete(ids[0])
num_images = len(self.service.index())
@@ -135,7 +135,8 @@ class LocalImageServiceTest(unittest.TestCase,
def setUp(self):
self.stubs = stubout.StubOutForTesting()
- self.service = utils.import_object('nova.image.service.LocalImageService')
+ service_class = 'nova.image.service.LocalImageService'
+ self.service = utils.import_object(service_class)
def tearDown(self):
self.service.delete_all()
@@ -150,7 +151,8 @@ class GlanceImageServiceTest(unittest.TestCase,
def setUp(self):
self.stubs = stubout.StubOutForTesting()
fakes.stub_out_glance(self.stubs)
- self.service = utils.import_object('nova.image.services.glance.GlanceImageService')
+ service_class = 'nova.image.services.glance.GlanceImageService'
+ self.service = utils.import_object(service_class)
self.service.delete_all()
def tearDown(self):
@@ -172,8 +174,7 @@ class ImageControllerWithGlanceServiceTest(unittest.TestCase):
'deleted': False,
'is_public': True,
'status': 'available',
- 'image_type': 'kernel'
- },
+ 'image_type': 'kernel'},
{'id': 'slkduhfas73kkaskgdas',
'name': 'public image #2',
'created_at': str(datetime.datetime.utcnow()),
@@ -182,9 +183,7 @@ class ImageControllerWithGlanceServiceTest(unittest.TestCase):
'deleted': False,
'is_public': True,
'status': 'available',
- 'image_type': 'ramdisk'
- },
- ]
+ 'image_type': 'ramdisk'}]
def setUp(self):
self.orig_image_service = FLAGS.image_service
@@ -211,7 +210,8 @@ class ImageControllerWithGlanceServiceTest(unittest.TestCase):
in self.IMAGE_FIXTURES]
for image in res_dict['images']:
- self.assertEquals(1, fixture_index.count(image), "image %s not in fixture index!" % str(image))
+ self.assertEquals(1, fixture_index.count(image),
+ "image %s not in fixture index!" % str(image))
def test_get_image_details(self):
req = webob.Request.blank('/v1.0/images/detail')
@@ -219,4 +219,5 @@ class ImageControllerWithGlanceServiceTest(unittest.TestCase):
res_dict = json.loads(res.body)
for image in res_dict['images']:
- self.assertEquals(1, self.IMAGE_FIXTURES.count(image), "image %s not in fixtures!" % str(image))
+ self.assertEquals(1, self.IMAGE_FIXTURES.count(image),
+ "image %s not in fixtures!" % str(image))
diff --git a/nova/tests/api/openstack/test_ratelimiting.py b/nova/tests/api/openstack/test_ratelimiting.py
index ad9e67454..4c9d6bc23 100644
--- a/nova/tests/api/openstack/test_ratelimiting.py
+++ b/nova/tests/api/openstack/test_ratelimiting.py
@@ -6,6 +6,7 @@ import webob
import nova.api.openstack.ratelimiting as ratelimiting
+
class LimiterTest(unittest.TestCase):
def setUp(self):
@@ -66,13 +67,16 @@ class LimiterTest(unittest.TestCase):
class FakeLimiter(object):
"""Fake Limiter class that you can tell how to behave."""
+
def __init__(self, test):
self._action = self._username = self._delay = None
self.test = test
+
def mock(self, action, username, delay):
self._action = action
self._username = username
self._delay = delay
+
def perform(self, action, username):
self.test.assertEqual(action, self._action)
self.test.assertEqual(username, self._username)
@@ -88,7 +92,7 @@ class WSGIAppTest(unittest.TestCase):
def test_invalid_methods(self):
requests = []
for method in ['GET', 'PUT', 'DELETE']:
- req = webob.Request.blank('/limits/michael/breakdance',
+ req = webob.Request.blank('/limits/michael/breakdance',
dict(REQUEST_METHOD=method))
requests.append(req)
for req in requests:
@@ -180,7 +184,7 @@ def wire_HTTPConnection_to_WSGI(host, app):
the connection object will be a fake. Its requests will be sent directly
to the given WSGI app rather than through a socket.
-
+
Code connecting to hosts other than host will not be affected.
This method may be called multiple times to map different hosts to
@@ -189,13 +193,16 @@ def wire_HTTPConnection_to_WSGI(host, app):
class HTTPConnectionDecorator(object):
"""Wraps the real HTTPConnection class so that when you instantiate
the class you might instead get a fake instance."""
+
def __init__(self, wrapped):
self.wrapped = wrapped
+
def __call__(self, connection_host, *args, **kwargs):
if connection_host == host:
return FakeHttplibConnection(app, host)
else:
return self.wrapped(connection_host, *args, **kwargs)
+
httplib.HTTPConnection = HTTPConnectionDecorator(httplib.HTTPConnection)
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index f4a09fd97..785fb6f3a 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -32,9 +32,9 @@ from nova.tests.api.openstack import fakes
FLAGS = flags.FLAGS
-
FLAGS.verbose = True
+
def return_server(context, id):
return stub_instance(id)
@@ -44,10 +44,8 @@ def return_servers(context, user_id=1):
def stub_instance(id, user_id=1):
- return Instance(
- id=id, state=0, image_id=10, server_name='server%s'%id,
- user_id=user_id
- )
+ return Instance(id=id, state=0, image_id=10, server_name='server%s' % id,
+ user_id=user_id)
class ServersTest(unittest.TestCase):
@@ -61,9 +59,10 @@ class ServersTest(unittest.TestCase):
fakes.stub_out_key_pair_funcs(self.stubs)
fakes.stub_out_image_service(self.stubs)
self.stubs.Set(nova.db.api, 'instance_get_all', return_servers)
- self.stubs.Set(nova.db.api, 'instance_get_by_internal_id', return_server)
- self.stubs.Set(nova.db.api, 'instance_get_all_by_user',
- return_servers)
+ self.stubs.Set(nova.db.api, 'instance_get_by_internal_id',
+ return_server)
+ self.stubs.Set(nova.db.api, 'instance_get_all_by_user',
+ return_servers)
def tearDown(self):
self.stubs.UnsetAll()
@@ -79,26 +78,26 @@ class ServersTest(unittest.TestCase):
req = webob.Request.blank('/v1.0/servers')
res = req.get_response(nova.api.API())
res_dict = json.loads(res.body)
-
+
i = 0
for s in res_dict['servers']:
self.assertEqual(s['id'], i)
- self.assertEqual(s['name'], 'server%d'%i)
+ self.assertEqual(s['name'], 'server%d' % i)
self.assertEqual(s.get('imageId', None), None)
i += 1
def test_create_instance(self):
def server_update(context, id, params):
- pass
+ pass
def instance_create(context, inst):
return {'id': 1, 'internal_id': 1}
def fake_method(*args, **kwargs):
pass
-
+
def project_get_network(context, user_id):
- return dict(id='1', host='localhost')
+ return dict(id='1', host='localhost')
def queue_get_for(context, *args):
return 'network_topic'
@@ -112,11 +111,10 @@ class ServersTest(unittest.TestCase):
self.stubs.Set(nova.db.api, 'queue_get_for', queue_get_for)
self.stubs.Set(nova.network.manager.VlanManager, 'allocate_fixed_ip',
fake_method)
-
+
body = dict(server=dict(
name='server_test', imageId=2, flavorId=2, metadata={},
- personality = {}
- ))
+ personality={}))
req = webob.Request.blank('/v1.0/servers')
req.method = 'POST'
req.body = json.dumps(body)
@@ -186,44 +184,41 @@ class ServersTest(unittest.TestCase):
req = webob.Request.blank('/v1.0/servers/detail')
res = req.get_response(nova.api.API())
res_dict = json.loads(res.body)
-
+
i = 0
for s in res_dict['servers']:
self.assertEqual(s['id'], i)
- self.assertEqual(s['name'], 'server%d'%i)
+ self.assertEqual(s['name'], 'server%d' % i)
self.assertEqual(s['imageId'], 10)
i += 1
def test_server_reboot(self):
body = dict(server=dict(
name='server_test', imageId=2, flavorId=2, metadata={},
- personality = {}
- ))
+ personality={}))
req = webob.Request.blank('/v1.0/servers/1/action')
req.method = 'POST'
- req.content_type= 'application/json'
+ req.content_type = 'application/json'
req.body = json.dumps(body)
res = req.get_response(nova.api.API())
def test_server_rebuild(self):
body = dict(server=dict(
name='server_test', imageId=2, flavorId=2, metadata={},
- personality = {}
- ))
+ personality={}))
req = webob.Request.blank('/v1.0/servers/1/action')
req.method = 'POST'
- req.content_type= 'application/json'
+ req.content_type = 'application/json'
req.body = json.dumps(body)
res = req.get_response(nova.api.API())
def test_server_resize(self):
body = dict(server=dict(
name='server_test', imageId=2, flavorId=2, metadata={},
- personality = {}
- ))
+ personality={}))
req = webob.Request.blank('/v1.0/servers/1/action')
req.method = 'POST'
- req.content_type= 'application/json'
+ req.content_type = 'application/json'
req.body = json.dumps(body)
res = req.get_response(nova.api.API())
@@ -232,8 +227,9 @@ class ServersTest(unittest.TestCase):
req.method = 'DELETE'
self.server_delete_called = False
+
def instance_destroy_mock(context, id):
- self.server_delete_called = True
+ self.server_delete_called = True
self.stubs.Set(nova.db.api, 'instance_destroy',
instance_destroy_mock)
diff --git a/nova/tests/api/test_wsgi.py b/nova/tests/api/test_wsgi.py
index 9425b01d0..44e2d615c 100644
--- a/nova/tests/api/test_wsgi.py
+++ b/nova/tests/api/test_wsgi.py
@@ -72,7 +72,7 @@ class Test(unittest.TestCase):
"""Test controller to call from router."""
test = self
- def show(self, req, id): # pylint: disable-msg=W0622,C0103
+ def show(self, req, id): # pylint: disable-msg=W0622,C0103
"""Default action called for requests with an ID."""
self.test.assertEqual(req.path_info, '/tests/123')
self.test.assertEqual(id, '123')
@@ -95,7 +95,7 @@ class Test(unittest.TestCase):
class SerializerTest(unittest.TestCase):
def match(self, url, accept, expect):
- input_dict = dict(servers=dict(a=(2,3)))
+ input_dict = dict(servers=dict(a=(2, 3)))
expected_xml = '<servers><a>(2,3)</a></servers>'
expected_json = '{"servers":{"a":[2,3]}}'
req = webob.Request.blank(url, headers=dict(Accept=accept))
@@ -112,7 +112,7 @@ class SerializerTest(unittest.TestCase):
self.match('/servers/4.json', None, expect='json')
self.match('/servers/4', 'application/json', expect='json')
self.match('/servers/4', 'application/xml', expect='xml')
- self.match('/servers/4.xml', None, expect='xml')
+ self.match('/servers/4.xml', None, expect='xml')
def test_defaults_to_json(self):
self.match('/servers/4', None, expect='json')
diff --git a/nova/tests/api_integration.py b/nova/tests/api_integration.py
index 23a88f083..54403c655 100644
--- a/nova/tests/api_integration.py
+++ b/nova/tests/api_integration.py
@@ -28,16 +28,17 @@ CLC_IP = '127.0.0.1'
CLC_PORT = 8773
REGION = 'test'
+
def get_connection():
- return boto.connect_ec2 (
+ return boto.connect_ec2(
aws_access_key_id=ACCESS_KEY,
aws_secret_access_key=SECRET_KEY,
is_secure=False,
region=RegionInfo(None, REGION, CLC_IP),
port=CLC_PORT,
path='/services/Cloud',
- debug=99
- )
+ debug=99)
+
class APIIntegrationTests(unittest.TestCase):
def test_001_get_all_images(self):
@@ -51,4 +52,3 @@ if __name__ == '__main__':
#print conn.get_all_key_pairs()
#print conn.create_key_pair
#print conn.create_security_group('name', 'description')
-
diff --git a/nova/tests/api_unittest.py b/nova/tests/api_unittest.py
index 3f60f38f2..0a81c575b 100644
--- a/nova/tests/api_unittest.py
+++ b/nova/tests/api_unittest.py
@@ -83,7 +83,7 @@ class FakeHttplibConnection(object):
pass
-class XmlConversionTestCase(test.BaseTestCase):
+class XmlConversionTestCase(test.TrialTestCase):
"""Unit test api xml conversion"""
def test_number_conversion(self):
conv = apirequest._try_convert
@@ -99,9 +99,10 @@ class XmlConversionTestCase(test.BaseTestCase):
self.assertEqual(conv('-'), '-')
self.assertEqual(conv('-0'), 0)
-class ApiEc2TestCase(test.BaseTestCase):
+
+class ApiEc2TestCase(test.TrialTestCase):
"""Unit test for the cloud controller on an EC2 API"""
- def setUp(self): # pylint: disable-msg=C0103,C0111
+ def setUp(self):
super(ApiEc2TestCase, self).setUp()
self.manager = manager.AuthManager()
@@ -138,7 +139,6 @@ class ApiEc2TestCase(test.BaseTestCase):
self.manager.delete_project(project)
self.manager.delete_user(user)
-
def test_get_all_key_pairs(self):
"""Test that, after creating a user and project and generating
a key pair, that the API call to list key pairs works properly"""
@@ -183,7 +183,7 @@ class ApiEc2TestCase(test.BaseTestCase):
self.manager.add_role('fake', 'netadmin')
project.add_role('fake', 'netadmin')
- security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \
+ security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
for x in range(random.randint(4, 8)))
self.ec2.create_security_group(security_group_name, 'test group')
@@ -217,10 +217,11 @@ class ApiEc2TestCase(test.BaseTestCase):
self.manager.add_role('fake', 'netadmin')
project.add_role('fake', 'netadmin')
- security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \
+ security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
for x in range(random.randint(4, 8)))
- group = self.ec2.create_security_group(security_group_name, 'test group')
+ group = self.ec2.create_security_group(security_group_name,
+ 'test group')
self.expect_http()
self.mox.ReplayAll()
@@ -241,7 +242,7 @@ class ApiEc2TestCase(test.BaseTestCase):
self.assertEquals(int(group.rules[0].from_port), 80)
self.assertEquals(int(group.rules[0].to_port), 81)
self.assertEquals(len(group.rules[0].grants), 1)
- self.assertEquals(str(group.rules[0].grants[0]), '0.0.0.0/0')
+ self.assertEquals(str(group.rules[0].grants[0]), '0.0.0.0/0')
self.expect_http()
self.mox.ReplayAll()
@@ -282,12 +283,14 @@ class ApiEc2TestCase(test.BaseTestCase):
self.manager.add_role('fake', 'netadmin')
project.add_role('fake', 'netadmin')
- security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \
+ rand_string = 'sdiuisudfsdcnpaqwertasd'
+ security_group_name = "".join(random.choice(rand_string)
+ for x in range(random.randint(4, 8)))
+ other_security_group_name = "".join(random.choice(rand_string)
for x in range(random.randint(4, 8)))
- other_security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \
- for x in range(random.randint(4, 8)))
- group = self.ec2.create_security_group(security_group_name, 'test group')
+ group = self.ec2.create_security_group(security_group_name,
+ 'test group')
self.expect_http()
self.mox.ReplayAll()
@@ -313,9 +316,8 @@ class ApiEc2TestCase(test.BaseTestCase):
if group.name == security_group_name:
self.assertEquals(len(group.rules), 1)
self.assertEquals(len(group.rules[0].grants), 1)
- self.assertEquals(str(group.rules[0].grants[0]),
- '%s-%s' % (other_security_group_name, 'fake'))
-
+ self.assertEquals(str(group.rules[0].grants[0]), '%s-%s' %
+ (other_security_group_name, 'fake'))
self.expect_http()
self.mox.ReplayAll()
diff --git a/nova/tests/auth_unittest.py b/nova/tests/auth_unittest.py
index 97d22d702..fe891beee 100644
--- a/nova/tests/auth_unittest.py
+++ b/nova/tests/auth_unittest.py
@@ -28,6 +28,7 @@ from nova.api.ec2 import cloud
FLAGS = flags.FLAGS
+
class user_generator(object):
def __init__(self, manager, **user_state):
if 'name' not in user_state:
@@ -41,6 +42,7 @@ class user_generator(object):
def __exit__(self, value, type, trace):
self.manager.delete_user(self.user)
+
class project_generator(object):
def __init__(self, manager, **project_state):
if 'name' not in project_state:
@@ -56,6 +58,7 @@ class project_generator(object):
def __exit__(self, value, type, trace):
self.manager.delete_project(self.project)
+
class user_and_project_generator(object):
def __init__(self, manager, user_state={}, project_state={}):
self.manager = manager
@@ -75,6 +78,7 @@ class user_and_project_generator(object):
self.manager.delete_user(self.user)
self.manager.delete_project(self.project)
+
class AuthManagerTestCase(object):
def setUp(self):
FLAGS.auth_driver = self.auth_driver
@@ -96,7 +100,7 @@ class AuthManagerTestCase(object):
self.assertEqual('private-party', u.access)
def test_004_signature_is_valid(self):
- #self.assertTrue(self.manager.authenticate( **boto.generate_url ... ? ? ? ))
+ #self.assertTrue(self.manager.authenticate(**boto.generate_url ...? ))
pass
#raise NotImplementedError
@@ -127,7 +131,7 @@ class AuthManagerTestCase(object):
self.assertFalse(self.manager.has_role('test1', 'itsec'))
def test_can_create_and_get_project(self):
- with user_and_project_generator(self.manager) as (u,p):
+ with user_and_project_generator(self.manager) as (u, p):
self.assert_(self.manager.get_user('test1'))
self.assert_(self.manager.get_user('test1'))
self.assert_(self.manager.get_project('testproj'))
@@ -321,6 +325,7 @@ class AuthManagerTestCase(object):
self.assertEqual('secret', user.secret)
self.assertTrue(user.is_admin())
+
class AuthManagerLdapTestCase(AuthManagerTestCase, test.TrialTestCase):
auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
@@ -337,6 +342,7 @@ class AuthManagerLdapTestCase(AuthManagerTestCase, test.TrialTestCase):
except:
self.skip = True
+
class AuthManagerDbTestCase(AuthManagerTestCase, test.TrialTestCase):
auth_driver = 'nova.auth.dbdriver.DbDriver'
diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py
index 835bfdf49..2d61d2675 100644
--- a/nova/tests/cloud_unittest.py
+++ b/nova/tests/cloud_unittest.py
@@ -46,13 +46,13 @@ from nova.objectstore import image
FLAGS = flags.FLAGS
-
# Temp dirs for working with image attributes through the cloud controller
# (stole this from objectstore_unittest.py)
OSS_TEMPDIR = tempfile.mkdtemp(prefix='test_oss-')
IMAGES_PATH = os.path.join(OSS_TEMPDIR, 'images')
os.makedirs(IMAGES_PATH)
+
class CloudTestCase(test.TrialTestCase):
def setUp(self):
super(CloudTestCase, self).setUp()
@@ -97,17 +97,17 @@ class CloudTestCase(test.TrialTestCase):
max_count = 1
kwargs = {'image_id': image_id,
'instance_type': instance_type,
- 'max_count': max_count }
+ 'max_count': max_count}
rv = yield self.cloud.run_instances(self.context, **kwargs)
instance_id = rv['instancesSet'][0]['instanceId']
- output = yield self.cloud.get_console_output(context=self.context, instance_id=[instance_id])
+ output = yield self.cloud.get_console_output(context=self.context,
+ instance_id=[instance_id])
self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE OUTPUT')
# TODO(soren): We need this until we can stop polling in the rpc code
# for unit tests.
greenthread.sleep(0.3)
rv = yield self.cloud.terminate_instances(self.context, [instance_id])
-
def test_key_generation(self):
result = self._create_key('test')
private_key = result['private_key']
@@ -146,8 +146,10 @@ class CloudTestCase(test.TrialTestCase):
'max_count': max_count}
rv = yield self.cloud.run_instances(self.context, **kwargs)
# TODO: check for proper response
- instance = rv['reservationSet'][0][rv['reservationSet'][0].keys()[0]][0]
- logging.debug("Need to watch instance %s until it's running..." % instance['instance_id'])
+ instance_id = rv['reservationSet'][0].keys()[0]
+ instance = rv['reservationSet'][0][instance_id][0]
+ logging.debug("Need to watch instance %s until it's running..." %
+ instance['instance_id'])
while True:
rv = yield defer.succeed(time.sleep(1))
info = self.cloud._get_instance(instance['instance_id'])
@@ -157,14 +159,15 @@ class CloudTestCase(test.TrialTestCase):
self.assert_(rv)
if connection_type != 'fake':
- time.sleep(45) # Should use boto for polling here
+ time.sleep(45) # Should use boto for polling here
for reservations in rv['reservationSet']:
# for res_id in reservations.keys():
- # logging.debug(reservations[res_id])
- # for instance in reservations[res_id]:
- for instance in reservations[reservations.keys()[0]]:
- logging.debug("Terminating instance %s" % instance['instance_id'])
- rv = yield self.compute.terminate_instance(instance['instance_id'])
+ # logging.debug(reservations[res_id])
+ # for instance in reservations[res_id]:
+ for instance in reservations[reservations.keys()[0]]:
+ instance_id = instance['instance_id']
+ logging.debug("Terminating instance %s" % instance_id)
+ rv = yield self.compute.terminate_instance(instance_id)
def test_instance_update_state(self):
def instance(num):
@@ -183,8 +186,7 @@ class CloudTestCase(test.TrialTestCase):
'groups': ['default'],
'product_codes': None,
'state': 0x01,
- 'user_data': ''
- }
+ 'user_data': ''}
rv = self.cloud._format_describe_instances(self.context)
self.assert_(len(rv['reservationSet']) == 0)
@@ -199,7 +201,9 @@ class CloudTestCase(test.TrialTestCase):
#self.assert_(len(rv['reservationSet'][0]['instances_set']) == 5)
# report 4 nodes each having 1 of the instances
#for i in xrange(4):
- # self.cloud.update_state('instances', {('node-%s' % i): {('i-%s' % i): instance(i)}})
+ # self.cloud.update_state('instances',
+ # {('node-%s' % i): {('i-%s' % i):
+ # instance(i)}})
# one instance should be pending still
#self.assert_(len(self.cloud.instances['pending'].keys()) == 1)
@@ -217,8 +221,10 @@ class CloudTestCase(test.TrialTestCase):
@staticmethod
def _fake_set_image_description(ctxt, image_id, description):
from nova.objectstore import handler
+
class req:
pass
+
request = req()
request.context = ctxt
request.args = {'image_id': [image_id],
diff --git a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py
index 01e1bcd30..01b5651df 100644
--- a/nova/tests/compute_unittest.py
+++ b/nova/tests/compute_unittest.py
@@ -37,7 +37,7 @@ FLAGS = flags.FLAGS
class ComputeTestCase(test.TrialTestCase):
"""Test case for compute"""
- def setUp(self): # pylint: disable-msg=C0103
+ def setUp(self):
logging.getLogger().setLevel(logging.DEBUG)
super(ComputeTestCase, self).setUp()
self.flags(connection_type='fake',
@@ -48,7 +48,7 @@ class ComputeTestCase(test.TrialTestCase):
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.context = context.get_admin_context()
- def tearDown(self): # pylint: disable-msg=C0103
+ def tearDown(self):
self.manager.delete_user(self.user)
self.manager.delete_project(self.project)
super(ComputeTestCase, self).tearDown()
diff --git a/nova/tests/flags_unittest.py b/nova/tests/flags_unittest.py
index 714170e5e..b97df075d 100644
--- a/nova/tests/flags_unittest.py
+++ b/nova/tests/flags_unittest.py
@@ -23,7 +23,9 @@ from nova import test
FLAGS = flags.FLAGS
flags.DEFINE_string('flags_unittest', 'foo', 'for testing purposes only')
+
class FlagsTestCase(test.TrialTestCase):
+
def setUp(self):
super(FlagsTestCase, self).setUp()
self.FLAGS = flags.FlagValues()
@@ -35,7 +37,8 @@ class FlagsTestCase(test.TrialTestCase):
self.assert_('false' not in self.FLAGS)
self.assert_('true' not in self.FLAGS)
- flags.DEFINE_string('string', 'default', 'desc', flag_values=self.FLAGS)
+ flags.DEFINE_string('string', 'default', 'desc',
+ flag_values=self.FLAGS)
flags.DEFINE_integer('int', 1, 'desc', flag_values=self.FLAGS)
flags.DEFINE_bool('false', False, 'desc', flag_values=self.FLAGS)
flags.DEFINE_bool('true', True, 'desc', flag_values=self.FLAGS)
diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py
index e8dd2624f..b7caed4fd 100644
--- a/nova/tests/network_unittest.py
+++ b/nova/tests/network_unittest.py
@@ -35,7 +35,7 @@ FLAGS = flags.FLAGS
class NetworkTestCase(test.TrialTestCase):
"""Test cases for network code"""
- def setUp(self): # pylint: disable-msg=C0103
+ def setUp(self):
super(NetworkTestCase, self).setUp()
# NOTE(vish): if you change these flags, make sure to change the
# flags in the corresponding section in nova-dhcpbridge
@@ -65,7 +65,7 @@ class NetworkTestCase(test.TrialTestCase):
instance_ref = self._create_instance(1)
self.instance2_id = instance_ref['id']
- def tearDown(self): # pylint: disable-msg=C0103
+ def tearDown(self):
super(NetworkTestCase, self).tearDown()
# TODO(termie): this should really be instantiating clean datastores
# in between runs, one failure kills all the tests
@@ -98,7 +98,6 @@ class NetworkTestCase(test.TrialTestCase):
self.context.project_id = self.projects[project_num].id
self.network.deallocate_fixed_ip(self.context, address)
-
def test_public_network_association(self):
"""Makes sure that we can allocaate a public ip"""
# TODO(vish): better way of adding floating ips
@@ -118,10 +117,12 @@ class NetworkTestCase(test.TrialTestCase):
lease_ip(fix_addr)
self.assertEqual(float_addr, str(pubnet[0]))
self.network.associate_floating_ip(self.context, float_addr, fix_addr)
- address = db.instance_get_floating_address(context.get_admin_context(), self.instance_id)
+ address = db.instance_get_floating_address(context.get_admin_context(),
+ self.instance_id)
self.assertEqual(address, float_addr)
self.network.disassociate_floating_ip(self.context, float_addr)
- address = db.instance_get_floating_address(context.get_admin_context(), self.instance_id)
+ address = db.instance_get_floating_address(context.get_admin_context(),
+ self.instance_id)
self.assertEqual(address, None)
self.network.deallocate_floating_ip(self.context, float_addr)
self.network.deallocate_fixed_ip(self.context, fix_addr)
@@ -254,18 +255,24 @@ class NetworkTestCase(test.TrialTestCase):
There are ips reserved at the bottom and top of the range.
services (network, gateway, CloudPipe, broadcast)
"""
- network = db.project_get_network(context.get_admin_context(), self.projects[0].id)
+ network = db.project_get_network(context.get_admin_context(),
+ self.projects[0].id)
net_size = flags.FLAGS.network_size
- total_ips = (db.network_count_available_ips(context.get_admin_context(), network['id']) +
- db.network_count_reserved_ips(context.get_admin_context(), network['id']) +
- db.network_count_allocated_ips(context.get_admin_context(), network['id']))
+ admin_context = context.get_admin_context()
+ total_ips = (db.network_count_available_ips(admin_context,
+ network['id']) +
+ db.network_count_reserved_ips(admin_context,
+ network['id']) +
+ db.network_count_allocated_ips(admin_context,
+ network['id']))
self.assertEqual(total_ips, net_size)
def test_too_many_addresses(self):
"""Test for a NoMoreAddresses exception when all fixed ips are used.
"""
- network = db.project_get_network(context.get_admin_context(), self.projects[0].id)
- num_available_ips = db.network_count_available_ips(context.get_admin_context(),
+ admin_context = context.get_admin_context()
+ network = db.project_get_network(admin_context, self.projects[0].id)
+ num_available_ips = db.network_count_available_ips(admin_context,
network['id'])
addresses = []
instance_ids = []
@@ -276,8 +283,9 @@ class NetworkTestCase(test.TrialTestCase):
addresses.append(address)
lease_ip(address)
- self.assertEqual(db.network_count_available_ips(context.get_admin_context(),
- network['id']), 0)
+ ip_count = db.network_count_available_ips(context.get_admin_context(),
+ network['id'])
+ self.assertEqual(ip_count, 0)
self.assertRaises(db.NoMoreAddresses,
self.network.allocate_fixed_ip,
self.context,
@@ -287,14 +295,15 @@ class NetworkTestCase(test.TrialTestCase):
self.network.deallocate_fixed_ip(self.context, addresses[i])
release_ip(addresses[i])
db.instance_destroy(context.get_admin_context(), instance_ids[i])
- self.assertEqual(db.network_count_available_ips(context.get_admin_context(),
- network['id']),
- num_available_ips)
+ ip_count = db.network_count_available_ips(context.get_admin_context(),
+ network['id'])
+ self.assertEqual(ip_count, num_available_ips)
def is_allocated_in_project(address, project_id):
"""Returns true if address is in specified project"""
- project_net = db.project_get_network(context.get_admin_context(), project_id)
+ project_net = db.project_get_network(context.get_admin_context(),
+ project_id)
network = db.fixed_ip_get_network(context.get_admin_context(), address)
instance = db.fixed_ip_get_instance(context.get_admin_context(), address)
# instance exists until release
@@ -308,8 +317,10 @@ def binpath(script):
def lease_ip(private_ip):
"""Run add command on dhcpbridge"""
- network_ref = db.fixed_ip_get_network(context.get_admin_context(), private_ip)
- instance_ref = db.fixed_ip_get_instance(context.get_admin_context(), private_ip)
+ network_ref = db.fixed_ip_get_network(context.get_admin_context(),
+ private_ip)
+ instance_ref = db.fixed_ip_get_instance(context.get_admin_context(),
+ private_ip)
cmd = "%s add %s %s fake" % (binpath('nova-dhcpbridge'),
instance_ref['mac_address'],
private_ip)
@@ -322,8 +333,10 @@ def lease_ip(private_ip):
def release_ip(private_ip):
"""Run del command on dhcpbridge"""
- network_ref = db.fixed_ip_get_network(context.get_admin_context(), private_ip)
- instance_ref = db.fixed_ip_get_instance(context.get_admin_context(), private_ip)
+ network_ref = db.fixed_ip_get_network(context.get_admin_context(),
+ private_ip)
+ instance_ref = db.fixed_ip_get_instance(context.get_admin_context(),
+ private_ip)
cmd = "%s del %s %s fake" % (binpath('nova-dhcpbridge'),
instance_ref['mac_address'],
private_ip)
diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py
index f096ac6fe..061799923 100644
--- a/nova/tests/objectstore_unittest.py
+++ b/nova/tests/objectstore_unittest.py
@@ -57,7 +57,7 @@ os.makedirs(os.path.join(OSS_TEMPDIR, 'buckets'))
class ObjectStoreTestCase(test.TrialTestCase):
"""Test objectstore API directly."""
- def setUp(self): # pylint: disable-msg=C0103
+ def setUp(self):
"""Setup users and projects."""
super(ObjectStoreTestCase, self).setUp()
self.flags(buckets_path=os.path.join(OSS_TEMPDIR, 'buckets'),
@@ -73,7 +73,7 @@ class ObjectStoreTestCase(test.TrialTestCase):
self.auth_manager.create_project('proj2', 'user2', 'a proj', ['user2'])
self.context = context.RequestContext('user1', 'proj1')
- def tearDown(self): # pylint: disable-msg=C0103
+ def tearDown(self):
"""Tear down users and projects."""
self.auth_manager.delete_project('proj1')
self.auth_manager.delete_project('proj2')
@@ -181,7 +181,7 @@ class ObjectStoreTestCase(test.TrialTestCase):
class TestHTTPChannel(http.HTTPChannel):
"""Dummy site required for twisted.web"""
- def checkPersistence(self, _, __): # pylint: disable-msg=C0103
+ def checkPersistence(self, _, __): # pylint: disable-msg=C0103
"""Otherwise we end up with an unclean reactor."""
return False
@@ -194,7 +194,7 @@ class TestSite(server.Site):
class S3APITestCase(test.TrialTestCase):
"""Test objectstore through S3 API."""
- def setUp(self): # pylint: disable-msg=C0103
+ def setUp(self):
"""Setup users, projects, and start a test server."""
super(S3APITestCase, self).setUp()
@@ -217,7 +217,6 @@ class S3APITestCase(test.TrialTestCase):
# pylint: enable-msg=E1101
self.tcp_port = self.listening_port.getHost().port
-
if not boto.config.has_section('Boto'):
boto.config.add_section('Boto')
boto.config.set('Boto', 'num_retries', '0')
@@ -234,11 +233,11 @@ class S3APITestCase(test.TrialTestCase):
self.conn.get_http_connection = get_http_connection
- def _ensure_no_buckets(self, buckets): # pylint: disable-msg=C0111
+ def _ensure_no_buckets(self, buckets): # pylint: disable-msg=C0111
self.assertEquals(len(buckets), 0, "Bucket list was not empty")
return True
- def _ensure_one_bucket(self, buckets, name): # pylint: disable-msg=C0111
+ def _ensure_one_bucket(self, buckets, name): # pylint: disable-msg=C0111
self.assertEquals(len(buckets), 1,
"Bucket list didn't have exactly one element in it")
self.assertEquals(buckets[0].name, name, "Wrong name")
@@ -309,7 +308,7 @@ class S3APITestCase(test.TrialTestCase):
deferred.addCallback(self._ensure_no_buckets)
return deferred
- def tearDown(self): # pylint: disable-msg=C0103
+ def tearDown(self):
"""Tear down auth and test server."""
self.auth_manager.delete_user('admin')
self.auth_manager.delete_project('admin')
diff --git a/nova/tests/process_unittest.py b/nova/tests/process_unittest.py
index 25c60c616..67245af03 100644
--- a/nova/tests/process_unittest.py
+++ b/nova/tests/process_unittest.py
@@ -38,6 +38,7 @@ class ProcessTestCase(test.TrialTestCase):
def test_execute_stdout(self):
pool = process.ProcessPool(2)
d = pool.simple_execute('echo test')
+
def _check(rv):
self.assertEqual(rv[0], 'test\n')
self.assertEqual(rv[1], '')
@@ -49,6 +50,7 @@ class ProcessTestCase(test.TrialTestCase):
def test_execute_stderr(self):
pool = process.ProcessPool(2)
d = pool.simple_execute('cat BAD_FILE', check_exit_code=False)
+
def _check(rv):
self.assertEqual(rv[0], '')
self.assert_('No such file' in rv[1])
@@ -72,6 +74,7 @@ class ProcessTestCase(test.TrialTestCase):
d4 = pool.simple_execute('sleep 0.005')
called = []
+
def _called(rv, name):
called.append(name)
diff --git a/nova/tests/quota_unittest.py b/nova/tests/quota_unittest.py
index 72e44bf52..9e3afbf4e 100644
--- a/nova/tests/quota_unittest.py
+++ b/nova/tests/quota_unittest.py
@@ -33,7 +33,7 @@ FLAGS = flags.FLAGS
class QuotaTestCase(test.TrialTestCase):
- def setUp(self): # pylint: disable-msg=C0103
+ def setUp(self):
logging.getLogger().setLevel(logging.DEBUG)
super(QuotaTestCase, self).setUp()
self.flags(connection_type='fake',
@@ -51,7 +51,7 @@ class QuotaTestCase(test.TrialTestCase):
self.context = context.RequestContext(project=self.project,
user=self.user)
- def tearDown(self): # pylint: disable-msg=C0103
+ def tearDown(self):
manager.AuthManager().delete_project(self.project)
manager.AuthManager().delete_user(self.user)
super(QuotaTestCase, self).tearDown()
@@ -141,12 +141,13 @@ class QuotaTestCase(test.TrialTestCase):
try:
db.floating_ip_get_by_address(context.get_admin_context(), address)
except exception.NotFound:
- db.floating_ip_create(context.get_admin_context(), {'address': address,
- 'host': FLAGS.host})
+ db.floating_ip_create(context.get_admin_context(),
+ {'address': address, 'host': FLAGS.host})
float_addr = self.network.allocate_floating_ip(self.context,
self.project.id)
# NOTE(vish): This assert never fails. When cloud attempts to
# make an rpc.call, the test just finishes with OK. It
# appears to be something in the magic inline callbacks
# that is breaking.
- self.assertRaises(cloud.QuotaError, self.cloud.allocate_address, self.context)
+ self.assertRaises(cloud.QuotaError, self.cloud.allocate_address,
+ self.context)
diff --git a/nova/tests/rpc_unittest.py b/nova/tests/rpc_unittest.py
index 5d2bb1046..f35b65a39 100644
--- a/nova/tests/rpc_unittest.py
+++ b/nova/tests/rpc_unittest.py
@@ -33,7 +33,7 @@ FLAGS = flags.FLAGS
class RpcTestCase(test.TrialTestCase):
"""Test cases for rpc"""
- def setUp(self): # pylint: disable-msg=C0103
+ def setUp(self):
super(RpcTestCase, self).setUp()
self.conn = rpc.Connection.instance()
self.receiver = TestReceiver()
@@ -41,7 +41,7 @@ class RpcTestCase(test.TrialTestCase):
topic='test',
proxy=self.receiver)
self.consumer.attach_to_twisted()
- self.context= context.get_admin_context()
+ self.context = context.get_admin_context()
def test_call_succeed(self):
"""Get a value through rpc call"""
@@ -67,9 +67,9 @@ class RpcTestCase(test.TrialTestCase):
to an int in the test.
"""
value = 42
- self.assertFailure(rpc.call_twisted(self.context,
- 'test', {"method": "fail",
- "args": {"value": value}}),
+ self.assertFailure(rpc.call_twisted(self.context, 'test',
+ {"method": "fail",
+ "args": {"value": value}}),
rpc.RemoteError)
try:
yield rpc.call_twisted(self.context,
@@ -101,4 +101,3 @@ class TestReceiver(object):
def fail(context, value):
"""Raises an exception with the value sent in"""
raise Exception(value)
-
diff --git a/nova/tests/scheduler_unittest.py b/nova/tests/scheduler_unittest.py
index 379f8cdc8..27345d055 100644
--- a/nova/tests/scheduler_unittest.py
+++ b/nova/tests/scheduler_unittest.py
@@ -34,6 +34,7 @@ from nova.scheduler import driver
FLAGS = flags.FLAGS
flags.DECLARE('max_cores', 'nova.scheduler.simple')
+
class TestDriver(driver.Scheduler):
"""Scheduler Driver for Tests"""
def schedule(context, topic, *args, **kwargs):
@@ -42,9 +43,10 @@ class TestDriver(driver.Scheduler):
def schedule_named_method(context, topic, num):
return 'named_host'
+
class SchedulerTestCase(test.TrialTestCase):
"""Test case for scheduler"""
- def setUp(self): # pylint: disable=C0103
+ def setUp(self):
super(SchedulerTestCase, self).setUp()
self.flags(scheduler_driver='nova.tests.scheduler_unittest.TestDriver')
@@ -73,7 +75,7 @@ class SchedulerTestCase(test.TrialTestCase):
class SimpleDriverTestCase(test.TrialTestCase):
"""Test case for simple driver"""
- def setUp(self): # pylint: disable-msg=C0103
+ def setUp(self):
super(SimpleDriverTestCase, self).setUp()
self.flags(connection_type='fake',
max_cores=4,
@@ -87,7 +89,7 @@ class SimpleDriverTestCase(test.TrialTestCase):
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.context = context.get_admin_context()
- def tearDown(self): # pylint: disable-msg=C0103
+ def tearDown(self):
self.manager.delete_user(self.user)
self.manager.delete_project(self.project)
diff --git a/nova/tests/service_unittest.py b/nova/tests/service_unittest.py
index 61db52742..a268bc4fe 100644
--- a/nova/tests/service_unittest.py
+++ b/nova/tests/service_unittest.py
@@ -23,8 +23,8 @@ Unit Tests for remote procedure calls using queue
import mox
from twisted.application.app import startApplication
+from twisted.internet import defer
-from nova import context
from nova import exception
from nova import flags
from nova import rpc
@@ -39,21 +39,58 @@ flags.DEFINE_string("fake_manager", "nova.tests.service_unittest.FakeManager",
class FakeManager(manager.Manager):
"""Fake manager for tests"""
- pass
+ def test_method(self):
+ return 'manager'
-class ServiceTestCase(test.BaseTestCase):
- """Test cases for rpc"""
+class ExtendedService(service.Service):
+ def test_method(self):
+ return 'service'
- def setUp(self): # pylint: disable=C0103
+
+class ServiceManagerTestCase(test.TrialTestCase):
+ """Test cases for Services"""
+
+ def test_attribute_error_for_no_manager(self):
+ serv = service.Service('test',
+ 'test',
+ 'test',
+ 'nova.tests.service_unittest.FakeManager')
+ self.assertRaises(AttributeError, getattr, serv, 'test_method')
+
+ def test_message_gets_to_manager(self):
+ serv = service.Service('test',
+ 'test',
+ 'test',
+ 'nova.tests.service_unittest.FakeManager')
+ serv.startService()
+ self.assertEqual(serv.test_method(), 'manager')
+
+ def test_override_manager_method(self):
+ serv = ExtendedService('test',
+ 'test',
+ 'test',
+ 'nova.tests.service_unittest.FakeManager')
+ serv.startService()
+ self.assertEqual(serv.test_method(), 'service')
+
+
+class ServiceTestCase(test.TrialTestCase):
+ """Test cases for Services"""
+
+ def setUp(self):
super(ServiceTestCase, self).setUp()
self.mox.StubOutWithMock(service, 'db')
- self.context = context.get_admin_context()
def test_create(self):
host = 'foo'
binary = 'nova-fake'
topic = 'fake'
+
+ # NOTE(vish): Create was moved out of mox replay to make sure that
+ # the looping calls are created in StartService.
+ app = service.Service.create(host=host, binary=binary)
+
self.mox.StubOutWithMock(rpc,
'AdapterConsumer',
use_mock_anything=True)
@@ -99,7 +136,6 @@ class ServiceTestCase(test.BaseTestCase):
service_create).AndReturn(service_ref)
self.mox.ReplayAll()
- app = service.Service.create(host=host, binary=binary)
startApplication(app, False)
self.assert_(app)
@@ -107,86 +143,103 @@ class ServiceTestCase(test.BaseTestCase):
# whether it is disconnected, it looks for a variable on itself called
# 'model_disconnected' and report_state doesn't really do much so this
# these are mostly just for coverage
- def test_report_state(self):
- host = 'foo'
- binary = 'bar'
- service_ref = {'host': host,
- 'binary': binary,
- 'report_count': 0,
- 'id': 1}
- service.db.__getattr__('report_state')
- service.db.service_get_by_args(self.context,
- host,
- binary).AndReturn(service_ref)
- service.db.service_update(self.context, service_ref['id'],
- mox.ContainsKeyValue('report_count', 1))
-
- self.mox.ReplayAll()
- s = service.Service()
- rv = yield s.report_state(host, binary)
-
+ @defer.inlineCallbacks
def test_report_state_no_service(self):
host = 'foo'
binary = 'bar'
+ topic = 'test'
service_create = {'host': host,
'binary': binary,
+ 'topic': topic,
'report_count': 0}
service_ref = {'host': host,
- 'binary': binary,
- 'report_count': 0,
- 'id': 1}
+ 'binary': binary,
+ 'topic': topic,
+ 'report_count': 0,
+ 'id': 1}
- service.db.__getattr__('report_state')
- service.db.service_get_by_args(self.context,
+ service.db.service_get_by_args(mox.IgnoreArg(),
host,
binary).AndRaise(exception.NotFound())
- service.db.service_create(self.context,
+ service.db.service_create(mox.IgnoreArg(),
service_create).AndReturn(service_ref)
- service.db.service_get(self.context, service_ref['id']).AndReturn(service_ref)
- service.db.service_update(self.context, service_ref['id'],
+ service.db.service_get(mox.IgnoreArg(),
+ service_ref['id']).AndReturn(service_ref)
+ service.db.service_update(mox.IgnoreArg(), service_ref['id'],
mox.ContainsKeyValue('report_count', 1))
self.mox.ReplayAll()
- s = service.Service()
- rv = yield s.report_state(host, binary)
-
+ serv = service.Service(host,
+ binary,
+ topic,
+ 'nova.tests.service_unittest.FakeManager')
+ serv.startService()
+ yield serv.report_state()
+
+ @defer.inlineCallbacks
def test_report_state_newly_disconnected(self):
host = 'foo'
binary = 'bar'
+ topic = 'test'
+ service_create = {'host': host,
+ 'binary': binary,
+ 'topic': topic,
+ 'report_count': 0}
service_ref = {'host': host,
- 'binary': binary,
- 'report_count': 0,
- 'id': 1}
+ 'binary': binary,
+ 'topic': topic,
+ 'report_count': 0,
+ 'id': 1}
- service.db.__getattr__('report_state')
- service.db.service_get_by_args(self.context,
- host,
- binary).AndRaise(Exception())
+ service.db.service_get_by_args(mox.IgnoreArg(),
+ host,
+ binary).AndRaise(exception.NotFound())
+ service.db.service_create(mox.IgnoreArg(),
+ service_create).AndReturn(service_ref)
+ service.db.service_get(mox.IgnoreArg(),
+ mox.IgnoreArg()).AndRaise(Exception())
self.mox.ReplayAll()
- s = service.Service()
- rv = yield s.report_state(host, binary)
-
- self.assert_(s.model_disconnected)
-
+ serv = service.Service(host,
+ binary,
+ topic,
+ 'nova.tests.service_unittest.FakeManager')
+ serv.startService()
+ yield serv.report_state()
+ self.assert_(serv.model_disconnected)
+
+ @defer.inlineCallbacks
def test_report_state_newly_connected(self):
host = 'foo'
binary = 'bar'
+ topic = 'test'
+ service_create = {'host': host,
+ 'binary': binary,
+ 'topic': topic,
+ 'report_count': 0}
service_ref = {'host': host,
- 'binary': binary,
- 'report_count': 0,
- 'id': 1}
+ 'binary': binary,
+ 'topic': topic,
+ 'report_count': 0,
+ 'id': 1}
- service.db.__getattr__('report_state')
- service.db.service_get_by_args(self.context,
- host,
- binary).AndReturn(service_ref)
- service.db.service_update(self.context, service_ref['id'],
+ service.db.service_get_by_args(mox.IgnoreArg(),
+ host,
+ binary).AndRaise(exception.NotFound())
+ service.db.service_create(mox.IgnoreArg(),
+ service_create).AndReturn(service_ref)
+ service.db.service_get(mox.IgnoreArg(),
+ service_ref['id']).AndReturn(service_ref)
+ service.db.service_update(mox.IgnoreArg(), service_ref['id'],
mox.ContainsKeyValue('report_count', 1))
self.mox.ReplayAll()
- s = service.Service()
- s.model_disconnected = True
- rv = yield s.report_state(host, binary)
-
- self.assert_(not s.model_disconnected)
+ serv = service.Service(host,
+ binary,
+ topic,
+ 'nova.tests.service_unittest.FakeManager')
+ serv.startService()
+ serv.model_disconnected = True
+ yield serv.report_state()
+
+ self.assert_(not serv.model_disconnected)
diff --git a/nova/tests/validator_unittest.py b/nova/tests/validator_unittest.py
index 84daa135e..b5f1c0667 100644
--- a/nova/tests/validator_unittest.py
+++ b/nova/tests/validator_unittest.py
@@ -35,7 +35,8 @@ class ValidationTestCase(test.TrialTestCase):
self.assertTrue(type_case("foo", 5, 1))
self.assertRaises(TypeError, type_case, "bar", "5", 1)
self.assertRaises(TypeError, type_case, None, 5, 1)
-
+
+
@validate.typetest(instanceid=str, size=int, number_of_instances=int)
def type_case(instanceid, size, number_of_instances):
return True
diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py
index 76af5cabd..d49383fb7 100644
--- a/nova/tests/virt_unittest.py
+++ b/nova/tests/virt_unittest.py
@@ -29,11 +29,13 @@ from nova.virt import libvirt_conn
FLAGS = flags.FLAGS
flags.DECLARE('instances_path', 'nova.compute.manager')
+
class LibvirtConnTestCase(test.TrialTestCase):
def setUp(self):
super(LibvirtConnTestCase, self).setUp()
self.manager = manager.AuthManager()
- self.user = self.manager.create_user('fake', 'fake', 'fake', admin=True)
+ self.user = self.manager.create_user('fake', 'fake', 'fake',
+ admin=True)
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.network = utils.import_object(FLAGS.network_manager)
FLAGS.instances_path = ''
@@ -41,15 +43,15 @@ class LibvirtConnTestCase(test.TrialTestCase):
def test_get_uri_and_template(self):
ip = '10.11.12.13'
- instance = { 'internal_id' : 1,
- 'memory_kb' : '1024000',
- 'basepath' : '/some/path',
- 'bridge_name' : 'br100',
- 'mac_address' : '02:12:34:46:56:67',
- 'vcpus' : 2,
- 'project_id' : 'fake',
- 'bridge' : 'br101',
- 'instance_type' : 'm1.small'}
+ instance = {'internal_id': 1,
+ 'memory_kb': '1024000',
+ 'basepath': '/some/path',
+ 'bridge_name': 'br100',
+ 'mac_address': '02:12:34:46:56:67',
+ 'vcpus': 2,
+ 'project_id': 'fake',
+ 'bridge': 'br101',
+ 'instance_type': 'm1.small'}
user_context = context.RequestContext(project=self.project,
user=self.user)
@@ -58,40 +60,38 @@ class LibvirtConnTestCase(test.TrialTestCase):
self.network.set_network_host(context.get_admin_context(),
network_ref['id'])
- fixed_ip = { 'address' : ip,
- 'network_id' : network_ref['id'] }
+ fixed_ip = {'address': ip,
+ 'network_id': network_ref['id']}
ctxt = context.get_admin_context()
fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip)
db.fixed_ip_update(ctxt, ip, {'allocated': True,
- 'instance_id': instance_ref['id'] })
-
- type_uri_map = { 'qemu' : ('qemu:///system',
- [(lambda t: t.find('.').get('type'), 'qemu'),
- (lambda t: t.find('./os/type').text, 'hvm'),
- (lambda t: t.find('./devices/emulator'), None)]),
- 'kvm' : ('qemu:///system',
- [(lambda t: t.find('.').get('type'), 'kvm'),
- (lambda t: t.find('./os/type').text, 'hvm'),
- (lambda t: t.find('./devices/emulator'), None)]),
- 'uml' : ('uml:///system',
- [(lambda t: t.find('.').get('type'), 'uml'),
- (lambda t: t.find('./os/type').text, 'uml')]),
- }
-
- common_checks = [(lambda t: t.find('.').tag, 'domain'),
- (lambda t: \
- t.find('./devices/interface/filterref/parameter') \
- .get('name'), 'IP'),
- (lambda t: \
- t.find('./devices/interface/filterref/parameter') \
- .get('value'), '10.11.12.13')]
-
- for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
+ 'instance_id': instance_ref['id']})
+
+ type_uri_map = {'qemu': ('qemu:///system',
+ [(lambda t: t.find('.').get('type'), 'qemu'),
+ (lambda t: t.find('./os/type').text, 'hvm'),
+ (lambda t: t.find('./devices/emulator'), None)]),
+ 'kvm': ('qemu:///system',
+ [(lambda t: t.find('.').get('type'), 'kvm'),
+ (lambda t: t.find('./os/type').text, 'hvm'),
+ (lambda t: t.find('./devices/emulator'), None)]),
+ 'uml': ('uml:///system',
+ [(lambda t: t.find('.').get('type'), 'uml'),
+ (lambda t: t.find('./os/type').text, 'uml')])}
+
+ common_checks = [
+ (lambda t: t.find('.').tag, 'domain'),
+ (lambda t: t.find('./devices/interface/filterref/parameter').\
+ get('name'), 'IP'),
+ (lambda t: t.find('./devices/interface/filterref/parameter').\
+ get('value'), '10.11.12.13')]
+
+ for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
- uri, template = conn.get_uri_and_template()
+ uri, _template, _rescue = conn.get_uri_and_templates()
self.assertEquals(uri, expected_uri)
xml = conn.to_xml(instance_ref)
@@ -111,19 +111,20 @@ class LibvirtConnTestCase(test.TrialTestCase):
# implementation doesn't fiddle around with the FLAGS.
testuri = 'something completely different'
FLAGS.libvirt_uri = testuri
- for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
+ for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
- uri, template = conn.get_uri_and_template()
+ uri, _template, _rescue = conn.get_uri_and_templates()
self.assertEquals(uri, testuri)
-
def tearDown(self):
super(LibvirtConnTestCase, self).tearDown()
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
+
class NWFilterTestCase(test.TrialTestCase):
+
def setUp(self):
super(NWFilterTestCase, self).setUp()
@@ -131,7 +132,8 @@ class NWFilterTestCase(test.TrialTestCase):
pass
self.manager = manager.AuthManager()
- self.user = self.manager.create_user('fake', 'fake', 'fake', admin=True)
+ self.user = self.manager.create_user('fake', 'fake', 'fake',
+ admin=True)
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.context = context.RequestContext(self.user, self.project)
@@ -143,7 +145,6 @@ class NWFilterTestCase(test.TrialTestCase):
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
-
def test_cidr_rule_nwfilter_xml(self):
cloud_controller = cloud.CloudController()
cloud_controller.create_security_group(self.context,
@@ -156,7 +157,6 @@ class NWFilterTestCase(test.TrialTestCase):
ip_protocol='tcp',
cidr_ip='0.0.0.0/0')
-
security_group = db.security_group_get_by_name(self.context,
'fake',
'testgroup')
@@ -182,15 +182,12 @@ class NWFilterTestCase(test.TrialTestCase):
self.assertEqual(ip_conditions[0].getAttribute('srcipmask'), '0.0.0.0')
self.assertEqual(ip_conditions[0].getAttribute('dstportstart'), '80')
self.assertEqual(ip_conditions[0].getAttribute('dstportend'), '81')
-
-
self.teardown_security_group()
def teardown_security_group(self):
cloud_controller = cloud.CloudController()
cloud_controller.delete_security_group(self.context, 'testgroup')
-
def setup_and_return_security_group(self):
cloud_controller = cloud.CloudController()
cloud_controller.create_security_group(self.context,
@@ -244,16 +241,19 @@ class NWFilterTestCase(test.TrialTestCase):
for required in [secgroup_filter, 'allow-dhcp-server',
'no-arp-spoofing', 'no-ip-spoofing',
'no-mac-spoofing']:
- self.assertTrue(required in self.recursive_depends[instance_filter],
- "Instance's filter does not include %s" % required)
+ self.assertTrue(required in
+ self.recursive_depends[instance_filter],
+ "Instance's filter does not include %s" %
+ required)
self.security_group = self.setup_and_return_security_group()
- db.instance_add_security_group(self.context, inst_id, self.security_group.id)
+ db.instance_add_security_group(self.context, inst_id,
+ self.security_group.id)
instance = db.instance_get(self.context, inst_id)
d = self.fw.setup_nwfilters_for_instance(instance)
d.addCallback(_ensure_all_called)
- d.addCallback(lambda _:self.teardown_security_group())
+ d.addCallback(lambda _: self.teardown_security_group())
return d
diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py
index 8e2fa11c1..fdee30b48 100644
--- a/nova/tests/volume_unittest.py
+++ b/nova/tests/volume_unittest.py
@@ -34,7 +34,7 @@ FLAGS = flags.FLAGS
class VolumeTestCase(test.TrialTestCase):
"""Test Case for volumes"""
- def setUp(self): # pylint: disable-msg=C0103
+ def setUp(self):
logging.getLogger().setLevel(logging.DEBUG)
super(VolumeTestCase, self).setUp()
self.compute = utils.import_object(FLAGS.compute_manager)
@@ -59,7 +59,8 @@ class VolumeTestCase(test.TrialTestCase):
"""Test volume can be created and deleted"""
volume_id = self._create_volume()
yield self.volume.create_volume(self.context, volume_id)
- self.assertEqual(volume_id, db.volume_get(context.get_admin_context(), volume_id).id)
+ self.assertEqual(volume_id, db.volume_get(context.get_admin_context(),
+ volume_id).id)
yield self.volume.delete_volume(self.context, volume_id)
self.assertRaises(exception.NotFound,
@@ -114,7 +115,8 @@ class VolumeTestCase(test.TrialTestCase):
volume_id = self._create_volume()
yield self.volume.create_volume(self.context, volume_id)
if FLAGS.fake_tests:
- db.volume_attached(self.context, volume_id, instance_id, mountpoint)
+ db.volume_attached(self.context, volume_id, instance_id,
+ mountpoint)
else:
yield self.compute.attach_volume(self.context,
instance_id,
@@ -154,7 +156,8 @@ class VolumeTestCase(test.TrialTestCase):
def _check(volume_id):
"""Make sure blades aren't duplicated"""
volume_ids.append(volume_id)
- (shelf_id, blade_id) = db.volume_get_shelf_and_blade(context.get_admin_context(),
+ admin_context = context.get_admin_context()
+ (shelf_id, blade_id) = db.volume_get_shelf_and_blade(admin_context,
volume_id)
shelf_blade = '%s.%s' % (shelf_id, blade_id)
self.assert_(shelf_blade not in shelf_blades)
diff --git a/nova/twistd.py b/nova/twistd.py
index df75b603e..3ec0ff61e 100644
--- a/nova/twistd.py
+++ b/nova/twistd.py
@@ -53,6 +53,7 @@ class TwistdServerOptions(ServerOptions):
class FlagParser(object):
# this is a required attribute for gflags
syntactic_help = ''
+
def __init__(self, parser):
self.parser = parser
@@ -63,6 +64,7 @@ class FlagParser(object):
def WrapTwistedOptions(wrapped):
class TwistedOptionsToFlags(wrapped):
subCommands = None
+
def __init__(self):
# NOTE(termie): _data exists because Twisted stuff expects
# to be able to set arbitrary things that are
@@ -80,7 +82,8 @@ def WrapTwistedOptions(wrapped):
def _absorbFlags(self):
twistd_flags = []
- reflect.accumulateClassList(self.__class__, 'optFlags', twistd_flags)
+ reflect.accumulateClassList(self.__class__, 'optFlags',
+ twistd_flags)
for flag in twistd_flags:
key = flag[0].replace('-', '_')
if hasattr(FLAGS, key):
@@ -89,7 +92,8 @@ def WrapTwistedOptions(wrapped):
def _absorbParameters(self):
twistd_params = []
- reflect.accumulateClassList(self.__class__, 'optParameters', twistd_params)
+ reflect.accumulateClassList(self.__class__, 'optParameters',
+ twistd_params)
for param in twistd_params:
key = param[0].replace('-', '_')
if hasattr(FLAGS, key):
@@ -103,13 +107,14 @@ def WrapTwistedOptions(wrapped):
def _absorbHandlers(self):
twistd_handlers = {}
- reflect.addMethodNamesToDict(self.__class__, twistd_handlers, "opt_")
+ reflect.addMethodNamesToDict(self.__class__, twistd_handlers,
+ "opt_")
# NOTE(termie): Much of the following is derived/copied from
# twisted.python.usage with the express purpose of
# providing compatibility
for name in twistd_handlers.keys():
- method = getattr(self, 'opt_'+name)
+ method = getattr(self, 'opt_' + name)
takesArg = not usage.flagFunction(method, name)
doc = getattr(method, '__doc__', None)
@@ -125,7 +130,6 @@ def WrapTwistedOptions(wrapped):
flags.DEFINE_string(name, None, doc)
self._paramHandlers[name] = method
-
def _doHandlers(self):
for flag, handler in self._flagHandlers.iteritems():
if self[flag]:
@@ -195,7 +199,7 @@ def stop(pidfile):
"""
# Get the pid from the pidfile
try:
- pf = file(pidfile,'r')
+ pf = file(pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
@@ -204,7 +208,8 @@ def stop(pidfile):
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % pidfile)
- return # not an error in a restart
+ # Not an error in a restart
+ return
# Try killing the daemon process
try:
diff --git a/nova/utils.py b/nova/utils.py
index 10b27ffec..bc495a691 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -28,6 +28,7 @@ import random
import subprocess
import socket
import sys
+from xml.sax import saxutils
from twisted.internet.threads import deferToThread
@@ -39,6 +40,7 @@ from nova.exception import ProcessExecutionError
FLAGS = flags.FLAGS
TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
+
def import_class(import_str):
"""Returns a class from a string including module and class"""
mod_str, _sep, class_str = import_str.rpartition('.')
@@ -48,6 +50,7 @@ def import_class(import_str):
except (ImportError, ValueError, AttributeError):
raise exception.NotFound('Class %s cannot be found' % class_str)
+
def import_object(import_str):
"""Returns an object including a module or module and class"""
try:
@@ -57,6 +60,7 @@ def import_object(import_str):
cls = import_class(import_str)
return cls()
+
def fetchfile(url, target):
logging.debug("Fetching %s" % url)
# c = pycurl.Curl()
@@ -68,6 +72,7 @@ def fetchfile(url, target):
# fp.close()
execute("curl --fail %s -o %s" % (url, target))
+
def execute(cmd, process_input=None, addl_env=None, check_exit_code=True):
logging.debug("Running cmd: %s", cmd)
env = os.environ.copy()
@@ -83,7 +88,7 @@ def execute(cmd, process_input=None, addl_env=None, check_exit_code=True):
obj.stdin.close()
if obj.returncode:
logging.debug("Result was %s" % (obj.returncode))
- if check_exit_code and obj.returncode <> 0:
+ if check_exit_code and obj.returncode != 0:
(stdout, stderr) = result
raise ProcessExecutionError(exit_code=obj.returncode,
stdout=stdout,
@@ -106,7 +111,8 @@ def default_flagfile(filename='nova.conf'):
script_dir = os.path.dirname(inspect.stack()[-1][1])
filename = os.path.abspath(os.path.join(script_dir, filename))
if os.path.exists(filename):
- sys.argv = sys.argv[:1] + ['--flagfile=%s' % filename] + sys.argv[1:]
+ flagfile = ['--flagfile=%s' % filename]
+ sys.argv = sys.argv[:1] + flagfile + sys.argv[1:]
def debug(arg):
@@ -114,11 +120,11 @@ def debug(arg):
return arg
-def runthis(prompt, cmd, check_exit_code = True):
+def runthis(prompt, cmd, check_exit_code=True):
logging.debug("Running %s" % (cmd))
exit_code = subprocess.call(cmd.split(" "))
logging.debug(prompt % (exit_code))
- if check_exit_code and exit_code <> 0:
+ if check_exit_code and exit_code != 0:
raise ProcessExecutionError(exit_code=exit_code,
stdout=None,
stderr=None,
@@ -126,19 +132,16 @@ def runthis(prompt, cmd, check_exit_code = True):
def generate_uid(topic, size=8):
- if topic == "i":
- # Instances have integer internal ids.
- return random.randint(0, 2**32-1)
- else:
- characters = '01234567890abcdefghijklmnopqrstuvwxyz'
- choices = [random.choice(characters) for x in xrange(size)]
- return '%s-%s' % (topic, ''.join(choices))
+ characters = '01234567890abcdefghijklmnopqrstuvwxyz'
+ choices = [random.choice(characters) for x in xrange(size)]
+ return '%s-%s' % (topic, ''.join(choices))
def generate_mac():
- mac = [0x02, 0x16, 0x3e, random.randint(0x00, 0x7f),
- random.randint(0x00, 0xff), random.randint(0x00, 0xff)
- ]
+ mac = [0x02, 0x16, 0x3e,
+ random.randint(0x00, 0x7f),
+ random.randint(0x00, 0xff),
+ random.randint(0x00, 0xff)]
return ':'.join(map(lambda x: "%02x" % x, mac))
@@ -201,7 +204,31 @@ class LazyPluggable(object):
backend = self.__get_backend()
return getattr(backend, key)
+
def deferredToThread(f):
def g(*args, **kwargs):
return deferToThread(f, *args, **kwargs)
return g
+
+
+def xhtml_escape(value):
+ """Escapes a string so it is valid within XML or XHTML.
+
+ Code is directly from the utf8 function in
+ http://github.com/facebook/tornado/blob/master/tornado/escape.py
+
+ """
+ return saxutils.escape(value, {'"': "&quot;"})
+
+
+def utf8(value):
+ """Try to turn a string into utf-8 if possible.
+
+ Code is directly from the utf8 function in
+ http://github.com/facebook/tornado/blob/master/tornado/escape.py
+
+ """
+ if isinstance(value, unicode):
+ return value.encode("utf-8")
+ assert isinstance(value, str)
+ return value
diff --git a/nova/validate.py b/nova/validate.py
index 21f4ed286..7ea27daa6 100644
--- a/nova/validate.py
+++ b/nova/validate.py
@@ -16,18 +16,20 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""
- Decorators for argument validation, courtesy of
- http://rmi.net/~lutz/rangetest.html
-"""
+"""Decorators for argument validation, courtesy of
+http://rmi.net/~lutz/rangetest.html"""
-def rangetest(**argchecks): # validate ranges for both+defaults
- def onDecorator(func): # onCall remembers func and argchecks
+
+def rangetest(**argchecks):
+ """Validate ranges for both + defaults"""
+
+ def onDecorator(func):
+ """onCall remembers func and argchecks"""
import sys
code = func.__code__ if sys.version_info[0] == 3 else func.func_code
- allargs = code.co_varnames[:code.co_argcount]
+ allargs = code.co_varnames[:code.co_argcount]
funcname = func.__name__
-
+
def onCall(*pargs, **kargs):
# all pargs match first N args by position
# the rest must be in kargs or omitted defaults
@@ -38,7 +40,8 @@ def rangetest(**argchecks): # validate ranges for both+defaults
# for all args to be checked
if argname in kargs:
# was passed by name
- if float(kargs[argname]) < low or float(kargs[argname]) > high:
+ if float(kargs[argname]) < low or \
+ float(kargs[argname]) > high:
errmsg = '{0} argument "{1}" not in {2}..{3}'
errmsg = errmsg.format(funcname, argname, low, high)
raise TypeError(errmsg)
@@ -46,9 +49,12 @@ def rangetest(**argchecks): # validate ranges for both+defaults
elif argname in positionals:
# was passed by position
position = positionals.index(argname)
- if float(pargs[position]) < low or float(pargs[position]) > high:
- errmsg = '{0} argument "{1}" with value of {4} not in {2}..{3}'
- errmsg = errmsg.format(funcname, argname, low, high, pargs[position])
+ if float(pargs[position]) < low or \
+ float(pargs[position]) > high:
+ errmsg = '{0} argument "{1}" with value of {4} ' \
+ 'not in {2}..{3}'
+ errmsg = errmsg.format(funcname, argname, low, high,
+ pargs[position])
raise TypeError(errmsg)
else:
pass
@@ -62,9 +68,9 @@ def typetest(**argchecks):
def onDecorator(func):
import sys
code = func.__code__ if sys.version_info[0] == 3 else func.func_code
- allargs = code.co_varnames[:code.co_argcount]
+ allargs = code.co_varnames[:code.co_argcount]
funcname = func.__name__
-
+
def onCall(*pargs, **kargs):
positionals = list(allargs)[:len(pargs)]
for (argname, typeof) in argchecks.items():
@@ -76,12 +82,13 @@ def typetest(**argchecks):
elif argname in positionals:
position = positionals.index(argname)
if not isinstance(pargs[position], typeof):
- errmsg = '{0} argument "{1}" with value of {2} not of type {3}'
- errmsg = errmsg.format(funcname, argname, pargs[position], typeof)
+ errmsg = '{0} argument "{1}" with value of {2} ' \
+ 'not of type {3}'
+ errmsg = errmsg.format(funcname, argname,
+ pargs[position], typeof)
raise TypeError(errmsg)
else:
pass
return func(*pargs, **kargs)
return onCall
return onDecorator
-
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index dc6112f20..66eff4c66 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -22,10 +22,9 @@ A fake (in-memory) hypervisor+api. Allows nova testing w/o a hypervisor.
This module also documents the semantics of real hypervisor connections.
"""
-import logging
-
from twisted.internet import defer
+from nova import exception
from nova.compute import power_state
@@ -119,6 +118,18 @@ class FakeConnection(object):
"""
return defer.succeed(None)
+ def rescue(self, instance):
+ """
+ Rescue the specified instance.
+ """
+ return defer.succeed(None)
+
+ def unrescue(self, instance):
+ """
+ Unrescue the specified instance.
+ """
+ return defer.succeed(None)
+
def destroy(self, instance):
"""
Destroy (shutdown and delete) the specified instance.
@@ -148,7 +159,12 @@ class FakeConnection(object):
current memory the instance has, in KiB, 'num_cpu': The current number
of virtual CPUs the instance has, 'cpu_time': The total CPU time used
by the instance, in nanoseconds.
+
+ This method should raise exception.NotFound if the hypervisor has no
+ knowledge of the instance
"""
+ if instance_name not in self.instances:
+ raise exception.NotFound("Instance %s Not Found" % instance_name)
i = self.instances[instance_name]
return {'state': i._state,
'max_mem': 0,
@@ -226,6 +242,7 @@ class FakeConnection(object):
def get_console_output(self, instance):
return 'FAKE CONSOLE OUTPUT'
+
class FakeInstance(object):
def __init__(self):
self._state = power_state.NOSTATE
diff --git a/nova/virt/images.py b/nova/virt/images.py
index dc50764d9..981aa5cf3 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -62,8 +62,8 @@ def _fetch_s3_image(image, path, user, project):
headers['Authorization'] = 'AWS %s:%s' % (access, signature)
cmd = ['/usr/bin/curl', '--fail', '--silent', url]
- for (k,v) in headers.iteritems():
- cmd += ['-H', '%s: %s' % (k,v)]
+ for (k, v) in headers.iteritems():
+ cmd += ['-H', '%s: %s' % (k, v)]
cmd += ['-o', path]
return process.SharedPool().execute(executable=cmd[0], args=cmd[1:])
diff --git a/nova/virt/libvirt.rescue.qemu.xml.template b/nova/virt/libvirt.rescue.qemu.xml.template
new file mode 100644
index 000000000..c0ffbdcee
--- /dev/null
+++ b/nova/virt/libvirt.rescue.qemu.xml.template
@@ -0,0 +1,37 @@
+<domain type='%(type)s'>
+ <name>%(name)s</name>
+ <os>
+ <type>hvm</type>
+ <kernel>%(basepath)s/rescue-kernel</kernel>
+ <initrd>%(basepath)s/rescue-ramdisk</initrd>
+ <cmdline>root=/dev/vda1 console=ttyS0</cmdline>
+ </os>
+ <features>
+ <acpi/>
+ </features>
+ <memory>%(memory_kb)s</memory>
+ <vcpu>%(vcpus)s</vcpu>
+ <devices>
+ <disk type='file'>
+ <source file='%(basepath)s/rescue-disk'/>
+ <target dev='vda' bus='virtio'/>
+ </disk>
+ <disk type='file'>
+ <source file='%(basepath)s/disk'/>
+ <target dev='vdb' bus='virtio'/>
+ </disk>
+ <interface type='bridge'>
+ <source bridge='%(bridge_name)s'/>
+ <mac address='%(mac_address)s'/>
+ <!-- <model type='virtio'/> CANT RUN virtio network right now -->
+ <filterref filter="nova-instance-%(name)s">
+ <parameter name="IP" value="%(ip_address)s" />
+ <parameter name="DHCPSERVER" value="%(dhcp_server)s" />
+ </filterref>
+ </interface>
+ <serial type="file">
+ <source path='%(basepath)s/console.log'/>
+ <target port='1'/>
+ </serial>
+ </devices>
+</domain>
diff --git a/nova/virt/libvirt.rescue.uml.xml.template b/nova/virt/libvirt.rescue.uml.xml.template
new file mode 100644
index 000000000..836f47532
--- /dev/null
+++ b/nova/virt/libvirt.rescue.uml.xml.template
@@ -0,0 +1,26 @@
+<domain type='%(type)s'>
+ <name>%(name)s</name>
+ <memory>%(memory_kb)s</memory>
+ <os>
+ <type>%(type)s</type>
+ <kernel>/usr/bin/linux</kernel>
+ <root>/dev/ubda1</root>
+ </os>
+ <devices>
+ <disk type='file'>
+ <source file='%(basepath)s/rescue-disk'/>
+ <target dev='ubd0' bus='uml'/>
+ </disk>
+ <disk type='file'>
+ <source file='%(basepath)s/disk'/>
+ <target dev='ubd1' bus='uml'/>
+ </disk>
+ <interface type='bridge'>
+ <source bridge='%(bridge_name)s'/>
+ <mac address='%(mac_address)s'/>
+ </interface>
+ <console type="file">
+ <source path='%(basepath)s/console.log'/>
+ </console>
+ </devices>
+</domain>
diff --git a/nova/virt/libvirt.rescue.xen.xml.template b/nova/virt/libvirt.rescue.xen.xml.template
new file mode 100644
index 000000000..3b8d27237
--- /dev/null
+++ b/nova/virt/libvirt.rescue.xen.xml.template
@@ -0,0 +1,34 @@
+<domain type='%(type)s'>
+ <name>%(name)s</name>
+ <os>
+ <type>linux</type>
+ <kernel>%(basepath)s/kernel</kernel>
+ <initrd>%(basepath)s/ramdisk</initrd>
+ <root>/dev/xvda1</root>
+ <cmdline>ro</cmdline>
+ </os>
+ <features>
+ <acpi/>
+ </features>
+ <memory>%(memory_kb)s</memory>
+ <vcpu>%(vcpus)s</vcpu>
+ <devices>
+ <disk type='file'>
+ <source file='%(basepath)s/rescue-disk'/>
+ <target dev='sda' />
+ </disk>
+ <disk type='file'>
+ <source file='%(basepath)s/disk'/>
+ <target dev='sdb' />
+ </disk>
+ <interface type='bridge'>
+ <source bridge='%(bridge_name)s'/>
+ <mac address='%(mac_address)s'/>
+ </interface>
+ <console type="file">
+ <source path='%(basepath)s/console.log'/>
+ <target port='1'/>
+ </console>
+ </devices>
+</domain>
+
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 7250d31e5..e32945fa5 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -48,6 +48,19 @@ libxml2 = None
FLAGS = flags.FLAGS
+flags.DEFINE_string('libvirt_rescue_xml_template',
+ utils.abspath('virt/libvirt.rescue.qemu.xml.template'),
+ 'Libvirt RESCUE XML Template for QEmu/KVM')
+flags.DEFINE_string('libvirt_rescue_xen_xml_template',
+ utils.abspath('virt/libvirt.rescue.xen.xml.template'),
+ 'Libvirt RESCUE XML Template for xen')
+flags.DEFINE_string('libvirt_rescue_uml_xml_template',
+ utils.abspath('virt/libvirt.rescue.uml.xml.template'),
+ 'Libvirt RESCUE XML Template for user-mode-linux')
+# TODO(vish): These flags should probably go into a shared location
+flags.DEFINE_string('rescue_image_id', 'ami-rescue', 'Rescue ami image')
+flags.DEFINE_string('rescue_kernel_id', 'aki-rescue', 'Rescue aki image')
+flags.DEFINE_string('rescue_ramdisk_id', 'ari-rescue', 'Rescue ari image')
flags.DEFINE_string('libvirt_xml_template',
utils.abspath('virt/libvirt.qemu.xml.template'),
'Libvirt XML Template for QEmu/KVM')
@@ -62,7 +75,8 @@ flags.DEFINE_string('injected_network_template',
'Template file for injected network')
flags.DEFINE_string('libvirt_type',
'kvm',
- 'Libvirt domain type (valid options are: kvm, qemu, uml, xen)')
+ 'Libvirt domain type (valid options are: '
+ 'kvm, qemu, uml, xen)')
flags.DEFINE_string('libvirt_uri',
'',
'Override the default libvirt URI (which is dependent'
@@ -86,9 +100,12 @@ def get_connection(read_only):
class LibvirtConnection(object):
def __init__(self, read_only):
- self.libvirt_uri, template_file = self.get_uri_and_template()
+ (self.libvirt_uri,
+ template_file,
+ rescue_file) = self.get_uri_and_templates()
self.libvirt_xml = open(template_file).read()
+ self.rescue_xml = open(rescue_file).read()
self._wrapped_conn = None
self.read_only = read_only
@@ -96,7 +113,8 @@ class LibvirtConnection(object):
def _conn(self):
if not self._wrapped_conn or not self._test_connection():
logging.debug('Connecting to libvirt: %s' % self.libvirt_uri)
- self._wrapped_conn = self._connect(self.libvirt_uri, self.read_only)
+ self._wrapped_conn = self._connect(self.libvirt_uri,
+ self.read_only)
return self._wrapped_conn
def _test_connection(self):
@@ -110,17 +128,20 @@ class LibvirtConnection(object):
return False
raise
- def get_uri_and_template(self):
+ def get_uri_and_templates(self):
if FLAGS.libvirt_type == 'uml':
uri = FLAGS.libvirt_uri or 'uml:///system'
template_file = FLAGS.libvirt_uml_xml_template
+ rescue_file = FLAGS.libvirt_rescue_uml_xml_template
elif FLAGS.libvirt_type == 'xen':
uri = FLAGS.libvirt_uri or 'xen:///'
template_file = FLAGS.libvirt_xen_xml_template
+ rescue_file = FLAGS.libvirt_rescue_xen_xml_template
else:
uri = FLAGS.libvirt_uri or 'qemu:///system'
template_file = FLAGS.libvirt_xml_template
- return uri, template_file
+ rescue_file = FLAGS.libvirt_rescue_xml_template
+ return uri, template_file, rescue_file
def _connect(self, uri, read_only):
auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT],
@@ -136,7 +157,7 @@ class LibvirtConnection(object):
return [self._conn.lookupByID(x).name()
for x in self._conn.listDomainsID()]
- def destroy(self, instance):
+ def destroy(self, instance, cleanup=True):
try:
virt_dom = self._conn.lookupByName(instance['name'])
virt_dom.destroy()
@@ -144,12 +165,14 @@ class LibvirtConnection(object):
pass
# If the instance is already terminated, we're still happy
d = defer.Deferred()
- d.addCallback(lambda _: self._cleanup(instance))
+ if cleanup:
+ d.addCallback(lambda _: self._cleanup(instance))
# FIXME: What does this comment mean?
# TODO(termie): short-circuit me for tests
- # WE'LL save this for when we do shutdown,
+    # We'll save this for when we do shutdown,
# instead of destroy - but destroy returns immediately
timer = task.LoopingCall(f=None)
+
def _wait_for_shutdown():
try:
state = self.get_info(instance['name'])['state']
@@ -164,6 +187,7 @@ class LibvirtConnection(object):
power_state.SHUTDOWN)
timer.stop()
d.callback(None)
+
timer.f = _wait_for_shutdown
timer.start(interval=0.5, now=True)
return d
@@ -195,12 +219,13 @@ class LibvirtConnection(object):
@defer.inlineCallbacks
@exception.wrap_exception
def reboot(self, instance):
+ yield self.destroy(instance, False)
xml = self.to_xml(instance)
- yield self._conn.lookupByName(instance['name']).destroy()
yield self._conn.createXML(xml, 0)
d = defer.Deferred()
timer = task.LoopingCall(f=None)
+
def _wait_for_reboot():
try:
state = self.get_info(instance['name'])['state']
@@ -217,27 +242,70 @@ class LibvirtConnection(object):
power_state.SHUTDOWN)
timer.stop()
d.callback(None)
+
timer.f = _wait_for_reboot
timer.start(interval=0.5, now=True)
yield d
@defer.inlineCallbacks
@exception.wrap_exception
+ def rescue(self, instance):
+ yield self.destroy(instance, False)
+
+ xml = self.to_xml(instance, rescue=True)
+ rescue_images = {'image_id': FLAGS.rescue_image_id,
+ 'kernel_id': FLAGS.rescue_kernel_id,
+ 'ramdisk_id': FLAGS.rescue_ramdisk_id}
+ yield self._create_image(instance, xml, 'rescue-', rescue_images)
+ yield self._conn.createXML(xml, 0)
+
+ d = defer.Deferred()
+ timer = task.LoopingCall(f=None)
+
+ def _wait_for_rescue():
+ try:
+ state = self.get_info(instance['name'])['state']
+ db.instance_set_state(None, instance['id'], state)
+ if state == power_state.RUNNING:
+ logging.debug('instance %s: rescued', instance['name'])
+ timer.stop()
+ d.callback(None)
+ except Exception, exn:
+ logging.error('_wait_for_rescue failed: %s', exn)
+ db.instance_set_state(None,
+ instance['id'],
+ power_state.SHUTDOWN)
+ timer.stop()
+ d.callback(None)
+
+ timer.f = _wait_for_rescue
+ timer.start(interval=0.5, now=True)
+ yield d
+
+ @defer.inlineCallbacks
+ @exception.wrap_exception
+ def unrescue(self, instance):
+ # NOTE(vish): Because reboot destroys and recreates an instance using
+ # the normal xml file, we can just call reboot here
+ yield self.reboot(instance)
+
+ @defer.inlineCallbacks
+ @exception.wrap_exception
def spawn(self, instance):
xml = self.to_xml(instance)
db.instance_set_state(context.get_admin_context(),
instance['id'],
power_state.NOSTATE,
'launching')
- yield NWFilterFirewall(self._conn).setup_nwfilters_for_instance(instance)
+ yield NWFilterFirewall(self._conn).\
+ setup_nwfilters_for_instance(instance)
yield self._create_image(instance, xml)
yield self._conn.createXML(xml, 0)
- # TODO(termie): this should actually register
- # a callback to check for successful boot
logging.debug("instance %s: is running", instance['name'])
local_d = defer.Deferred()
timer = task.LoopingCall(f=None)
+
def _wait_for_boot():
try:
state = self.get_info(instance['name'])['state']
@@ -265,8 +333,9 @@ class LibvirtConnection(object):
if virsh_output.startswith('/dev/'):
logging.info('cool, it\'s a device')
- d = process.simple_execute("sudo dd if=%s iflag=nonblock" % virsh_output, check_exit_code=False)
- d.addCallback(lambda r:r[0])
+ d = process.simple_execute("sudo dd if=%s iflag=nonblock" %
+ virsh_output, check_exit_code=False)
+ d.addCallback(lambda r: r[0])
return d
else:
return ''
@@ -285,11 +354,15 @@ class LibvirtConnection(object):
@exception.wrap_exception
def get_console_output(self, instance):
- console_log = os.path.join(FLAGS.instances_path, instance['name'], 'console.log')
- d = process.simple_execute('sudo chown %d %s' % (os.getuid(), console_log))
+ console_log = os.path.join(FLAGS.instances_path, instance['name'],
+ 'console.log')
+ d = process.simple_execute('sudo chown %d %s' % (os.getuid(),
+ console_log))
if FLAGS.libvirt_type == 'xen':
# Xen is spethial
- d.addCallback(lambda _: process.simple_execute("virsh ttyconsole %s" % instance['name']))
+ d.addCallback(lambda _:
+ process.simple_execute("virsh ttyconsole %s" %
+ instance['name']))
d.addCallback(self._flush_xen_console)
d.addCallback(self._append_to_file, console_log)
else:
@@ -297,18 +370,17 @@ class LibvirtConnection(object):
d.addCallback(self._dump_file)
return d
-
@defer.inlineCallbacks
- def _create_image(self, inst, libvirt_xml):
+ def _create_image(self, inst, libvirt_xml, prefix='', disk_images=None):
# syntactic nicety
- basepath = lambda fname='': os.path.join(FLAGS.instances_path,
+ basepath = lambda fname='', prefix=prefix: os.path.join(
+ FLAGS.instances_path,
inst['name'],
- fname)
+ prefix + fname)
# ensure directories exist and are writable
- yield process.simple_execute('mkdir -p %s' % basepath())
- yield process.simple_execute('chmod 0777 %s' % basepath())
-
+ yield process.simple_execute('mkdir -p %s' % basepath(prefix=''))
+ yield process.simple_execute('chmod 0777 %s' % basepath(prefix=''))
# TODO(termie): these are blocking calls, it would be great
# if they weren't.
@@ -317,17 +389,26 @@ class LibvirtConnection(object):
f.write(libvirt_xml)
f.close()
- os.close(os.open(basepath('console.log'), os.O_CREAT | os.O_WRONLY, 0660))
+    # NOTE(vish): No need to add the prefix to console.log
+ os.close(os.open(basepath('console.log', ''),
+ os.O_CREAT | os.O_WRONLY, 0660))
user = manager.AuthManager().get_user(inst['user_id'])
project = manager.AuthManager().get_project(inst['project_id'])
+ if not disk_images:
+ disk_images = {'image_id': inst['image_id'],
+ 'kernel_id': inst['kernel_id'],
+ 'ramdisk_id': inst['ramdisk_id']}
if not os.path.exists(basepath('disk')):
- yield images.fetch(inst.image_id, basepath('disk-raw'), user, project)
+ yield images.fetch(inst.image_id, basepath('disk-raw'), user,
+ project)
if not os.path.exists(basepath('kernel')):
- yield images.fetch(inst.kernel_id, basepath('kernel'), user, project)
+ yield images.fetch(inst.kernel_id, basepath('kernel'), user,
+ project)
if not os.path.exists(basepath('ramdisk')):
- yield images.fetch(inst.ramdisk_id, basepath('ramdisk'), user, project)
+ yield images.fetch(inst.ramdisk_id, basepath('ramdisk'), user,
+ project)
execute = lambda cmd, process_input=None, check_exit_code=True: \
process.simple_execute(cmd=cmd,
@@ -339,8 +420,8 @@ class LibvirtConnection(object):
network_ref = db.network_get_by_instance(context.get_admin_context(),
inst['id'])
if network_ref['injected']:
- address = db.instance_get_fixed_address(context.get_admin_context(),
- inst['id'])
+ admin_context = context.get_admin_context()
+ address = db.instance_get_fixed_address(admin_context, inst['id'])
with open(FLAGS.injected_network_template) as f:
net = f.read() % {'address': address,
'netmask': network_ref['netmask'],
@@ -354,7 +435,8 @@ class LibvirtConnection(object):
if net:
logging.info('instance %s: injecting net into image %s',
inst['name'], inst.image_id)
- yield disk.inject_data(basepath('disk-raw'), key, net, execute=execute)
+ yield disk.inject_data(basepath('disk-raw'), key, net,
+ execute=execute)
if os.path.exists(basepath('disk')):
yield process.simple_execute('rm -f %s' % basepath('disk'))
@@ -363,7 +445,9 @@ class LibvirtConnection(object):
['local_gb']
* 1024 * 1024 * 1024)
- resize = inst['instance_type'] != 'm1.tiny'
+ resize = True
+ if inst['instance_type'] == 'm1.tiny' or prefix == 'rescue-':
+ resize = False
yield disk.partition(basepath('disk-raw'), basepath('disk'),
local_bytes, resize, execute=execute)
@@ -371,13 +455,14 @@ class LibvirtConnection(object):
yield process.simple_execute('sudo chown root %s' %
basepath('disk'))
- def to_xml(self, instance):
+ def to_xml(self, instance, rescue=False):
# TODO(termie): cache?
logging.debug('instance %s: starting toXML method', instance['name'])
network = db.project_get_network(context.get_admin_context(),
instance['project_id'])
# FIXME(vish): stick this in db
- instance_type = instance_types.INSTANCE_TYPES[instance['instance_type']]
+ instance_type = instance['instance_type']
+ instance_type = instance_types.INSTANCE_TYPES[instance_type]
ip_address = db.instance_get_fixed_address(context.get_admin_context(),
instance['id'])
# Assume that the gateway also acts as the dhcp server.
@@ -391,14 +476,20 @@ class LibvirtConnection(object):
'bridge_name': network['bridge'],
'mac_address': instance['mac_address'],
'ip_address': ip_address,
- 'dhcp_server': dhcp_server }
- libvirt_xml = self.libvirt_xml % xml_info
+ 'dhcp_server': dhcp_server}
+ if rescue:
+ libvirt_xml = self.rescue_xml % xml_info
+ else:
+ libvirt_xml = self.libvirt_xml % xml_info
logging.debug('instance %s: finished toXML method', instance['name'])
return libvirt_xml
def get_info(self, instance_name):
- virt_dom = self._conn.lookupByName(instance_name)
+ try:
+ virt_dom = self._conn.lookupByName(instance_name)
+ except:
+ raise exception.NotFound("Instance %s not found" % instance_name)
(state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info()
return {'state': state,
'max_mem': max_mem,
@@ -506,7 +597,6 @@ class LibvirtConnection(object):
domain = self._conn.lookupByName(instance_name)
return domain.interfaceStats(interface)
-
def refresh_security_group(self, security_group_id):
fw = NWFilterFirewall(self._conn)
fw.ensure_security_group_filter(security_group_id)
@@ -557,7 +647,6 @@ class NWFilterFirewall(object):
def __init__(self, get_connection):
self._conn = get_connection
-
nova_base_filter = '''<filter name='nova-base' chain='root'>
<uuid>26717364-50cf-42d1-8185-29bf893ab110</uuid>
<filterref filter='no-mac-spoofing'/>
@@ -578,7 +667,8 @@ class NWFilterFirewall(object):
srcportstart='68'
dstportstart='67'/>
</rule>
- <rule action='accept' direction='in' priority='100'>
+ <rule action='accept' direction='in'
+ priority='100'>
<udp srcipaddr='$DHCPSERVER'
srcportstart='67'
dstportstart='68'/>
@@ -588,8 +678,8 @@ class NWFilterFirewall(object):
def nova_base_ipv4_filter(self):
retval = "<filter name='nova-base-ipv4' chain='ipv4'>"
for protocol in ['tcp', 'udp', 'icmp']:
- for direction,action,priority in [('out','accept', 399),
- ('inout','drop', 400)]:
+ for direction, action, priority in [('out', 'accept', 399),
+ ('inout', 'drop', 400)]:
retval += """<rule action='%s' direction='%s' priority='%d'>
<%s />
</rule>""" % (action, direction,
@@ -597,12 +687,11 @@ class NWFilterFirewall(object):
retval += '</filter>'
return retval
-
def nova_base_ipv6_filter(self):
retval = "<filter name='nova-base-ipv6' chain='ipv6'>"
for protocol in ['tcp', 'udp', 'icmp']:
- for direction,action,priority in [('out','accept',399),
- ('inout','drop',400)]:
+ for direction, action, priority in [('out', 'accept', 399),
+ ('inout', 'drop', 400)]:
retval += """<rule action='%s' direction='%s' priority='%d'>
<%s-ipv6 />
</rule>""" % (action, direction,
@@ -610,7 +699,6 @@ class NWFilterFirewall(object):
retval += '</filter>'
return retval
-
def nova_project_filter(self, project, net, mask):
retval = "<filter name='nova-project-%s' chain='ipv4'>" % project
for protocol in ['tcp', 'udp', 'icmp']:
@@ -620,14 +708,12 @@ class NWFilterFirewall(object):
retval += '</filter>'
return retval
-
def _define_filter(self, xml):
if callable(xml):
xml = xml()
d = threads.deferToThread(self._conn.nwfilterDefineXML, xml)
return d
-
@staticmethod
def _get_net_and_mask(cidr):
net = IPy.IP(cidr)
@@ -646,9 +732,9 @@ class NWFilterFirewall(object):
yield self._define_filter(self.nova_dhcp_filter)
yield self._define_filter(self.nova_base_filter)
- nwfilter_xml = ("<filter name='nova-instance-%s' chain='root'>\n" +
- " <filterref filter='nova-base' />\n"
- ) % instance['name']
+ nwfilter_xml = "<filter name='nova-instance-%s' chain='root'>\n" \
+ " <filterref filter='nova-base' />\n" % \
+ instance['name']
if FLAGS.allow_project_net_traffic:
network_ref = db.project_get_network(context.get_admin_context(),
@@ -658,14 +744,14 @@ class NWFilterFirewall(object):
net, mask)
yield self._define_filter(project_filter)
- nwfilter_xml += (" <filterref filter='nova-project-%s' />\n"
- ) % instance['project_id']
+ nwfilter_xml += " <filterref filter='nova-project-%s' />\n" % \
+ instance['project_id']
for security_group in instance.security_groups:
yield self.ensure_security_group_filter(security_group['id'])
- nwfilter_xml += (" <filterref filter='nova-secgroup-%d' />\n"
- ) % security_group['id']
+ nwfilter_xml += " <filterref filter='nova-secgroup-%d' />\n" % \
+ security_group['id']
nwfilter_xml += "</filter>"
yield self._define_filter(nwfilter_xml)
@@ -675,7 +761,6 @@ class NWFilterFirewall(object):
return self._define_filter(
self.security_group_to_nwfilter_xml(security_group_id))
-
def security_group_to_nwfilter_xml(self, security_group_id):
security_group = db.security_group_get(context.get_admin_context(),
security_group_id)
@@ -684,12 +769,15 @@ class NWFilterFirewall(object):
rule_xml += "<rule action='accept' direction='in' priority='300'>"
if rule.cidr:
net, mask = self._get_net_and_mask(rule.cidr)
- rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % (rule.protocol, net, mask)
+ rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
+ (rule.protocol, net, mask)
if rule.protocol in ['tcp', 'udp']:
rule_xml += "dstportstart='%s' dstportend='%s' " % \
(rule.from_port, rule.to_port)
elif rule.protocol == 'icmp':
- logging.info('rule.protocol: %r, rule.from_port: %r, rule.to_port: %r' % (rule.protocol, rule.from_port, rule.to_port))
+ logging.info('rule.protocol: %r, rule.from_port: %r, '
+ 'rule.to_port: %r' %
+ (rule.protocol, rule.from_port, rule.to_port))
if rule.from_port != -1:
rule_xml += "type='%s' " % rule.from_port
if rule.to_port != -1:
@@ -697,5 +785,6 @@ class NWFilterFirewall(object):
rule_xml += '/>\n'
rule_xml += "</rule>\n"
- xml = '''<filter name='nova-secgroup-%s' chain='ipv4'>%s</filter>''' % (security_group_id, rule_xml,)
+ xml = "<filter name='nova-secgroup-%s' chain='ipv4'>%s</filter>" % \
+ (security_group_id, rule_xml,)
return xml
diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py
index 04e830b64..a17e405ab 100644
--- a/nova/virt/xenapi.py
+++ b/nova/virt/xenapi.py
@@ -75,12 +75,11 @@ flags.DEFINE_float('xenapi_task_poll_interval',
XENAPI_POWER_STATE = {
- 'Halted' : power_state.SHUTDOWN,
- 'Running' : power_state.RUNNING,
- 'Paused' : power_state.PAUSED,
- 'Suspended': power_state.SHUTDOWN, # FIXME
- 'Crashed' : power_state.CRASHED
-}
+ 'Halted': power_state.SHUTDOWN,
+ 'Running': power_state.RUNNING,
+ 'Paused': power_state.PAUSED,
+ 'Suspended': power_state.SHUTDOWN, # FIXME
+ 'Crashed': power_state.CRASHED}
def get_connection(_):
@@ -90,12 +89,15 @@ def get_connection(_):
# library when not using XenAPI.
global XenAPI
if XenAPI is None:
- XenAPI = __import__('XenAPI')
+ XenAPI = __import__('XenAPI')
url = FLAGS.xenapi_connection_url
username = FLAGS.xenapi_connection_username
password = FLAGS.xenapi_connection_password
if not url or password is None:
- raise Exception('Must specify xenapi_connection_url, xenapi_connection_username (optionally), and xenapi_connection_password to use connection_type=xenapi')
+ raise Exception('Must specify xenapi_connection_url, '
+ 'xenapi_connection_username (optionally), and '
+ 'xenapi_connection_password to use '
+ 'connection_type=xenapi')
return XenAPIConnection(url, username, password)
@@ -141,7 +143,7 @@ class XenAPIConnection(object):
def _create_vm(self, instance, kernel, ramdisk):
"""Create a VM record. Returns a Deferred that gives the new
VM reference."""
-
+
instance_type = instance_types.INSTANCE_TYPES[instance.instance_type]
mem = str(long(instance_type['memory_mb']) * 1024 * 1024)
vcpus = str(instance_type['vcpus'])
@@ -183,7 +185,7 @@ class XenAPIConnection(object):
def _create_vbd(self, vm_ref, vdi_ref, userdevice, bootable):
"""Create a VBD record. Returns a Deferred that gives the new
VBD reference."""
-
+
vbd_rec = {}
vbd_rec['VM'] = vm_ref
vbd_rec['VDI'] = vdi_ref
@@ -207,10 +209,10 @@ class XenAPIConnection(object):
def _create_vif(self, vm_ref, network_ref, mac_address):
"""Create a VIF record. Returns a Deferred that gives the new
VIF reference."""
-
+
vif_rec = {}
vif_rec['device'] = '0'
- vif_rec['network']= network_ref
+ vif_rec['network'] = network_ref
vif_rec['VM'] = vm_ref
vif_rec['MAC'] = mac_address
vif_rec['MTU'] = '1500'
@@ -303,7 +305,7 @@ class XenAPIConnection(object):
def _lookup_blocking(self, i):
vms = self._conn.xenapi.VM.get_by_name_label(i)
- n = len(vms)
+ n = len(vms)
if n == 0:
return None
elif n > 1:
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
index cca619550..3fa29ba37 100644
--- a/nova/volume/driver.py
+++ b/nova/volume/driver.py
@@ -61,7 +61,6 @@ class AOEDriver(object):
"Try number %s", tries)
yield self._execute("sleep %s" % tries ** 2)
-
@defer.inlineCallbacks
def create_volume(self, volume_name, size):
"""Creates a logical volume"""
diff --git a/nova/wsgi.py b/nova/wsgi.py
index b91d91121..eb305a3d3 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -94,11 +94,11 @@ class Middleware(Application):
behavior.
"""
- def __init__(self, application): # pylint: disable-msg=W0231
+ def __init__(self, application): # pylint: disable-msg=W0231
self.application = application
@webob.dec.wsgify
- def __call__(self, req): # pylint: disable-msg=W0221
+ def __call__(self, req): # pylint: disable-msg=W0221
"""Override to implement middleware behavior."""
return self.application
@@ -216,7 +216,7 @@ class Controller(object):
arg_dict['req'] = req
result = method(**arg_dict)
if type(result) is dict:
- return self._serialize(result, req)
+ return self._serialize(result, req)
else:
return result
@@ -240,6 +240,7 @@ class Controller(object):
serializer = Serializer(request.environ, _metadata)
return serializer.deserialize(data)
+
class Serializer(object):
"""
Serializes and deserializes dictionaries to certain MIME types.
@@ -263,12 +264,13 @@ class Serializer(object):
elif 'application/xml' in req.accept:
self.handler = self._to_xml
else:
- self.handler = self._to_json # default
+ # This is the default
+ self.handler = self._to_json
def to_content_type(self, data):
"""
Serialize a dictionary into a string.
-
+
The format of the string will be decided based on the Content Type
requested in self.environ: by Accept: header, or by URL suffix.
"""
@@ -277,7 +279,7 @@ class Serializer(object):
def deserialize(self, datastring):
"""
Deserialize a string to a dictionary.
-
+
The string must be in the format of a supported MIME type.
"""
datastring = datastring.strip()
@@ -298,7 +300,7 @@ class Serializer(object):
def _from_xml_node(self, node, listnames):
"""
Convert a minidom node to a simple Python type.
-
+
listnames is a collection of names of XML nodes whose subnodes should
be considered list items.
"""
@@ -312,7 +314,8 @@ class Serializer(object):
result[attr] = node.attributes[attr].nodeValue
for child in node.childNodes:
if child.nodeType != node.TEXT_NODE:
- result[child.nodeName] = self._from_xml_node(child, listnames)
+ result[child.nodeName] = self._from_xml_node(child,
+ listnames)
return result
def _to_json(self, data):
@@ -347,7 +350,8 @@ class Serializer(object):
else:
node = self._to_xml_node(doc, metadata, k, v)
result.appendChild(node)
- else: # atom
+ else:
+ # Type is atom
node = doc.createTextNode(str(data))
result.appendChild(node)
return result
diff --git a/pylintrc b/pylintrc
index 024802835..f07b14980 100644
--- a/pylintrc
+++ b/pylintrc
@@ -13,7 +13,7 @@ argument-rgx=[a-z_][a-z0-9_]{1,30}$
# Method names should be at least 3 characters long
# and be lowecased with underscores
-method-rgx=[a-z_][a-z0-9_]{2,50}$
+method-rgx=([a-z_][a-z0-9_]{2,50}|setUp|tearDown)$
# Module names matching nova-* are ok (files in bin/)
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(nova-[a-z0-9_-]+))$
diff --git a/tools/pip-requires b/tools/pip-requires
index c76fad86f..548073326 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -13,7 +13,6 @@ python-daemon==1.5.5
python-gflags==1.3
redis==2.0.0
routes==1.12.3
-tornado==1.0
WebOb==0.9.8
wsgiref==0.1.2
zope.interface==3.6.1