author     jaypipes@gmail.com <>  2010-08-30 10:36:59 -0400
committer  jaypipes@gmail.com <>  2010-08-30 10:36:59 -0400
commit     a1791cdca8dbca8f9bf3555b21324503aba58fda (patch)
tree       12f297f1616172ca7e4bce76ecac1dcd737c83af
parent     bf2549282067a7a824ea97e66a5b2f0ca06416bd (diff)
parent     5f14a7955b9ef90afed91bda0343130d83e15a73 (diff)
download   nova-a1791cdca8dbca8f9bf3555b21324503aba58fda.tar.gz
           nova-a1791cdca8dbca8f9bf3555b21324503aba58fda.tar.xz
           nova-a1791cdca8dbca8f9bf3555b21324503aba58fda.zip
Resolve conflicts and merge trunk
-rwxr-xr-x  bin/nova-api                                            8
-rwxr-xr-x  bin/nova-api-new (renamed from bin/nova-rsapi)          8
-rwxr-xr-x  bin/nova-compute                                        2
-rwxr-xr-x  bin/nova-dhcpbridge                                    21
-rwxr-xr-x  bin/nova-import-canonical-imagestore                   10
-rwxr-xr-x  bin/nova-instancemonitor                                5
-rwxr-xr-x  bin/nova-manage                                        13
-rwxr-xr-x  bin/nova-network                                        1
-rwxr-xr-x  bin/nova-objectstore                                    2
-rwxr-xr-x  bin/nova-volume                                         2
-rw-r--r--  doc/source/getting.started.rst                          1
-rw-r--r--  nova/adminclient.py                                    48
-rw-r--r--  nova/api/__init__.py                                   37
-rw-r--r--  nova/api/ec2/__init__.py                               42
-rw-r--r--  nova/api/rackspace/__init__.py                         81
-rw-r--r--  nova/api/rackspace/base.py                             30
-rw-r--r--  nova/api/rackspace/flavors.py                          18
-rw-r--r--  nova/api/rackspace/images.py                          117
-rw-r--r--  nova/api/rackspace/notes.txt                           23
-rw-r--r--  nova/api/rackspace/servers.py                          83
-rw-r--r--  nova/api/rackspace/sharedipgroups.py                   18
-rw-r--r--  nova/api/test.py                                       61
-rw-r--r--  nova/auth/fakeldap.py                                  38
-rw-r--r--  nova/auth/ldapdriver.py                                98
-rw-r--r--  nova/auth/manager.py                                  116
-rw-r--r--  nova/auth/rbac.py                                      40
-rw-r--r--  nova/auth/signer.py                                    57
-rw-r--r--  nova/cloudpipe/api.py                                   3
-rwxr-xr-x  nova/cloudpipe/bootscript.sh                            4
-rw-r--r--  nova/cloudpipe/pipelib.py                               2
-rw-r--r--  nova/compute/disk.py                                    4
-rw-r--r--  nova/compute/model.py                                   2
-rw-r--r--  nova/compute/monitor.py                                35
-rw-r--r--  nova/compute/service.py                                 1
-rw-r--r--  nova/crypto.py                                          8
-rw-r--r--  nova/datastore.py                                      15
-rw-r--r--  nova/endpoint/__init__.py                              32
-rw-r--r--  nova/endpoint/admin.py                                 19
-rwxr-xr-x  nova/endpoint/api.py                                    7
-rw-r--r--  nova/endpoint/cloud.py                                159
-rw-r--r--  nova/endpoint/images.py                                 7
-rw-r--r--  nova/endpoint/rackspace.py                            183
-rw-r--r--  nova/exception.py                                       8
-rw-r--r--  nova/fakerabbit.py                                      5
-rw-r--r--  nova/flags.py                                          42
-rw-r--r--  nova/image/service.py                                  90
-rw-r--r--  nova/network/exception.py                              22
-rw-r--r--  nova/network/linux_net.py                             121
-rw-r--r--  nova/network/model.py                                 418
-rw-r--r--  nova/network/service.py                                88
-rw-r--r--  nova/network/vpn.py                                    42
-rw-r--r--  nova/objectstore/bucket.py                              1
-rw-r--r--  nova/objectstore/handler.py                            58
-rw-r--r--  nova/objectstore/image.py                              20
-rw-r--r--  nova/objectstore/stored.py                              4
-rw-r--r--  nova/process.py                                       174
-rw-r--r--  nova/rpc.py                                           146
-rw-r--r--  nova/server.py                                          6
-rw-r--r--  nova/test.py                                            5
-rw-r--r--  nova/tests/auth_unittest.py                            16
-rw-r--r--  nova/tests/cloud_unittest.py                            6
-rw-r--r--  nova/tests/network_unittest.py                        248
-rw-r--r--  nova/tests/process_unittest.py                          2
-rw-r--r--  nova/tests/rpc_unittest.py                             85
-rw-r--r--  nova/tests/virt_unittest.py                            69
-rw-r--r--  nova/tests/volume_unittest.py                          77
-rw-r--r--  nova/twistd.py                                         26
-rw-r--r--  nova/utils.py                                          52
-rw-r--r--  nova/validate.py                                        1
-rw-r--r--  nova/virt/connection.py                                 9
-rw-r--r--  nova/virt/fake.py                                     144
-rw-r--r--  nova/virt/images.py                                    22
-rw-r--r--  nova/virt/interfaces.template (renamed from nova/compute/interfaces.template)           0
-rw-r--r--  nova/virt/libvirt.qemu.xml.template (renamed from nova/compute/libvirt.xml.template)    0
-rw-r--r--  nova/virt/libvirt.uml.xml.template                     25
-rw-r--r--  nova/virt/libvirt_conn.py                              90
-rw-r--r--  nova/virt/xenapi.py                                   290
-rw-r--r--  nova/volume/service.py                                133
-rw-r--r--  nova/wsgi.py                                          243
-rw-r--r--  nova/wsgi_test.py                                      96
-rw-r--r--  plugins/xenapi/README                                   6
-rw-r--r--  plugins/xenapi/etc/xapi.d/plugins/objectstore         231
-rwxr-xr-x  plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py   216
-rw-r--r--  pylintrc                                               19
-rw-r--r--  run_tests.py                                           10
-rwxr-xr-x  run_tests.sh                                           63
-rw-r--r--  tools/install_venv.py                                  78
-rw-r--r--  tools/pip-requires                                      5
88 files changed, 3646 insertions, 1327 deletions
diff --git a/bin/nova-api b/bin/nova-api
index 13baf22a7..a3ad5a0e1 100755
--- a/bin/nova-api
+++ b/bin/nova-api
@@ -26,7 +26,6 @@ from tornado import httpserver
from tornado import ioloop
from nova import flags
-from nova import rpc
from nova import server
from nova import utils
from nova.endpoint import admin
@@ -43,14 +42,7 @@ def main(_argv):
'Admin': admin.AdminController()}
_app = api.APIServerApplication(controllers)
- conn = rpc.Connection.instance()
- consumer = rpc.AdapterConsumer(connection=conn,
- topic=FLAGS.cloud_topic,
- proxy=controllers['Cloud'])
-
io_inst = ioloop.IOLoop.instance()
- _injected = consumer.attach_to_tornado(io_inst)
-
http_server = httpserver.HTTPServer(_app)
http_server.listen(FLAGS.cc_port)
logging.debug('Started HTTP server on %s', FLAGS.cc_port)
diff --git a/bin/nova-rsapi b/bin/nova-api-new
index 026880d5a..fda42339c 100755
--- a/bin/nova-rsapi
+++ b/bin/nova-api-new
@@ -18,17 +18,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""
- Daemon for the Rackspace API endpoint.
+Nova API daemon.
"""
+from nova import api
from nova import flags
from nova import utils
from nova import wsgi
-from nova.endpoint import rackspace
FLAGS = flags.FLAGS
-flags.DEFINE_integer('cc_port', 8773, 'cloud controller port')
+flags.DEFINE_integer('api_port', 8773, 'API port')
if __name__ == '__main__':
utils.default_flagfile()
- wsgi.run_server(rackspace.API(), FLAGS.cc_port)
+ wsgi.run_server(api.API(), FLAGS.api_port)
diff --git a/bin/nova-compute b/bin/nova-compute
index e0c12354f..ed9a55565 100755
--- a/bin/nova-compute
+++ b/bin/nova-compute
@@ -29,4 +29,4 @@ if __name__ == '__main__':
twistd.serve(__file__)
if __name__ == '__builtin__':
- application = service.ComputeService.create()
+ application = service.ComputeService.create() # pylint: disable-msg=C0103
diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index ed1af206a..1f2ed4f89 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -40,38 +40,37 @@ from nova.network import service
FLAGS = flags.FLAGS
-def add_lease(_mac, ip, _hostname, _interface):
+def add_lease(_mac, ip_address, _hostname, _interface):
"""Set the IP that was assigned by the DHCP server."""
if FLAGS.fake_rabbit:
- service.VlanNetworkService().lease_ip(ip)
+ service.VlanNetworkService().lease_ip(ip_address)
else:
rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name),
{"method": "lease_ip",
- "args": {"fixed_ip": ip}})
+ "args": {"fixed_ip": ip_address}})
-def old_lease(_mac, _ip, _hostname, _interface):
+def old_lease(_mac, _ip_address, _hostname, _interface):
"""Do nothing, just an old lease update."""
logging.debug("Adopted old lease or got a change of mac/hostname")
-def del_lease(_mac, ip, _hostname, _interface):
- """Remove the leased IP from the databases."""
+def del_lease(_mac, ip_address, _hostname, _interface):
+ """Called when a lease expires."""
if FLAGS.fake_rabbit:
- service.VlanNetworkService().release_ip(ip)
+ service.VlanNetworkService().release_ip(ip_address)
else:
rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name),
{"method": "release_ip",
- "args": {"fixed_ip": ip}})
+ "args": {"fixed_ip": ip_address}})
def init_leases(interface):
"""Get the list of hosts for an interface."""
net = model.get_network_by_interface(interface)
res = ""
- for host_name in net.hosts:
- res += "%s\n" % linux_net.hostDHCP(net, host_name,
- net.hosts[host_name])
+ for address in net.assigned_objs:
+ res += "%s\n" % linux_net.host_dhcp(address)
return res
diff --git a/bin/nova-import-canonical-imagestore b/bin/nova-import-canonical-imagestore
index 5165109b2..2bc61cf0c 100755
--- a/bin/nova-import-canonical-imagestore
+++ b/bin/nova-import-canonical-imagestore
@@ -35,12 +35,12 @@ from nova.objectstore import image
FLAGS = flags.FLAGS
-api_url = 'https://imagestore.canonical.com/api/dashboard'
+API_URL = 'https://imagestore.canonical.com/api/dashboard'
def get_images():
"""Get a list of the images from the imagestore URL."""
- images = json.load(urllib2.urlopen(api_url))['images']
+ images = json.load(urllib2.urlopen(API_URL))['images']
images = [img for img in images if img['title'].find('amd64') > -1]
return images
@@ -56,21 +56,21 @@ def download(img):
for f in img['files']:
if f['kind'] == 'kernel':
dest = os.path.join(tempdir, 'kernel')
- subprocess.call(['curl', f['url'], '-o', dest])
+ subprocess.call(['curl', '--fail', f['url'], '-o', dest])
kernel_id = image.Image.add(dest,
description='kernel/' + img['title'], kernel=True)
for f in img['files']:
if f['kind'] == 'ramdisk':
dest = os.path.join(tempdir, 'ramdisk')
- subprocess.call(['curl', f['url'], '-o', dest])
+ subprocess.call(['curl', '--fail', f['url'], '-o', dest])
ramdisk_id = image.Image.add(dest,
description='ramdisk/' + img['title'], ramdisk=True)
for f in img['files']:
if f['kind'] == 'image':
dest = os.path.join(tempdir, 'image')
- subprocess.call(['curl', f['url'], '-o', dest])
+ subprocess.call(['curl', '--fail', f['url'], '-o', dest])
ramdisk_id = image.Image.add(dest,
description=img['title'], kernel=kernel_id, ramdisk=ramdisk_id)
diff --git a/bin/nova-instancemonitor b/bin/nova-instancemonitor
index 911fb6f42..fbac58889 100755
--- a/bin/nova-instancemonitor
+++ b/bin/nova-instancemonitor
@@ -35,9 +35,10 @@ if __name__ == '__main__':
if __name__ == '__builtin__':
logging.warn('Starting instance monitor')
- m = monitor.InstanceMonitor()
+ # pylint: disable-msg=C0103
+ monitor = monitor.InstanceMonitor()
# This is the parent service that twistd will be looking for when it
# parses this file, return it so that we can get it into globals below
application = service.Application('nova-instancemonitor')
- m.setServiceParent(application)
+ monitor.setServiceParent(application)
diff --git a/bin/nova-manage b/bin/nova-manage
index 2dd569df0..145294d3d 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -56,7 +56,8 @@ class VpnCommands(object):
vpn = self._vpn_for(project.id)
if vpn:
command = "ping -c1 -w1 %s > /dev/null; echo $?"
- out, _err = utils.execute(command % vpn['private_dns_name'])
+ out, _err = utils.execute( command % vpn['private_dns_name'],
+ check_exit_code=False)
if out.strip() == '0':
net = 'up'
else:
@@ -203,15 +204,15 @@ class ProjectCommands(object):
arguments: project user"""
self.manager.remove_from_project(user, project)
- def create_zip(self, project_id, user_id, filename='nova.zip'):
+ def zipfile(self, project_id, user_id, filename='nova.zip'):
"""Exports credentials for project to a zip file
arguments: project_id user_id [filename='nova.zip']"""
- zip_file = self.manager.get_credentials(project_id, user_id)
+ zip_file = self.manager.get_credentials(user_id, project_id)
with open(filename, 'w') as f:
f.write(zip_file)
-categories = [
+CATEGORIES = [
('user', UserCommands),
('project', ProjectCommands),
('role', RoleCommands),
@@ -258,11 +259,11 @@ def main():
if len(argv) < 1:
print script_name + " category action [<args>]"
print "Available categories:"
- for k, _ in categories:
+ for k, _ in CATEGORIES:
print "\t%s" % k
sys.exit(2)
category = argv.pop(0)
- matches = lazy_match(category, categories)
+ matches = lazy_match(category, CATEGORIES)
# instantiate the command group object
category, fn = matches[0]
command_object = fn()
diff --git a/bin/nova-network b/bin/nova-network
index ba9063f56..5753aafbe 100755
--- a/bin/nova-network
+++ b/bin/nova-network
@@ -33,4 +33,5 @@ if __name__ == '__main__':
twistd.serve(__file__)
if __name__ == '__builtin__':
+ # pylint: disable-msg=C0103
application = service.type_to_class(FLAGS.network_type).create()
diff --git a/bin/nova-objectstore b/bin/nova-objectstore
index 02f2bcb48..afcf13e24 100755
--- a/bin/nova-objectstore
+++ b/bin/nova-objectstore
@@ -35,4 +35,4 @@ if __name__ == '__main__':
if __name__ == '__builtin__':
utils.default_flagfile()
- application = handler.get_application()
+ application = handler.get_application() # pylint: disable-msg=C0103
diff --git a/bin/nova-volume b/bin/nova-volume
index f7a8fad37..8ef006ebc 100755
--- a/bin/nova-volume
+++ b/bin/nova-volume
@@ -29,4 +29,4 @@ if __name__ == '__main__':
twistd.serve(__file__)
if __name__ == '__builtin__':
- application = service.VolumeService.create()
+ application = service.VolumeService.create() # pylint: disable-msg=C0103
diff --git a/doc/source/getting.started.rst b/doc/source/getting.started.rst
index 3eadd0882..f683bb256 100644
--- a/doc/source/getting.started.rst
+++ b/doc/source/getting.started.rst
@@ -40,6 +40,7 @@ Python libraries we don't vendor
* M2Crypto: python library interface for openssl
* curl
+* XenAPI: Needed only for Xen Cloud Platform or XenServer support. Available from http://wiki.xensource.com/xenwiki/XCP_SDK or http://community.citrix.com/cdn/xs/sdks.
Vendored python libraries (don't require any installation)
diff --git a/nova/adminclient.py b/nova/adminclient.py
index 25d5e71cb..0ca32b1e5 100644
--- a/nova/adminclient.py
+++ b/nova/adminclient.py
@@ -20,6 +20,7 @@ Nova User API client library.
"""
import base64
+
import boto
from boto.ec2.regioninfo import RegionInfo
@@ -57,6 +58,30 @@ class UserInfo(object):
elif name == 'secretkey':
self.secretkey = str(value)
+
+class UserRole(object):
+ """
+ Information about a Nova user's role, as parsed through SAX.
+ Fields include:
+ role
+ """
+ def __init__(self, connection=None):
+ self.connection = connection
+ self.role = None
+
+ def __repr__(self):
+ return 'UserRole:%s' % self.role
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'role':
+ self.role = value
+ else:
+ setattr(self, name, str(value))
+
+
class ProjectInfo(object):
"""
Information about a Nova project, as parsed through SAX
@@ -92,12 +117,14 @@ class ProjectInfo(object):
else:
setattr(self, name, str(value))
+
class ProjectMember(object):
"""
Information about a Nova project member, as parsed through SAX.
Fields include:
memberId
"""
+
def __init__(self, connection=None):
self.connection = connection
self.memberId = None
@@ -113,8 +140,8 @@ class ProjectMember(object):
self.memberId = value
else:
setattr(self, name, str(value))
-
+
class HostInfo(object):
"""
Information about a Nova Host, as parsed through SAX:
@@ -142,6 +169,7 @@ class HostInfo(object):
def endElement(self, name, value, connection):
setattr(self, name, value)
+
class NovaAdminClient(object):
def __init__(self, clc_ip='127.0.0.1', region='nova', access_key='admin',
secret_key='admin', **kwargs):
@@ -196,6 +224,24 @@ class NovaAdminClient(object):
""" deletes a user """
return self.apiconn.get_object('DeregisterUser', {'Name': username}, UserInfo)
+ def get_roles(self, project_roles=True):
+ """Returns a list of available roles."""
+ return self.apiconn.get_list('DescribeRoles',
+ {'ProjectRoles': project_roles},
+ [('item', UserRole)])
+
+ def get_user_roles(self, user, project=None):
+ """Returns a list of roles for the given user.
+ Omitting project will return any global roles that the user has.
+ Specifying project will return only project specific roles.
+ """
+ params = {'User':user}
+ if project:
+ params['Project'] = project
+ return self.apiconn.get_list('DescribeUserRoles',
+ params,
+ [('item', UserRole)])
+
def add_user_role(self, user, role, project=None):
"""
Add a role to a user either globally or for a specific project.
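The new role calls follow the same shape as the existing adminclient helpers; a minimal usage sketch, with a hypothetical endpoint and user:

from nova import adminclient

admin = adminclient.NovaAdminClient(clc_ip='127.0.0.1')  # hypothetical endpoint
print admin.get_roles(project_roles=False)          # global roles
print admin.get_user_roles('someuser')              # someuser's global roles
print admin.get_user_roles('someuser', 'someproj')  # roles within one project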
diff --git a/nova/api/__init__.py b/nova/api/__init__.py
new file mode 100644
index 000000000..b9b9e3988
--- /dev/null
+++ b/nova/api/__init__.py
@@ -0,0 +1,37 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Root WSGI middleware for all API controllers.
+"""
+
+import routes
+
+from nova import wsgi
+from nova.api import ec2
+from nova.api import rackspace
+
+
+class API(wsgi.Router):
+ """Routes top-level requests to the appropriate controller."""
+
+ def __init__(self):
+ mapper = routes.Mapper()
+ mapper.connect("/v1.0/{path_info:.*}", controller=rackspace.API())
+ mapper.connect("/ec2/{path_info:.*}", controller=ec2.API())
+ super(API, self).__init__(mapper)
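The two connect() calls above rely on the routes library's {path_info:.*} capture; a standalone sketch of how the dispatch resolves (the controller strings stand in for the real API objects):

import routes

mapper = routes.Mapper()
mapper.connect("/v1.0/{path_info:.*}", controller="rackspace")
mapper.connect("/ec2/{path_info:.*}", controller="ec2")

print mapper.match("/v1.0/servers/1")  # {'controller': 'rackspace', 'path_info': 'servers/1'}
print mapper.match("/ec2/cloud")       # {'controller': 'ec2', 'path_info': 'cloud'}
print mapper.match("/elsewhere")       # None -- no route, so the router 404s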
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
new file mode 100644
index 000000000..6eec0abf7
--- /dev/null
+++ b/nova/api/ec2/__init__.py
@@ -0,0 +1,42 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+WSGI middleware for EC2 API controllers.
+"""
+
+import routes
+import webob.dec
+
+from nova import wsgi
+
+
+class API(wsgi.Router):
+ """Routes EC2 requests to the appropriate controller."""
+
+ def __init__(self):
+ mapper = routes.Mapper()
+ mapper.connect(None, "{all:.*}", controller=self.dummy)
+ super(API, self).__init__(mapper)
+
+ @staticmethod
+ @webob.dec.wsgify
+ def dummy(req):
+ """Temporary dummy controller."""
+ msg = "dummy response -- please hook up __init__() to cloud.py instead"
+ return repr({'dummy': msg,
+ 'kwargs': repr(req.environ['wsgiorg.routing_args'][1])})
diff --git a/nova/api/rackspace/__init__.py b/nova/api/rackspace/__init__.py
new file mode 100644
index 000000000..27e78f801
--- /dev/null
+++ b/nova/api/rackspace/__init__.py
@@ -0,0 +1,81 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+WSGI middleware for Rackspace API controllers.
+"""
+
+import json
+import time
+
+import routes
+import webob.dec
+import webob.exc
+
+from nova import flags
+from nova import wsgi
+from nova.api.rackspace import flavors
+from nova.api.rackspace import images
+from nova.api.rackspace import servers
+from nova.api.rackspace import sharedipgroups
+from nova.auth import manager
+
+
+class API(wsgi.Middleware):
+ """WSGI entry point for all Rackspace API requests."""
+
+ def __init__(self):
+ app = AuthMiddleware(APIRouter())
+ super(API, self).__init__(app)
+
+
+class AuthMiddleware(wsgi.Middleware):
+ """Authorize the rackspace API request or return an HTTP Forbidden."""
+
+ #TODO(gundlach): isn't this the old Nova API's auth? Should it be replaced
+ #with correct RS API auth?
+
+ @webob.dec.wsgify
+ def __call__(self, req):
+ context = {}
+ if "HTTP_X_AUTH_TOKEN" in req.environ:
+ context['user'] = manager.AuthManager().get_user_from_access_key(
+ req.environ['HTTP_X_AUTH_TOKEN'])
+ if context['user']:
+ context['project'] = manager.AuthManager().get_project(
+ context['user'].name)
+ if "user" not in context:
+ return webob.exc.HTTPForbidden()
+ req.environ['nova.context'] = context
+ return self.application
+
+
+class APIRouter(wsgi.Router):
+ """
+ Routes requests on the Rackspace API to the appropriate controller
+ and method.
+ """
+
+ def __init__(self):
+ mapper = routes.Mapper()
+ mapper.resource("server", "servers", controller=servers.Controller())
+ mapper.resource("image", "images", controller=images.Controller())
+ mapper.resource("flavor", "flavors", controller=flavors.Controller())
+ mapper.resource("sharedipgroup", "sharedipgroups",
+ controller=sharedipgroups.Controller())
+ super(APIRouter, self).__init__(mapper)
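Each mapper.resource() call above expands into the conventional RESTful route set; roughly what routes generates, trimmed to the actions these controllers define (a sketch from the routes library's documented behavior):

import routes

mapper = routes.Mapper()
mapper.resource("server", "servers", controller="servers")

# routes now answers (method, path) pairs roughly like this:
#   GET    /servers       -> action 'index'
#   POST   /servers       -> action 'create'
#   GET    /servers/{id}  -> action 'show'
#   PUT    /servers/{id}  -> action 'update'
#   DELETE /servers/{id}  -> action 'delete'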
diff --git a/nova/api/rackspace/base.py b/nova/api/rackspace/base.py
new file mode 100644
index 000000000..dd2c6543c
--- /dev/null
+++ b/nova/api/rackspace/base.py
@@ -0,0 +1,30 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import wsgi
+
+
+class Controller(wsgi.Controller):
+ """TODO(eday): Base controller for all rackspace controllers. What is this
+ for? Is this just Rackspace specific? """
+
+ @classmethod
+ def render(cls, instance):
+ if isinstance(instance, list):
+ return {cls.entity_name: cls.render(instance)}
+ else:
+ return {"TODO": "TODO"}
diff --git a/nova/api/rackspace/flavors.py b/nova/api/rackspace/flavors.py
new file mode 100644
index 000000000..986f11434
--- /dev/null
+++ b/nova/api/rackspace/flavors.py
@@ -0,0 +1,18 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+class Controller(object): pass
diff --git a/nova/api/rackspace/images.py b/nova/api/rackspace/images.py
new file mode 100644
index 000000000..370980fe9
--- /dev/null
+++ b/nova/api/rackspace/images.py
@@ -0,0 +1,117 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import datastore
+from nova import image
+from nova.api.rackspace import base
+from webob import exc
+
+class Controller(base.Controller):
+
+ _serialization_metadata = {
+ 'application/xml': {
+ "attributes": {
+ "image": [ "id", "name", "updated", "created", "status",
+ "serverId", "progress" ]
+ }
+ }
+ }
+
+ def __init__(self):
+ self._service = image.service.ImageService.load()
+ self._id_translator = RackspaceAPIImageIdTranslator()
+
+ def _to_rs_id(self, image_id):
+ """
+ Convert an image id from the format of our ImageService strategy
+ to the Rackspace API format (an int).
+ """
+ strategy = self._service.__class__.__name__
+ return self._id_translator.to_rs_id(strategy, image_id)
+
+ def _from_rs_id(self, rs_image_id):
+ """
+ Convert an image id from the Rackspace API format (an int) to the
+ format of our ImageService strategy.
+ """
+ strategy = self._service.__class__.__name__
+ return self._id_translator.from_rs_id(strategy, rs_image_id)
+
+ def index(self, req):
+ """Return all public images."""
+ data = self._service.index()
+ for img in data:
+ img['id'] = self._to_rs_id(img['id'])
+ return dict(images=data)
+
+ def show(self, req, id):
+ """Return data about the given image id."""
+ opaque_id = self._from_rs_id(id)
+ img = self._service.show(opaque_id)
+ img['id'] = id
+ return dict(image=img)
+
+ def delete(self, req, id):
+ # Only public images are supported for now.
+ raise exc.HTTPNotFound()
+
+ def create(self, req):
+ # Only public images are supported for now, so a request to
+ # make a backup of a server cannot be supported.
+ raise exc.HTTPNotFound()
+
+ def update(self, req, id):
+ # Users may not modify public images, and that's all that
+ # we support for now.
+ raise exc.HTTPNotFound()
+
+
+class RackspaceAPIImageIdTranslator(object):
+ """
+ Converts Rackspace API image ids to and from the id format for a given
+ strategy.
+ """
+
+ def __init__(self):
+ self._store = datastore.Redis.instance()
+ self._key_template = "rsapi.idstrategies.image.%s.%s"
+
+ def to_rs_id(self, strategy_name, opaque_id):
+ """Convert an id from a strategy-specific one to a Rackspace one."""
+ key = self._key_template % (strategy_name, "fwd")
+ result = self._store.hget(key, str(opaque_id))
+ if result: # we have a mapping from opaque to RS for this strategy
+ return int(result)
+ else:
+ # Store the mapping.
+ nextid = self._store.incr("%s.lastid" % key)
+ if self._store.hsetnx(key, str(opaque_id), nextid):
+ # If someone else didn't beat us to it, store the reverse
+ # mapping as well.
+ key = self._key_template % (strategy_name, "rev")
+ self._store.hset(key, nextid, str(opaque_id))
+ return nextid
+ else:
+ # Someone beat us to it; use their number instead, and
+ # discard nextid (which is OK -- we don't require that
+ # every int id be used.)
+ return int(self._store.hget(key, str(opaque_id)))
+
+ def from_rs_id(self, strategy_name, rs_id):
+ """Convert a Rackspace id to a strategy-specific one."""
+ key = self._key_template % (strategy_name, "rev")
+ return self._store.hget(key, rs_id)
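The translator above leans on Redis primitives to stay race-safe: incr hands out candidate ids atomically, and hsetnx guarantees only the first writer's mapping wins. A standalone sketch of the same pattern with redis-py (key names here are illustrative):

import redis

store = redis.Redis()

def to_int_id(opaque_id, fwd_key='demo.idmap.fwd', rev_key='demo.idmap.rev'):
    """Map an opaque string id to a stable small integer, first-writer-wins."""
    existing = store.hget(fwd_key, opaque_id)
    if existing:
        return int(existing)
    candidate = store.incr('%s.lastid' % fwd_key)  # atomic counter
    if store.hsetnx(fwd_key, opaque_id, candidate):
        # We won the race; record the reverse mapping too.
        store.hset(rev_key, candidate, opaque_id)
        return candidate
    # Another writer beat us; their id stands and our candidate goes unused.
    return int(store.hget(fwd_key, opaque_id))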
diff --git a/nova/api/rackspace/notes.txt b/nova/api/rackspace/notes.txt
new file mode 100644
index 000000000..e133bf5ea
--- /dev/null
+++ b/nova/api/rackspace/notes.txt
@@ -0,0 +1,23 @@
+We will need:
+
+ImageService
+a service that can do crud on image information. not user-specific. opaque
+image ids.
+
+GlanceImageService(ImageService):
+image ids are URIs.
+
+LocalImageService(ImageService):
+image ids are random strings.
+
+RackspaceAPITranslationStore:
+translates RS server/images/flavor/etc ids into formats required
+by a given ImageService strategy.
+
+api.rackspace.images.Controller:
+uses an ImageService strategy behind the scenes to do its fetching; it just
+converts int image id into a strategy-specific image id.
+
+who maintains the mapping from user to [images he owns]? nobody, because
+we have no way of enforcing access to his images, without kryptex which
+won't be in Austin.
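Sketched as code, the notes describe a small strategy interface with pluggable backends (everything beyond the three class names is a guess at the intended shape):

class ImageService(object):
    """CRUD on image metadata; ids are opaque to callers."""

    def index(self):
        raise NotImplementedError()

    def show(self, image_id):
        raise NotImplementedError()


class GlanceImageService(ImageService):
    """Strategy whose image ids are URIs."""


class LocalImageService(ImageService):
    """Strategy whose image ids are random strings."""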
diff --git a/nova/api/rackspace/servers.py b/nova/api/rackspace/servers.py
new file mode 100644
index 000000000..25d1fe9c8
--- /dev/null
+++ b/nova/api/rackspace/servers.py
@@ -0,0 +1,83 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import rpc
+from nova.compute import model as compute
+from nova.api.rackspace import base
+
+
+class Controller(base.Controller):
+ entity_name = 'servers'
+
+ def index(self, **kwargs):
+ instances = []
+ for inst in compute.InstanceDirectory().all:
+ instances.append(instance_details(inst))
+
+ def show(self, **kwargs):
+ instance_id = kwargs['id']
+ return compute.InstanceDirectory().get(instance_id)
+
+ def delete(self, **kwargs):
+ instance_id = kwargs['id']
+ instance = compute.InstanceDirectory().get(instance_id)
+ if not instance:
+ raise ServerNotFound("The requested server was not found")
+ instance.destroy()
+ return True
+
+ def create(self, **kwargs):
+ inst = self.build_server_instance(kwargs['server'])
+ rpc.cast(
+ FLAGS.compute_topic, {
+ "method": "run_instance",
+ "args": {"instance_id": inst.instance_id}})
+
+ def update(self, **kwargs):
+ instance_id = kwargs['id']
+ instance = compute.InstanceDirectory().get(instance_id)
+ if not instance:
+ raise ServerNotFound("The requested server was not found")
+ instance.update(kwargs['server'])
+ instance.save()
+
+ def build_server_instance(self, env):
+ """Build instance data structure and save it to the data store."""
+ reservation = utils.generate_uid('r')
+ ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
+ inst = self.instdir.new()
+ inst['name'] = env['server']['name']
+ inst['image_id'] = env['server']['imageId']
+ inst['instance_type'] = env['server']['flavorId']
+ inst['user_id'] = env['user']['id']
+ inst['project_id'] = env['project']['id']
+ inst['reservation_id'] = reservation
+ inst['launch_time'] = ltime
+ inst['mac_address'] = utils.generate_mac()
+ address = self.network.allocate_ip(
+ inst['user_id'],
+ inst['project_id'],
+ mac=inst['mac_address'])
+ inst['private_dns_name'] = str(address)
+ inst['bridge_name'] = network.BridgedNetwork.get_network_for_project(
+ inst['user_id'],
+ inst['project_id'],
+ 'default')['bridge_name']
+ # key_data, key_name, ami_launch_index
+ # TODO(todd): key data or root password
+ inst.save()
+ return inst
diff --git a/nova/api/rackspace/sharedipgroups.py b/nova/api/rackspace/sharedipgroups.py
new file mode 100644
index 000000000..986f11434
--- /dev/null
+++ b/nova/api/rackspace/sharedipgroups.py
@@ -0,0 +1,18 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+class Controller(object): pass
diff --git a/nova/api/test.py b/nova/api/test.py
new file mode 100644
index 000000000..51b114b8e
--- /dev/null
+++ b/nova/api/test.py
@@ -0,0 +1,61 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test for the root WSGI middleware for all API controllers.
+"""
+
+import unittest
+
+import stubout
+import webob
+import webob.dec
+
+from nova import api
+
+
+class Test(unittest.TestCase):
+
+ def setUp(self): # pylint: disable-msg=C0103
+ self.stubs = stubout.StubOutForTesting()
+
+ def tearDown(self): # pylint: disable-msg=C0103
+ self.stubs.UnsetAll()
+
+ def test_rackspace(self):
+ self.stubs.Set(api.rackspace, 'API', APIStub)
+ result = webob.Request.blank('/v1.0/cloud').get_response(api.API())
+ self.assertEqual(result.body, "/cloud")
+
+ def test_ec2(self):
+ self.stubs.Set(api.ec2, 'API', APIStub)
+ result = webob.Request.blank('/ec2/cloud').get_response(api.API())
+ self.assertEqual(result.body, "/cloud")
+
+ def test_not_found(self):
+ self.stubs.Set(api.ec2, 'API', APIStub)
+ self.stubs.Set(api.rackspace, 'API', APIStub)
+ result = webob.Request.blank('/test/cloud').get_response(api.API())
+ self.assertNotEqual(result.body, "/cloud")
+
+
+class APIStub(object):
+ """Class to verify request and mark it was called."""
+
+ @webob.dec.wsgify
+ def __call__(self, req):
+ return req.path_info
diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py
index b420924af..bfc3433c5 100644
--- a/nova/auth/fakeldap.py
+++ b/nova/auth/fakeldap.py
@@ -30,20 +30,23 @@ from nova import datastore
SCOPE_BASE = 0
SCOPE_ONELEVEL = 1 # not implemented
-SCOPE_SUBTREE = 2
+SCOPE_SUBTREE = 2
MOD_ADD = 0
MOD_DELETE = 1
-class NO_SUCH_OBJECT(Exception):
+class NO_SUCH_OBJECT(Exception): # pylint: disable-msg=C0103
+ """Duplicate exception class from real LDAP module."""
pass
-class OBJECT_CLASS_VIOLATION(Exception):
+class OBJECT_CLASS_VIOLATION(Exception): # pylint: disable-msg=C0103
+ """Duplicate exception class from real LDAP module."""
pass
-def initialize(uri):
+def initialize(_uri):
+ """Opens a fake connection with an LDAP server."""
return FakeLDAP()
@@ -68,7 +71,7 @@ def _match_query(query, attrs):
# cut off the ! and the nested parentheses
return not _match_query(query[2:-1], attrs)
- (k, sep, v) = inner.partition('=')
+ (k, _sep, v) = inner.partition('=')
return _match(k, v, attrs)
@@ -85,20 +88,20 @@ def _paren_groups(source):
if source[pos] == ')':
count -= 1
if count == 0:
- result.append(source[start:pos+1])
+ result.append(source[start:pos + 1])
return result
-def _match(k, v, attrs):
+def _match(key, value, attrs):
"""Match a given key and value against an attribute list."""
- if k not in attrs:
+ if key not in attrs:
return False
- if k != "objectclass":
- return v in attrs[k]
+ if key != "objectclass":
+ return value in attrs[key]
# it is an objectclass check, so check subclasses
- values = _subs(v)
- for value in values:
- if value in attrs[k]:
+ values = _subs(value)
+ for v in values:
+ if v in attrs[key]:
return True
return False
@@ -145,6 +148,7 @@ def _to_json(unencoded):
class FakeLDAP(object):
#TODO(vish): refactor this class to use a wrapper instead of accessing
# redis directly
+ """Fake LDAP connection."""
def simple_bind_s(self, dn, password):
"""This method is ignored, but provided for compatibility."""
@@ -207,6 +211,7 @@ class FakeLDAP(object):
# get the attributes from redis
attrs = redis.hgetall(key)
# turn the values from redis into lists
+ # pylint: disable-msg=E1103
attrs = dict([(k, _from_json(v))
for k, v in attrs.iteritems()])
# filter the objects by query
@@ -215,13 +220,12 @@ class FakeLDAP(object):
attrs = dict([(k, v) for k, v in attrs.iteritems()
if not fields or k in fields])
objects.append((key[len(self.__redis_prefix):], attrs))
+ # pylint: enable-msg=E1103
if objects == []:
raise NO_SUCH_OBJECT()
return objects
-
@property
- def __redis_prefix(self):
+ def __redis_prefix(self): # pylint: disable-msg=R0201
+ """Get the prefix to use for all redis keys."""
return 'ldap:'
-
-
diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py
index ec739e134..74ba011b5 100644
--- a/nova/auth/ldapdriver.py
+++ b/nova/auth/ldapdriver.py
@@ -30,10 +30,11 @@ import sys
from nova import exception
from nova import flags
+
FLAGS = flags.FLAGS
flags.DEFINE_string('ldap_url', 'ldap://localhost',
'Point this at your ldap server')
-flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password')
+flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password')
flags.DEFINE_string('ldap_user_dn', 'cn=Manager,dc=example,dc=com',
'DN of admin user')
flags.DEFINE_string('ldap_user_unit', 'Users', 'OID for Users')
@@ -62,14 +63,18 @@ flags.DEFINE_string('ldap_developer',
# to define a set interface for AuthDrivers. I'm delaying
# creating this now because I'm expecting an auth refactor
# in which we may want to change the interface a bit more.
+
+
class LdapDriver(object):
"""Ldap Auth driver
Defines enter and exit and therefore supports the with/as syntax.
"""
+
def __init__(self):
"""Imports the LDAP module"""
self.ldap = __import__('ldap')
+ self.conn = None
def __enter__(self):
"""Creates the connection to LDAP"""
@@ -77,7 +82,7 @@ class LdapDriver(object):
self.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password)
return self
- def __exit__(self, type, value, traceback):
+ def __exit__(self, exc_type, exc_value, traceback):
"""Destroys the connection to LDAP"""
self.conn.unbind_s()
return False
@@ -122,11 +127,11 @@ class LdapDriver(object):
def get_projects(self, uid=None):
"""Retrieve list of projects"""
- filter = '(objectclass=novaProject)'
+ pattern = '(objectclass=novaProject)'
if uid:
- filter = "(&%s(member=%s))" % (filter, self.__uid_to_dn(uid))
+ pattern = "(&%s(member=%s))" % (pattern, self.__uid_to_dn(uid))
attrs = self.__find_objects(FLAGS.ldap_project_subtree,
- filter)
+ pattern)
return [self.__to_project(attr) for attr in attrs]
def create_user(self, name, access_key, secret_key, is_admin):
@@ -182,7 +187,8 @@ class LdapDriver(object):
for member_uid in member_uids:
if not self.__user_exists(member_uid):
raise exception.NotFound("Project can't be created "
- "because user %s doesn't exist" % member_uid)
+ "because user %s doesn't exist"
+ % member_uid)
members.append(self.__uid_to_dn(member_uid))
# always add the manager as a member because members is required
if not manager_dn in members:
@@ -192,8 +198,7 @@ class LdapDriver(object):
('cn', [name]),
('description', [description]),
('projectManager', [manager_dn]),
- ('member', members)
- ]
+ ('member', members)]
self.conn.add_s('cn=%s,%s' % (name, FLAGS.ldap_project_subtree), attr)
return self.__to_project(dict(attr))
@@ -236,6 +241,26 @@ class LdapDriver(object):
role_dn = self.__role_to_dn(role, project_id)
return self.__remove_from_group(uid, role_dn)
+ def get_user_roles(self, uid, project_id=None):
+ """Retrieve list of roles for user (or user and project)"""
+ if project_id is None:
+ # NOTE(vish): This is unnecessarily slow, but since we can't
+ # guarantee that the global roles are located
+ # together in the ldap tree, we're doing this version.
+ roles = []
+ for role in FLAGS.allowed_roles:
+ role_dn = self.__role_to_dn(role)
+ if self.__is_in_group(uid, role_dn):
+ roles.append(role)
+ return roles
+ else:
+ project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
+ roles = self.__find_objects(project_dn,
+ '(&(&(objectclass=groupOfNames)'
+ '(!(objectclass=novaProject)))'
+ '(member=%s))' % self.__uid_to_dn(uid))
+ return [role['cn'][0] for role in roles]
+
def delete_user(self, uid):
"""Delete a user"""
if not self.__user_exists(uid):
@@ -253,24 +278,23 @@ class LdapDriver(object):
self.conn.delete_s('cn=%s,uid=%s,%s' % (key_name, uid,
FLAGS.ldap_user_subtree))
- def delete_project(self, name):
+ def delete_project(self, project_id):
"""Delete a project"""
- project_dn = 'cn=%s,%s' % (name, FLAGS.ldap_project_subtree)
+ project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
self.__delete_roles(project_dn)
self.__delete_group(project_dn)
- def __user_exists(self, name):
+ def __user_exists(self, uid):
"""Check if user exists"""
- return self.get_user(name) != None
+ return self.get_user(uid) != None
def __key_pair_exists(self, uid, key_name):
"""Check if key pair exists"""
- return self.get_user(uid) != None
return self.get_key_pair(uid, key_name) != None
- def __project_exists(self, name):
+ def __project_exists(self, project_id):
"""Check if project exists"""
- return self.get_project(name) != None
+ return self.get_project(project_id) != None
def __find_object(self, dn, query=None, scope=None):
"""Find an object by dn and query"""
@@ -288,7 +312,7 @@ class LdapDriver(object):
except self.ldap.NO_SUCH_OBJECT:
return []
# just return the DNs
- return [dn for dn, attributes in res]
+ return [dn for dn, _attributes in res]
def __find_objects(self, dn, query=None, scope=None):
"""Find objects by query"""
@@ -324,7 +348,8 @@ class LdapDriver(object):
for key in keys:
self.delete_key_pair(uid, key['name'])
- def __role_to_dn(self, role, project_id=None):
+ @staticmethod
+ def __role_to_dn(role, project_id=None):
"""Convert role to corresponding dn"""
if project_id == None:
return FLAGS.__getitem__("ldap_%s" % role).value
@@ -334,7 +359,7 @@ class LdapDriver(object):
FLAGS.ldap_project_subtree)
def __create_group(self, group_dn, name, uid,
- description, member_uids = None):
+ description, member_uids=None):
"""Create a group"""
if self.__group_exists(group_dn):
raise exception.Duplicate("Group can't be created because "
@@ -353,8 +378,7 @@ class LdapDriver(object):
('objectclass', ['groupOfNames']),
('cn', [name]),
('description', [description]),
- ('member', members)
- ]
+ ('member', members)]
self.conn.add_s(group_dn, attr)
def __is_in_group(self, uid, group_dn):
@@ -380,9 +404,7 @@ class LdapDriver(object):
if self.__is_in_group(uid, group_dn):
raise exception.Duplicate("User %s is already a member of "
"the group %s" % (uid, group_dn))
- attr = [
- (self.ldap.MOD_ADD, 'member', self.__uid_to_dn(uid))
- ]
+ attr = [(self.ldap.MOD_ADD, 'member', self.__uid_to_dn(uid))]
self.conn.modify_s(group_dn, attr)
def __remove_from_group(self, uid, group_dn):
@@ -410,7 +432,7 @@ class LdapDriver(object):
self.conn.modify_s(group_dn, attr)
except self.ldap.OBJECT_CLASS_VIOLATION:
logging.debug("Attempted to remove the last member of a group. "
- "Deleting the group at %s instead." % group_dn )
+ "Deleting the group at %s instead.", group_dn)
self.__delete_group(group_dn)
def __remove_from_all(self, uid):
@@ -418,7 +440,6 @@ class LdapDriver(object):
if not self.__user_exists(uid):
raise exception.NotFound("User %s can't be removed from all "
"because the user doesn't exist" % (uid,))
- dn = self.__uid_to_dn(uid)
role_dns = self.__find_group_dns_with_member(
FLAGS.role_project_subtree, uid)
for role_dn in role_dns:
@@ -426,7 +447,7 @@ class LdapDriver(object):
project_dns = self.__find_group_dns_with_member(
FLAGS.ldap_project_subtree, uid)
for project_dn in project_dns:
- self.__safe_remove_from_group(uid, role_dn)
+ self.__safe_remove_from_group(uid, project_dn)
def __delete_group(self, group_dn):
"""Delete Group"""
@@ -439,7 +460,8 @@ class LdapDriver(object):
for role_dn in self.__find_role_dns(project_dn):
self.__delete_group(role_dn)
- def __to_user(self, attr):
+ @staticmethod
+ def __to_user(attr):
"""Convert ldap attributes to User object"""
if attr == None:
return None
@@ -448,10 +470,10 @@ class LdapDriver(object):
'name': attr['cn'][0],
'access': attr['accessKey'][0],
'secret': attr['secretKey'][0],
- 'admin': (attr['isAdmin'][0] == 'TRUE')
- }
+ 'admin': (attr['isAdmin'][0] == 'TRUE')}
- def __to_key_pair(self, owner, attr):
+ @staticmethod
+ def __to_key_pair(owner, attr):
"""Convert ldap attributes to KeyPair object"""
if attr == None:
return None
@@ -460,8 +482,7 @@ class LdapDriver(object):
'name': attr['cn'][0],
'owner_id': owner,
'public_key': attr['sshPublicKey'][0],
- 'fingerprint': attr['keyFingerprint'][0],
- }
+ 'fingerprint': attr['keyFingerprint'][0]}
def __to_project(self, attr):
"""Convert ldap attributes to Project object"""
@@ -473,21 +494,22 @@ class LdapDriver(object):
'name': attr['cn'][0],
'project_manager_id': self.__dn_to_uid(attr['projectManager'][0]),
'description': attr.get('description', [None])[0],
- 'member_ids': [self.__dn_to_uid(x) for x in member_dns]
- }
+ 'member_ids': [self.__dn_to_uid(x) for x in member_dns]}
- def __dn_to_uid(self, dn):
+ @staticmethod
+ def __dn_to_uid(dn):
"""Convert user dn to uid"""
return dn.split(',')[0].split('=')[1]
- def __uid_to_dn(self, dn):
+ @staticmethod
+ def __uid_to_dn(dn):
"""Convert uid to dn"""
return 'uid=%s,%s' % (dn, FLAGS.ldap_user_subtree)
class FakeLdapDriver(LdapDriver):
"""Fake Ldap Auth driver"""
- def __init__(self):
+
+ def __init__(self): # pylint: disable-msg=W0231
__import__('nova.auth.fakeldap')
self.ldap = sys.modules['nova.auth.fakeldap']
-
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index d44ed52b2..284b29502 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -23,22 +23,23 @@ Nova authentication management
import logging
import os
import shutil
-import string
+import string # pylint: disable-msg=W0402
import tempfile
import uuid
import zipfile
from nova import crypto
-from nova import datastore
from nova import exception
from nova import flags
-from nova import objectstore # for flags
from nova import utils
-from nova.auth import ldapdriver # for flags
from nova.auth import signer
from nova.network import vpn
+
FLAGS = flags.FLAGS
+flags.DEFINE_list('allowed_roles',
+ ['cloudadmin', 'itsec', 'sysadmin', 'netadmin', 'developer'],
+ 'Allowed roles for project')
# NOTE(vish): a user with one of these roles will be a superuser and
# have access to all api commands
@@ -50,7 +51,6 @@ flags.DEFINE_list('superuser_roles', ['cloudadmin'],
flags.DEFINE_list('global_roles', ['cloudadmin', 'itsec'],
'Roles that apply to all projects')
-
flags.DEFINE_string('credentials_template',
utils.abspath('auth/novarc.template'),
'Template for creating users rc file')
@@ -65,15 +65,14 @@ flags.DEFINE_string('credential_cert_file', 'cert.pem',
'Filename of certificate in credentials zip')
flags.DEFINE_string('credential_rc_file', 'novarc',
'Filename of rc in credentials zip')
-
flags.DEFINE_string('credential_cert_subject',
'/C=US/ST=California/L=MountainView/O=AnsoLabs/'
'OU=NovaDev/CN=%s-%s',
'Subject for certificate for users')
-
flags.DEFINE_string('auth_driver', 'nova.auth.ldapdriver.FakeLdapDriver',
'Driver that auth manager uses')
+
class AuthBase(object):
"""Base class for objects relating to auth
@@ -81,6 +80,7 @@ class AuthBase(object):
an id member. They may optionally contain methods that delegate to
AuthManager, but should not implement logic themselves.
"""
+
@classmethod
def safe_id(cls, obj):
"""Safe get object id
@@ -98,7 +98,9 @@ class AuthBase(object):
class User(AuthBase):
"""Object representing a user"""
+
def __init__(self, id, name, access, secret, admin):
+ AuthBase.__init__(self)
self.id = id
self.name = name
self.access = access
@@ -158,7 +160,9 @@ class KeyPair(AuthBase):
Even though this object is named KeyPair, only the public key and
fingerprint is stored. The user's private key is not saved.
"""
+
def __init__(self, id, name, owner_id, public_key, fingerprint):
+ AuthBase.__init__(self)
self.id = id
self.name = name
self.owner_id = owner_id
@@ -175,7 +179,9 @@ class KeyPair(AuthBase):
class Project(AuthBase):
"""Represents a Project returned from the datastore"""
+
def __init__(self, id, name, project_manager_id, description, member_ids):
+ AuthBase.__init__(self)
self.id = id
self.name = name
self.project_manager_id = project_manager_id
@@ -188,12 +194,12 @@ class Project(AuthBase):
@property
def vpn_ip(self):
- ip, port = AuthManager().get_project_vpn_data(self)
+ ip, _port = AuthManager().get_project_vpn_data(self)
return ip
@property
def vpn_port(self):
- ip, port = AuthManager().get_project_vpn_data(self)
+ _ip, port = AuthManager().get_project_vpn_data(self)
return port
def has_manager(self, user):
@@ -215,12 +221,9 @@ class Project(AuthBase):
return AuthManager().get_credentials(user, self)
def __repr__(self):
- return "Project('%s', '%s', '%s', '%s', %s)" % (self.id,
- self.name,
- self.project_manager_id,
- self.description,
- self.member_ids)
-
+ return "Project('%s', '%s', '%s', '%s', %s)" % \
+ (self.id, self.name, self.project_manager_id, self.description,
+ self.member_ids)
class AuthManager(object):
@@ -234,7 +237,9 @@ class AuthManager(object):
AuthManager also manages associated data related to Auth objects that
need to be more accessible, such as vpn ips and ports.
"""
- _instance=None
+
+ _instance = None
+
def __new__(cls, *args, **kwargs):
"""Returns the AuthManager singleton"""
if not cls._instance:
@@ -248,7 +253,7 @@ class AuthManager(object):
reset the driver if it is not set or a new driver is specified.
"""
if driver or not getattr(self, 'driver', None):
- self.driver = utils.import_class(driver or FLAGS.auth_driver)
+ self.driver = utils.import_class(driver or FLAGS.auth_driver)
def authenticate(self, access, signature, params, verb='GET',
server_string='127.0.0.1:8773', path='/',
@@ -290,7 +295,7 @@ class AuthManager(object):
@return: User and project that the request represents.
"""
# TODO(vish): check for valid timestamp
- (access_key, sep, project_id) = access.partition(':')
+ (access_key, _sep, project_id) = access.partition(':')
logging.info('Looking up user: %r', access_key)
user = self.get_user_from_access_key(access_key)
@@ -313,7 +318,8 @@ class AuthManager(object):
raise exception.NotFound('User %s is not a member of project %s' %
(user.id, project.id))
if check_type == 's3':
- expected_signature = signer.Signer(user.secret.encode()).s3_authorization(headers, verb, path)
+ sign = signer.Signer(user.secret.encode())
+ expected_signature = sign.s3_authorization(headers, verb, path)
logging.debug('user.secret: %s', user.secret)
logging.debug('expected_signature: %s', expected_signature)
logging.debug('signature: %s', signature)
@@ -431,6 +437,10 @@ class AuthManager(object):
@type project: Project or project_id
@param project: Project in which to add local role.
"""
+ if role not in FLAGS.allowed_roles:
+ raise exception.NotFound("The %s role can not be found" % role)
+ if project is not None and role in FLAGS.global_roles:
+ raise exception.NotFound("The %s role is global only" % role)
with self.driver() as drv:
drv.add_role(User.safe_id(user), role, Project.safe_id(project))
@@ -454,6 +464,20 @@ class AuthManager(object):
with self.driver() as drv:
drv.remove_role(User.safe_id(user), role, Project.safe_id(project))
+ @staticmethod
+ def get_roles(project_roles=True):
+ """Get list of allowed roles"""
+ if project_roles:
+ return list(set(FLAGS.allowed_roles) - set(FLAGS.global_roles))
+ else:
+ return FLAGS.allowed_roles
+
+ def get_user_roles(self, user, project=None):
+ """Get user global or per-project roles"""
+ with self.driver() as drv:
+ return drv.get_user_roles(User.safe_id(user),
+ Project.safe_id(project))
+
def get_project(self, pid):
"""Get project object by id"""
with self.driver() as drv:
@@ -494,10 +518,10 @@ class AuthManager(object):
if member_users:
member_users = [User.safe_id(u) for u in member_users]
with self.driver() as drv:
- project_dict = drv.create_project(name,
- User.safe_id(manager_user),
- description,
- member_users)
+ project_dict = drv.create_project(name,
+ User.safe_id(manager_user),
+ description,
+ member_users)
if project_dict:
return Project(**project_dict)
@@ -525,7 +549,8 @@ class AuthManager(object):
return drv.remove_from_project(User.safe_id(user),
Project.safe_id(project))
- def get_project_vpn_data(self, project):
+ @staticmethod
+ def get_project_vpn_data(project):
"""Gets vpn ip and port for project
@type project: Project or project_id
@@ -589,8 +614,10 @@ class AuthManager(object):
@rtype: User
@return: The new user.
"""
- if access == None: access = str(uuid.uuid4())
- if secret == None: secret = str(uuid.uuid4())
+ if access == None:
+ access = str(uuid.uuid4())
+ if secret == None:
+ secret = str(uuid.uuid4())
with self.driver() as drv:
user_dict = drv.create_user(name, access, secret, admin)
if user_dict:
@@ -632,10 +659,10 @@ class AuthManager(object):
def create_key_pair(self, user, key_name, public_key, fingerprint):
"""Creates a key pair for user"""
with self.driver() as drv:
- kp_dict = drv.create_key_pair(User.safe_id(user),
- key_name,
- public_key,
- fingerprint)
+ kp_dict = drv.create_key_pair(User.safe_id(user),
+ key_name,
+ public_key,
+ fingerprint)
if kp_dict:
return KeyPair(**kp_dict)
@@ -678,7 +705,7 @@ class AuthManager(object):
network_data = vpn.NetworkData.lookup(pid)
if network_data:
- configfile = open(FLAGS.vpn_client_template,"r")
+ configfile = open(FLAGS.vpn_client_template, "r")
s = string.Template(configfile.read())
configfile.close()
config = s.substitute(keyfile=FLAGS.credential_key_file,
@@ -693,10 +720,10 @@ class AuthManager(object):
zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(user.id))
zippy.close()
with open(zf, 'rb') as f:
- buffer = f.read()
+ read_buffer = f.read()
shutil.rmtree(tmpdir)
- return buffer
+ return read_buffer
def get_environment_rc(self, user, project=None):
"""Get credential zip for user in project"""
@@ -707,18 +734,18 @@ class AuthManager(object):
pid = Project.safe_id(project)
return self.__generate_rc(user.access, user.secret, pid)
- def __generate_rc(self, access, secret, pid):
+ @staticmethod
+ def __generate_rc(access, secret, pid):
"""Generate rc file for user"""
rc = open(FLAGS.credentials_template).read()
- rc = rc % { 'access': access,
- 'project': pid,
- 'secret': secret,
- 'ec2': FLAGS.ec2_url,
- 's3': 'http://%s:%s' % (FLAGS.s3_host, FLAGS.s3_port),
- 'nova': FLAGS.ca_file,
- 'cert': FLAGS.credential_cert_file,
- 'key': FLAGS.credential_key_file,
- }
+ rc = rc % {'access': access,
+ 'project': pid,
+ 'secret': secret,
+ 'ec2': FLAGS.ec2_url,
+ 's3': 'http://%s:%s' % (FLAGS.s3_host, FLAGS.s3_port),
+ 'nova': FLAGS.ca_file,
+ 'cert': FLAGS.credential_cert_file,
+ 'key': FLAGS.credential_key_file}
return rc
def _generate_x509_cert(self, uid, pid):
@@ -729,6 +756,7 @@ class AuthManager(object):
signed_cert = crypto.sign_csr(csr, pid)
return (private_key, signed_cert)
- def __cert_subject(self, uid):
+ @staticmethod
+ def __cert_subject(uid):
"""Helper to generate cert subject"""
return FLAGS.credential_cert_subject % (uid, utils.isotime())
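Given the flag defaults declared earlier in this file, get_roles() is just a set difference; worked out by hand:

allowed_roles = ['cloudadmin', 'itsec', 'sysadmin', 'netadmin', 'developer']
global_roles = ['cloudadmin', 'itsec']

# Project-scoped roles are whatever is allowed but not global:
print set(allowed_roles) - set(global_roles)
# -> set(['sysadmin', 'netadmin', 'developer'])  (set ordering may vary)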
diff --git a/nova/auth/rbac.py b/nova/auth/rbac.py
index 7fab9419f..d157f44b3 100644
--- a/nova/auth/rbac.py
+++ b/nova/auth/rbac.py
@@ -16,38 +16,54 @@
# License for the specific language governing permissions and limitations
# under the License.
+"""Role-based access control decorators to use fpr wrapping other
+methods with."""
+
from nova import exception
-from nova.auth import manager
def allow(*roles):
- def wrap(f):
- def wrapped_f(self, context, *args, **kwargs):
+ """Allow the given roles access the wrapped function."""
+
+ def wrap(func): # pylint: disable-msg=C0111
+
+ def wrapped_func(self, context, *args,
+ **kwargs): # pylint: disable-msg=C0111
if context.user.is_superuser():
- return f(self, context, *args, **kwargs)
+ return func(self, context, *args, **kwargs)
for role in roles:
if __matches_role(context, role):
- return f(self, context, *args, **kwargs)
+ return func(self, context, *args, **kwargs)
raise exception.NotAuthorized()
- return wrapped_f
+
+ return wrapped_func
+
return wrap
+
def deny(*roles):
- def wrap(f):
- def wrapped_f(self, context, *args, **kwargs):
+ """Deny the given roles access the wrapped function."""
+
+ def wrap(func): # pylint: disable-msg=C0111
+
+ def wrapped_func(self, context, *args,
+ **kwargs): # pylint: disable-msg=C0111
if context.user.is_superuser():
- return f(self, context, *args, **kwargs)
+ return func(self, context, *args, **kwargs)
for role in roles:
if __matches_role(context, role):
raise exception.NotAuthorized()
- return f(self, context, *args, **kwargs)
- return wrapped_f
+ return func(self, context, *args, **kwargs)
+
+ return wrapped_func
+
return wrap
+
def __matches_role(context, role):
+ """Check if a role is allowed."""
if role == 'all':
return True
if role == 'none':
return False
return context.project.has_role(context.user.id, role)
-
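A hypothetical controller method guarded by these decorators, to show the calling convention (the wrapped method must take self and context first):

from nova.auth import rbac


class VolumeController(object):  # hypothetical controller

    @rbac.allow('sysadmin')
    def attach_volume(self, context, volume_id):
        """Reachable by superusers and sysadmins; others get NotAuthorized."""

    @rbac.deny('developer')
    def delete_volume(self, context, volume_id):
        """Reachable by everyone except developers (superusers always pass)."""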
diff --git a/nova/auth/signer.py b/nova/auth/signer.py
index 634f22f0d..f7d29f534 100644
--- a/nova/auth/signer.py
+++ b/nova/auth/signer.py
@@ -48,13 +48,17 @@ import hashlib
import hmac
import logging
import urllib
-import boto # NOTE(vish): for new boto
-import boto.utils # NOTE(vish): for old boto
+
+# NOTE(vish): for new boto
+import boto
+# NOTE(vish): for old boto
+import boto.utils
from nova.exception import Error
+
class Signer(object):
- """ hacked up code from boto/connection.py """
+ """Hacked up code from boto/connection.py"""
def __init__(self, secret_key):
self.hmac = hmac.new(secret_key, digestmod=hashlib.sha1)
@@ -62,23 +66,27 @@ class Signer(object):
self.hmac_256 = hmac.new(secret_key, digestmod=hashlib.sha256)
def s3_authorization(self, headers, verb, path):
+ """Generate S3 authorization string."""
c_string = boto.utils.canonical_string(verb, path, headers)
- hmac = self.hmac.copy()
- hmac.update(c_string)
- b64_hmac = base64.encodestring(hmac.digest()).strip()
+ hmac_copy = self.hmac.copy()
+ hmac_copy.update(c_string)
+ b64_hmac = base64.encodestring(hmac_copy.digest()).strip()
return b64_hmac
def generate(self, params, verb, server_string, path):
+        """Generate the auth string according to the given SignatureVersion."""
if params['SignatureVersion'] == '0':
return self._calc_signature_0(params)
if params['SignatureVersion'] == '1':
return self._calc_signature_1(params)
if params['SignatureVersion'] == '2':
return self._calc_signature_2(params, verb, server_string, path)
- raise Error('Unknown Signature Version: %s' % self.SignatureVersion)
+ raise Error('Unknown Signature Version: %s' %
+ params['SignatureVersion'])
-
- def _get_utf8_value(self, value):
+ @staticmethod
+ def _get_utf8_value(value):
+ """Get the UTF8-encoded version of a value."""
if not isinstance(value, str) and not isinstance(value, unicode):
value = str(value)
if isinstance(value, unicode):
@@ -87,10 +95,11 @@ class Signer(object):
return value
def _calc_signature_0(self, params):
+ """Generate AWS signature version 0 string."""
s = params['Action'] + params['Timestamp']
self.hmac.update(s)
keys = params.keys()
- keys.sort(cmp = lambda x, y: cmp(x.lower(), y.lower()))
+ keys.sort(cmp=lambda x, y: cmp(x.lower(), y.lower()))
pairs = []
for key in keys:
val = self._get_utf8_value(params[key])
@@ -98,8 +107,9 @@ class Signer(object):
return base64.b64encode(self.hmac.digest())
def _calc_signature_1(self, params):
+ """Generate AWS signature version 1 string."""
keys = params.keys()
- keys.sort(cmp = lambda x, y: cmp(x.lower(), y.lower()))
+ keys.sort(cmp=lambda x, y: cmp(x.lower(), y.lower()))
pairs = []
for key in keys:
self.hmac.update(key)
@@ -109,29 +119,34 @@ class Signer(object):
return base64.b64encode(self.hmac.digest())
def _calc_signature_2(self, params, verb, server_string, path):
+ """Generate AWS signature version 2 string."""
logging.debug('using _calc_signature_2')
string_to_sign = '%s\n%s\n%s\n' % (verb, server_string, path)
if self.hmac_256:
- hmac = self.hmac_256
+ current_hmac = self.hmac_256
params['SignatureMethod'] = 'HmacSHA256'
else:
- hmac = self.hmac
+ current_hmac = self.hmac
params['SignatureMethod'] = 'HmacSHA1'
keys = params.keys()
keys.sort()
pairs = []
for key in keys:
val = self._get_utf8_value(params[key])
- pairs.append(urllib.quote(key, safe='') + '=' + urllib.quote(val, safe='-_~'))
+ val = urllib.quote(val, safe='-_~')
+ pairs.append(urllib.quote(key, safe='') + '=' + val)
qs = '&'.join(pairs)
- logging.debug('query string: %s' % qs)
+ logging.debug('query string: %s', qs)
string_to_sign += qs
- logging.debug('string_to_sign: %s' % string_to_sign)
- hmac.update(string_to_sign)
- b64 = base64.b64encode(hmac.digest())
- logging.debug('len(b64)=%d' % len(b64))
- logging.debug('base64 encoded digest: %s' % b64)
+ logging.debug('string_to_sign: %s', string_to_sign)
+ current_hmac.update(string_to_sign)
+ b64 = base64.b64encode(current_hmac.digest())
+ logging.debug('len(b64)=%d', len(b64))
+ logging.debug('base64 encoded digest: %s', b64)
return b64
+
if __name__ == '__main__':
- print Signer('foo').generate({"SignatureMethod": 'HmacSHA256', 'SignatureVersion': '2'}, "get", "server", "/foo")
+ print Signer('foo').generate({'SignatureMethod': 'HmacSHA256',
+ 'SignatureVersion': '2'},
+ 'get', 'server', '/foo')
diff --git a/nova/cloudpipe/api.py b/nova/cloudpipe/api.py
index 0bffe9aa3..56aa89834 100644
--- a/nova/cloudpipe/api.py
+++ b/nova/cloudpipe/api.py
@@ -21,9 +21,10 @@ Tornado REST API Request Handlers for CloudPipe
"""
import logging
-import tornado.web
import urllib
+import tornado.web
+
from nova import crypto
from nova.auth import manager
diff --git a/nova/cloudpipe/bootscript.sh b/nova/cloudpipe/bootscript.sh
index 82ec2012a..30d9ad102 100755
--- a/nova/cloudpipe/bootscript.sh
+++ b/nova/cloudpipe/bootscript.sh
@@ -44,8 +44,8 @@ CSRTEXT=$(python -c "import urllib; print urllib.quote('''$CSRTEXT''')")
# SIGN the csr and save as server.crt
# CURL fetch to the supervisor, POSTing the CSR text, saving the result as the CRT file
-curl $SUPERVISOR -d "cert=$CSRTEXT" > /etc/openvpn/server.crt
-curl $SUPERVISOR/getca/ > /etc/openvpn/ca.crt
+curl --fail $SUPERVISOR -d "cert=$CSRTEXT" > /etc/openvpn/server.crt
+curl --fail $SUPERVISOR/getca/ > /etc/openvpn/ca.crt
# Customize the server.conf.template
cd /etc/openvpn
diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py
index 5b0ed3471..2867bcb21 100644
--- a/nova/cloudpipe/pipelib.py
+++ b/nova/cloudpipe/pipelib.py
@@ -36,11 +36,11 @@ from nova.endpoint import api
FLAGS = flags.FLAGS
-
flags.DEFINE_string('boot_script_template',
utils.abspath('cloudpipe/bootscript.sh'),
'Template for script to run on cloudpipe instance boot')
+
class CloudPipe(object):
def __init__(self, cloud_controller):
self.controller = cloud_controller
diff --git a/nova/compute/disk.py b/nova/compute/disk.py
index 1ffcca685..c340c5a79 100644
--- a/nova/compute/disk.py
+++ b/nova/compute/disk.py
@@ -24,6 +24,7 @@ Includes injection of SSH PGP keys into authorized_keys file.
import logging
import os
import tempfile
+
from twisted.internet import defer
from nova import exception
@@ -84,6 +85,7 @@ def partition(infile, outfile, local_bytes=0, local_type='ext2', execute=None):
yield execute('dd if=%s of=%s bs=%d seek=%d conv=notrunc,fsync'
% (infile, outfile, sector_size, primary_first))
+
@defer.inlineCallbacks
def inject_data(image, key=None, net=None, partition=None, execute=None):
"""Injects a ssh key and optionally net data into a disk image.
@@ -137,6 +139,7 @@ def inject_data(image, key=None, net=None, partition=None, execute=None):
# remove loopback
yield execute('sudo losetup -d %s' % device)
+
@defer.inlineCallbacks
def _inject_key_into_fs(key, fs, execute=None):
sshdir = os.path.join(os.path.join(fs, 'root'), '.ssh')
@@ -146,6 +149,7 @@ def _inject_key_into_fs(key, fs, execute=None):
keyfile = os.path.join(sshdir, 'authorized_keys')
yield execute('sudo tee -a %s' % keyfile, '\n' + key.strip() + '\n')
+
@defer.inlineCallbacks
def _inject_net_into_fs(net, fs, execute=None):
netfile = os.path.join(os.path.join(os.path.join(
diff --git a/nova/compute/model.py b/nova/compute/model.py
index 266a93b9a..84432b55f 100644
--- a/nova/compute/model.py
+++ b/nova/compute/model.py
@@ -168,6 +168,7 @@ class Instance(datastore.BasicModel):
self.unassociate_with("ip", self.state['private_dns_name'])
return super(Instance, self).destroy()
+
class Host(datastore.BasicModel):
"""A Host is the machine where a Daemon is running."""
@@ -235,6 +236,7 @@ class Daemon(datastore.BasicModel):
for x in cls.associated_to("host", hostname):
yield x
+
class SessionToken(datastore.BasicModel):
"""This is a short-lived auth token that is passed through web requests"""
diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py
index 19e1a483d..268864900 100644
--- a/nova/compute/monitor.py
+++ b/nova/compute/monitor.py
@@ -24,14 +24,15 @@ Instance Monitoring:
in the object store.
"""
-import boto
-import boto.s3
import datetime
import logging
import os
-import rrdtool
import sys
import time
+
+import boto
+import boto.s3
+import rrdtool
from twisted.internet import defer
from twisted.internet import task
from twisted.application import service
@@ -41,13 +42,12 @@ from nova.virt import connection as virt_connection
FLAGS = flags.FLAGS
-flags.DEFINE_integer(
- 'monitoring_instances_delay', 5, 'Sleep time between updates')
-flags.DEFINE_integer(
- 'monitoring_instances_step', 300, 'Interval of RRD updates')
-flags.DEFINE_string(
- 'monitoring_rrd_path', '/var/nova/monitor/instances',
- 'Location of RRD files')
+flags.DEFINE_integer('monitoring_instances_delay', 5,
+ 'Sleep time between updates')
+flags.DEFINE_integer('monitoring_instances_step', 300,
+ 'Interval of RRD updates')
+flags.DEFINE_string('monitoring_rrd_path', '/var/nova/monitor/instances',
+ 'Location of RRD files')
RRD_VALUES = {
@@ -61,7 +61,7 @@ RRD_VALUES = {
'RRA:MAX:0.5:6:800',
'RRA:MAX:0.5:24:800',
'RRA:MAX:0.5:288:800',
- ],
+ ],
'net': [
'DS:rx:COUNTER:600:0:1250000',
'DS:tx:COUNTER:600:0:1250000',
@@ -73,7 +73,7 @@ RRD_VALUES = {
'RRA:MAX:0.5:6:800',
'RRA:MAX:0.5:24:800',
'RRA:MAX:0.5:288:800',
- ],
+ ],
'disk': [
'DS:rd:COUNTER:600:U:U',
'DS:wr:COUNTER:600:U:U',
@@ -85,12 +85,13 @@ RRD_VALUES = {
'RRA:MAX:0.5:6:800',
'RRA:MAX:0.5:24:800',
'RRA:MAX:0.5:444:800',
- ]
-}
+ ]
+ }
utcnow = datetime.datetime.utcnow
+
def update_rrd(instance, name, data):
"""
Updates the specified RRD file.
@@ -106,6 +107,7 @@ def update_rrd(instance, name, data):
'%d:%s' % (timestamp, data)
)
+
def init_rrd(instance, name):
"""
Initializes the specified RRD file.
@@ -124,6 +126,7 @@ def init_rrd(instance, name):
'--start', '0',
*RRD_VALUES[name]
)
+
def graph_cpu(instance, duration):
"""
@@ -148,6 +151,7 @@ def graph_cpu(instance, duration):
store_graph(instance.instance_id, filename)
+
def graph_net(instance, duration):
"""
Creates a graph of network usage for the specified instance and duration.
@@ -174,6 +178,7 @@ def graph_net(instance, duration):
)
store_graph(instance.instance_id, filename)
+
def graph_disk(instance, duration):
"""
@@ -202,6 +207,7 @@ def graph_disk(instance, duration):
store_graph(instance.instance_id, filename)
+
def store_graph(instance_id, filename):
"""
Transmits the specified graph file to internal object store on cloud
@@ -387,6 +393,7 @@ class InstanceMonitor(object, service.Service):
"""
Monitors the running instances of the current machine.
"""
+
def __init__(self):
"""
Initialize the monitoring loop.
diff --git a/nova/compute/service.py b/nova/compute/service.py
index 820116453..e59f3fb34 100644
--- a/nova/compute/service.py
+++ b/nova/compute/service.py
@@ -29,6 +29,7 @@ import json
import logging
import os
import sys
+
from twisted.internet import defer
from twisted.internet import task
diff --git a/nova/crypto.py b/nova/crypto.py
index cc84f5e45..b05548ea1 100644
--- a/nova/crypto.py
+++ b/nova/crypto.py
@@ -24,7 +24,6 @@ SSH keypairs and x509 certificates.
import base64
import hashlib
import logging
-import M2Crypto
import os
import shutil
import struct
@@ -32,6 +31,8 @@ import tempfile
import time
import utils
+import M2Crypto
+
from nova import exception
from nova import flags
@@ -42,11 +43,13 @@ flags.DEFINE_string('keys_path', utils.abspath('../keys'), 'Where we keep our ke
flags.DEFINE_string('ca_path', utils.abspath('../CA'), 'Where we keep our root CA')
flags.DEFINE_boolean('use_intermediate_ca', False, 'Should we use intermediate CAs for each project?')
+
def ca_path(project_id):
if project_id:
return "%s/INTER/%s/cacert.pem" % (FLAGS.ca_path, project_id)
return "%s/cacert.pem" % (FLAGS.ca_path)
+
def fetch_ca(project_id=None, chain=True):
if not FLAGS.use_intermediate_ca:
project_id = None
@@ -60,6 +63,7 @@ def fetch_ca(project_id=None, chain=True):
buffer += cafile.read()
return buffer
+
def generate_key_pair(bits=1024):
# what is the magic 65537?
@@ -109,6 +113,7 @@ def generate_x509_cert(subject, bits=1024):
shutil.rmtree(tmpdir)
return (private_key, csr)
+
def sign_csr(csr_text, intermediate=None):
if not FLAGS.use_intermediate_ca:
intermediate = None
@@ -122,6 +127,7 @@ def sign_csr(csr_text, intermediate=None):
os.chdir(start)
return _sign_csr(csr_text, user_ca)
+
def _sign_csr(csr_text, ca_folder):
tmpfolder = tempfile.mkdtemp()
csrfile = open("%s/inbound.csr" % (tmpfolder), "w")
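
Usage sketch (illustrative, not part of the commit): fetch_ca above returns
CA certificate text, chained with a per-project intermediate CA only when
FLAGS.use_intermediate_ca is set; the project id is hypothetical.

    from nova import crypto

    # Root CA only (with use_intermediate_ca=False, project_id is ignored).
    pem = crypto.fetch_ca()
    # With intermediate CAs enabled, fetch the project chain as well.
    chained = crypto.fetch_ca(project_id='myproject', chain=True)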
diff --git a/nova/datastore.py b/nova/datastore.py
index 51ef7a758..5dc6ed107 100644
--- a/nova/datastore.py
+++ b/nova/datastore.py
@@ -124,13 +124,17 @@ class BasicModel(object):
yield cls(identifier)
@classmethod
- @absorb_connection_error
def associated_to(cls, foreign_type, foreign_id):
- redis_set = cls._redis_association_name(foreign_type, foreign_id)
- for identifier in Redis.instance().smembers(redis_set):
+ for identifier in cls.associated_keys(foreign_type, foreign_id):
yield cls(identifier)
@classmethod
+ @absorb_connection_error
+ def associated_keys(cls, foreign_type, foreign_id):
+ redis_set = cls._redis_association_name(foreign_type, foreign_id)
+ return Redis.instance().smembers(redis_set) or []
+
+ @classmethod
def _redis_set_name(cls, kls_name):
        # stupidly pluralize (for compatibility with previous codebase)
return kls_name.lower() + "s"
@@ -138,7 +142,7 @@ class BasicModel(object):
@classmethod
def _redis_association_name(cls, foreign_type, foreign_id):
return cls._redis_set_name("%s:%s:%s" %
- (foreign_type, foreign_id, cls.__name__))
+ (foreign_type, foreign_id, cls._redis_name()))
@property
def identifier(self):
@@ -170,6 +174,9 @@ class BasicModel(object):
def setdefault(self, item, default):
return self.state.setdefault(item, default)
+ def __contains__(self, item):
+ return item in self.state
+
def __getitem__(self, item):
return self.state[item]
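
Usage sketch (illustrative, not part of the commit): the associated_keys
split above lets callers read raw identifiers without hydrating model
objects, while associated_to still yields full instances; the hostname is
hypothetical.

    from nova.compute import model

    # Raw members of the Redis association set (or [] on connection error):
    for key in model.Daemon.associated_keys('host', 'node-1'):
        print key

    # Full model objects hydrated from those identifiers:
    for daemon in model.Daemon.associated_to('host', 'node-1'):
        print daemon.identifier

    # The new __contains__ allows membership tests against model state:
    # if 'hostname' in some_model: ...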
diff --git a/nova/endpoint/__init__.py b/nova/endpoint/__init__.py
index 753685149..e69de29bb 100644
--- a/nova/endpoint/__init__.py
+++ b/nova/endpoint/__init__.py
@@ -1,32 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-:mod:`nova.endpoint` -- Main NOVA Api endpoints
-=====================================================
-
-.. automodule:: nova.endpoint
- :platform: Unix
- :synopsis: REST APIs for all nova functions
-.. moduleauthor:: Jesse Andrews <jesse@ansolabs.com>
-.. moduleauthor:: Devin Carlen <devin.carlen@gmail.com>
-.. moduleauthor:: Vishvananda Ishaya <vishvananda@yahoo.com>
-.. moduleauthor:: Joshua McKenty <joshua@cognition.ca>
-.. moduleauthor:: Manish Singh <yosh@gimp.org>
-.. moduleauthor:: Andy Smith <andy@anarkystic.com>
-"""
diff --git a/nova/endpoint/admin.py b/nova/endpoint/admin.py
index c4b8c05ca..d6f622755 100644
--- a/nova/endpoint/admin.py
+++ b/nova/endpoint/admin.py
@@ -37,6 +37,7 @@ def user_dict(user, base64_file=None):
else:
return {}
+
def project_dict(project):
"""Convert the project object to a result dict"""
if project:
@@ -47,6 +48,7 @@ def project_dict(project):
else:
return {}
+
def host_dict(host):
"""Convert a host model object to a result dict"""
if host:
@@ -54,6 +56,7 @@ def host_dict(host):
else:
return {}
+
def admin_only(target):
"""Decorator for admin-only API calls"""
def wrapper(*args, **kwargs):
@@ -66,6 +69,7 @@ def admin_only(target):
return wrapper
+
class AdminController(object):
"""
API Controller for users, hosts, nodes, and workers.
@@ -103,6 +107,21 @@ class AdminController(object):
return True
@admin_only
+ def describe_roles(self, context, project_roles=True, **kwargs):
+ """Returns a list of allowed roles."""
+ roles = manager.AuthManager().get_roles(project_roles)
+        return {'roles': [{'role': r} for r in roles]}
+
+ @admin_only
+ def describe_user_roles(self, context, user, project=None, **kwargs):
+ """Returns a list of roles for the given user.
+ Omitting project will return any global roles that the user has.
+        Specifying project will return only project-specific roles.
+ """
+ roles = manager.AuthManager().get_user_roles(user, project=project)
+        return {'roles': [{'role': r} for r in roles]}
+
+ @admin_only
def modify_user_role(self, context, user, role, project=None,
operation='add', **kwargs):
"""Add or remove a role for a user and project."""
diff --git a/nova/endpoint/api.py b/nova/endpoint/api.py
index 78a18b9ea..40be00bb7 100755
--- a/nova/endpoint/api.py
+++ b/nova/endpoint/api.py
@@ -25,12 +25,13 @@ import logging
import multiprocessing
import random
import re
-import tornado.web
-from twisted.internet import defer
import urllib
# TODO(termie): replace minidom with etree
from xml.dom import minidom
+import tornado.web
+from twisted.internet import defer
+
from nova import crypto
from nova import exception
from nova import flags
@@ -43,6 +44,7 @@ from nova.endpoint import cloud
FLAGS = flags.FLAGS
flags.DEFINE_integer('cc_port', 8773, 'cloud controller port')
+
_log = logging.getLogger("api")
_log.setLevel(logging.DEBUG)
@@ -227,6 +229,7 @@ class MetadataRequestHandler(tornado.web.RequestHandler):
self.print_data(data)
self.finish()
+
class APIRequestHandler(tornado.web.RequestHandler):
def get(self, controller_name):
self.execute(controller_name)
diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py
index 878d54a15..8e2beb1e3 100644
--- a/nova/endpoint/cloud.py
+++ b/nova/endpoint/cloud.py
@@ -26,6 +26,7 @@ import base64
import logging
import os
import time
+
from twisted.internet import defer
from nova import datastore
@@ -45,7 +46,6 @@ from nova.volume import service
FLAGS = flags.FLAGS
-flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on')
def _gen_key(user_id, key_name):
""" Tuck this into AuthManager """
@@ -85,7 +85,7 @@ class CloudController(object):
""" Ensure the keychains and folders exist. """
# Create keys folder, if it doesn't exist
if not os.path.exists(FLAGS.keys_path):
- os.makedirs(os.path.abspath(FLAGS.keys_path))
+ os.makedirs(FLAGS.keys_path)
# Gen root CA, if we don't have one
root_ca_path = os.path.join(FLAGS.ca_path, FLAGS.ca_file)
if not os.path.exists(root_ca_path):
@@ -102,15 +102,16 @@ class CloudController(object):
result = {}
for instance in self.instdir.all:
if instance['project_id'] == project_id:
- line = '%s slots=%d' % (instance['private_dns_name'], INSTANCE_TYPES[instance['instance_type']]['vcpus'])
+ line = '%s slots=%d' % (instance['private_dns_name'],
+ INSTANCE_TYPES[instance['instance_type']]['vcpus'])
if instance['key_name'] in result:
result[instance['key_name']].append(line)
else:
result[instance['key_name']] = [line]
return result
- def get_metadata(self, ip):
- i = self.get_instance_by_ip(ip)
+ def get_metadata(self, ipaddress):
+ i = self.get_instance_by_ip(ipaddress)
if i is None:
return None
mpi = self._get_mpi_data(i['project_id'])
@@ -123,6 +124,12 @@ class CloudController(object):
}
else:
keys = ''
+
+ address_record = network_model.FixedIp(i['private_dns_name'])
+ if address_record:
+ hostname = address_record['hostname']
+ else:
+ hostname = 'ip-%s' % i['private_dns_name'].replace('.', '-')
data = {
'user-data': base64.b64decode(i['user_data']),
'meta-data': {
@@ -135,19 +142,19 @@ class CloudController(object):
'root': '/dev/sda1',
'swap': 'sda3'
},
- 'hostname': i['private_dns_name'], # is this public sometimes?
+ 'hostname': hostname,
'instance-action': 'none',
'instance-id': i['instance_id'],
'instance-type': i.get('instance_type', ''),
- 'local-hostname': i['private_dns_name'],
+ 'local-hostname': hostname,
'local-ipv4': i['private_dns_name'], # TODO: switch to IP
'kernel-id': i.get('kernel_id', ''),
'placement': {
                'availability-zone': i.get('availability_zone', 'nova'),
},
- 'public-hostname': i.get('dns_name', ''),
+ 'public-hostname': hostname,
'public-ipv4': i.get('dns_name', ''), # TODO: switch to IP
- 'public-keys' : keys,
+ 'public-keys': keys,
'ramdisk-id': i.get('ramdisk_id', ''),
'reservation-id': i['reservation_id'],
'security-groups': i.get('groups', ''),
@@ -203,26 +210,22 @@ class CloudController(object):
'keyFingerprint': key_pair.fingerprint,
})
- return { 'keypairsSet': result }
+ return {'keypairsSet': result}
@rbac.allow('all')
def create_key_pair(self, context, key_name, **kwargs):
- try:
- d = defer.Deferred()
- p = context.handler.application.settings.get('pool')
- def _complete(kwargs):
- if 'exception' in kwargs:
- d.errback(kwargs['exception'])
- return
- d.callback({'keyName': key_name,
- 'keyFingerprint': kwargs['fingerprint'],
- 'keyMaterial': kwargs['private_key']})
- p.apply_async(_gen_key, [context.user.id, key_name],
- callback=_complete)
- return d
-
- except manager.UserError as e:
- raise
+ dcall = defer.Deferred()
+ pool = context.handler.application.settings.get('pool')
+ def _complete(kwargs):
+ if 'exception' in kwargs:
+ dcall.errback(kwargs['exception'])
+ return
+ dcall.callback({'keyName': key_name,
+ 'keyFingerprint': kwargs['fingerprint'],
+ 'keyMaterial': kwargs['private_key']})
+ pool.apply_async(_gen_key, [context.user.id, key_name],
+ callback=_complete)
+ return dcall
@rbac.allow('all')
def delete_key_pair(self, context, key_name, **kwargs):
@@ -232,7 +235,7 @@ class CloudController(object):
@rbac.allow('all')
def describe_security_groups(self, context, group_names, **kwargs):
- groups = { 'securityGroupSet': [] }
+ groups = {'securityGroupSet': []}
# Stubbed for now to unblock other things.
return groups
@@ -251,7 +254,7 @@ class CloudController(object):
instance = self._get_instance(context, instance_id[0])
return rpc.call('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
{"method": "get_console_output",
- "args" : {"instance_id": instance_id[0]}})
+ "args": {"instance_id": instance_id[0]}})
def _get_user_id(self, context):
if context and context.user:
@@ -285,10 +288,10 @@ class CloudController(object):
if volume['attach_status'] == 'attached':
v['attachmentSet'] = [{'attachTime': volume['attach_time'],
'deleteOnTermination': volume['delete_on_termination'],
- 'device' : volume['mountpoint'],
- 'instanceId' : volume['instance_id'],
- 'status' : 'attached',
- 'volume_id' : volume['volume_id']}]
+ 'device': volume['mountpoint'],
+ 'instanceId': volume['instance_id'],
+ 'status': 'attached',
+ 'volume_id': volume['volume_id']}]
else:
v['attachmentSet'] = [{}]
return v
@@ -298,16 +301,16 @@ class CloudController(object):
def create_volume(self, context, size, **kwargs):
# TODO(vish): refactor this to create the volume object here and tell service to create it
result = yield rpc.call(FLAGS.volume_topic, {"method": "create_volume",
- "args" : {"size": size,
+ "args": {"size": size,
"user_id": context.user.id,
"project_id": context.project.id}})
# NOTE(vish): rpc returned value is in the result key in the dictionary
- volume = self._get_volume(context, result['result'])
+ volume = self._get_volume(context, result)
defer.returnValue({'volumeSet': [self.format_volume(context, volume)]})
def _get_address(self, context, public_ip):
# FIXME(vish) this should move into network.py
- address = network_model.PublicAddress.lookup(public_ip)
+ address = network_model.ElasticIp.lookup(public_ip)
if address and (context.user.is_admin() or address['project_id'] == context.project.id):
return address
raise exception.NotFound("Address at ip %s not found" % public_ip)
@@ -348,16 +351,15 @@ class CloudController(object):
compute_node = instance['node_name']
rpc.cast('%s.%s' % (FLAGS.compute_topic, compute_node),
{"method": "attach_volume",
- "args" : {"volume_id": volume_id,
- "instance_id" : instance_id,
- "mountpoint" : device}})
- return defer.succeed({'attachTime' : volume['attach_time'],
- 'device' : volume['mountpoint'],
- 'instanceId' : instance_id,
- 'requestId' : context.request_id,
- 'status' : volume['attach_status'],
- 'volumeId' : volume_id})
-
+ "args": {"volume_id": volume_id,
+ "instance_id": instance_id,
+ "mountpoint": device}})
+ return defer.succeed({'attachTime': volume['attach_time'],
+ 'device': volume['mountpoint'],
+ 'instanceId': instance_id,
+ 'requestId': context.request_id,
+ 'status': volume['attach_status'],
+ 'volumeId': volume_id})
@rbac.allow('projectmanager', 'sysadmin')
def detach_volume(self, context, volume_id, **kwargs):
@@ -372,18 +374,18 @@ class CloudController(object):
instance = self._get_instance(context, instance_id)
rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
{"method": "detach_volume",
- "args" : {"instance_id": instance_id,
+ "args": {"instance_id": instance_id,
"volume_id": volume_id}})
except exception.NotFound:
# If the instance doesn't exist anymore,
# then we need to call detach blind
volume.finish_detach()
- return defer.succeed({'attachTime' : volume['attach_time'],
- 'device' : volume['mountpoint'],
- 'instanceId' : instance_id,
- 'requestId' : context.request_id,
- 'status' : volume['attach_status'],
- 'volumeId' : volume_id})
+ return defer.succeed({'attachTime': volume['attach_time'],
+ 'device': volume['mountpoint'],
+ 'instanceId': instance_id,
+ 'requestId': context.request_id,
+ 'status': volume['attach_status'],
+ 'volumeId': volume_id})
def _convert_to_set(self, lst, label):
if lst == None or lst == []:
@@ -394,7 +396,15 @@ class CloudController(object):
@rbac.allow('all')
def describe_instances(self, context, **kwargs):
- return defer.succeed(self._format_instances(context))
+ return defer.succeed(self._format_describe_instances(context))
+
+ def _format_describe_instances(self, context):
+        return {'reservationSet': self._format_instances(context)}
+
+ def _format_run_instances(self, context, reservation_id):
+ i = self._format_instances(context, reservation_id)
+ assert len(i) == 1
+ return i[0]
def _format_instances(self, context, reservation_id = None):
reservations = {}
@@ -425,7 +435,8 @@ class CloudController(object):
i['key_name'] = instance.get('key_name', None)
if context.user.is_admin():
i['key_name'] = '%s (%s, %s)' % (i['key_name'],
- instance.get('project_id', None), instance.get('node_name',''))
+ instance.get('project_id', None),
+ instance.get('node_name', ''))
i['product_codes_set'] = self._convert_to_set(
instance.get('product_codes', None), 'product_code')
i['instance_type'] = instance.get('instance_type', None)
@@ -442,8 +453,7 @@ class CloudController(object):
reservations[res_id] = r
reservations[res_id]['instances_set'].append(i)
- instance_response = {'reservationSet' : list(reservations.values()) }
- return instance_response
+ return list(reservations.values())
@rbac.allow('all')
def describe_addresses(self, context, **kwargs):
@@ -451,13 +461,13 @@ class CloudController(object):
def format_addresses(self, context):
addresses = []
- for address in network_model.PublicAddress.all():
+ for address in network_model.ElasticIp.all():
# TODO(vish): implement a by_project iterator for addresses
if (context.user.is_admin() or
address['project_id'] == context.project.id):
address_rv = {
'public_ip': address['address'],
- 'instance_id' : address.get('instance_id', 'free')
+ 'instance_id': address.get('instance_id', 'free')
}
if context.user.is_admin():
address_rv['instance_id'] = "%s (%s, %s)" % (
@@ -472,12 +482,11 @@ class CloudController(object):
@defer.inlineCallbacks
def allocate_address(self, context, **kwargs):
network_topic = yield self._get_network_topic(context)
- alloc_result = yield rpc.call(network_topic,
+ public_ip = yield rpc.call(network_topic,
{"method": "allocate_elastic_ip",
"args": {"user_id": context.user.id,
"project_id": context.project.id}})
- public_ip = alloc_result['result']
- defer.returnValue({'addressSet': [{'publicIp' : public_ip}]})
+ defer.returnValue({'addressSet': [{'publicIp': public_ip}]})
@rbac.allow('netadmin')
@defer.inlineCallbacks
@@ -517,11 +526,10 @@ class CloudController(object):
"""Retrieves the network host for a project"""
host = network_service.get_host_for_project(context.project.id)
if not host:
- result = yield rpc.call(FLAGS.network_topic,
+ host = yield rpc.call(FLAGS.network_topic,
{"method": "set_network_host",
"args": {"user_id": context.user.id,
"project_id": context.project.id}})
- host = result['result']
defer.returnValue('%s.%s' %(FLAGS.network_topic, host))
@rbac.allow('projectmanager', 'sysadmin')
@@ -561,17 +569,17 @@ class CloudController(object):
# TODO: Get the real security group of launch in here
security_group = "default"
for num in range(int(kwargs['max_count'])):
- vpn = False
+ is_vpn = False
if image_id == FLAGS.vpn_image_id:
- vpn = True
- allocate_result = yield rpc.call(network_topic,
+ is_vpn = True
+ inst = self.instdir.new()
+ allocate_data = yield rpc.call(network_topic,
{"method": "allocate_fixed_ip",
"args": {"user_id": context.user.id,
"project_id": context.project.id,
"security_group": security_group,
- "vpn": vpn}})
- allocate_data = allocate_result['result']
- inst = self.instdir.new()
+ "is_vpn": is_vpn,
+ "hostname": inst.instance_id}})
inst['image_id'] = image_id
inst['kernel_id'] = kernel_id
inst['ramdisk_id'] = ramdisk_id
@@ -585,17 +593,18 @@ class CloudController(object):
inst['project_id'] = context.project.id
inst['ami_launch_index'] = num
inst['security_group'] = security_group
+ inst['hostname'] = inst.instance_id
for (key, value) in allocate_data.iteritems():
inst[key] = value
inst.save()
rpc.cast(FLAGS.compute_topic,
{"method": "run_instance",
- "args": {"instance_id" : inst.instance_id}})
+ "args": {"instance_id": inst.instance_id}})
logging.debug("Casting to node for %s's instance with IP of %s" %
(context.user.name, inst['private_dns_name']))
# TODO: Make Network figure out the network name from ip.
- defer.returnValue(self._format_instances(context, reservation_id))
+ defer.returnValue(self._format_run_instances(context, reservation_id))
@rbac.allow('projectmanager', 'sysadmin')
@defer.inlineCallbacks
@@ -646,7 +655,7 @@ class CloudController(object):
instance = self._get_instance(context, i)
rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
{"method": "reboot_instance",
- "args" : {"instance_id": i}})
+ "args": {"instance_id": i}})
return defer.succeed(True)
@rbac.allow('projectmanager', 'sysadmin')
@@ -656,7 +665,7 @@ class CloudController(object):
volume_node = volume['node_name']
rpc.cast('%s.%s' % (FLAGS.volume_topic, volume_node),
{"method": "delete_volume",
- "args" : {"volume_id": volume_id}})
+ "args": {"volume_id": volume_id}})
return defer.succeed(True)
@rbac.allow('all')
@@ -689,9 +698,9 @@ class CloudController(object):
image = images.list(context, image_id)[0]
except IndexError:
raise exception.ApiError('invalid id: %s' % image_id)
- result = { 'image_id': image_id, 'launchPermission': [] }
+ result = {'image_id': image_id, 'launchPermission': []}
if image['isPublic']:
- result['launchPermission'].append({ 'group': 'all' })
+ result['launchPermission'].append({'group': 'all'})
return defer.succeed(result)
@rbac.allow('projectmanager', 'sysadmin')
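
Shape sketch (illustrative, not part of the commit): after the formatting
split above, describe_instances wraps every reservation while run_instances
returns only the reservation it just created.

    # _format_instances(context) returns a list of reservation dicts,
    # each carrying an 'instances_set' list.
    # describe_instances -> {'reservationSet': [<all reservations>]}
    # run_instances     -> <the single reservation just created>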
diff --git a/nova/endpoint/images.py b/nova/endpoint/images.py
index fe7cb5d11..2a88d66af 100644
--- a/nova/endpoint/images.py
+++ b/nova/endpoint/images.py
@@ -21,10 +21,11 @@ Proxy AMI-related calls from the cloud controller, to the running
objectstore daemon.
"""
-import boto.s3.connection
import json
import urllib
+import boto.s3.connection
+
from nova import flags
from nova import utils
from nova.auth import manager
@@ -32,6 +33,7 @@ from nova.auth import manager
FLAGS = flags.FLAGS
+
def modify(context, image_id, operation):
conn(context).make_request(
method='POST',
@@ -53,6 +55,7 @@ def register(context, image_location):
return image_id
+
def list(context, filter_list=[]):
""" return a list of all images that a user can see
@@ -68,6 +71,7 @@ def list(context, filter_list=[]):
return [i for i in result if i['imageId'] in filter_list]
return result
+
def deregister(context, image_id):
""" unregister an image """
conn(context).make_request(
@@ -75,6 +79,7 @@ def deregister(context, image_id):
bucket='_images',
query_args=qs({'image_id': image_id}))
+
def conn(context):
access = manager.AuthManager().get_access_key(context.user,
context.project)
diff --git a/nova/endpoint/rackspace.py b/nova/endpoint/rackspace.py
deleted file mode 100644
index 75b828e91..000000000
--- a/nova/endpoint/rackspace.py
+++ /dev/null
@@ -1,183 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Rackspace API Endpoint
-"""
-
-import json
-import time
-
-import webob.dec
-import webob.exc
-
-from nova import flags
-from nova import rpc
-from nova import utils
-from nova import wsgi
-from nova.auth import manager
-from nova.compute import model as compute
-from nova.network import model as network
-
-
-FLAGS = flags.FLAGS
-flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on')
-
-
-class API(wsgi.Middleware):
- """Entry point for all requests."""
-
- def __init__(self):
- super(API, self).__init__(Router(webob.exc.HTTPNotFound()))
-
- def __call__(self, environ, start_response):
- context = {}
- if "HTTP_X_AUTH_TOKEN" in environ:
- context['user'] = manager.AuthManager().get_user_from_access_key(
- environ['HTTP_X_AUTH_TOKEN'])
- if context['user']:
- context['project'] = manager.AuthManager().get_project(
- context['user'].name)
- if "user" not in context:
- return webob.exc.HTTPForbidden()(environ, start_response)
- environ['nova.context'] = context
- return self.application(environ, start_response)
-
-
-class Router(wsgi.Router):
- """Route requests to the next WSGI application."""
-
- def _build_map(self):
- """Build routing map for authentication and cloud."""
- self._connect("/v1.0", controller=AuthenticationAPI())
- cloud = CloudServerAPI()
- self._connect("/servers", controller=cloud.launch_server,
- conditions={"method": ["POST"]})
- self._connect("/servers/{server_id}", controller=cloud.delete_server,
- conditions={'method': ["DELETE"]})
- self._connect("/servers", controller=cloud)
-
-
-class AuthenticationAPI(wsgi.Application):
- """Handle all authorization requests through WSGI applications."""
-
- @webob.dec.wsgify
- def __call__(self, req): # pylint: disable-msg=W0221
- # TODO(todd): make a actual session with a unique token
- # just pass the auth key back through for now
- res = webob.Response()
- res.status = '204 No Content'
- res.headers.add('X-Server-Management-Url', req.host_url)
- res.headers.add('X-Storage-Url', req.host_url)
- res.headers.add('X-CDN-Managment-Url', req.host_url)
- res.headers.add('X-Auth-Token', req.headers['X-Auth-Key'])
- return res
-
-
-class CloudServerAPI(wsgi.Application):
- """Handle all server requests through WSGI applications."""
-
- def __init__(self):
- super(CloudServerAPI, self).__init__()
- self.instdir = compute.InstanceDirectory()
- self.network = network.PublicNetworkController()
-
- @webob.dec.wsgify
- def __call__(self, req): # pylint: disable-msg=W0221
- value = {"servers": []}
- for inst in self.instdir.all:
- value["servers"].append(self.instance_details(inst))
- return json.dumps(value)
-
- def instance_details(self, inst): # pylint: disable-msg=R0201
- """Build the data structure to represent details for an instance."""
- return {
- "id": inst.get("instance_id", None),
- "imageId": inst.get("image_id", None),
- "flavorId": inst.get("instacne_type", None),
- "hostId": inst.get("node_name", None),
- "status": inst.get("state", "pending"),
- "addresses": {
- "public": [network.get_public_ip_for_instance(
- inst.get("instance_id", None))],
- "private": [inst.get("private_dns_name", None)]},
-
- # implemented only by Rackspace, not AWS
- "name": inst.get("name", "Not-Specified"),
-
- # not supported
- "progress": "Not-Supported",
- "metadata": {
- "Server Label": "Not-Supported",
- "Image Version": "Not-Supported"}}
-
- @webob.dec.wsgify
- def launch_server(self, req):
- """Launch a new instance."""
- data = json.loads(req.body)
- inst = self.build_server_instance(data, req.environ['nova.context'])
- rpc.cast(
- FLAGS.compute_topic, {
- "method": "run_instance",
- "args": {"instance_id": inst.instance_id}})
-
- return json.dumps({"server": self.instance_details(inst)})
-
- def build_server_instance(self, env, context):
- """Build instance data structure and save it to the data store."""
- reservation = utils.generate_uid('r')
- ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
- inst = self.instdir.new()
- inst['name'] = env['server']['name']
- inst['image_id'] = env['server']['imageId']
- inst['instance_type'] = env['server']['flavorId']
- inst['user_id'] = context['user'].id
- inst['project_id'] = context['project'].id
- inst['reservation_id'] = reservation
- inst['launch_time'] = ltime
- inst['mac_address'] = utils.generate_mac()
- address = self.network.allocate_ip(
- inst['user_id'],
- inst['project_id'],
- mac=inst['mac_address'])
- inst['private_dns_name'] = str(address)
- inst['bridge_name'] = network.BridgedNetwork.get_network_for_project(
- inst['user_id'],
- inst['project_id'],
- 'default')['bridge_name']
- # key_data, key_name, ami_launch_index
- # TODO(todd): key data or root password
- inst.save()
- return inst
-
- @webob.dec.wsgify
- @wsgi.route_args
- def delete_server(self, req, route_args): # pylint: disable-msg=R0201
- """Delete an instance."""
- owner_hostname = None
- instance = compute.Instance.lookup(route_args['server_id'])
- if instance:
- owner_hostname = instance["node_name"]
- if not owner_hostname:
- return webob.exc.HTTPNotFound("Did not find image, or it was "
- "not in a running state.")
- rpc_transport = "%s:%s" % (FLAGS.compute_topic, owner_hostname)
- rpc.cast(rpc_transport,
- {"method": "reboot_instance",
- "args": {"instance_id": route_args['server_id']}})
- req.status = "202 Accepted"
diff --git a/nova/exception.py b/nova/exception.py
index 52497a19e..29bcb17f8 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -25,31 +25,39 @@ import logging
import sys
import traceback
+
class Error(Exception):
def __init__(self, message=None):
super(Error, self).__init__(message)
+
class ApiError(Error):
def __init__(self, message='Unknown', code='Unknown'):
self.message = message
self.code = code
super(ApiError, self).__init__('%s: %s'% (code, message))
+
class NotFound(Error):
pass
+
class Duplicate(Error):
pass
+
class NotAuthorized(Error):
pass
+
class NotEmpty(Error):
pass
+
class Invalid(Error):
pass
+
def wrap_exception(f):
def _wrap(*args, **kw):
try:
diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py
index 689194513..068025249 100644
--- a/nova/fakerabbit.py
+++ b/nova/fakerabbit.py
@@ -16,12 +16,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-""" Based a bit on the carrot.backeds.queue backend... but a lot better """
+"""Based a bit on the carrot.backends.queue backend... but a lot better."""
-from carrot.backends import base
import logging
import Queue as queue
+from carrot.backends import base
+
class Message(base.BaseMessage):
pass
diff --git a/nova/flags.py b/nova/flags.py
index b3bdd088f..2bca36f7e 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -141,6 +141,7 @@ def _wrapper(func):
return _wrapped
+DEFINE = _wrapper(gflags.DEFINE)
DEFINE_string = _wrapper(gflags.DEFINE_string)
DEFINE_integer = _wrapper(gflags.DEFINE_integer)
DEFINE_bool = _wrapper(gflags.DEFINE_bool)
@@ -168,36 +169,31 @@ def DECLARE(name, module_string, flag_values=FLAGS):
DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake')
DEFINE_integer('s3_port', 3333, 's3 port')
DEFINE_string('s3_host', '127.0.0.1', 's3 host')
-#DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on')
DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on')
DEFINE_string('volume_topic', 'volume', 'the topic volume nodes listen on')
DEFINE_string('network_topic', 'network', 'the topic network nodes listen on')
DEFINE_bool('verbose', False, 'show debug output')
DEFINE_boolean('fake_rabbit', False, 'use a fake rabbit')
-DEFINE_bool('fake_network', False, 'should we use fake network devices and addresses')
+DEFINE_bool('fake_network', False,
+ 'should we use fake network devices and addresses')
DEFINE_string('rabbit_host', 'localhost', 'rabbit host')
DEFINE_integer('rabbit_port', 5672, 'rabbit port')
DEFINE_string('rabbit_userid', 'guest', 'rabbit userid')
DEFINE_string('rabbit_password', 'guest', 'rabbit password')
DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host')
DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to')
-DEFINE_string('ec2_url',
- 'http://127.0.0.1:8773/services/Cloud',
- 'Url to ec2 api server')
-
-DEFINE_string('default_image',
- 'ami-11111',
- 'default image to use, testing only')
-DEFINE_string('default_kernel',
- 'aki-11111',
- 'default kernel to use, testing only')
-DEFINE_string('default_ramdisk',
- 'ari-11111',
- 'default ramdisk to use, testing only')
-DEFINE_string('default_instance_type',
- 'm1.small',
- 'default instance type to use, testing only')
+DEFINE_string('ec2_url', 'http://127.0.0.1:8773/services/Cloud',
+ 'Url to ec2 api server')
+
+DEFINE_string('default_image', 'ami-11111',
+ 'default image to use, testing only')
+DEFINE_string('default_kernel', 'aki-11111',
+ 'default kernel to use, testing only')
+DEFINE_string('default_ramdisk', 'ari-11111',
+ 'default ramdisk to use, testing only')
+DEFINE_string('default_instance_type', 'm1.small',
+ 'default instance type to use, testing only')
DEFINE_string('vpn_image_id', 'ami-CLOUDPIPE', 'AMI for cloudpipe vpn server')
DEFINE_string('vpn_key_suffix',
@@ -207,10 +203,8 @@ DEFINE_string('vpn_key_suffix',
DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger')
# UNUSED
-DEFINE_string('node_availability_zone',
- 'nova',
- 'availability zone of this node')
-DEFINE_string('node_name',
- socket.gethostname(),
- 'name of this node')
+DEFINE_string('node_availability_zone', 'nova',
+ 'availability zone of this node')
+DEFINE_string('node_name', socket.gethostname(),
+ 'name of this node')
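
Usage sketch (illustrative, not part of the commit): the new DEFINE wrapper
exposes gflags.DEFINE alongside the typed helpers; the flag below is
hypothetical and follows the style of the definitions above.

    from nova import flags

    FLAGS = flags.FLAGS
    flags.DEFINE_string('example_topic', 'example',
                        'the topic example nodes listen on')
    # After command-line parsing, the value is read as FLAGS.example_topic.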
diff --git a/nova/image/service.py b/nova/image/service.py
new file mode 100644
index 000000000..1a7a258b7
--- /dev/null
+++ b/nova/image/service.py
@@ -0,0 +1,90 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import cPickle as pickle
+import os.path
+import random
+import string
+
+class ImageService(object):
+ """Provides storage and retrieval of disk image objects."""
+
+ @staticmethod
+ def load():
+ """Factory method to return image service."""
+        # TODO(gundlach): read from config.
+ class_ = LocalImageService
+ return class_()
+
+ def index(self):
+ """
+        Return a list of image data dicts for the known images.
+ """
+
+ def show(self, id):
+ """
+ Returns a dict containing image data for the given opaque image id.
+ """
+
+
+class GlanceImageService(ImageService):
+ """Provides storage and retrieval of disk image objects within Glance."""
+ # TODO(gundlach): once Glance has an API, build this.
+ pass
+
+
+class LocalImageService(ImageService):
+ """Image service storing images to local disk."""
+
+ def __init__(self):
+ self._path = "/tmp/nova/images"
+ try:
+ os.makedirs(self._path)
+ except OSError: # exists
+ pass
+
+ def _path_to(self, image_id=''):
+ return os.path.join(self._path, image_id)
+
+ def _ids(self):
+ """The list of all image ids."""
+ return os.listdir(self._path)
+
+ def index(self):
+        return [self.show(id) for id in self._ids()]
+
+ def show(self, id):
+ return pickle.load(open(self._path_to(id)))
+
+ def create(self, data):
+ """
+ Store the image data and return the new image id.
+ """
+ id = ''.join(random.choice(string.letters) for _ in range(20))
+ data['id'] = id
+ self.update(id, data)
+ return id
+
+ def update(self, image_id, data):
+ """Replace the contents of the given image with the new data."""
+ pickle.dump(data, open(self._path_to(image_id), 'w'))
+
+ def delete(self, image_id):
+ """
+ Delete the given image. Raises OSError if the image does not exist.
+ """
+ os.unlink(self._path_to(image_id))
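
Usage sketch (illustrative, not part of the commit): LocalImageService above
pickles image metadata dicts under /tmp/nova/images; the metadata keys are
hypothetical.

    from nova.image import service

    images = service.ImageService.load()        # currently LocalImageService
    image_id = images.create({'name': 'test'})  # an 'id' key is filled in
    print images.show(image_id)['name']
    print len(images.index())
    images.delete(image_id)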
diff --git a/nova/network/exception.py b/nova/network/exception.py
index 5722e9672..2a3f5ec14 100644
--- a/nova/network/exception.py
+++ b/nova/network/exception.py
@@ -20,21 +20,29 @@
Exceptions for network errors.
"""
-from nova.exception import Error
+from nova import exception
-class NoMoreAddresses(Error):
+class NoMoreAddresses(exception.Error):
+    """No more addresses are available in the network"""
pass
-class AddressNotAllocated(Error):
- pass
-class AddressAlreadyAssociated(Error):
+class AddressNotAllocated(exception.Error):
+ """The specified address has not been allocated"""
pass
-class AddressNotAssociated(Error):
+
+class AddressAlreadyAssociated(exception.Error):
+ """The specified address has already been associated"""
pass
-class NotValidNetworkSize(Error):
+
+class AddressNotAssociated(exception.Error):
+ """The specified address is not associated"""
pass
+
+class NotValidNetworkSize(exception.Error):
+ """The network size is not valid"""
+ pass
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index 4a4b4c8a8..9e5aabd97 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
@@ -15,85 +13,102 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+"""
+Implements vlans, bridges, and iptables rules using linux utilities.
+"""
import logging
-import signal
import os
-import subprocess
+import signal
-# todo(ja): does the definition of network_path belong here?
+# TODO(ja): does the definition of network_path belong here?
+from nova import flags
from nova import utils
-from nova import flags
-FLAGS=flags.FLAGS
+FLAGS = flags.FLAGS
flags.DEFINE_string('dhcpbridge_flagfile',
'/etc/nova/nova-dhcpbridge.conf',
'location of flagfile for dhcpbridge')
+
def execute(cmd, addl_env=None):
+ """Wrapper around utils.execute for fake_network"""
if FLAGS.fake_network:
- logging.debug("FAKE NET: %s" % cmd)
+ logging.debug("FAKE NET: %s", cmd)
return "fake", 0
else:
return utils.execute(cmd, addl_env=addl_env)
+
def runthis(desc, cmd):
+ """Wrapper around utils.runthis for fake_network"""
if FLAGS.fake_network:
return execute(cmd)
else:
- return utils.runthis(desc,cmd)
-
-def Popen(cmd):
- if FLAGS.fake_network:
- execute(' '.join(cmd))
- else:
- subprocess.Popen(cmd)
+ return utils.runthis(desc, cmd)
def device_exists(device):
- (out, err) = execute("ifconfig %s" % device)
+ """Check if ethernet device exists"""
+ (_out, err) = execute("ifconfig %s" % device)
return not err
+
def confirm_rule(cmd):
+ """Delete and re-add iptables rule"""
execute("sudo iptables --delete %s" % (cmd))
execute("sudo iptables -I %s" % (cmd))
+
def remove_rule(cmd):
+ """Remove iptables rule"""
execute("sudo iptables --delete %s" % (cmd))
-def bind_public_ip(ip, interface):
- runthis("Binding IP to interface: %s", "sudo ip addr add %s dev %s" % (ip, interface))
-def unbind_public_ip(ip, interface):
- runthis("Binding IP to interface: %s", "sudo ip addr del %s dev %s" % (ip, interface))
+def bind_public_ip(public_ip, interface):
+ """Bind ip to an interface"""
+ runthis("Binding IP to interface: %s",
+ "sudo ip addr add %s dev %s" % (public_ip, interface))
+
+
+def unbind_public_ip(public_ip, interface):
+ """Unbind a public ip from an interface"""
+ runthis("Binding IP to interface: %s",
+ "sudo ip addr del %s dev %s" % (public_ip, interface))
+
def vlan_create(net):
- """ create a vlan on on a bridge device unless vlan already exists """
+    """Create a vlan on a bridge device unless the vlan already exists"""
if not device_exists("vlan%s" % net['vlan']):
        logging.debug("Starting VLAN interface for %s network", (net['vlan']))
execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD")
execute("sudo vconfig add %s %s" % (FLAGS.bridge_dev, net['vlan']))
execute("sudo ifconfig vlan%s up" % (net['vlan']))
+
def bridge_create(net):
- """ create a bridge on a vlan unless it already exists """
+ """Create a bridge on a vlan unless it already exists"""
if not device_exists(net['bridge_name']):
        logging.debug("Starting Bridge interface for %s network", (net['vlan']))
execute("sudo brctl addbr %s" % (net['bridge_name']))
execute("sudo brctl setfd %s 0" % (net.bridge_name))
# execute("sudo brctl setageing %s 10" % (net.bridge_name))
execute("sudo brctl stp %s off" % (net['bridge_name']))
- execute("sudo brctl addif %s vlan%s" % (net['bridge_name'], net['vlan']))
+ execute("sudo brctl addif %s vlan%s" % (net['bridge_name'],
+ net['vlan']))
if net.bridge_gets_ip:
execute("sudo ifconfig %s %s broadcast %s netmask %s up" % \
(net['bridge_name'], net.gateway, net.broadcast, net.netmask))
- confirm_rule("FORWARD --in-interface %s -j ACCEPT" % (net['bridge_name']))
+ confirm_rule("FORWARD --in-interface %s -j ACCEPT" %
+ (net['bridge_name']))
else:
execute("sudo ifconfig %s up" % net['bridge_name'])
-def dnsmasq_cmd(net):
+
+def _dnsmasq_cmd(net):
+ """Builds dnsmasq command"""
cmd = ['sudo -E dnsmasq',
' --strict-order',
' --bind-interfaces',
@@ -101,76 +116,81 @@ def dnsmasq_cmd(net):
' --pid-file=%s' % dhcp_file(net['vlan'], 'pid'),
' --listen-address=%s' % net.dhcp_listen_address,
' --except-interface=lo',
- ' --dhcp-range=%s,static,600s' % (net.dhcp_range_start),
+ ' --dhcp-range=%s,static,120s' % net.dhcp_range_start,
' --dhcp-hostsfile=%s' % dhcp_file(net['vlan'], 'conf'),
' --dhcp-script=%s' % bin_file('nova-dhcpbridge'),
' --leasefile-ro']
return ''.join(cmd)
-def hostDHCP(network, host, mac):
- idx = host.split(".")[-1] # Logically, the idx of instances they've launched in this net
- return "%s,%s-%s-%s.novalocal,%s" % \
- (mac, network['user_id'], network['vlan'], idx, host)
-# todo(ja): if the system has restarted or pid numbers have wrapped
+def host_dhcp(address):
+ """Return a host string for an address object"""
+ return "%s,%s.novalocal,%s" % (address['mac'],
+ address['hostname'],
+ address.address)
+
+
+# TODO(ja): if the system has restarted or pid numbers have wrapped
# then you cannot be certain that the pid refers to the
# dnsmasq. As well, sending a HUP only reloads the hostfile,
# so any configuration options (like dchp-range, vlan, ...)
# aren't reloaded
def start_dnsmasq(network):
- """ (re)starts a dnsmasq server for a given network
+ """(Re)starts a dnsmasq server for a given network
if a dnsmasq instance is already running then send a HUP
signal causing it to reload, otherwise spawn a new instance
"""
with open(dhcp_file(network['vlan'], 'conf'), 'w') as f:
- for host_name in network.hosts:
- f.write("%s\n" % hostDHCP(network, host_name, network.hosts[host_name]))
+ for address in network.assigned_objs:
+ f.write("%s\n" % host_dhcp(address))
pid = dnsmasq_pid_for(network)
# if dnsmasq is already running, then tell it to reload
if pid:
- # todo(ja): use "/proc/%d/cmdline" % (pid) to determine if pid refers
+ # TODO(ja): use "/proc/%d/cmdline" % (pid) to determine if pid refers
# correct dnsmasq process
try:
os.kill(pid, signal.SIGHUP)
- except Exception, e:
- logging.debug("Hupping dnsmasq threw %s", e)
-
- # otherwise delete the existing leases file and start dnsmasq
- lease_file = dhcp_file(network['vlan'], 'leases')
- if os.path.exists(lease_file):
- os.unlink(lease_file)
+ return
+ except Exception as exc: # pylint: disable-msg=W0703
+ logging.debug("Hupping dnsmasq threw %s", exc)
# FLAGFILE and DNSMASQ_INTERFACE in env
env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile,
'DNSMASQ_INTERFACE': network['bridge_name']}
- execute(dnsmasq_cmd(network), addl_env=env)
+ execute(_dnsmasq_cmd(network), addl_env=env)
+
def stop_dnsmasq(network):
- """ stops the dnsmasq instance for a given network """
+ """Stops the dnsmasq instance for a given network"""
pid = dnsmasq_pid_for(network)
if pid:
try:
os.kill(pid, signal.SIGTERM)
- except Exception, e:
- logging.debug("Killing dnsmasq threw %s", e)
+ except Exception as exc: # pylint: disable-msg=W0703
+ logging.debug("Killing dnsmasq threw %s", exc)
+
def dhcp_file(vlan, kind):
- """ return path to a pid, leases or conf file for a vlan """
+ """Return path to a pid, leases or conf file for a vlan"""
return os.path.abspath("%s/nova-%s.%s" % (FLAGS.networks_path, vlan, kind))
+
def bin_file(script):
+    """Return the absolute path to script in the bin directory"""
return os.path.abspath(os.path.join(__file__, "../../../bin", script))
+
def dnsmasq_pid_for(network):
- """ the pid for prior dnsmasq instance for a vlan,
- returns None if no pid file exists
+    """Returns the pid for the prior dnsmasq instance for a vlan
+
+ Returns None if no pid file exists
- if machine has rebooted pid might be incorrect (caller should check)
+    If the machine has rebooted, the pid might be incorrect (caller should check)
"""
pid_file = dhcp_file(network['vlan'], 'pid')
@@ -178,4 +198,3 @@ def dnsmasq_pid_for(network):
if os.path.exists(pid_file):
with open(pid_file, 'r') as f:
return int(f.read())
-
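
Output sketch (illustrative, not part of the commit): host_dhcp above renders
one dnsmasq hostsfile line per address object; the values are examples only.

    # For an address with mac '02:16:3e:01:02:03', hostname 'ip-10-0-0-5'
    # and address '10.0.0.5', host_dhcp(address) returns:
    #     02:16:3e:01:02:03,ip-10-0-0-5.novalocal,10.0.0.5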
diff --git a/nova/network/model.py b/nova/network/model.py
index daac035e4..557fc92a6 100644
--- a/nova/network/model.py
+++ b/nova/network/model.py
@@ -20,11 +20,11 @@
Model Classes for network control, including VLANs, DHCP, and IP allocation.
"""
-import IPy
import logging
import os
import time
+import IPy
from nova import datastore
from nova import exception as nova_exception
from nova import flags
@@ -53,11 +53,13 @@ flags.DEFINE_integer('cnt_vpn_clients', 5,
flags.DEFINE_integer('cloudpipe_start_port', 12000,
'Starting port for mapped CloudPipe external ports')
+
logging.getLogger().setLevel(logging.DEBUG)
class Vlan(datastore.BasicModel):
- def __init__(self, project, vlan):
+    """Tracks vlans assigned to projects in the datastore"""
+ def __init__(self, project, vlan): # pylint: disable-msg=W0231
"""
Since we don't want to try and find a vlan by its identifier,
but by a project id, we don't call super-init.
@@ -67,10 +69,12 @@ class Vlan(datastore.BasicModel):
@property
def identifier(self):
+ """Datastore identifier"""
return "%s:%s" % (self.project_id, self.vlan_id)
@classmethod
def create(cls, project, vlan):
+ """Create a Vlan object"""
instance = cls(project, vlan)
instance.save()
return instance
@@ -78,6 +82,7 @@ class Vlan(datastore.BasicModel):
@classmethod
@datastore.absorb_connection_error
def lookup(cls, project):
+        """Return the object for a project from the datastore, or None"""
set_name = cls._redis_set_name(cls.__name__)
vlan = datastore.Redis.instance().hget(set_name, project)
if vlan:
@@ -88,20 +93,20 @@ class Vlan(datastore.BasicModel):
@classmethod
@datastore.absorb_connection_error
def dict_by_project(cls):
- """a hash of project:vlan"""
+ """A hash of project:vlan"""
set_name = cls._redis_set_name(cls.__name__)
- return datastore.Redis.instance().hgetall(set_name)
+ return datastore.Redis.instance().hgetall(set_name) or {}
@classmethod
@datastore.absorb_connection_error
def dict_by_vlan(cls):
- """a hash of vlan:project"""
+ """A hash of vlan:project"""
set_name = cls._redis_set_name(cls.__name__)
- rv = {}
- h = datastore.Redis.instance().hgetall(set_name)
- for v in h.keys():
- rv[h[v]] = v
- return rv
+ retvals = {}
+ hashset = datastore.Redis.instance().hgetall(set_name) or {}
+ for (key, val) in hashset.iteritems():
+ retvals[val] = key
+ return retvals
@classmethod
@datastore.absorb_connection_error
@@ -119,39 +124,100 @@ class Vlan(datastore.BasicModel):
default way of saving into "vlan:ID" and adding to a set of "vlans".
"""
set_name = self._redis_set_name(self.__class__.__name__)
- datastore.Redis.instance().hset(set_name, self.project_id, self.vlan_id)
+ datastore.Redis.instance().hset(set_name,
+ self.project_id,
+ self.vlan_id)
@datastore.absorb_connection_error
def destroy(self):
+ """Removes the object from the datastore"""
set_name = self._redis_set_name(self.__class__.__name__)
datastore.Redis.instance().hdel(set_name, self.project_id)
def subnet(self):
+ """Returns a string containing the subnet"""
vlan = int(self.vlan_id)
network = IPy.IP(FLAGS.private_range)
- start = (vlan-FLAGS.vlan_start) * FLAGS.network_size
+ start = (vlan - FLAGS.vlan_start) * FLAGS.network_size
# minus one for the gateway.
return "%s-%s" % (network[start],
network[start + FLAGS.network_size - 1])
-# CLEANUP:
-# TODO(ja): Save the IPs at the top of each subnet for cloudpipe vpn clients
-# TODO(ja): does vlanpool "keeper" need to know the min/max - shouldn't FLAGS always win?
-# TODO(joshua): Save the IPs at the top of each subnet for cloudpipe vpn clients
+class FixedIp(datastore.BasicModel):
+ """Represents a fixed ip in the datastore"""
+
+ def __init__(self, address):
+ self.address = address
+ super(FixedIp, self).__init__()
+
+ @property
+ def identifier(self):
+ return self.address
+
+    # NOTE(vish): address states: allocated, leased, deallocated
+ def default_state(self):
+ return {'address': self.address,
+ 'state': 'none'}
+
+ @classmethod
+ # pylint: disable-msg=R0913
+ def create(cls, user_id, project_id, address, mac, hostname, network_id):
+ """Creates an FixedIp object"""
+ addr = cls(address)
+ addr['user_id'] = user_id
+ addr['project_id'] = project_id
+ addr['mac'] = mac
+ if hostname is None:
+ hostname = "ip-%s" % address.replace('.', '-')
+ addr['hostname'] = hostname
+ addr['network_id'] = network_id
+ addr['state'] = 'allocated'
+ addr.save()
+ return addr
+
+ def save(self):
+ is_new = self.is_new_record()
+ success = super(FixedIp, self).save()
+ if success and is_new:
+ self.associate_with("network", self['network_id'])
+
+ def destroy(self):
+ self.unassociate_with("network", self['network_id'])
+ super(FixedIp, self).destroy()
+
+
+class ElasticIp(FixedIp):
+ """Represents an elastic ip in the datastore"""
+ override_type = "address"
+
+ def default_state(self):
+ return {'address': self.address,
+ 'instance_id': 'available',
+ 'private_ip': 'available'}
+
+
+# CLEANUP:
+# TODO(ja): does vlanpool "keeper" need to know the min/max -
+# shouldn't FLAGS always win?
class BaseNetwork(datastore.BasicModel):
+ """Implements basic logic for allocating ips in a network"""
override_type = 'network'
- NUM_STATIC_IPS = 3 # Network, Gateway, and CloudPipe
+ address_class = FixedIp
@property
def identifier(self):
+ """Datastore identifier"""
return self.network_id
def default_state(self):
+ """Default values for new objects"""
return {'network_id': self.network_id, 'network_str': self.network_str}
@classmethod
+ # pylint: disable-msg=R0913
def create(cls, user_id, project_id, security_group, vlan, network_str):
+ """Create a BaseNetwork object"""
network_id = "%s:%s" % (project_id, security_group)
net = cls(network_id, network_str)
net['user_id'] = user_id
@@ -165,95 +231,148 @@ class BaseNetwork(datastore.BasicModel):
self.network_id = network_id
self.network_str = network_str
super(BaseNetwork, self).__init__()
- self.save()
+ if self.is_new_record():
+ self._create_assigned_set()
@property
def network(self):
+ """Returns a string representing the network"""
return IPy.IP(self['network_str'])
@property
def netmask(self):
+ """Returns the netmask of this network"""
return self.network.netmask()
@property
def gateway(self):
+ """Returns the network gateway address"""
return self.network[1]
@property
def broadcast(self):
+ """Returns the network broadcast address"""
return self.network.broadcast()
@property
def bridge_name(self):
+ """Returns the bridge associated with this network"""
return "br%s" % (self["vlan"])
@property
def user(self):
+ """Returns the user associated with this network"""
return manager.AuthManager().get_user(self['user_id'])
@property
def project(self):
+ """Returns the project associated with this network"""
return manager.AuthManager().get_project(self['project_id'])
- @property
- def _hosts_key(self):
- return "network:%s:hosts" % (self['network_str'])
+ # pylint: disable-msg=R0913
+ def _add_host(self, user_id, project_id, ip_address, mac, hostname):
+ """Add a host to the datastore"""
+ self.address_class.create(user_id, project_id, ip_address,
+ mac, hostname, self.identifier)
- @property
- def hosts(self):
- return datastore.Redis.instance().hgetall(self._hosts_key) or {}
+ def _rem_host(self, ip_address):
+ """Remove a host from the datastore"""
+ self.address_class(ip_address).destroy()
+
+ def _create_assigned_set(self):
+ for idx in range(self.num_bottom_reserved_ips,
+ len(self.network) - self.num_top_reserved_ips):
+ redis = datastore.Redis.instance()
+ redis.sadd(self._available_key, str(self.network[idx]))
- def _add_host(self, _user_id, _project_id, host, target):
- datastore.Redis.instance().hset(self._hosts_key, host, target)
+ @property
+ def _available_key(self):
+ return 'available:%s' % self.identifier
- def _rem_host(self, host):
- datastore.Redis.instance().hdel(self._hosts_key, host)
+ @property
+ def num_available_ips(self):
+ redis = datastore.Redis.instance()
+ return redis.scard(self._available_key)
@property
def assigned(self):
- return datastore.Redis.instance().hkeys(self._hosts_key)
+ """Returns a list of all assigned addresses"""
+ return self.address_class.associated_keys('network', self.identifier)
+
+ @property
+ def assigned_objs(self):
+ """Returns a list of all assigned addresses as objects"""
+ return self.address_class.associated_to('network', self.identifier)
+
+ def get_address(self, ip_address):
+ """Returns a specific ip as an object"""
+ if ip_address in self.assigned:
+ return self.address_class(ip_address)
+ return None
@property
- def available(self):
- # the .2 address is always CloudPipe
- # and the top <n> are for vpn clients
- for idx in range(self.num_static_ips, len(self.network)-(1 + FLAGS.cnt_vpn_clients)):
- address = str(self.network[idx])
- if not address in self.hosts.keys():
- yield address
+ def num_bottom_reserved_ips(self):
+ """Returns number of ips reserved at the bottom of the range"""
+ return 2 # Network, Gateway
@property
- def num_static_ips(self):
- return BaseNetwork.NUM_STATIC_IPS
-
- def allocate_ip(self, user_id, project_id, mac):
- for address in self.available:
- logging.debug("Allocating IP %s to %s" % (address, project_id))
- self._add_host(user_id, project_id, address, mac)
- self.express(address=address)
- return address
- raise exception.NoMoreAddresses("Project %s with network %s" %
- (project_id, str(self.network)))
+ def num_top_reserved_ips(self):
+ """Returns number of ips reserved at the top of the range"""
+ return 1 # Broadcast
+
+ def allocate_ip(self, user_id, project_id, mac, hostname=None):
+ """Allocates an ip to a mac address"""
+ address = datastore.Redis.instance().spop(self._available_key)
+ if not address:
+ raise exception.NoMoreAddresses("Project %s with network %s" %
+ (project_id, str(self.network)))
+ logging.debug("Allocating IP %s to %s", address, project_id)
+ self._add_host(user_id, project_id, address, mac, hostname)
+ self.express(address=address)
+ return address
def lease_ip(self, ip_str):
- logging.debug("Leasing allocated IP %s" % (ip_str))
+ """Called when DHCP lease is activated"""
+ if not ip_str in self.assigned:
+ raise exception.AddressNotAllocated()
+ address = self.get_address(ip_str)
+ if address:
+ logging.debug("Leasing allocated IP %s", ip_str)
+ address['state'] = 'leased'
+ address.save()
def release_ip(self, ip_str):
+ """Called when DHCP lease expires
+
+ Removes the ip from the assigned list"""
if not ip_str in self.assigned:
raise exception.AddressNotAllocated()
- self.deexpress(address=ip_str)
+ logging.debug("Releasing IP %s", ip_str)
self._rem_host(ip_str)
+ self.deexpress(address=ip_str)
+ datastore.Redis.instance().sadd(self._available_key, ip_str)
def deallocate_ip(self, ip_str):
- # Do nothing for now, cleanup on ip release
- pass
+ """Deallocates an allocated ip"""
+ if not ip_str in self.assigned:
+ raise exception.AddressNotAllocated()
+ address = self.get_address(ip_str)
+ if address:
+ if address['state'] != 'leased':
+ # NOTE(vish): address hasn't been leased, so release it
+ self.release_ip(ip_str)
+ else:
+ logging.debug("Deallocating allocated IP %s", ip_str)
+                address['state'] = 'deallocated'
+ address.save()
- def list_addresses(self):
- for address in self.hosts:
- yield address
+ def express(self, address=None):
+ """Set up network. Implemented in subclasses"""
+ pass
- def express(self, address=None): pass
- def deexpress(self, address=None): pass
+ def deexpress(self, address=None):
+ """Tear down network. Implemented in subclasses"""
+ pass
class BridgedNetwork(BaseNetwork):
@@ -277,7 +396,11 @@ class BridgedNetwork(BaseNetwork):
override_type = 'network'
@classmethod
- def get_network_for_project(cls, user_id, project_id, security_group):
+ def get_network_for_project(cls,
+ user_id,
+ project_id,
+ security_group='default'):
+ """Returns network for a given project"""
vlan = get_vlan_for_project(project_id)
network_str = vlan.subnet()
return cls.create(user_id, project_id, security_group, vlan.vlan_id,
@@ -286,37 +409,42 @@ class BridgedNetwork(BaseNetwork):
def __init__(self, *args, **kwargs):
super(BridgedNetwork, self).__init__(*args, **kwargs)
self['bridge_dev'] = FLAGS.bridge_dev
- self.save()
def express(self, address=None):
super(BridgedNetwork, self).express(address=address)
linux_net.vlan_create(self)
linux_net.bridge_create(self)
+
class DHCPNetwork(BridgedNetwork):
- """
- properties:
- dhcp_listen_address: the ip of the gateway / dhcp host
- dhcp_range_start: the first ip to give out
- dhcp_range_end: the last ip to give out
- """
+ """Network supporting DHCP"""
bridge_gets_ip = True
override_type = 'network'
def __init__(self, *args, **kwargs):
super(DHCPNetwork, self).__init__(*args, **kwargs)
- # logging.debug("Initing DHCPNetwork object...")
- self.dhcp_listen_address = self.network[1]
- self.dhcp_range_start = self.network[3]
- self.dhcp_range_end = self.network[-(1 + FLAGS.cnt_vpn_clients)]
- try:
+        if not os.path.exists(FLAGS.networks_path):
os.makedirs(FLAGS.networks_path)
- # NOTE(todd): I guess this is a lazy way to not have to check if the
- # directory exists, but shouldn't we be smarter about
- # telling the difference between existing directory and
- # permission denied? (Errno 17 vs 13, OSError)
- except Exception, err:
- pass
+
+ @property
+ def num_bottom_reserved_ips(self):
+ # For cloudpipe
+ return super(DHCPNetwork, self).num_bottom_reserved_ips + 1
+
+ @property
+ def num_top_reserved_ips(self):
+ return super(DHCPNetwork, self).num_top_reserved_ips + \
+ FLAGS.cnt_vpn_clients
+
+ @property
+ def dhcp_listen_address(self):
+ """Address where dhcp server should listen"""
+ return self.gateway
+
+ @property
+ def dhcp_range_start(self):
+ """Starting address dhcp server should use"""
+ return self.network[self.num_bottom_reserved_ips]
def express(self, address=None):
super(DHCPNetwork, self).express(address=address)
@@ -326,20 +454,23 @@ class DHCPNetwork(BridgedNetwork):
linux_net.start_dnsmasq(self)
else:
logging.debug("Not launching dnsmasq: no hosts.")
- self.express_cloudpipe()
+ self.express_vpn()
- def allocate_vpn_ip(self, user_id, project_id, mac):
+ def allocate_vpn_ip(self, user_id, project_id, mac, hostname=None):
+ """Allocates the reserved ip to a vpn instance"""
address = str(self.network[2])
- self._add_host(user_id, project_id, address, mac)
+ self._add_host(user_id, project_id, address, mac, hostname)
self.express(address=address)
return address
- def express_cloudpipe(self):
+ def express_vpn(self):
+ """Sets up routing rules for vpn"""
private_ip = str(self.network[2])
linux_net.confirm_rule("FORWARD -d %s -p udp --dport 1194 -j ACCEPT"
% (private_ip, ))
- linux_net.confirm_rule("PREROUTING -t nat -d %s -p udp --dport %s -j DNAT --to %s:1194"
- % (self.project.vpn_ip, self.project.vpn_port, private_ip))
+ linux_net.confirm_rule(
+ "PREROUTING -t nat -d %s -p udp --dport %s -j DNAT --to %s:1194"
+ % (self.project.vpn_ip, self.project.vpn_port, private_ip))
def deexpress(self, address=None):
# if this is the last address, stop dns
@@ -349,82 +480,39 @@ class DHCPNetwork(BridgedNetwork):
else:
linux_net.start_dnsmasq(self)
-class PublicAddress(datastore.BasicModel):
- override_type = "address"
-
- def __init__(self, address):
- self.address = address
- super(PublicAddress, self).__init__()
-
- @property
- def identifier(self):
- return self.address
-
- def default_state(self):
- return {'address': self.address}
-
- @classmethod
- def create(cls, user_id, project_id, address):
- addr = cls(address)
- addr['user_id'] = user_id
- addr['project_id'] = project_id
- addr['instance_id'] = 'available'
- addr['private_ip'] = 'available'
- addr.save()
- return addr
+DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)]
-DEFAULT_PORTS = [("tcp",80), ("tcp",22), ("udp",1194), ("tcp",443)]
class PublicNetworkController(BaseNetwork):
+ """Handles elastic ips"""
override_type = 'network'
+ address_class = ElasticIp
def __init__(self, *args, **kwargs):
network_id = "public:default"
- super(PublicNetworkController, self).__init__(network_id, FLAGS.public_range)
+ super(PublicNetworkController, self).__init__(network_id,
+ FLAGS.public_range, *args, **kwargs)
self['user_id'] = "public"
self['project_id'] = "public"
- self["create_time"] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
+ self["create_time"] = time.strftime('%Y-%m-%dT%H:%M:%SZ',
+ time.gmtime())
self["vlan"] = FLAGS.public_vlan
self.save()
self.express()
- @property
- def available(self):
- for idx in range(2, len(self.network)-1):
- address = str(self.network[idx])
- if not address in self.hosts.keys():
- yield address
-
- @property
- def host_objs(self):
- for address in self.assigned:
- yield PublicAddress(address)
-
- def get_host(self, host):
- if host in self.assigned:
- return PublicAddress(host)
- return None
-
- def _add_host(self, user_id, project_id, host, _target):
- datastore.Redis.instance().hset(self._hosts_key, host, project_id)
- PublicAddress.create(user_id, project_id, host)
-
- def _rem_host(self, host):
- PublicAddress(host).destroy()
- datastore.Redis.instance().hdel(self._hosts_key, host)
-
def deallocate_ip(self, ip_str):
# NOTE(vish): cleanup is now done on release by the parent class
- self.release_ip(ip_str)
+ self.release_ip(ip_str)
def associate_address(self, public_ip, private_ip, instance_id):
+ """Associates a public ip to a private ip and instance id"""
if not public_ip in self.assigned:
raise exception.AddressNotAllocated()
- # TODO(joshua): Keep an index going both ways
- for addr in self.host_objs:
+ # TODO(josh): Keep an index going both ways
+ for addr in self.assigned_objs:
if addr.get('private_ip', None) == private_ip:
raise exception.AddressAlreadyAssociated()
- addr = self.get_host(public_ip)
+ addr = self.get_address(public_ip)
if addr.get('private_ip', 'available') != 'available':
raise exception.AddressAlreadyAssociated()
addr['private_ip'] = private_ip
@@ -433,9 +521,10 @@ class PublicNetworkController(BaseNetwork):
self.express(address=public_ip)
def disassociate_address(self, public_ip):
+ """Disassociates a public ip with its private ip"""
if not public_ip in self.assigned:
raise exception.AddressNotAllocated()
- addr = self.get_host(public_ip)
+ addr = self.get_address(public_ip)
if addr.get('private_ip', 'available') == 'available':
raise exception.AddressNotAssociated()
self.deexpress(address=public_ip)
@@ -444,11 +533,14 @@ class PublicNetworkController(BaseNetwork):
addr.save()
def express(self, address=None):
- addresses = self.host_objs
if address:
- addresses = [self.get_host(address)]
+ if not address in self.assigned:
+ raise exception.AddressNotAllocated()
+ addresses = [self.get_address(address)]
+ else:
+ addresses = self.assigned_objs
for addr in addresses:
- if addr.get('private_ip','available') == 'available':
+ if addr.get('private_ip', 'available') == 'available':
continue
public_ip = addr['address']
private_ip = addr['private_ip']
@@ -457,15 +549,16 @@ class PublicNetworkController(BaseNetwork):
% (public_ip, private_ip))
linux_net.confirm_rule("POSTROUTING -t nat -s %s -j SNAT --to %s"
% (private_ip, public_ip))
- # TODO: Get these from the secgroup datastore entries
+ # TODO(joshua): Get these from the secgroup datastore entries
linux_net.confirm_rule("FORWARD -d %s -p icmp -j ACCEPT"
% (private_ip))
for (protocol, port) in DEFAULT_PORTS:
- linux_net.confirm_rule("FORWARD -d %s -p %s --dport %s -j ACCEPT"
- % (private_ip, protocol, port))
+ linux_net.confirm_rule(
+ "FORWARD -d %s -p %s --dport %s -j ACCEPT"
+ % (private_ip, protocol, port))
def deexpress(self, address=None):
- addr = self.get_host(address)
+ addr = self.get_address(address)
private_ip = addr['private_ip']
linux_net.unbind_public_ip(address, FLAGS.public_interface)
linux_net.remove_rule("PREROUTING -t nat -d %s -j DNAT --to %s"
@@ -479,19 +572,18 @@ class PublicNetworkController(BaseNetwork):
% (private_ip, protocol, port))
-# FIXME(todd): does this present a race condition, or is there some piece of
-# architecture that mitigates it (only one queue listener per net)?
+# FIXME(todd): does this present a race condition, or is there some
+# piece of architecture that mitigates it (only one queue
+# listener per net)?
def get_vlan_for_project(project_id):
- """
- Allocate vlan IDs to individual users.
- """
+ """Allocate vlan IDs to individual users"""
vlan = Vlan.lookup(project_id)
if vlan:
return vlan
known_vlans = Vlan.dict_by_vlan()
for vnum in range(FLAGS.vlan_start, FLAGS.vlan_end):
vstr = str(vnum)
- if not known_vlans.has_key(vstr):
+ if not vstr in known_vlans:
return Vlan.create(project_id, vnum)
old_project_id = known_vlans[vstr]
if not manager.AuthManager().get_project(old_project_id):
@@ -515,8 +607,9 @@ def get_vlan_for_project(project_id):
return Vlan.create(project_id, vnum)
raise exception.AddressNotAllocated("Out of VLANs")
+
def get_project_network(project_id, security_group='default'):
- """ get a project's private network, allocating one if needed """
+ """Gets a project's private network, allocating one if needed"""
project = manager.AuthManager().get_project(project_id)
if not project:
raise nova_exception.NotFound("Project %s doesn't exist." % project_id)
@@ -527,28 +620,23 @@ def get_project_network(project_id, security_group='default'):
def get_network_by_address(address):
- # TODO(vish): This is completely the wrong way to do this, but
- # I'm getting the network binary working before I
- # tackle doing this the right way.
- logging.debug("Get Network By Address: %s" % address)
- for project in manager.AuthManager().get_projects():
- net = get_project_network(project.id)
- if address in net.assigned:
- logging.debug("Found %s in %s" % (address, project.id))
- return net
- raise exception.AddressNotAllocated()
+ """Gets the network for a given private ip"""
+ address_record = FixedIp.lookup(address)
+ if not address_record:
+ raise exception.AddressNotAllocated()
+ return get_project_network(address_record['project_id'])
def get_network_by_interface(iface, security_group='default'):
+ """Gets the network for a given interface"""
vlan = iface.rpartition("br")[2]
project_id = Vlan.dict_by_vlan().get(vlan)
return get_project_network(project_id, security_group)
-
def get_public_ip_for_instance(instance_id):
- # FIXME: this should be a lookup - iteration won't scale
- for address_record in PublicAddress.all():
+ """Gets the public ip for a given instance"""
+ # FIXME(josh): this should be a lookup - iteration won't scale
+ for address_record in ElasticIp.all():
if address_record.get('instance_id', 'available') == instance_id:
return address_record['address']
-
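
The allocation rework above trades the old hosts hash and generator scan for a Redis set of available addresses: _create_assigned_set seeds the pool once, allocate_ip pops an address atomically with SPOP, and release_ip returns it with SADD. A standalone sketch of that idiom using plain redis-py and a hypothetical key (not the model classes themselves):

    import redis

    conn = redis.Redis()
    KEY = 'available:demo'  # stands in for BaseNetwork._available_key

    # seed once, as _create_assigned_set does for a new network
    for ip in ('10.0.0.3', '10.0.0.4', '10.0.0.5'):
        conn.sadd(KEY, ip)

    def allocate():
        # SPOP is atomic, so concurrent allocators can't hand out
        # the same address twice
        address = conn.spop(KEY)
        if not address:
            raise Exception('NoMoreAddresses')
        return address

    def release(address):
        # mirror of release_ip: put the address back into the pool
        conn.sadd(KEY, address)
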
diff --git a/nova/network/service.py b/nova/network/service.py
index 1a61f49d4..3dba0a9ef 100644
--- a/nova/network/service.py
+++ b/nova/network/service.py
@@ -17,60 +17,67 @@
# under the License.
"""
-Network Nodes are responsible for allocating ips and setting up network
+Network Hosts are responsible for allocating ips and setting up network
"""
from nova import datastore
+from nova import exception
from nova import flags
from nova import service
from nova import utils
from nova.auth import manager
-from nova.exception import NotFound
-from nova.network import exception
+from nova.network import exception as network_exception
from nova.network import model
from nova.network import vpn
-FLAGS = flags.FLAGS
+FLAGS = flags.FLAGS
flags.DEFINE_string('network_type',
'flat',
'Service Class for Networking')
flags.DEFINE_string('flat_network_bridge', 'br100',
'Bridge for simple network instances')
flags.DEFINE_list('flat_network_ips',
- ['192.168.0.2','192.168.0.3','192.168.0.4'],
+ ['192.168.0.2', '192.168.0.3', '192.168.0.4'],
'Available ips for simple network')
flags.DEFINE_string('flat_network_network', '192.168.0.0',
- 'Network for simple network')
+ 'Network for simple network')
flags.DEFINE_string('flat_network_netmask', '255.255.255.0',
- 'Netmask for simple network')
+ 'Netmask for simple network')
flags.DEFINE_string('flat_network_gateway', '192.168.0.1',
- 'Broadcast for simple network')
+ 'Broadcast for simple network')
flags.DEFINE_string('flat_network_broadcast', '192.168.0.255',
- 'Broadcast for simple network')
+ 'Broadcast for simple network')
flags.DEFINE_string('flat_network_dns', '8.8.4.4',
- 'Dns for simple network')
+ 'Dns for simple network')
+
def type_to_class(network_type):
+ """Convert a network_type string into an actual Python class"""
if network_type == 'flat':
return FlatNetworkService
- elif network_type == 'vlan':
+ elif network_type == 'vlan':
return VlanNetworkService
- raise NotFound("Couldn't find %s network type" % network_type)
+ raise exception.NotFound("Couldn't find %s network type" % network_type)
def setup_compute_network(network_type, user_id, project_id, security_group):
+ """Sets up the network on a compute host"""
srv = type_to_class(network_type)
- srv.setup_compute_network(network_type, user_id, project_id, security_group)
+ srv.setup_compute_network(user_id,
+ project_id,
+ security_group)
def get_host_for_project(project_id):
+ """Get host allocated to project from datastore"""
redis = datastore.Redis.instance()
return redis.get(_host_key(project_id))
def _host_key(project_id):
- return "network_host:%s" % project_id
+ """Returns redis host key for network"""
+ return "networkhost:%s" % project_id
class BaseNetworkService(service.Service):
@@ -80,6 +87,7 @@ class BaseNetworkService(service.Service):
"""
def __init__(self, *args, **kwargs):
self.network = model.PublicNetworkController()
+ super(BaseNetworkService, self).__init__(*args, **kwargs)
def set_network_host(self, user_id, project_id, *args, **kwargs):
"""Safely sets the host of the projects network"""
@@ -109,7 +117,7 @@ class BaseNetworkService(service.Service):
pass
@classmethod
- def setup_compute_network(self, user_id, project_id, security_group,
+ def setup_compute_network(cls, user_id, project_id, security_group,
*args, **kwargs):
"""Sets up matching network for compute hosts"""
raise NotImplementedError()
@@ -138,12 +146,14 @@ class FlatNetworkService(BaseNetworkService):
"""Basic network where no vlans are used"""
@classmethod
- def setup_compute_network(self, user_id, project_id, security_group,
+ def setup_compute_network(cls, user_id, project_id, security_group,
*args, **kwargs):
"""Network is created manually"""
pass
- def allocate_fixed_ip(self, user_id, project_id,
+ def allocate_fixed_ip(self,
+ user_id,
+ project_id,
security_group='default',
*args, **kwargs):
"""Gets a fixed ip from the pool
@@ -152,14 +162,16 @@ class FlatNetworkService(BaseNetworkService):
"""
# NOTE(vish): Some automation could be done here. For example,
# creating the flat_network_bridge and setting up
- # a gateway. This is all done manually atm
+ # a gateway. This is all done manually atm.
redis = datastore.Redis.instance()
if not redis.exists('ips') and not len(redis.keys('instances:*')):
for fixed_ip in FLAGS.flat_network_ips:
redis.sadd('ips', fixed_ip)
fixed_ip = redis.spop('ips')
if not fixed_ip:
- raise exception.NoMoreAddresses()
+ raise network_exception.NoMoreAddresses()
+ # TODO(vish): some sort of dns handling for hostname should
+ # probably be done here.
return {'inject_network': True,
'network_type': FLAGS.network_type,
'mac_address': utils.generate_mac(),
@@ -175,37 +187,51 @@ class FlatNetworkService(BaseNetworkService):
"""Returns an ip to the pool"""
datastore.Redis.instance().sadd('ips', fixed_ip)
+
class VlanNetworkService(BaseNetworkService):
"""Vlan network with dhcp"""
# NOTE(vish): A lot of the interactions with network/model.py can be
# simplified and improved. Also there it may be useful
# to support vlans separately from dhcp, instead of having
# both of them together in this class.
- def allocate_fixed_ip(self, user_id, project_id,
+ # pylint: disable-msg=W0221
+ def allocate_fixed_ip(self,
+ user_id,
+ project_id,
security_group='default',
- vpn=False, *args, **kwargs):
- """Gets a fixed ip from the pool """
+ is_vpn=False,
+ hostname=None,
+ *args, **kwargs):
+ """Gets a fixed ip from the pool"""
mac = utils.generate_mac()
net = model.get_project_network(project_id)
- if vpn:
- fixed_ip = net.allocate_vpn_ip(user_id, project_id, mac)
+ if is_vpn:
+ fixed_ip = net.allocate_vpn_ip(user_id,
+ project_id,
+ mac,
+ hostname)
else:
- fixed_ip = net.allocate_ip(user_id, project_id, mac)
+ fixed_ip = net.allocate_ip(user_id,
+ project_id,
+ mac,
+ hostname)
return {'network_type': FLAGS.network_type,
'bridge_name': net['bridge_name'],
'mac_address': mac,
- 'private_dns_name' : fixed_ip}
+ 'private_dns_name': fixed_ip}
def deallocate_fixed_ip(self, fixed_ip,
*args, **kwargs):
"""Returns an ip to the pool"""
return model.get_network_by_address(fixed_ip).deallocate_ip(fixed_ip)
- def lease_ip(self, address):
- return model.get_network_by_address(address).lease_ip(address)
+ def lease_ip(self, fixed_ip):
+ """Called by bridge when ip is leased"""
+ return model.get_network_by_address(fixed_ip).lease_ip(fixed_ip)
- def release_ip(self, address):
- return model.get_network_by_address(address).release_ip(address)
+ def release_ip(self, fixed_ip):
+ """Called by bridge when ip is released"""
+ return model.get_network_by_address(fixed_ip).release_ip(fixed_ip)
def restart_nets(self):
"""Ensure the network for each user is enabled"""
@@ -218,7 +244,7 @@ class VlanNetworkService(BaseNetworkService):
vpn.NetworkData.create(project_id)
@classmethod
- def setup_compute_network(self, user_id, project_id, security_group,
+ def setup_compute_network(cls, user_id, project_id, security_group,
*args, **kwargs):
"""Sets up matching network for compute hosts"""
# NOTE(vish): Use BridgedNetwork instead of DHCPNetwork because
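
type_to_class above turns the network_type flag into a concrete service class, so callers stay agnostic of flat versus vlan networking. A hedged sketch of the lookup from a caller's side, assuming a reachable Redis, an existing project, and the flag defaults defined in this file:

    from nova import flags
    from nova.network import service

    FLAGS = flags.FLAGS

    def allocate_for(user_id, project_id):
        # 'flat' maps to FlatNetworkService, 'vlan' to VlanNetworkService
        cls = service.type_to_class(FLAGS.network_type)
        net_service = cls()
        # returns a dict with mac_address, private_dns_name, etc.
        return net_service.allocate_fixed_ip(user_id, project_id)
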
diff --git a/nova/network/vpn.py b/nova/network/vpn.py
index cec84287c..85366ed89 100644
--- a/nova/network/vpn.py
+++ b/nova/network/vpn.py
@@ -23,9 +23,8 @@ from nova import exception
from nova import flags
from nova import utils
-FLAGS = flags.FLAGS
-
+FLAGS = flags.FLAGS
flags.DEFINE_string('vpn_ip', utils.get_my_ip(),
'Public IP for the cloudpipe VPN servers')
flags.DEFINE_integer('vpn_start_port', 1000,
@@ -33,7 +32,9 @@ flags.DEFINE_integer('vpn_start_port', 1000,
flags.DEFINE_integer('vpn_end_port', 2000,
'End port for the cloudpipe VPN servers')
+
class NoMorePorts(exception.Error):
+ """No ports available to allocate for the given ip"""
pass
@@ -67,34 +68,44 @@ class NetworkData(datastore.BasicModel):
return network_data
@classmethod
- def find_free_port_for_ip(cls, ip):
+ def find_free_port_for_ip(cls, vpn_ip):
"""Finds a free port for a given ip from the redis set"""
# TODO(vish): these redis commands should be generalized and
# placed into a base class. Conceptually, it is
# similar to an association, but we are just
# storing a set of values instead of keys that
# should be turned into objects.
- redis = datastore.Redis.instance()
- key = 'ip:%s:ports' % ip
- # TODO(vish): these ports should be allocated through an admin
- # command instead of a flag
- if (not redis.exists(key) and
- not redis.exists(cls._redis_association_name('ip', ip))):
- for i in range(FLAGS.vpn_start_port, FLAGS.vpn_end_port + 1):
- redis.sadd(key, i)
+ cls._ensure_set_exists(vpn_ip)
- port = redis.spop(key)
+ port = datastore.Redis.instance().spop(cls._redis_ports_key(vpn_ip))
if not port:
raise NoMorePorts()
return port
@classmethod
- def num_ports_for_ip(cls, ip):
+ def _redis_ports_key(cls, vpn_ip):
+ """Key that ports are stored under in redis"""
+ return 'ip:%s:ports' % vpn_ip
+
+ @classmethod
+ def _ensure_set_exists(cls, vpn_ip):
+ """Creates the set of ports for the ip if it doesn't already exist"""
+ # TODO(vish): these ports should be allocated through an admin
+ # command instead of a flag
+ redis = datastore.Redis.instance()
+ if (not redis.exists(cls._redis_ports_key(vpn_ip)) and
+ not redis.exists(cls._redis_association_name('ip', vpn_ip))):
+ for i in range(FLAGS.vpn_start_port, FLAGS.vpn_end_port + 1):
+ redis.sadd(cls._redis_ports_key(vpn_ip), i)
+
+ @classmethod
+ def num_ports_for_ip(cls, vpn_ip):
"""Calculates the number of free ports for a given ip"""
- return datastore.Redis.instance().scard('ip:%s:ports' % ip)
+ cls._ensure_set_exists(vpn_ip)
+ return datastore.Redis.instance().scard('ip:%s:ports' % vpn_ip)
@property
- def ip(self):
+ def ip(self): # pylint: disable-msg=C0103
"""The ip assigned to the project"""
return self['ip']
@@ -113,4 +124,3 @@ class NetworkData(datastore.BasicModel):
self.unassociate_with('ip', self.ip)
datastore.Redis.instance().sadd('ip:%s:ports' % self.ip, self.port)
super(NetworkData, self).destroy()
-
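
The port bookkeeping above follows the same Redis-set idiom as the fixed-ip pool: _ensure_set_exists lazily seeds one set per vpn ip, find_free_port_for_ip pops with SPOP, and destroy returns the port with SADD. A compact sketch of the idiom in isolation (plain redis-py, with the port range hard-coded rather than read from FLAGS):

    import redis

    conn = redis.Redis()

    def ports_key(vpn_ip):
        return 'ip:%s:ports' % vpn_ip  # same key shape as _redis_ports_key

    def ensure_ports(vpn_ip, start=1000, end=2000):
        # seed the set once, mirroring _ensure_set_exists
        if not conn.exists(ports_key(vpn_ip)):
            for port in range(start, end + 1):
                conn.sadd(ports_key(vpn_ip), port)

    def allocate_port(vpn_ip):
        ensure_ports(vpn_ip)
        port = conn.spop(ports_key(vpn_ip))
        if not port:
            raise Exception('NoMorePorts')
        return int(port)
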
diff --git a/nova/objectstore/bucket.py b/nova/objectstore/bucket.py
index b42a96233..c2b412dd7 100644
--- a/nova/objectstore/bucket.py
+++ b/nova/objectstore/bucket.py
@@ -36,6 +36,7 @@ FLAGS = flags.FLAGS
flags.DEFINE_string('buckets_path', utils.abspath('../buckets'),
'path to s3 buckets')
+
class Bucket(object):
def __init__(self, name):
self.name = name
diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py
index a5eab9828..5c3dc286b 100644
--- a/nova/objectstore/handler.py
+++ b/nova/objectstore/handler.py
@@ -38,17 +38,19 @@ S3 client with this module::
"""
import datetime
-import logging
import json
+import logging
import multiprocessing
import os
-from tornado import escape
import urllib
-from twisted.application import internet, service
-from twisted.web.resource import Resource
-from twisted.web import server, static, error
-
+from tornado import escape
+from twisted.application import internet
+from twisted.application import service
+from twisted.web import error
+from twisted.web import resource
+from twisted.web import server
+from twisted.web import static
from nova import exception
from nova import flags
@@ -60,6 +62,7 @@ from nova.objectstore import image
FLAGS = flags.FLAGS
+
def render_xml(request, value):
"""Writes value as XML string to request"""
assert isinstance(value, dict) and len(value) == 1
@@ -73,12 +76,14 @@ def render_xml(request, value):
request.write('</' + escape.utf8(name) + '>')
request.finish()
+
def finish(request, content=None):
"""Finalizer method for request"""
if content:
request.write(content)
request.finish()
+
def _render_parts(value, write_cb):
"""Helper method to render different Python objects to XML"""
if isinstance(value, basestring):
@@ -98,6 +103,7 @@ def _render_parts(value, write_cb):
else:
raise Exception("Unknown S3 value type %r", value)
+
def get_argument(request, key, default_value):
"""Returns the request's value at key, or default_value
if not found
@@ -106,6 +112,7 @@ def get_argument(request, key, default_value):
return request.args[key][0]
return default_value
+
def get_context(request):
"""Returns the supplied request's context object"""
try:
@@ -129,7 +136,7 @@ def get_context(request):
logging.debug("Authentication Failure: %s", ex)
raise exception.NotAuthorized
-class ErrorHandlingResource(Resource):
+class ErrorHandlingResource(resource.Resource):
"""Maps exceptions to 404 / 401 codes. Won't work for
exceptions thrown after NOT_DONE_YET is returned.
"""
@@ -141,7 +148,7 @@ class ErrorHandlingResource(Resource):
def render(self, request):
"""Renders the response as XML"""
try:
- return Resource.render(self, request)
+ return resource.Resource.render(self, request)
except exception.NotFound:
request.setResponseCode(404)
return ''
@@ -149,6 +156,7 @@ class ErrorHandlingResource(Resource):
request.setResponseCode(403)
return ''
+
class S3(ErrorHandlingResource):
"""Implementation of an S3-like storage server based on local files."""
def __init__(self):
@@ -175,10 +183,11 @@ class S3(ErrorHandlingResource):
}})
return server.NOT_DONE_YET
+
class BucketResource(ErrorHandlingResource):
"""A web resource containing an S3-like bucket"""
def __init__(self, name):
- ErrorHandlingResource.__init__(self)
+ resource.Resource.__init__(self)
self.name = name
def getChild(self, name, request):
@@ -239,9 +248,9 @@ class BucketResource(ErrorHandlingResource):
class ObjectResource(ErrorHandlingResource):
"""The resource returned from a bucket"""
- def __init__(self, bucket_name, name):
- ErrorHandlingResource.__init__(self)
- self.bucket = bucket_name
+ def __init__(self, bucket, name):
+ resource.Resource.__init__(self)
+ self.bucket = bucket
self.name = name
def render_GET(self, request):
@@ -298,12 +307,13 @@ class ObjectResource(ErrorHandlingResource):
request.setResponseCode(204)
return ''
+
class ImageResource(ErrorHandlingResource):
"""A web resource representing a single image"""
isLeaf = True
def __init__(self, name):
- ErrorHandlingResource.__init__(self)
+ resource.Resource.__init__(self)
self.img = image.Image(name)
def render_GET(self, request):
@@ -312,7 +322,7 @@ class ImageResource(ErrorHandlingResource):
defaultType='application/octet-stream'
).render_GET(request)
-class ImagesResource(Resource):
+class ImagesResource(resource.Resource):
"""A web resource representing a list of images"""
def getChild(self, name, _request):
"""Returns itself or an ImageResource if no name given"""
@@ -328,7 +338,23 @@ class ImagesResource(Resource):
images = [i for i in image.Image.all() \
if i.is_authorized(request.context, readonly=True)]
- request.write(json.dumps([i.metadata for i in images]))
+ # Bug #617776:
+ # We used to have 'type' in the image metadata, but this field
+ # should be called 'imageType', as per the EC2 specification.
+ # For compat with old metadata files we copy type to imageType if
+ # imageType is not present.
+ # For compat with euca2ools (and any other clients using the
+ # incorrect name) we copy imageType to type.
+ # imageType is primary if we end up with both in the metadata file
+ # (which should never happen).
+ def decorate(m):
+ if 'imageType' not in m and 'type' in m:
+ m[u'imageType'] = m['type']
+ elif 'imageType' in m:
+ m[u'type'] = m['imageType']
+ return m
+
+ request.write(json.dumps([decorate(i.metadata) for i in images]))
request.finish()
return server.NOT_DONE_YET
@@ -381,12 +407,14 @@ class ImagesResource(Resource):
request.setResponseCode(204)
return ''
+
def get_site():
"""Support for WSGI-like interfaces"""
root = S3()
site = server.Site(root)
return site
+
def get_application():
"""Support WSGI-like interfaces"""
factory = get_site()
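
The bug #617776 shim above is asymmetric on purpose: imageType wins whenever it is present. A small illustration of the three metadata cases it handles (hypothetical values):

    def decorate(m):  # copy of the shim above, for illustration only
        if 'imageType' not in m and 'type' in m:
            m[u'imageType'] = m['type']
        elif 'imageType' in m:
            m[u'type'] = m['imageType']
        return m

    # old metadata file: type is copied to imageType
    assert decorate({'type': 'machine'})['imageType'] == 'machine'
    # new metadata file: imageType is copied back to type for euca2ools
    assert decorate({'imageType': 'kernel'})['type'] == 'kernel'
    # both present (should never happen): imageType is primary
    assert decorate({'imageType': 'ramdisk',
                     'type': 'machine'})['type'] == 'ramdisk'
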
diff --git a/nova/objectstore/image.py b/nova/objectstore/image.py
index 860298ba6..f3c02a425 100644
--- a/nova/objectstore/image.py
+++ b/nova/objectstore/image.py
@@ -42,6 +42,7 @@ FLAGS = flags.FLAGS
flags.DEFINE_string('images_path', utils.abspath('../images'),
'path to decrypted images')
+
class Image(object):
def __init__(self, image_id):
self.image_id = image_id
@@ -148,7 +149,7 @@ class Image(object):
'imageOwnerId': 'system',
'isPublic': public,
'architecture': 'x86_64',
- 'type': image_type,
+ 'imageType': image_type,
'state': 'available'
}
@@ -195,7 +196,7 @@ class Image(object):
'imageOwnerId': context.project.id,
'isPublic': False, # FIXME: grab public from manifest
'architecture': 'x86_64', # FIXME: grab architecture from manifest
- 'type' : image_type
+            'imageType': image_type
}
def write_state(state):
@@ -231,13 +232,22 @@ class Image(object):
@staticmethod
def decrypt_image(encrypted_filename, encrypted_key, encrypted_iv, cloud_private_key, decrypted_filename):
- key, err = utils.execute('openssl rsautl -decrypt -inkey %s' % cloud_private_key, encrypted_key)
+ key, err = utils.execute(
+ 'openssl rsautl -decrypt -inkey %s' % cloud_private_key,
+ process_input=encrypted_key,
+ check_exit_code=False)
if err:
raise exception.Error("Failed to decrypt private key: %s" % err)
- iv, err = utils.execute('openssl rsautl -decrypt -inkey %s' % cloud_private_key, encrypted_iv)
+ iv, err = utils.execute(
+ 'openssl rsautl -decrypt -inkey %s' % cloud_private_key,
+ process_input=encrypted_iv,
+ check_exit_code=False)
if err:
raise exception.Error("Failed to decrypt initialization vector: %s" % err)
- out, err = utils.execute('openssl enc -d -aes-128-cbc -in %s -K %s -iv %s -out %s' % (encrypted_filename, key, iv, decrypted_filename))
+ _out, err = utils.execute(
+ 'openssl enc -d -aes-128-cbc -in %s -K %s -iv %s -out %s'
+ % (encrypted_filename, key, iv, decrypted_filename),
+ check_exit_code=False)
if err:
raise exception.Error("Failed to decrypt image file %s : %s" % (encrypted_filename, err))
diff --git a/nova/objectstore/stored.py b/nova/objectstore/stored.py
index 81c047b22..9829194cb 100644
--- a/nova/objectstore/stored.py
+++ b/nova/objectstore/stored.py
@@ -23,7 +23,7 @@ Properties of an object stored within a bucket.
import os
import nova.crypto
-from nova.exception import NotFound, NotAuthorized
+from nova import exception
class Object(object):
@@ -33,7 +33,7 @@ class Object(object):
self.key = key
self.path = bucket._object_path(key)
if not os.path.isfile(self.path):
- raise NotFound
+ raise exception.NotFound
def __repr__(self):
return "<Object %s/%s>" % (self.bucket, self.key)
diff --git a/nova/process.py b/nova/process.py
index 2dc56372f..425d9f162 100644
--- a/nova/process.py
+++ b/nova/process.py
@@ -2,6 +2,7 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2010 FathomDB Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -20,16 +21,12 @@
Process pool, still buggy right now.
"""
-import logging
-import multiprocessing
import StringIO
+
from twisted.internet import defer
from twisted.internet import error
-from twisted.internet import process
from twisted.internet import protocol
from twisted.internet import reactor
-from twisted.internet import threads
-from twisted.python import failure
from nova import flags
@@ -54,111 +51,100 @@ class UnexpectedErrorOutput(IOError):
IOError.__init__(self, "got stdout: %r\nstderr: %r" % (stdout, stderr))
-# NOTE(termie): this too
-class _BackRelay(protocol.ProcessProtocol):
+# This is based on _BackRelay from twisted.internet.utils, but modified to
+# capture both stdout and stderr, without odd stderr handling, and also to
+# handle stdin
+class BackRelayWithInput(protocol.ProcessProtocol):
"""
Trivial protocol for communicating with a process and turning its output
into the result of a L{Deferred}.
@ivar deferred: A L{Deferred} which will be called back with all of stdout
- and, if C{errortoo} is true, all of stderr as well (mixed together in
- one string). If C{errortoo} is false and any bytes are received over
- stderr, this will fire with an L{_UnexpectedErrorOutput} instance and
- the attribute will be set to C{None}.
-
- @ivar onProcessEnded: If C{errortoo} is false and bytes are received over
- stderr, this attribute will refer to a L{Deferred} which will be called
- back when the process ends. This C{Deferred} is also associated with
- the L{_UnexpectedErrorOutput} which C{deferred} fires with earlier in
- this case so that users can determine when the process has actually
- ended, in addition to knowing when bytes have been received via stderr.
+    and all of stderr as well (as a tuple). If C{terminate_on_stderr} is true
+ and any bytes are received over stderr, this will fire with an
+    L{UnexpectedErrorOutput} instance and the attribute will be set to
+ C{None}.
+
+ @ivar onProcessEnded: If C{terminate_on_stderr} is false and bytes are
+ received over stderr, this attribute will refer to a L{Deferred} which
+ will be called back when the process ends. This C{Deferred} is also
+    associated with the L{UnexpectedErrorOutput} which C{deferred} fires
+ with earlier in this case so that users can determine when the process
+ has actually ended, in addition to knowing when bytes have been received
+ via stderr.
"""
- def __init__(self, deferred, errortoo=0):
+ def __init__(self, deferred, started_deferred=None,
+ terminate_on_stderr=False, check_exit_code=True,
+ process_input=None):
self.deferred = deferred
- self.s = StringIO.StringIO()
- if errortoo:
- self.errReceived = self.errReceivedIsGood
- else:
- self.errReceived = self.errReceivedIsBad
-
- def errReceivedIsBad(self, text):
- if self.deferred is not None:
- self.onProcessEnded = defer.Deferred()
- err = UnexpectedErrorOutput(text, self.onProcessEnded)
- self.deferred.errback(failure.Failure(err))
+ self.stdout = StringIO.StringIO()
+ self.stderr = StringIO.StringIO()
+ self.started_deferred = started_deferred
+ self.terminate_on_stderr = terminate_on_stderr
+ self.check_exit_code = check_exit_code
+ self.process_input = process_input
+ self.on_process_ended = None
+
+ def errReceived(self, text):
+ self.stderr.write(text)
+ if self.terminate_on_stderr and (self.deferred is not None):
+ self.on_process_ended = defer.Deferred()
+ self.deferred.errback(UnexpectedErrorOutput(
+ stdout=self.stdout.getvalue(),
+ stderr=self.stderr.getvalue()))
self.deferred = None
self.transport.loseConnection()
- def errReceivedIsGood(self, text):
- self.s.write(text)
-
def outReceived(self, text):
- self.s.write(text)
-
- def processEnded(self, reason):
- if self.deferred is not None:
- self.deferred.callback(self.s.getvalue())
- elif self.onProcessEnded is not None:
- self.onProcessEnded.errback(reason)
-
-
-class BackRelayWithInput(_BackRelay):
- def __init__(self, deferred, startedDeferred=None, error_ok=0,
- input=None):
- # Twisted doesn't use new-style classes in most places :(
- _BackRelay.__init__(self, deferred, errortoo=error_ok)
- self.error_ok = error_ok
- self.input = input
- self.stderr = StringIO.StringIO()
- self.startedDeferred = startedDeferred
-
- def errReceivedIsBad(self, text):
- self.stderr.write(text)
- self.transport.loseConnection()
-
- def errReceivedIsGood(self, text):
- self.stderr.write(text)
-
- def connectionMade(self):
- if self.startedDeferred:
- self.startedDeferred.callback(self)
- if self.input:
- self.transport.write(self.input)
- self.transport.closeStdin()
+ self.stdout.write(text)
def processEnded(self, reason):
if self.deferred is not None:
- stdout, stderr = self.s.getvalue(), self.stderr.getvalue()
+ stdout, stderr = self.stdout.getvalue(), self.stderr.getvalue()
try:
- # NOTE(termie): current behavior means if error_ok is True
- # we won't throw an error even if the process
- # exited with a non-0 status, so you can't be
- # okay with stderr output and not with bad exit
- # codes.
- if not self.error_ok:
+ if self.check_exit_code:
reason.trap(error.ProcessDone)
self.deferred.callback((stdout, stderr))
except:
+ # NOTE(justinsb): This logic is a little suspicious to me...
+ # If the callback throws an exception, then errback will be
+ # called also. However, this is what the unit tests test for...
self.deferred.errback(UnexpectedErrorOutput(stdout, stderr))
+ elif self.on_process_ended is not None:
+ self.on_process_ended.errback(reason)
-def getProcessOutput(executable, args=None, env=None, path=None, reactor=None,
- error_ok=0, input=None, startedDeferred=None):
- if reactor is None:
- from twisted.internet import reactor
+ def connectionMade(self):
+ if self.started_deferred:
+ self.started_deferred.callback(self)
+ if self.process_input:
+ self.transport.write(self.process_input)
+ self.transport.closeStdin()
+
+def get_process_output(executable, args=None, env=None, path=None,
+ process_reactor=None, check_exit_code=True,
+ process_input=None, started_deferred=None,
+ terminate_on_stderr=False):
+ if process_reactor is None:
+ process_reactor = reactor
args = args and args or ()
    env = env and env or {}
- d = defer.Deferred()
- p = BackRelayWithInput(
- d, startedDeferred=startedDeferred, error_ok=error_ok, input=input)
+ deferred = defer.Deferred()
+ process_handler = BackRelayWithInput(
+ deferred,
+ started_deferred=started_deferred,
+ check_exit_code=check_exit_code,
+ process_input=process_input,
+ terminate_on_stderr=terminate_on_stderr)
# NOTE(vish): commands come in as unicode, but self.executes needs
# strings or process.spawn raises a deprecation warning
executable = str(executable)
if not args is None:
args = [str(x) for x in args]
- reactor.spawnProcess(p, executable, (executable,)+tuple(args), env, path)
- return d
+    process_reactor.spawnProcess(process_handler, executable,
+                                 (executable,) + tuple(args), env, path)
+ return deferred
class ProcessPool(object):
@@ -184,26 +170,27 @@ class ProcessPool(object):
return self.execute(executable, args, **kw)
def execute(self, *args, **kw):
- d = self._pool.acquire()
+ deferred = self._pool.acquire()
- def _associateProcess(proto):
- d.process = proto.transport
+ def _associate_process(proto):
+ deferred.process = proto.transport
return proto.transport
started = defer.Deferred()
- started.addCallback(_associateProcess)
- kw.setdefault('startedDeferred', started)
+ started.addCallback(_associate_process)
+ kw.setdefault('started_deferred', started)
- d.process = None
- d.started = started
+ deferred.process = None
+ deferred.started = started
- d.addCallback(lambda _: getProcessOutput(*args, **kw))
- d.addBoth(self._release)
- return d
+ deferred.addCallback(lambda _: get_process_output(*args, **kw))
+ deferred.addBoth(self._release)
+ return deferred
- def _release(self, rv=None):
+ def _release(self, retval=None):
self._pool.release()
- return rv
+ return retval
+
class SharedPool(object):
_instance = None
@@ -213,5 +200,6 @@ class SharedPool(object):
def __getattr__(self, key):
return getattr(self._instance, key)
+
def simple_execute(cmd, **kwargs):
return SharedPool().simple_execute(cmd, **kwargs)
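
get_process_output now fires its Deferred with an (stdout, stderr) tuple, and the pool throttles concurrent spawns. A minimal sketch of driving it from the Twisted reactor, assuming the module as patched above:

    from twisted.internet import reactor

    from nova import process

    def _done(result):
        stdout, stderr = result  # tuple delivered by BackRelayWithInput
        print 'out: %r err: %r' % (stdout, stderr)
        reactor.stop()

    def _failed(failure):
        # fires as UnexpectedErrorOutput when check_exit_code trips
        print 'failed: %s' % failure.getErrorMessage()
        reactor.stop()

    d = process.simple_execute('echo hello')
    d.addCallbacks(_done, _failed)
    reactor.run()
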
diff --git a/nova/rpc.py b/nova/rpc.py
index 2a550c3ae..84a9b5590 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -21,14 +21,14 @@ AMQP-based RPC. Queues have consumers and publishers.
No fan-out support yet.
"""
-from carrot import connection
-from carrot import messaging
import json
import logging
import sys
import uuid
+
+from carrot import connection as carrot_connection
+from carrot import messaging
from twisted.internet import defer
-from twisted.internet import reactor
from twisted.internet import task
from nova import exception
@@ -39,13 +39,15 @@ from nova import flags
FLAGS = flags.FLAGS
-_log = logging.getLogger('amqplib')
-_log.setLevel(logging.WARN)
+LOG = logging.getLogger('amqplib')
+LOG.setLevel(logging.DEBUG)
-class Connection(connection.BrokerConnection):
+class Connection(carrot_connection.BrokerConnection):
+ """Connection instance object"""
@classmethod
def instance(cls):
+ """Returns the instance"""
if not hasattr(cls, '_instance'):
params = dict(hostname=FLAGS.rabbit_host,
port=FLAGS.rabbit_port,
@@ -56,18 +58,33 @@ class Connection(connection.BrokerConnection):
if FLAGS.fake_rabbit:
params['backend_cls'] = fakerabbit.Backend
+ # NOTE(vish): magic is fun!
+ # pylint: disable-msg=W0142
cls._instance = cls(**params)
return cls._instance
@classmethod
def recreate(cls):
+ """Recreates the connection instance
+
+ This is necessary to recover from some network errors/disconnects"""
del cls._instance
return cls.instance()
+
class Consumer(messaging.Consumer):
+ """Consumer base class
+
+ Contains methods for connecting the fetch method to async loops
+ """
+ def __init__(self, *args, **kwargs):
+ self.failed_connection = False
+ super(Consumer, self).__init__(*args, **kwargs)
+
# TODO(termie): it would be nice to give these some way of automatically
# cleaning up after themselves
def attach_to_tornado(self, io_inst=None):
+ """Attach a callback to tornado that fires 10 times a second"""
from tornado import ioloop
if io_inst is None:
io_inst = ioloop.IOLoop.instance()
@@ -79,33 +96,44 @@ class Consumer(messaging.Consumer):
attachToTornado = attach_to_tornado
- def fetch(self, *args, **kwargs):
+ def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False):
+ """Wraps the parent fetch with some logic for failed connections"""
# TODO(vish): the logic for failed connections and logging should be
# refactored into some sort of connection manager object
try:
- if getattr(self, 'failed_connection', False):
- # attempt to reconnect
+ if self.failed_connection:
+ # NOTE(vish): conn is defined in the parent class, we can
+ # recreate it as long as we create the backend too
+ # pylint: disable-msg=W0201
self.conn = Connection.recreate()
self.backend = self.conn.create_backend()
- super(Consumer, self).fetch(*args, **kwargs)
- if getattr(self, 'failed_connection', False):
+ super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks)
+ if self.failed_connection:
logging.error("Reconnected to queue")
self.failed_connection = False
- except Exception, ex:
- if not getattr(self, 'failed_connection', False):
+ # NOTE(vish): This is catching all errors because we really don't
+        #             want exceptions to be logged 10 times a second if some
+ # persistent failure occurs.
+ except Exception: # pylint: disable-msg=W0703
+ if not self.failed_connection:
logging.exception("Failed to fetch message from queue")
self.failed_connection = True
def attach_to_twisted(self):
+ """Attach a callback to twisted that fires 10 times a second"""
loop = task.LoopingCall(self.fetch, enable_callbacks=True)
loop.start(interval=0.1)
+
class Publisher(messaging.Publisher):
+ """Publisher base class"""
pass
class TopicConsumer(Consumer):
+ """Consumes messages on a specific topic"""
exchange_type = "topic"
+
def __init__(self, connection=None, topic="broadcast"):
self.queue = topic
self.routing_key = topic
@@ -115,14 +143,24 @@ class TopicConsumer(Consumer):
class AdapterConsumer(TopicConsumer):
+ """Calls methods on a proxy object based on method and args"""
def __init__(self, connection=None, topic="broadcast", proxy=None):
- _log.debug('Initing the Adapter Consumer for %s' % (topic))
+ LOG.debug('Initing the Adapter Consumer for %s' % (topic))
self.proxy = proxy
- super(AdapterConsumer, self).__init__(connection=connection, topic=topic)
+ super(AdapterConsumer, self).__init__(connection=connection,
+ topic=topic)
@exception.wrap_exception
def receive(self, message_data, message):
- _log.debug('received %s' % (message_data))
+ """Magically looks for a method on the proxy object and calls it
+
+ Message data should be a dictionary with two keys:
+ method: string representing the method to call
+ args: dictionary of arg: value
+
+ Example: {'method': 'echo', 'args': {'value': 42}}
+ """
+ LOG.debug('received %s' % (message_data))
msg_id = message_data.pop('_msg_id', None)
method = message_data.get('method')
@@ -133,21 +171,25 @@ class AdapterConsumer(TopicConsumer):
# messages stay in the queue indefinitely, so for now
# we just log the message and send an error string
# back to the caller
- _log.warn('no method for message: %s' % (message_data))
+ LOG.warn('no method for message: %s' % (message_data))
msg_reply(msg_id, 'No method for message: %s' % message_data)
return
node_func = getattr(self.proxy, str(method))
node_args = dict((str(k), v) for k, v in args.iteritems())
+ # NOTE(vish): magic is fun!
+ # pylint: disable-msg=W0142
d = defer.maybeDeferred(node_func, **node_args)
if msg_id:
- d.addCallback(lambda rval: msg_reply(msg_id, rval))
- d.addErrback(lambda e: msg_reply(msg_id, str(e)))
+ d.addCallback(lambda rval: msg_reply(msg_id, rval, None))
+ d.addErrback(lambda e: msg_reply(msg_id, None, e))
return
class TopicPublisher(Publisher):
+ """Publishes messages on a specific topic"""
exchange_type = "topic"
+
def __init__(self, connection=None, topic="broadcast"):
self.routing_key = topic
self.exchange = FLAGS.control_exchange
@@ -156,7 +198,9 @@ class TopicPublisher(Publisher):
class DirectConsumer(Consumer):
+ """Consumes messages directly on a channel specified by msg_id"""
exchange_type = "direct"
+
def __init__(self, connection=None, msg_id=None):
self.queue = msg_id
self.routing_key = msg_id
@@ -166,7 +210,9 @@ class DirectConsumer(Consumer):
class DirectPublisher(Publisher):
+ """Publishes messages directly on a channel specified by msg_id"""
exchange_type = "direct"
+
def __init__(self, connection=None, msg_id=None):
self.routing_key = msg_id
self.exchange = msg_id
@@ -174,32 +220,63 @@ class DirectPublisher(Publisher):
super(DirectPublisher, self).__init__(connection=connection)
-def msg_reply(msg_id, reply):
+def msg_reply(msg_id, reply=None, failure=None):
+ """Sends a reply or an error on the channel signified by msg_id
+
+ failure should be a twisted failure object"""
+ if failure:
+ message = failure.getErrorMessage()
+ traceback = failure.getTraceback()
+ logging.error("Returning exception %s to caller", message)
+ logging.error(traceback)
+ failure = (failure.type.__name__, str(failure.value), traceback)
conn = Connection.instance()
publisher = DirectPublisher(connection=conn, msg_id=msg_id)
-
try:
- publisher.send({'result': reply})
+ publisher.send({'result': reply, 'failure': failure})
except TypeError:
publisher.send(
{'result': dict((k, repr(v))
- for k, v in reply.__dict__.iteritems())
- })
+ for k, v in reply.__dict__.iteritems()),
+ 'failure': failure})
publisher.close()
+class RemoteError(exception.Error):
+ """Signifies that a remote class has raised an exception
+
+    Contains a string representation of the type of the original exception,
+ the value of the original exception, and the traceback. These are
+ sent to the parent as a joined string so printing the exception
+    contains all of the relevant info."""
+ def __init__(self, exc_type, value, traceback):
+ self.exc_type = exc_type
+ self.value = value
+ self.traceback = traceback
+ super(RemoteError, self).__init__("%s %s\n%s" % (exc_type,
+ value,
+ traceback))
+
+
def call(topic, msg):
- _log.debug("Making asynchronous call...")
+ """Sends a message on a topic and wait for a response"""
+ LOG.debug("Making asynchronous call...")
msg_id = uuid.uuid4().hex
msg.update({'_msg_id': msg_id})
- _log.debug("MSG_ID is %s" % (msg_id))
+ LOG.debug("MSG_ID is %s" % (msg_id))
conn = Connection.instance()
d = defer.Deferred()
consumer = DirectConsumer(connection=conn, msg_id=msg_id)
+
def deferred_receive(data, message):
+ """Acks message and callbacks or errbacks"""
message.ack()
- d.callback(data)
+ if data['failure']:
+ return d.errback(RemoteError(*data['failure']))
+ else:
+ return d.callback(data['result'])
+
consumer.register_callback(deferred_receive)
injected = consumer.attach_to_tornado()
@@ -213,7 +290,8 @@ def call(topic, msg):
def cast(topic, msg):
- _log.debug("Making asynchronous cast...")
+ """Sends a message on a topic without waiting for a response"""
+ LOG.debug("Making asynchronous cast...")
conn = Connection.instance()
publisher = TopicPublisher(connection=conn, topic=topic)
publisher.send(msg)
@@ -221,16 +299,18 @@ def cast(topic, msg):
def generic_response(message_data, message):
- _log.debug('response %s', message_data)
+ """Logs a result and exits"""
+ LOG.debug('response %s', message_data)
message.ack()
sys.exit(0)
def send_message(topic, message, wait=True):
+ """Sends a message for testing"""
msg_id = uuid.uuid4().hex
message.update({'_msg_id': msg_id})
- _log.debug('topic is %s', topic)
- _log.debug('message %s', message)
+ LOG.debug('topic is %s', topic)
+ LOG.debug('message %s', message)
if wait:
consumer = messaging.Consumer(connection=Connection.instance(),
@@ -253,6 +333,8 @@ def send_message(topic, message, wait=True):
consumer.wait()
-# TODO: Replace with a docstring test
if __name__ == "__main__":
+ # NOTE(vish): you can send messages from the command line using
+    #             a topic and a json string representing a dictionary
+ # for the method
send_message(sys.argv[1], json.loads(sys.argv[2]))
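
With the failure channel added above, call() errbacks with RemoteError(exc_type, value, traceback) instead of handing back a stringified error. A hedged sketch of the caller's side, assuming it runs inside a service with the tornado ioloop attached and an AdapterConsumer on the 'network' topic whose proxy exposes an echo method (as in the receive docstring):

    from nova import rpc

    d = rpc.call('network', {'method': 'echo', 'args': {'value': 42}})

    def _ok(result):
        print 'remote returned %r' % (result,)

    def _err(failure):
        failure.trap(rpc.RemoteError)  # remote type/value/traceback survive
        print 'remote raised %s: %s' % (failure.value.exc_type,
                                        failure.value.value)

    d.addCallbacks(_ok, _err)
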
diff --git a/nova/server.py b/nova/server.py
index 96550f078..c6b60e090 100644
--- a/nova/server.py
+++ b/nova/server.py
@@ -44,6 +44,8 @@ flags.DEFINE_bool('use_syslog', True, 'output to syslog when daemonizing')
flags.DEFINE_string('logfile', None, 'log file to output to')
flags.DEFINE_string('pidfile', None, 'pid file to output to')
flags.DEFINE_string('working_directory', './', 'working directory...')
+flags.DEFINE_integer('uid', os.getuid(), 'uid under which to run')
+flags.DEFINE_integer('gid', os.getgid(), 'gid under which to run')
def stop(pidfile):
@@ -135,6 +137,8 @@ def daemonize(args, name, main):
threaded=False),
stdin=stdin,
stdout=stdout,
- stderr=stderr
+ stderr=stderr,
+ uid=FLAGS.uid,
+ gid=FLAGS.gid
):
main(args)
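
The new uid and gid flags default to the current user, so daemonized services keep their privileges unless told otherwise. A hedged sketch of dropping privileges (the numeric ids are illustrative):

    # Illustrative only: values would normally come from a flagfile or
    # the command line, e.g. nova-network --uid=110 --gid=110.
    from nova import flags

    FLAGS = flags.FLAGS
    FLAGS.uid = 110   # hypothetical unprivileged uid
    FLAGS.gid = 110   # hypothetical unprivileged gid
    # The daemon context then switches to these ids before main(args) runs.
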
diff --git a/nova/test.py b/nova/test.py
index c7e08734f..c392c8a84 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -22,11 +22,11 @@ Allows overriding of flags for use of fakes,
and some black magic for inline callbacks.
"""
-import mox
-import stubout
import sys
import time
+import mox
+import stubout
from tornado import ioloop
from twisted.internet import defer
from twisted.trial import unittest
@@ -91,7 +91,6 @@ class TrialTestCase(unittest.TestCase):
setattr(FLAGS, k, v)
-
class BaseTestCase(TrialTestCase):
# TODO(jaypipes): Can this be moved into the TrialTestCase class?
"""Base test case class for all unit tests."""
diff --git a/nova/tests/auth_unittest.py b/nova/tests/auth_unittest.py
index f7e0625a3..0b404bfdc 100644
--- a/nova/tests/auth_unittest.py
+++ b/nova/tests/auth_unittest.py
@@ -179,7 +179,21 @@ class AuthTestCase(test.BaseTestCase):
project.add_role('test1', 'sysadmin')
self.assertTrue(project.has_role('test1', 'sysadmin'))
- def test_211_can_remove_project_role(self):
+ def test_211_can_list_project_roles(self):
+ project = self.manager.get_project('testproj')
+ user = self.manager.get_user('test1')
+ self.manager.add_role(user, 'netadmin', project)
+ roles = self.manager.get_user_roles(user)
+ self.assertTrue('sysadmin' in roles)
+ self.assertFalse('netadmin' in roles)
+ project_roles = self.manager.get_user_roles(user, project)
+ self.assertTrue('sysadmin' in project_roles)
+ self.assertTrue('netadmin' in project_roles)
+ # has_role should be False because the global role is missing
+ self.assertFalse(self.manager.has_role(user, 'netadmin', project))
+
+
+ def test_212_can_remove_project_role(self):
project = self.manager.get_project('testproj')
self.assertTrue(project.has_role('test1', 'sysadmin'))
project.remove_role('test1', 'sysadmin')
diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py
index 40837405c..900ff5a97 100644
--- a/nova/tests/cloud_unittest.py
+++ b/nova/tests/cloud_unittest.py
@@ -47,10 +47,6 @@ class CloudTestCase(test.BaseTestCase):
# set up our cloud
self.cloud = cloud.CloudController()
- self.cloud_consumer = rpc.AdapterConsumer(connection=self.conn,
- topic=FLAGS.cloud_topic,
- proxy=self.cloud)
- self.injected.append(self.cloud_consumer.attach_to_tornado(self.ioloop))
# set up a service
self.compute = service.ComputeService()
@@ -132,7 +128,7 @@ class CloudTestCase(test.BaseTestCase):
'state': 0x01,
'user_data': ''
}
- rv = self.cloud._format_instances(self.context)
+ rv = self.cloud._format_describe_instances(self.context)
self.assert_(len(rv['reservationSet']) == 0)
# simulate launch of 5 instances
diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py
index 879ee02a4..00b0b97e7 100644
--- a/nova/tests/network_unittest.py
+++ b/nova/tests/network_unittest.py
@@ -15,7 +15,9 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-
+"""
+Unit Tests for network code
+"""
import IPy
import os
import logging
@@ -31,8 +33,10 @@ from nova.network.exception import NoMoreAddresses
FLAGS = flags.FLAGS
+
class NetworkTestCase(test.TrialTestCase):
- def setUp(self):
+ """Test cases for network code"""
+ def setUp(self): # pylint: disable-msg=C0103
super(NetworkTestCase, self).setUp()
# NOTE(vish): if you change these flags, make sure to change the
# flags in the corresponding section in nova-dhcpbridge
@@ -43,7 +47,6 @@ class NetworkTestCase(test.TrialTestCase):
network_size=32)
logging.getLogger().setLevel(logging.DEBUG)
self.manager = manager.AuthManager()
- self.dnsmasq = FakeDNSMasq()
self.user = self.manager.create_user('netuser', 'netuser', 'netuser')
self.projects = []
self.projects.append(self.manager.create_project('netuser',
@@ -54,47 +57,49 @@ class NetworkTestCase(test.TrialTestCase):
self.projects.append(self.manager.create_project(name,
'netuser',
name))
- self.network = model.PublicNetworkController()
+ vpn.NetworkData.create(self.projects[i].id)
self.service = service.VlanNetworkService()
- def tearDown(self):
+ def tearDown(self): # pylint: disable-msg=C0103
super(NetworkTestCase, self).tearDown()
for project in self.projects:
self.manager.delete_project(project)
self.manager.delete_user(self.user)
def test_public_network_allocation(self):
+ """Makes sure that we can allocaate a public ip"""
pubnet = IPy.IP(flags.FLAGS.public_range)
- address = self.network.allocate_ip(self.user.id, self.projects[0].id, "public")
+ address = self.service.allocate_elastic_ip(self.user.id,
+ self.projects[0].id)
self.assertTrue(IPy.IP(address) in pubnet)
- self.assertTrue(IPy.IP(address) in self.network.network)
def test_allocate_deallocate_fixed_ip(self):
- result = yield self.service.allocate_fixed_ip(
+ """Makes sure that we can allocate and deallocate a fixed ip"""
+ result = self.service.allocate_fixed_ip(
self.user.id, self.projects[0].id)
address = result['private_dns_name']
mac = result['mac_address']
- logging.debug("Was allocated %s" % (address))
net = model.get_project_network(self.projects[0].id, "default")
self.assertEqual(True, is_in_project(address, self.projects[0].id))
hostname = "test-host"
- self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name)
- rv = self.service.deallocate_fixed_ip(address)
+ issue_ip(mac, address, hostname, net.bridge_name)
+ self.service.deallocate_fixed_ip(address)
# Doesn't go away until it's dhcp released
self.assertEqual(True, is_in_project(address, self.projects[0].id))
- self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
+ release_ip(mac, address, hostname, net.bridge_name)
self.assertEqual(False, is_in_project(address, self.projects[0].id))
- def test_range_allocation(self):
- hostname = "test-host"
- result = yield self.service.allocate_fixed_ip(
- self.user.id, self.projects[0].id)
+ def test_side_effects(self):
+ """Ensures allocating and releasing has no side effects"""
+ hostname = "side-effect-host"
+ result = self.service.allocate_fixed_ip(self.user.id,
+ self.projects[0].id)
mac = result['mac_address']
address = result['private_dns_name']
- result = yield self.service.allocate_fixed_ip(
- self.user, self.projects[1].id)
+ result = self.service.allocate_fixed_ip(self.user,
+ self.projects[1].id)
secondmac = result['mac_address']
secondaddress = result['private_dns_name']
@@ -102,66 +107,74 @@ class NetworkTestCase(test.TrialTestCase):
secondnet = model.get_project_network(self.projects[1].id, "default")
self.assertEqual(True, is_in_project(address, self.projects[0].id))
- self.assertEqual(True, is_in_project(secondaddress, self.projects[1].id))
+ self.assertEqual(True, is_in_project(secondaddress,
+ self.projects[1].id))
self.assertEqual(False, is_in_project(address, self.projects[1].id))
# Addresses are allocated before they're issued
- self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name)
- self.dnsmasq.issue_ip(secondmac, secondaddress,
- hostname, secondnet.bridge_name)
+ issue_ip(mac, address, hostname, net.bridge_name)
+ issue_ip(secondmac, secondaddress, hostname, secondnet.bridge_name)
- rv = self.service.deallocate_fixed_ip(address)
- self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
+ self.service.deallocate_fixed_ip(address)
+ release_ip(mac, address, hostname, net.bridge_name)
self.assertEqual(False, is_in_project(address, self.projects[0].id))
# First address release shouldn't affect the second
- self.assertEqual(True, is_in_project(secondaddress, self.projects[1].id))
+ self.assertEqual(True, is_in_project(secondaddress,
+ self.projects[1].id))
- rv = self.service.deallocate_fixed_ip(secondaddress)
- self.dnsmasq.release_ip(secondmac, secondaddress,
- hostname, secondnet.bridge_name)
- self.assertEqual(False, is_in_project(secondaddress, self.projects[1].id))
+ self.service.deallocate_fixed_ip(secondaddress)
+ release_ip(secondmac, secondaddress, hostname, secondnet.bridge_name)
+ self.assertEqual(False, is_in_project(secondaddress,
+ self.projects[1].id))
def test_subnet_edge(self):
- result = yield self.service.allocate_fixed_ip(self.user.id,
+ """Makes sure that private ips don't overlap"""
+ result = self.service.allocate_fixed_ip(self.user.id,
self.projects[0].id)
firstaddress = result['private_dns_name']
hostname = "toomany-hosts"
- for i in range(1,5):
+ for i in range(1, 5):
project_id = self.projects[i].id
- result = yield self.service.allocate_fixed_ip(
+ result = self.service.allocate_fixed_ip(
self.user, project_id)
mac = result['mac_address']
address = result['private_dns_name']
- result = yield self.service.allocate_fixed_ip(
+ result = self.service.allocate_fixed_ip(
self.user, project_id)
mac2 = result['mac_address']
address2 = result['private_dns_name']
- result = yield self.service.allocate_fixed_ip(
+ result = self.service.allocate_fixed_ip(
self.user, project_id)
mac3 = result['mac_address']
address3 = result['private_dns_name']
- self.assertEqual(False, is_in_project(address, self.projects[0].id))
- self.assertEqual(False, is_in_project(address2, self.projects[0].id))
- self.assertEqual(False, is_in_project(address3, self.projects[0].id))
- rv = self.service.deallocate_fixed_ip(address)
- rv = self.service.deallocate_fixed_ip(address2)
- rv = self.service.deallocate_fixed_ip(address3)
net = model.get_project_network(project_id, "default")
- self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
- self.dnsmasq.release_ip(mac2, address2, hostname, net.bridge_name)
- self.dnsmasq.release_ip(mac3, address3, hostname, net.bridge_name)
+ issue_ip(mac, address, hostname, net.bridge_name)
+ issue_ip(mac2, address2, hostname, net.bridge_name)
+ issue_ip(mac3, address3, hostname, net.bridge_name)
+ self.assertEqual(False, is_in_project(address,
+ self.projects[0].id))
+ self.assertEqual(False, is_in_project(address2,
+ self.projects[0].id))
+ self.assertEqual(False, is_in_project(address3,
+ self.projects[0].id))
+ self.service.deallocate_fixed_ip(address)
+ self.service.deallocate_fixed_ip(address2)
+ self.service.deallocate_fixed_ip(address3)
+ release_ip(mac, address, hostname, net.bridge_name)
+ release_ip(mac2, address2, hostname, net.bridge_name)
+ release_ip(mac3, address3, hostname, net.bridge_name)
net = model.get_project_network(self.projects[0].id, "default")
- rv = self.service.deallocate_fixed_ip(firstaddress)
- self.dnsmasq.release_ip(mac, firstaddress, hostname, net.bridge_name)
+ self.service.deallocate_fixed_ip(firstaddress)
- def test_212_vpn_ip_and_port_looks_valid(self):
- vpn.NetworkData.create(self.projects[0].id)
+ def test_vpn_ip_and_port_looks_valid(self):
+ """Ensure the vpn ip and port are reasonable"""
self.assert_(self.projects[0].vpn_ip)
self.assert_(self.projects[0].vpn_port >= FLAGS.vpn_start_port)
self.assert_(self.projects[0].vpn_port <= FLAGS.vpn_end_port)
def test_too_many_vpns(self):
+ """Ensure error is raised if we run out of vpn ports"""
vpns = []
for i in xrange(vpn.NetworkData.num_ports_for_ip(FLAGS.vpn_ip)):
vpns.append(vpn.NetworkData.create("vpnuser%s" % i))
@@ -169,84 +182,115 @@ class NetworkTestCase(test.TrialTestCase):
for network_datum in vpns:
network_datum.destroy()
- def test_release_before_deallocate(self):
- pass
+ def test_ips_are_reused(self):
+ """Makes sure that ip addresses that are deallocated get reused"""
+ net = model.get_project_network(self.projects[0].id, "default")
- def test_deallocate_before_issued(self):
- pass
+ hostname = "reuse-host"
+ macs = {}
+ addresses = {}
+ num_available_ips = net.num_available_ips
+ for i in range(num_available_ips - 1):
+ result = self.service.allocate_fixed_ip(self.user.id,
+ self.projects[0].id)
+ macs[i] = result['mac_address']
+ addresses[i] = result['private_dns_name']
+ issue_ip(macs[i], addresses[i], hostname, net.bridge_name)
- def test_too_many_addresses(self):
- """
- Here, we test that a proper NoMoreAddresses exception is raised.
+ result = self.service.allocate_fixed_ip(self.user.id,
+ self.projects[0].id)
+ mac = result['mac_address']
+ address = result['private_dns_name']
- However, the number of available IP addresses depends on the test
- environment's setup.
+ issue_ip(mac, address, hostname, net.bridge_name)
+ self.service.deallocate_fixed_ip(address)
+ release_ip(mac, address, hostname, net.bridge_name)
- Network size is set in test fixture's setUp method.
+ result = self.service.allocate_fixed_ip(
+ self.user, self.projects[0].id)
+ secondmac = result['mac_address']
+ secondaddress = result['private_dns_name']
+ self.assertEqual(address, secondaddress)
+ issue_ip(secondmac, secondaddress, hostname, net.bridge_name)
+ self.service.deallocate_fixed_ip(secondaddress)
+ release_ip(secondmac, secondaddress, hostname, net.bridge_name)
+
+ for i in range(len(addresses)):
+ self.service.deallocate_fixed_ip(addresses[i])
+ release_ip(macs[i], addresses[i], hostname, net.bridge_name)
+
+ def test_available_ips(self):
+ """Make sure the number of available ips for the network is correct
- There are FLAGS.cnt_vpn_clients addresses reserved for VPN (NUM_RESERVED_VPN_IPS)
+ The number of available IP addresses depends on the test
+ environment's setup.
- And there are NUM_STATIC_IPS that are always reserved by Nova for the necessary
- services (gateway, CloudPipe, etc)
+ Network size is set in test fixture's setUp method.
- So we should get flags.network_size - (NUM_STATIC_IPS +
- NUM_PREALLOCATED_IPS +
- NUM_RESERVED_VPN_IPS)
- usable addresses
+ There are ips reserved at the bottom and top of the range for
+ services (network, gateway, CloudPipe, broadcast).
"""
net = model.get_project_network(self.projects[0].id, "default")
+ num_preallocated_ips = len(net.assigned)
+ net_size = flags.FLAGS.network_size
+ num_available_ips = net_size - (net.num_bottom_reserved_ips +
+ num_preallocated_ips +
+ net.num_top_reserved_ips)
+ self.assertEqual(num_available_ips, net.num_available_ips)
- # Determine expected number of available IP addresses
- num_static_ips = net.num_static_ips
- num_preallocated_ips = len(net.hosts.keys())
- num_reserved_vpn_ips = flags.FLAGS.cnt_vpn_clients
- num_available_ips = flags.FLAGS.network_size - (num_static_ips +
- num_preallocated_ips +
- num_reserved_vpn_ips)
+ def test_too_many_addresses(self):
+ """Test for a NoMoreAddresses exception when all fixed ips are used.
+ """
+ net = model.get_project_network(self.projects[0].id, "default")
hostname = "toomany-hosts"
macs = {}
addresses = {}
- for i in range(0, (num_available_ips - 1)):
- result = yield self.service.allocate_fixed_ip(self.user.id, self.projects[0].id)
+ num_available_ips = net.num_available_ips
+ for i in range(num_available_ips):
+ result = self.service.allocate_fixed_ip(self.user.id,
+ self.projects[0].id)
macs[i] = result['mac_address']
addresses[i] = result['private_dns_name']
- self.dnsmasq.issue_ip(macs[i], addresses[i], hostname, net.bridge_name)
+ issue_ip(macs[i], addresses[i], hostname, net.bridge_name)
+
+ self.assertEqual(net.num_available_ips, 0)
+ self.assertRaises(NoMoreAddresses, self.service.allocate_fixed_ip,
+ self.user.id, self.projects[0].id)
- self.assertFailure(self.service.allocate_fixed_ip(self.user.id, self.projects[0].id), NoMoreAddresses)
+ for i in range(len(addresses)):
+ self.service.deallocate_fixed_ip(addresses[i])
+ release_ip(macs[i], addresses[i], hostname, net.bridge_name)
+ self.assertEqual(net.num_available_ips, num_available_ips)
- for i in range(0, (num_available_ips - 1)):
- rv = self.service.deallocate_fixed_ip(addresses[i])
- self.dnsmasq.release_ip(macs[i], addresses[i], hostname, net.bridge_name)
def is_in_project(address, project_id):
- return address in model.get_project_network(project_id).list_addresses()
+ """Returns true if address is in specified project"""
+ return address in model.get_project_network(project_id).assigned
-def _get_project_addresses(project_id):
- project_addresses = []
- for addr in model.get_project_network(project_id).list_addresses():
- project_addresses.append(addr)
- return project_addresses
def binpath(script):
+ """Returns the absolute path to a script in bin"""
return os.path.abspath(os.path.join(__file__, "../../../bin", script))
-class FakeDNSMasq(object):
- def issue_ip(self, mac, ip, hostname, interface):
- cmd = "%s add %s %s %s" % (binpath('nova-dhcpbridge'),
- mac, ip, hostname)
- env = {'DNSMASQ_INTERFACE': interface,
- 'TESTING' : '1',
- 'FLAGFILE' : FLAGS.dhcpbridge_flagfile}
- (out, err) = utils.execute(cmd, addl_env=env)
- logging.debug("ISSUE_IP: %s, %s " % (out, err))
-
- def release_ip(self, mac, ip, hostname, interface):
- cmd = "%s del %s %s %s" % (binpath('nova-dhcpbridge'),
- mac, ip, hostname)
- env = {'DNSMASQ_INTERFACE': interface,
- 'TESTING' : '1',
- 'FLAGFILE' : FLAGS.dhcpbridge_flagfile}
- (out, err) = utils.execute(cmd, addl_env=env)
- logging.debug("RELEASE_IP: %s, %s " % (out, err))
+def issue_ip(mac, private_ip, hostname, interface):
+ """Run add command on dhcpbridge"""
+ cmd = "%s add %s %s %s" % (binpath('nova-dhcpbridge'),
+ mac, private_ip, hostname)
+ env = {'DNSMASQ_INTERFACE': interface,
+ 'TESTING': '1',
+ 'FLAGFILE': FLAGS.dhcpbridge_flagfile}
+ (out, err) = utils.execute(cmd, addl_env=env)
+ logging.debug("ISSUE_IP: %s, %s ", out, err)
+
+
+def release_ip(mac, private_ip, hostname, interface):
+ """Run del command on dhcpbridge"""
+ cmd = "%s del %s %s %s" % (binpath('nova-dhcpbridge'),
+ mac, private_ip, hostname)
+ env = {'DNSMASQ_INTERFACE': interface,
+ 'TESTING': '1',
+ 'FLAGFILE': FLAGS.dhcpbridge_flagfile}
+ (out, err) = utils.execute(cmd, addl_env=env)
+ logging.debug("RELEASE_IP: %s, %s ", out, err)
diff --git a/nova/tests/process_unittest.py b/nova/tests/process_unittest.py
index 75187e1fc..25c60c616 100644
--- a/nova/tests/process_unittest.py
+++ b/nova/tests/process_unittest.py
@@ -48,7 +48,7 @@ class ProcessTestCase(test.TrialTestCase):
def test_execute_stderr(self):
pool = process.ProcessPool(2)
- d = pool.simple_execute('cat BAD_FILE', error_ok=1)
+ d = pool.simple_execute('cat BAD_FILE', check_exit_code=False)
def _check(rv):
self.assertEqual(rv[0], '')
self.assert_('No such file' in rv[1])
diff --git a/nova/tests/rpc_unittest.py b/nova/tests/rpc_unittest.py
new file mode 100644
index 000000000..e12a28fbc
--- /dev/null
+++ b/nova/tests/rpc_unittest.py
@@ -0,0 +1,85 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Unit Tests for remote procedure calls using queue
+"""
+import logging
+
+from twisted.internet import defer
+
+from nova import flags
+from nova import rpc
+from nova import test
+
+
+FLAGS = flags.FLAGS
+
+
+class RpcTestCase(test.BaseTestCase):
+ """Test cases for rpc"""
+ def setUp(self): # pylint: disable-msg=C0103
+ super(RpcTestCase, self).setUp()
+ self.conn = rpc.Connection.instance()
+ self.receiver = TestReceiver()
+ self.consumer = rpc.AdapterConsumer(connection=self.conn,
+ topic='test',
+ proxy=self.receiver)
+
+ self.injected.append(self.consumer.attach_to_tornado(self.ioloop))
+
+ def test_call_succeed(self):
+ """Get a value through rpc call"""
+ value = 42
+ result = yield rpc.call('test', {"method": "echo",
+ "args": {"value": value}})
+ self.assertEqual(value, result)
+
+ def test_call_exception(self):
+ """Test that exception gets passed back properly
+
+ rpc.call fails with a RemoteError. The value of the
+ exception is converted to a string, so we convert it back
+ to an int in the test.
+ """
+ value = 42
+ self.assertFailure(rpc.call('test', {"method": "fail",
+ "args": {"value": value}}),
+ rpc.RemoteError)
+ try:
+ yield rpc.call('test', {"method": "fail",
+ "args": {"value": value}})
+ self.fail("should have thrown rpc.RemoteError")
+ except rpc.RemoteError as exc:
+ self.assertEqual(int(exc.value), value)
+
+
+class TestReceiver(object):
+ """Simple Proxy class so the consumer has methods to call
+
+ Uses static methods because we aren't actually storing any state"""
+
+ @staticmethod
+ def echo(value):
+ """Simply returns whatever value is sent in"""
+ logging.debug("Received %s", value)
+ return defer.succeed(value)
+
+ @staticmethod
+ def fail(value):
+ """Raises an exception with the value sent in"""
+ raise Exception(value)
diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py
new file mode 100644
index 000000000..2aab16809
--- /dev/null
+++ b/nova/tests/virt_unittest.py
@@ -0,0 +1,69 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2010 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import flags
+from nova import test
+from nova.virt import libvirt_conn
+
+FLAGS = flags.FLAGS
+
+
+class LibvirtConnTestCase(test.TrialTestCase):
+ def test_get_uri_and_template(self):
+ class MockDataModel(object):
+ def __init__(self):
+ self.datamodel = { 'name' : 'i-cafebabe',
+ 'memory_kb' : '1024000',
+ 'basepath' : '/some/path',
+ 'bridge_name' : 'br100',
+ 'mac_address' : '02:12:34:46:56:67',
+ 'vcpus' : 2 }
+
+ type_uri_map = { 'qemu' : ('qemu:///system',
+ [lambda s: '<domain type=\'qemu\'>' in s,
+ lambda s: 'type>hvm</type' in s,
+ lambda s: 'emulator>/usr/bin/kvm' not in s]),
+ 'kvm' : ('qemu:///system',
+ [lambda s: '<domain type=\'kvm\'>' in s,
+ lambda s: 'type>hvm</type' in s,
+ lambda s: 'emulator>/usr/bin/qemu<' not in s]),
+ 'uml' : ('uml:///system',
+ [lambda s: '<domain type=\'uml\'>' in s,
+ lambda s: 'type>uml</type' in s]),
+ }
+
+ for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
+ FLAGS.libvirt_type = libvirt_type
+ conn = libvirt_conn.LibvirtConnection(True)
+
+ uri, template = conn.get_uri_and_template()
+ self.assertEquals(uri, expected_uri)
+
+ for i, check in enumerate(checks):
+ xml = conn.toXml(MockDataModel())
+ self.assertTrue(check(xml), '%s failed check %d' % (xml, i))
+
+ # Deliberately not just assigning this string to FLAGS.libvirt_uri and
+ # checking against that later on. This way we make sure the
+ # implementation doesn't fiddle around with the FLAGS.
+ testuri = 'something completely different'
+ FLAGS.libvirt_uri = testuri
+ for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
+ FLAGS.libvirt_type = libvirt_type
+ conn = libvirt_conn.LibvirtConnection(True)
+ uri, template = conn.get_uri_and_template()
+ self.assertEquals(uri, testuri)
+
diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py
index 0f4f0e34d..2a07afe69 100644
--- a/nova/tests/volume_unittest.py
+++ b/nova/tests/volume_unittest.py
@@ -17,6 +17,10 @@
# under the License.
import logging
+import shutil
+import tempfile
+
+from twisted.internet import defer
from nova import compute
from nova import exception
@@ -34,10 +38,16 @@ class VolumeTestCase(test.TrialTestCase):
super(VolumeTestCase, self).setUp()
self.compute = compute.service.ComputeService()
self.volume = None
+ self.tempdir = tempfile.mkdtemp()
self.flags(connection_type='fake',
- fake_storage=True)
+ fake_storage=True,
+ aoe_export_dir=self.tempdir)
self.volume = volume_service.VolumeService()
+ def tearDown(self):
+ shutil.rmtree(self.tempdir)
+
+ @defer.inlineCallbacks
def test_run_create_volume(self):
vol_size = '0'
user_id = 'fake'
@@ -48,34 +58,40 @@ class VolumeTestCase(test.TrialTestCase):
volume_service.get_volume(volume_id)['volume_id'])
rv = self.volume.delete_volume(volume_id)
- self.assertFailure(volume_service.get_volume(volume_id),
- exception.Error)
+ self.assertRaises(exception.Error, volume_service.get_volume, volume_id)
+ @defer.inlineCallbacks
def test_too_big_volume(self):
vol_size = '1001'
user_id = 'fake'
project_id = 'fake'
- self.assertRaises(TypeError,
- self.volume.create_volume,
- vol_size, user_id, project_id)
+ try:
+ yield self.volume.create_volume(vol_size, user_id, project_id)
+ self.fail("Should have thrown TypeError")
+ except TypeError:
+ pass
+ @defer.inlineCallbacks
def test_too_many_volumes(self):
vol_size = '1'
user_id = 'fake'
project_id = 'fake'
num_shelves = FLAGS.last_shelf_id - FLAGS.first_shelf_id + 1
- total_slots = FLAGS.slots_per_shelf * num_shelves
+ total_slots = FLAGS.blades_per_shelf * num_shelves
vols = []
+ from nova import datastore
+ redis = datastore.Redis.instance()
for i in xrange(total_slots):
vid = yield self.volume.create_volume(vol_size, user_id, project_id)
vols.append(vid)
self.assertFailure(self.volume.create_volume(vol_size,
user_id,
project_id),
- volume_service.NoMoreVolumes)
+ volume_service.NoMoreBlades)
for id in vols:
yield self.volume.delete_volume(id)
+ @defer.inlineCallbacks
def test_run_attach_detach_volume(self):
# Create one volume and one compute to test with
instance_id = "storage-test"
@@ -84,22 +100,26 @@ class VolumeTestCase(test.TrialTestCase):
project_id = 'fake'
mountpoint = "/dev/sdf"
volume_id = yield self.volume.create_volume(vol_size, user_id, project_id)
-
volume_obj = volume_service.get_volume(volume_id)
volume_obj.start_attach(instance_id, mountpoint)
- rv = yield self.compute.attach_volume(volume_id,
- instance_id,
- mountpoint)
+ if FLAGS.fake_tests:
+ volume_obj.finish_attach()
+ else:
+ rv = yield self.compute.attach_volume(instance_id,
+ volume_id,
+ mountpoint)
self.assertEqual(volume_obj['status'], "in-use")
- self.assertEqual(volume_obj['attachStatus'], "attached")
+ self.assertEqual(volume_obj['attach_status'], "attached")
self.assertEqual(volume_obj['instance_id'], instance_id)
self.assertEqual(volume_obj['mountpoint'], mountpoint)
- self.assertRaises(exception.Error,
- self.volume.delete_volume,
- volume_id)
-
- rv = yield self.volume.detach_volume(volume_id)
+ self.assertFailure(self.volume.delete_volume(volume_id), exception.Error)
+ volume_obj.start_detach()
+ if FLAGS.fake_tests:
+ volume_obj.finish_detach()
+ else:
+ rv = yield self.volume.detach_volume(instance_id,
+ volume_id)
volume_obj = volume_service.get_volume(volume_id)
self.assertEqual(volume_obj['status'], "available")
@@ -108,6 +128,27 @@ class VolumeTestCase(test.TrialTestCase):
volume_service.get_volume,
volume_id)
+ @defer.inlineCallbacks
+ def test_multiple_volume_race_condition(self):
+ vol_size = "5"
+ user_id = "fake"
+ project_id = 'fake'
+ shelf_blades = []
+ def _check(volume_id):
+ vol = volume_service.get_volume(volume_id)
+ shelf_blade = '%s.%s' % (vol['shelf_id'], vol['blade_id'])
+ self.assert_(shelf_blade not in shelf_blades)
+ shelf_blades.append(shelf_blade)
+ logging.debug("got %s" % shelf_blade)
+ vol.destroy()
+ deferreds = []
+ for i in range(5):
+ d = self.volume.create_volume(vol_size, user_id, project_id)
+ d.addCallback(_check)
+ d.addErrback(self.fail)
+ deferreds.append(d)
+ yield defer.DeferredList(deferreds)
+
def test_multi_node(self):
# TODO(termie): Figure out how to test with two nodes,
# each of them having a different FLAG for storage_node
diff --git a/nova/twistd.py b/nova/twistd.py
index c83276daa..9511c231c 100644
--- a/nova/twistd.py
+++ b/nova/twistd.py
@@ -21,6 +21,7 @@ Twisted daemon helpers, specifically to parse out gFlags from twisted flags,
manage pid files and support syslogging.
"""
+import gflags
import logging
import os
import signal
@@ -49,6 +50,14 @@ class TwistdServerOptions(ServerOptions):
return
+class FlagParser(object):
+ def __init__(self, parser):
+ self.parser = parser
+
+ def Parse(self, s):
+ return self.parser(s)
+
+
def WrapTwistedOptions(wrapped):
class TwistedOptionsToFlags(wrapped):
subCommands = None
@@ -79,7 +88,12 @@ def WrapTwistedOptions(wrapped):
reflect.accumulateClassList(self.__class__, 'optParameters', twistd_params)
for param in twistd_params:
key = param[0].replace('-', '_')
- flags.DEFINE_string(key, param[2], str(param[-1]))
+ if len(param) > 4:
+ flags.DEFINE(FlagParser(param[4]),
+ key, param[2], str(param[3]),
+ serializer=gflags.ArgumentSerializer())
+ else:
+ flags.DEFINE_string(key, param[2], str(param[3]))
def _absorbHandlers(self):
twistd_handlers = {}
@@ -241,15 +255,7 @@ def serve(filename):
print 'usage: %s [options] [start|stop|restart]' % argv[0]
sys.exit(1)
- class NoNewlineFormatter(logging.Formatter):
- """Strips newlines from default formatter"""
- def format(self, record):
- """Grabs default formatter's output and strips newlines"""
- data = logging.Formatter.format(self, record)
- return data.replace("\n", "--")
-
- # NOTE(vish): syslog-ng doesn't handle newlines from trackbacks very well
- formatter = NoNewlineFormatter(
+ formatter = logging.Formatter(
'(%(name)s): %(levelname)s %(message)s')
handler = logging.StreamHandler(log.StdioOnnaStick())
handler.setFormatter(formatter)
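
FlagParser adapts a plain callable to the parser interface gflags expects, so a twisted optParameter can now carry a coercion function as its optional fifth element. A sketch of an options list exercising both branches (the parameters themselves are illustrative):

    # Illustrative optParameters; the fifth element, when present, is
    # wrapped in FlagParser and handed to flags.DEFINE.
    optParameters = [
        ['pidfile', 'p', None, 'Path to the pid file.'],       # string flag
        ['poll-interval', None, '5', 'Poll interval.', int],   # parsed as int
    ]
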
diff --git a/nova/utils.py b/nova/utils.py
index 0b23de7cd..ef8405fc0 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -20,7 +20,7 @@
System-level utilities and helper functions.
"""
-from datetime import datetime, timedelta
+import datetime
import inspect
import logging
import os
@@ -29,12 +29,16 @@ import subprocess
import socket
import sys
+from twisted.internet.threads import deferToThread
+
from nova import exception
from nova import flags
+
FLAGS = flags.FLAGS
TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
+
def import_class(import_str):
"""Returns a class from a string including module and class"""
mod_str, _sep, class_str = import_str.rpartition('.')
@@ -44,6 +48,7 @@ def import_class(import_str):
except (ImportError, ValueError, AttributeError):
raise exception.NotFound('Class %s cannot be found' % class_str)
+
def fetchfile(url, target):
logging.debug("Fetching %s" % url)
# c = pycurl.Curl()
@@ -53,22 +58,25 @@ def fetchfile(url, target):
# c.perform()
# c.close()
# fp.close()
- execute("curl %s -o %s" % (url, target))
+ execute("curl --fail %s -o %s" % (url, target))
-def execute(cmd, input=None, addl_env=None):
+def execute(cmd, process_input=None, addl_env=None, check_exit_code=True):
env = os.environ.copy()
if addl_env:
env.update(addl_env)
obj = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
result = None
- if input != None:
- result = obj.communicate(input)
+ if process_input != None:
+ result = obj.communicate(process_input)
else:
result = obj.communicate()
obj.stdin.close()
if obj.returncode:
logging.debug("Result was %s" % (obj.returncode))
+ if check_exit_code and obj.returncode != 0:
+ raise Exception("Unexpected exit code: %s. result=%s"
+ % (obj.returncode, result))
return result
@@ -94,9 +102,13 @@ def debug(arg):
return arg
-def runthis(prompt, cmd):
+def runthis(prompt, cmd, check_exit_code=True):
logging.debug("Running %s" % (cmd))
- logging.debug(prompt % (subprocess.call(cmd.split(" "))))
+ exit_code = subprocess.call(cmd.split(" "))
+ logging.debug(prompt % (exit_code))
+ if check_exit_code and exit_code != 0:
+ raise Exception("Unexpected exit code: %s from cmd: %s"
+ % (exit_code, cmd))
def generate_uid(topic, size=8):
@@ -119,16 +131,28 @@ def get_my_ip():
'''
if getattr(FLAGS, 'fake_tests', None):
return '127.0.0.1'
- csock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- csock.connect(('www.google.com', 80))
- (addr, port) = csock.getsockname()
- csock.close()
- return addr
+ try:
+ csock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ csock.connect(('www.google.com', 80))
+ (addr, port) = csock.getsockname()
+ csock.close()
+ return addr
+ except socket.gaierror as ex:
+ logging.warn("Couldn't get IP, using 127.0.0.1 %s", ex)
+ return "127.0.0.1"
+
def isotime(at=None):
if not at:
- at = datetime.utcnow()
+ at = datetime.datetime.utcnow()
return at.strftime(TIME_FORMAT)
+
def parse_isotime(timestr):
- return datetime.strptime(timestr, TIME_FORMAT)
+ return datetime.datetime.strptime(timestr, TIME_FORMAT)
+
+
+def deferredToThread(f):
+ def g(*args, **kwargs):
+ return deferToThread(f, *args, **kwargs)
+ return g
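
The deferredToThread decorator is a thin wrapper over twisted's deferToThread, letting a blocking function be called in reactor-friendly style. A minimal sketch (the wrapped function is illustrative):

    import socket

    from nova import utils


    @utils.deferredToThread
    def slow_lookup(hostname):
        # The blocking call runs on a worker thread, not the reactor thread.
        return socket.gethostbyname(hostname)

    # slow_lookup('example.org') returns a Deferred that fires with the
    # address once the thread completes.
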
diff --git a/nova/validate.py b/nova/validate.py
index a69306fad..21f4ed286 100644
--- a/nova/validate.py
+++ b/nova/validate.py
@@ -57,6 +57,7 @@ def rangetest(**argchecks): # validate ranges for both+defaults
return onCall
return onDecorator
+
def typetest(**argchecks):
def onDecorator(func):
import sys
diff --git a/nova/virt/connection.py b/nova/virt/connection.py
index 004adb19d..90bc7fa0a 100644
--- a/nova/virt/connection.py
+++ b/nova/virt/connection.py
@@ -27,6 +27,15 @@ FLAGS = flags.FLAGS
def get_connection(read_only=False):
+ """Returns an object representing the connection to a virtualization
+ platform. This could be nova.virt.fake.FakeConnection in test mode,
+ a connection to KVM or QEMU via libvirt, or a connection to XenServer
+ or Xen Cloud Platform via XenAPI.
+
+ Any object returned here must conform to the interface documented by
+ FakeConnection.
+ """
+
# TODO(termie): maybe lazy load after initial check for permissions
# TODO(termie): check whether we can be disconnected
t = FLAGS.connection_type
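
A short sketch of how the flag selects the backend (the flag value is illustrative; 'fake' is the test-mode backend documented below):

    # Illustrative: connection_type is normally set via flags/flagfile.
    from nova import flags
    from nova.virt import connection

    flags.FLAGS.connection_type = 'fake'    # or 'libvirt' / 'xenapi'
    conn = connection.get_connection(read_only=True)
    # conn conforms to the interface documented by FakeConnection below.
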
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index d9ae5ac96..155833f3f 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -19,10 +19,13 @@
"""
A fake (in-memory) hypervisor+api. Allows nova testing w/o a hypervisor.
+This module also documents the semantics of real hypervisor connections.
"""
import logging
+from twisted.internet import defer
+
from nova.compute import power_state
@@ -32,6 +35,38 @@ def get_connection(_):
class FakeConnection(object):
+ """
+ The interface to this class talks in terms of 'instances' (Amazon EC2 and
+ internal Nova terminology), by which we mean 'running virtual machine'
+ (XenAPI terminology) or domain (Xen or libvirt terminology).
+
+ An instance has an ID, which is the identifier chosen by Nova to represent
+ the instance further up the stack. This is unfortunately also called a
+ 'name' elsewhere. As far as this layer is concerned, 'instance ID' and
+ 'instance name' are synonyms.
+
+ Note that the instance ID or name is not human-readable or
+ customer-controlled -- it's an internal ID chosen by Nova. At the
+ nova.virt layer, instances do not have human-readable names at all -- such
+ things are only known higher up the stack.
+
+ Most virtualization platforms will also have their own identity schemes,
+ to uniquely identify a VM or domain. These IDs must stay internal to the
+ platform-specific layer, and never escape the connection interface. The
+ platform-specific layer is responsible for keeping track of which instance
+ ID maps to which platform-specific ID, and vice versa.
+
+ In contrast, the list_disks and list_interfaces calls may return
+ platform-specific IDs. These identify a specific virtual disk or specific
+ virtual network interface, and these IDs are opaque to the rest of Nova.
+
+ Some methods here take an instance of nova.compute.service.Instance. This
+ is the data structure used by nova.compute to store details regarding an
+ instance, and pass them into this layer. This layer is responsible for
+ translating that generic data structure into terms that are specific to the
+ virtualization platform.
+ """
+
def __init__(self):
self.instances = {}
@@ -42,20 +77,70 @@ class FakeConnection(object):
return cls._instance
def list_instances(self):
+ """
+ Return the names of all the instances known to the virtualization
+ layer, as a list.
+ """
return self.instances.keys()
def spawn(self, instance):
+ """
+ Create a new instance/VM/domain on the virtualization platform.
+
+ The given parameter is an instance of nova.compute.service.Instance.
+ This function should use the data there to guide the creation of
+ the new instance.
+
+ The work will be done asynchronously. This function returns a
+ Deferred that allows the caller to detect when it is complete.
+
+ Once this successfully completes, the instance should be
+ running (power_state.RUNNING).
+
+ If this fails, any partial instance should be completely
+ cleaned up, and the virtualization platform should be in the state
+ that it was before this call began.
+ """
+
fake_instance = FakeInstance()
self.instances[instance.name] = fake_instance
fake_instance._state = power_state.RUNNING
+ return defer.succeed(None)
def reboot(self, instance):
- pass
-
+ """
+ Reboot the specified instance.
+
+ The given parameter is an instance of nova.compute.service.Instance,
+ and so the instance is being specified as instance.name.
+
+ The work will be done asynchronously. This function returns a
+ Deferred that allows the caller to detect when it is complete.
+ """
+ return defer.succeed(None)
+
def destroy(self, instance):
+ """
+ Destroy (shutdown and delete) the specified instance.
+
+ The given parameter is an instance of nova.compute.service.Instance,
+ and so the instance is being specified as instance.name.
+
+ The work will be done asynchronously. This function returns a
+ Deferred that allows the caller to detect when it is complete.
+ """
del self.instances[instance.name]
+ return defer.succeed(None)
def get_info(self, instance_id):
+ """
+ Get a block of information about the given instance. This is
+ returned as a dictionary with the keys: 'state' (the power_state of
+ the instance), 'max_mem' (the maximum memory of the instance, in
+ KiB), 'mem' (the instance's current memory, in KiB), 'num_cpu' (the
+ instance's current number of virtual CPUs), and 'cpu_time' (the total
+ CPU time used by the instance, in nanoseconds).
+ """
i = self.instances[instance_id]
return {'state': i._state,
'max_mem': 0,
@@ -64,15 +149,70 @@ class FakeConnection(object):
'cpu_time': 0}
def list_disks(self, instance_id):
+ """
+ Return the IDs of all the virtual disks attached to the specified
+ instance, as a list. These IDs are opaque to the caller (they are
+ only useful for giving back to this layer as a parameter to
+ disk_stats). These IDs only need to be unique for a given instance.
+
+ Note that this function takes an instance ID, not a
+ compute.service.Instance, so that it can be called by compute.monitor.
+ """
return ['A_DISK']
def list_interfaces(self, instance_id):
+ """
+ Return the IDs of all the virtual network interfaces attached to the
+ specified instance, as a list. These IDs are opaque to the caller
+ (they are only useful for giving back to this layer as a parameter to
+ interface_stats). These IDs only need to be unique for a given
+ instance.
+
+ Note that this function takes an instance ID, not a
+ compute.service.Instance, so that it can be called by compute.monitor.
+ """
return ['A_VIF']
def block_stats(self, instance_id, disk_id):
+ """
+ Return performance counters associated with the given disk_id on the
+ given instance_id. These are returned as [rd_req, rd_bytes, wr_req,
+ wr_bytes, errs], where rd indicates read, wr indicates write, req is
+ the total number of I/O requests made, bytes is the total number of
+ bytes transferred, and errs is the number of requests held up due to a
+ full pipeline.
+
+ All counters are long integers.
+
+ This method is optional. On some platforms (e.g. XenAPI) performance
+ statistics can be retrieved directly in aggregate form, without Nova
+ having to do the aggregation. On those platforms, this method is
+ unused.
+
+ Note that this function takes an instance ID, not a
+ compute.service.Instance, so that it can be called by compute.monitor.
+ """
return [0L, 0L, 0L, 0L, 0L]
def interface_stats(self, instance_id, iface_id):
+ """
+ Return performance counters associated with the given iface_id on the
+ given instance_id. These are returned as [rx_bytes, rx_packets,
+ rx_errs, rx_drop, tx_bytes, tx_packets, tx_errs, tx_drop], where rx
+ indicates receive, tx indicates transmit, bytes and packets indicate
+ the total number of bytes or packets transferred, and errs and dropped
+ is the total number of packets failed / dropped.
+
+ All counters are long integers.
+
+ This method is optional. On some platforms (e.g. XenAPI) performance
+ statistics can be retrieved directly in aggregate form, without Nova
+ having to do the aggregation. On those platforms, this method is
+ unused.
+
+ Note that this function takes an instance ID, not a
+ compute.service.Instance, so that it can be called by compute.monitor.
+ """
return [0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L]
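
Since FakeConnection doubles as the interface documentation, a brief sketch of driving it end to end (the stub stands in for nova.compute.service.Instance and is illustrative):

    from nova.compute import power_state
    from nova.virt import fake


    class StubInstance(object):
        """Stand-in carrying only the attribute FakeConnection uses."""
        name = 'i-abcdef'

    conn = fake.get_connection(None)
    d = conn.spawn(StubInstance())       # Deferred; instance now RUNNING
    assert conn.list_instances() == ['i-abcdef']
    assert conn.get_info('i-abcdef')['state'] == power_state.RUNNING
    conn.destroy(StubInstance())         # Deferred; instance removed
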
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 48a87b514..a60bcc4c1 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -23,14 +23,15 @@ Handling of VM disk images.
import os.path
import time
+import urlparse
from nova import flags
from nova import process
-from nova.auth import signer
from nova.auth import manager
+from nova.auth import signer
-FLAGS = flags.FLAGS
+FLAGS = flags.FLAGS
flags.DEFINE_bool('use_s3', True,
'whether to get images from s3 or use local copy')
@@ -42,8 +43,9 @@ def fetch(image, path, user, project):
f = _fetch_local_image
return f(image, path, user, project)
+
def _fetch_s3_image(image, path, user, project):
- url = _image_url('%s/image' % image)
+ url = image_url(image)
# This should probably move somewhere else, like e.g. a download_as
# method on User objects and at the same time get rewritten to use
@@ -51,26 +53,30 @@ def _fetch_s3_image(image, path, user, project):
headers = {}
headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
- uri = '/' + url.partition('/')[2]
+ (_, _, url_path, _, _, _) = urlparse.urlparse(url)
access = manager.AuthManager().get_access_key(user, project)
signature = signer.Signer(user.secret.encode()).s3_authorization(headers,
'GET',
- uri)
+ url_path)
headers['Authorization'] = 'AWS %s:%s' % (access, signature)
- cmd = ['/usr/bin/curl', '--silent', url]
+ cmd = ['/usr/bin/curl', '--fail', '--silent', url]
for (k,v) in headers.iteritems():
cmd += ['-H', '%s: %s' % (k,v)]
cmd += ['-o', path]
return process.SharedPool().execute(executable=cmd[0], args=cmd[1:])
+
def _fetch_local_image(image, path, user, project):
source = _image_path('%s/image' % image)
return process.simple_execute('cp %s %s' % (source, path))
+
def _image_path(path):
return os.path.join(FLAGS.images_path, path)
-def _image_url(path):
- return "%s:%s/_images/%s" % (FLAGS.s3_host, FLAGS.s3_port, path)
+
+def image_url(image):
+ return "http://%s:%s/_images/%s/image" % (FLAGS.s3_host, FLAGS.s3_port,
+ image)
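
image_url now returns a complete URL, scheme and trailing '/image' component included, where _image_url previously produced only host:port and path. For example (host and port values are illustrative):

    # Within nova.virt.images; s3_host and s3_port come from flags.
    FLAGS.s3_host = '10.0.0.1'
    FLAGS.s3_port = 3333
    image_url('ami-tiny')
    # -> 'http://10.0.0.1:3333/_images/ami-tiny/image'
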
diff --git a/nova/compute/interfaces.template b/nova/virt/interfaces.template
index 11df301f6..11df301f6 100644
--- a/nova/compute/interfaces.template
+++ b/nova/virt/interfaces.template
diff --git a/nova/compute/libvirt.xml.template b/nova/virt/libvirt.qemu.xml.template
index 307f9d03a..307f9d03a 100644
--- a/nova/compute/libvirt.xml.template
+++ b/nova/virt/libvirt.qemu.xml.template
diff --git a/nova/virt/libvirt.uml.xml.template b/nova/virt/libvirt.uml.xml.template
new file mode 100644
index 000000000..6f4290f98
--- /dev/null
+++ b/nova/virt/libvirt.uml.xml.template
@@ -0,0 +1,25 @@
+<domain type='%(type)s'>
+ <name>%(name)s</name>
+ <memory>%(memory_kb)s</memory>
+ <os>
+ <type>%(type)s</type>
+ <kernel>/usr/bin/linux</kernel>
+ <root>/dev/ubda1</root>
+ </os>
+ <devices>
+ <disk type='file'>
+ <source file='%(basepath)s/disk'/>
+ <target dev='ubd0' bus='uml'/>
+ </disk>
+ <interface type='bridge'>
+ <source bridge='%(bridge_name)s'/>
+ <mac address='%(mac_address)s'/>
+ </interface>
+ <console type="pty" />
+ <serial type="file">
+ <source path='%(basepath)s/console.log'/>
+ <target port='1'/>
+ </serial>
+ </devices>
+ <nova>%(nova)s</nova>
+</domain>
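
The template is filled with plain Python %-interpolation over the instance datamodel, as LibvirtConnection.toXml does below; a reduced sketch with illustrative values:

    # Reduced sketch of the substitution toXml performs.
    template = open('nova/virt/libvirt.uml.xml.template').read()
    xml = template % {'type': 'uml',
                      'name': 'i-cafebabe',
                      'memory_kb': '1024000',
                      'basepath': '/some/path',
                      'bridge_name': 'br100',
                      'mac_address': '02:12:34:46:56:67',
                      'nova': '{}'}
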
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 551ba6e54..524646ee5 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -42,17 +42,25 @@ from nova.virt import images
libvirt = None
libxml2 = None
+
FLAGS = flags.FLAGS
flags.DEFINE_string('libvirt_xml_template',
- utils.abspath('compute/libvirt.xml.template'),
- 'Libvirt XML Template')
+ utils.abspath('virt/libvirt.qemu.xml.template'),
+ 'Libvirt XML Template for QEmu/KVM')
+flags.DEFINE_string('libvirt_uml_xml_template',
+ utils.abspath('virt/libvirt.uml.xml.template'),
+ 'Libvirt XML Template for user-mode-linux')
flags.DEFINE_string('injected_network_template',
- utils.abspath('compute/interfaces.template'),
+ utils.abspath('virt/interfaces.template'),
'Template file for injected network')
-
flags.DEFINE_string('libvirt_type',
'kvm',
- 'Libvirt domain type (kvm, qemu, etc)')
+ 'Libvirt domain type (valid options are: kvm, qemu, uml)')
+flags.DEFINE_string('libvirt_uri',
+ '',
+ 'Override the default libvirt URI (which is dependent'
+ ' on libvirt_type)')
+
def get_connection(read_only):
# These are loaded late so that there's no need to install these
@@ -68,20 +76,41 @@ def get_connection(read_only):
class LibvirtConnection(object):
def __init__(self, read_only):
+ self.libvirt_uri, template_file = self.get_uri_and_template()
+
+ self.libvirt_xml = open(template_file).read()
+ self._wrapped_conn = None
+ self.read_only = read_only
+
+ @property
+ def _conn(self):
+ if not self._wrapped_conn:
+ self._wrapped_conn = self._connect(self.libvirt_uri, self.read_only)
+ return self._wrapped_conn
+
+ def get_uri_and_template(self):
+ if FLAGS.libvirt_type == 'uml':
+ uri = FLAGS.libvirt_uri or 'uml:///system'
+ template_file = FLAGS.libvirt_uml_xml_template
+ else:
+ uri = FLAGS.libvirt_uri or 'qemu:///system'
+ template_file = FLAGS.libvirt_xml_template
+ return uri, template_file
+
+ def _connect(self, uri, read_only):
auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT],
'root',
None]
+
if read_only:
- self._conn = libvirt.openReadOnly('qemu:///system')
+ return libvirt.openReadOnly(uri)
else:
- self._conn = libvirt.openAuth('qemu:///system', auth, 0)
-
+ return libvirt.openAuth(uri, auth, 0)
def list_instances(self):
return [self._conn.lookupByID(x).name()
for x in self._conn.listDomainsID()]
-
def destroy(self, instance):
try:
virt_dom = self._conn.lookupByName(instance.name)
@@ -110,12 +139,11 @@ class LibvirtConnection(object):
timer.start(interval=0.5, now=True)
return d
-
def _cleanup(self, instance):
target = os.path.abspath(instance.datamodel['basepath'])
logging.info("Deleting instance files at %s", target)
- shutil.rmtree(target)
-
+ if os.path.exists(target):
+ shutil.rmtree(target)
@defer.inlineCallbacks
@exception.wrap_exception
@@ -142,7 +170,6 @@ class LibvirtConnection(object):
timer.start(interval=0.5, now=True)
yield d
-
@defer.inlineCallbacks
@exception.wrap_exception
def spawn(self, instance):
@@ -173,7 +200,6 @@ class LibvirtConnection(object):
timer.start(interval=0.5, now=True)
yield local_d
-
@defer.inlineCallbacks
def _create_image(self, instance, libvirt_xml):
# syntactic nicety
@@ -201,10 +227,10 @@ class LibvirtConnection(object):
if not os.path.exists(basepath('ramdisk')):
yield images.fetch(data['ramdisk_id'], basepath('ramdisk'), user, project)
- execute = lambda cmd, input=None: \
+ execute = lambda cmd, process_input=None: \
process.simple_execute(cmd=cmd,
- input=input,
- error_ok=1)
+ process_input=process_input,
+ check_exit_code=True)
key = data['key_data']
net = None
@@ -228,27 +254,23 @@ class LibvirtConnection(object):
yield disk.partition(
basepath('disk-raw'), basepath('disk'), bytes, execute=execute)
-
def basepath(self, instance, path=''):
return os.path.abspath(os.path.join(instance.datamodel['basepath'], path))
-
def toXml(self, instance):
# TODO(termie): cache?
logging.debug("Starting the toXML method")
- libvirt_xml = open(FLAGS.libvirt_xml_template).read()
xml_info = instance.datamodel.copy()
# TODO(joshua): Make this xml express the attached disks as well
# TODO(termie): lazy lazy hack because xml is annoying
xml_info['nova'] = json.dumps(instance.datamodel.copy())
xml_info['type'] = FLAGS.libvirt_type
- libvirt_xml = libvirt_xml % xml_info
+ libvirt_xml = self.libvirt_xml % xml_info
logging.debug("Finished the toXML method")
return libvirt_xml
-
def get_info(self, instance_id):
virt_dom = self._conn.lookupByName(instance_id)
(state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info()
@@ -258,14 +280,7 @@ class LibvirtConnection(object):
'num_cpu': num_cpu,
'cpu_time': cpu_time}
-
def get_disks(self, instance_id):
- """
- Note that this function takes an instance ID, not an Instance, so
- that it can be called by monitor.
-
- Returns a list of all block devices for this domain.
- """
domain = self._conn.lookupByName(instance_id)
# TODO(devcamcar): Replace libxml2 with etree.
xml = domain.XMLDesc(0)
@@ -301,14 +316,7 @@ class LibvirtConnection(object):
return disks
-
def get_interfaces(self, instance_id):
- """
- Note that this function takes an instance ID, not an Instance, so
- that it can be called by monitor.
-
- Returns a list of all network interfaces for this instance.
- """
domain = self._conn.lookupByName(instance_id)
# TODO(devcamcar): Replace libxml2 with etree.
xml = domain.XMLDesc(0)
@@ -344,20 +352,10 @@ class LibvirtConnection(object):
return interfaces
-
def block_stats(self, instance_id, disk):
- """
- Note that this function takes an instance ID, not an Instance, so
- that it can be called by monitor.
- """
domain = self._conn.lookupByName(instance_id)
return domain.blockStats(disk)
-
def interface_stats(self, instance_id, interface):
- """
- Note that this function takes an instance ID, not an Instance, so
- that it can be called by monitor.
- """
domain = self._conn.lookupByName(instance_id)
return domain.interfaceStats(interface)
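
get_uri_and_template derives the URI from libvirt_type unless libvirt_uri overrides it, which is exactly what the new virt_unittest above exercises. In short (flag values illustrative):

    from nova import flags
    from nova.virt import libvirt_conn

    flags.FLAGS.libvirt_type = 'uml'
    conn = libvirt_conn.LibvirtConnection(True)
    conn.get_uri_and_template()[0]     # -> 'uml:///system'

    flags.FLAGS.libvirt_uri = 'qemu+ssh://host/system'  # override wins
    conn.get_uri_and_template()[0]     # -> 'qemu+ssh://host/system'
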
diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py
index dc372e3e3..b44ac383a 100644
--- a/nova/virt/xenapi.py
+++ b/nova/virt/xenapi.py
@@ -16,30 +16,69 @@
"""
A connection to XenServer or Xen Cloud Platform.
+
+The concurrency model for this class is as follows:
+
+All XenAPI calls are on a thread (using t.i.t.deferToThread, via the decorator
+deferredToThread). They are remote calls, and so may hang for the usual
+reasons. They should not be allowed to block the reactor thread.
+
+All long-running XenAPI calls (VM.start, VM.reboot, etc) are called async
+(using XenAPI.VM.async_start etc). These return a task, which can then be
+polled for completion. Polling is handled using reactor.callLater.
+
+This combination of techniques means that we don't block the reactor thread at
+all, and at the same time we don't hold lots of threads waiting for
+long-running operations.
+
+FIXME: get_info currently doesn't conform to these rules, and will block the
+reactor thread if the VM.get_by_name_label or VM.get_record calls block.
"""
import logging
+import xmlrpclib
from twisted.internet import defer
+from twisted.internet import reactor
from twisted.internet import task
-from nova import exception
from nova import flags
from nova import process
+from nova import utils
+from nova.auth.manager import AuthManager
from nova.compute import power_state
+from nova.virt import images
XenAPI = None
+
FLAGS = flags.FLAGS
flags.DEFINE_string('xenapi_connection_url',
None,
- 'URL for connection to XenServer/Xen Cloud Platform. Required if connection_type=xenapi.')
+ 'URL for connection to XenServer/Xen Cloud Platform.'
+ ' Required if connection_type=xenapi.')
flags.DEFINE_string('xenapi_connection_username',
'root',
- 'Username for connection to XenServer/Xen Cloud Platform. Used only if connection_type=xenapi.')
+ 'Username for connection to XenServer/Xen Cloud Platform.'
+ ' Used only if connection_type=xenapi.')
flags.DEFINE_string('xenapi_connection_password',
None,
- 'Password for connection to XenServer/Xen Cloud Platform. Used only if connection_type=xenapi.')
+ 'Password for connection to XenServer/Xen Cloud Platform.'
+ ' Used only if connection_type=xenapi.')
+flags.DEFINE_float('xenapi_task_poll_interval',
+ 0.5,
+ 'The interval used for polling of remote tasks '
+ '(Async.VM.start, etc). Used only if '
+ 'connection_type=xenapi.')
+
+
+XENAPI_POWER_STATE = {
+ 'Halted' : power_state.SHUTDOWN,
+ 'Running' : power_state.RUNNING,
+ 'Paused' : power_state.PAUSED,
+ 'Suspended': power_state.SHUTDOWN, # FIXME
+ 'Crashed' : power_state.CRASHED
+}
def get_connection(_):
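
XENAPI_POWER_STATE replaces the old per-instance power_state_from_xenapi dict removed below, and corrects 'Halted' to map to SHUTDOWN rather than RUNNING. A one-line sketch of its use in get_info:

    # rec as returned by VM.get_record; mapping shown above.
    rec = {'power_state': 'Halted'}          # illustrative record
    XENAPI_POWER_STATE[rec['power_state']]   # -> power_state.SHUTDOWN
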
@@ -59,7 +98,6 @@ def get_connection(_):
class XenAPIConnection(object):
-
def __init__(self, url, user, pw):
self._conn = XenAPI.Session(url)
self._conn.login_with_password(user, pw)
@@ -69,12 +107,47 @@ class XenAPIConnection(object):
for vm in self._conn.xenapi.VM.get_all()]
@defer.inlineCallbacks
- @exception.wrap_exception
def spawn(self, instance):
- vm = self.lookup(instance.name)
+ vm = yield self._lookup(instance.name)
if vm is not None:
raise Exception('Attempted to create non-unique name %s' %
instance.name)
+
+ if 'bridge_name' in instance.datamodel:
+ network_ref = \
+ yield self._find_network_with_bridge(
+ instance.datamodel['bridge_name'])
+ else:
+ network_ref = None
+
+ if 'mac_address' in instance.datamodel:
+ mac_address = instance.datamodel['mac_address']
+ else:
+ mac_address = ''
+
+ user = AuthManager().get_user(instance.datamodel['user_id'])
+ project = AuthManager().get_project(instance.datamodel['project_id'])
+ vdi_uuid = yield self._fetch_image(
+ instance.datamodel['image_id'], user, project, True)
+ kernel = yield self._fetch_image(
+ instance.datamodel['kernel_id'], user, project, False)
+ ramdisk = yield self._fetch_image(
+ instance.datamodel['ramdisk_id'], user, project, False)
+ vdi_ref = yield self._call_xenapi('VDI.get_by_uuid', vdi_uuid)
+
+ vm_ref = yield self._create_vm(instance, kernel, ramdisk)
+ yield self._create_vbd(vm_ref, vdi_ref, 0, True)
+ if network_ref:
+ yield self._create_vif(vm_ref, network_ref, mac_address)
+ logging.debug('Starting VM %s...', vm_ref)
+ yield self._call_xenapi('VM.start', vm_ref, False, False)
+ logging.info('Spawned VM %s as %s.', instance.name, vm_ref)
+
+ @defer.inlineCallbacks
+ def _create_vm(self, instance, kernel, ramdisk):
+ """Create a VM record. Returns a Deferred that gives the new
+ VM reference."""
+
mem = str(long(instance.datamodel['memory_kb']) * 1024)
vcpus = str(instance.datamodel['vcpus'])
rec = {
@@ -92,9 +165,9 @@ class XenAPIConnection(object):
'actions_after_reboot': 'restart',
'actions_after_crash': 'destroy',
'PV_bootloader': '',
- 'PV_kernel': instance.datamodel['kernel_id'],
- 'PV_ramdisk': instance.datamodel['ramdisk_id'],
- 'PV_args': '',
+ 'PV_kernel': kernel,
+ 'PV_ramdisk': ramdisk,
+ 'PV_args': 'root=/dev/xvda1',
'PV_bootloader_args': '',
'PV_legacy_args': '',
'HVM_boot_policy': '',
@@ -106,34 +179,121 @@ class XenAPIConnection(object):
'user_version': '0',
'other_config': {},
}
- vm = yield self._conn.xenapi.VM.create(rec)
- #yield self._conn.xenapi.VM.start(vm, False, False)
+ logging.debug('Creating VM %s...', instance.name)
+ vm_ref = yield self._call_xenapi('VM.create', rec)
+ logging.debug('Created VM %s as %s.', instance.name, vm_ref)
+ defer.returnValue(vm_ref)
+ @defer.inlineCallbacks
+ def _create_vbd(self, vm_ref, vdi_ref, userdevice, bootable):
+ """Create a VBD record. Returns a Deferred that gives the new
+ VBD reference."""
+
+ vbd_rec = {}
+ vbd_rec['VM'] = vm_ref
+ vbd_rec['VDI'] = vdi_ref
+ vbd_rec['userdevice'] = str(userdevice)
+ vbd_rec['bootable'] = bootable
+ vbd_rec['mode'] = 'RW'
+ vbd_rec['type'] = 'disk'
+ vbd_rec['unpluggable'] = True
+ vbd_rec['empty'] = False
+ vbd_rec['other_config'] = {}
+ vbd_rec['qos_algorithm_type'] = ''
+ vbd_rec['qos_algorithm_params'] = {}
+ vbd_rec['qos_supported_algorithms'] = []
+ logging.debug('Creating VBD for VM %s, VDI %s ... ', vm_ref, vdi_ref)
+ vbd_ref = yield self._call_xenapi('VBD.create', vbd_rec)
+ logging.debug('Created VBD %s for VM %s, VDI %s.', vbd_ref, vm_ref,
+ vdi_ref)
+ defer.returnValue(vbd_ref)
+
+ @defer.inlineCallbacks
+ def _create_vif(self, vm_ref, network_ref, mac_address):
+ """Create a VIF record. Returns a Deferred that gives the new
+ VIF reference."""
+
+ vif_rec = {}
+ vif_rec['device'] = '0'
+ vif_rec['network'] = network_ref
+ vif_rec['VM'] = vm_ref
+ vif_rec['MAC'] = mac_address
+ vif_rec['MTU'] = '1500'
+ vif_rec['other_config'] = {}
+ vif_rec['qos_algorithm_type'] = ''
+ vif_rec['qos_algorithm_params'] = {}
+ logging.debug('Creating VIF for VM %s, network %s ... ', vm_ref,
+ network_ref)
+ vif_ref = yield self._call_xenapi('VIF.create', vif_rec)
+ logging.debug('Created VIF %s for VM %s, network %s.', vif_ref,
+ vm_ref, network_ref)
+ defer.returnValue(vif_ref)
+ @defer.inlineCallbacks
+ def _find_network_with_bridge(self, bridge):
+ expr = 'field "bridge" = "%s"' % bridge
+ networks = yield self._call_xenapi('network.get_all_records_where',
+ expr)
+ if len(networks) == 1:
+ defer.returnValue(networks.keys()[0])
+ elif len(networks) > 1:
+ raise Exception('Found non-unique network for bridge %s' % bridge)
+ else:
+ raise Exception('Found no network for bridge %s' % bridge)
+
+ @defer.inlineCallbacks
+ def _fetch_image(self, image, user, project, use_sr):
+ """use_sr: True to put the image as a VDI in an SR, False to place
+ it on dom0's filesystem. The former is for VM disks, the latter for
+ its kernel and ramdisk (if external kernels are being used).
+ Returns a Deferred that gives the new VDI UUID."""
+
+ url = images.image_url(image)
+ access = AuthManager().get_access_key(user, project)
+ logging.debug("Asking xapi to fetch %s as %s" % (url, access))
+ fn = use_sr and 'get_vdi' or 'get_kernel'
+ args = {}
+ args['src_url'] = url
+ args['username'] = access
+ args['password'] = user.secret
+ if use_sr:
+ args['add_partition'] = 'true'
+ task = yield self._async_call_plugin('objectstore', fn, args)
+ uuid = yield self._wait_for_task(task)
+ defer.returnValue(uuid)
+
+ @defer.inlineCallbacks
def reboot(self, instance):
- vm = self.lookup(instance.name)
+ vm = yield self._lookup(instance.name)
if vm is None:
raise Exception('instance not present %s' % instance.name)
- yield self._conn.xenapi.VM.clean_reboot(vm)
+ task = yield self._call_xenapi('Async.VM.clean_reboot', vm)
+ yield self._wait_for_task(task)
+ @defer.inlineCallbacks
def destroy(self, instance):
- vm = self.lookup(instance.name)
+ vm = yield self._lookup(instance.name)
if vm is None:
raise Exception('instance not present %s' % instance.name)
- yield self._conn.xenapi.VM.destroy(vm)
+ task = yield self._call_xenapi('Async.VM.destroy', vm)
+ yield self._wait_for_task(task)
def get_info(self, instance_id):
- vm = self.lookup(instance_id)
+ vm = self._lookup_blocking(instance_id)
if vm is None:
- raise Exception('instance not present %s' % instance.name)
+ raise Exception('instance not present %s' % instance_id)
rec = self._conn.xenapi.VM.get_record(vm)
- return {'state': power_state_from_xenapi[rec['power_state']],
+ return {'state': XENAPI_POWER_STATE[rec['power_state']],
'max_mem': long(rec['memory_static_max']) >> 10,
'mem': long(rec['memory_dynamic_max']) >> 10,
'num_cpu': rec['VCPUs_max'],
'cpu_time': 0}
- def lookup(self, i):
+ @utils.deferredToThread
+ def _lookup(self, i):
+ return self._lookup_blocking(i)
+
+ def _lookup_blocking(self, i):
vms = self._conn.xenapi.VM.get_by_name_label(i)
n = len(vms)
if n == 0:
@@ -143,10 +303,86 @@ class XenAPIConnection(object):
else:
return vms[0]
- power_state_from_xenapi = {
- 'Halted' : power_state.RUNNING, #FIXME
- 'Running' : power_state.RUNNING,
- 'Paused' : power_state.PAUSED,
- 'Suspended': power_state.SHUTDOWN, # FIXME
- 'Crashed' : power_state.CRASHED
- }
+ def _wait_for_task(self, task):
+ """Return a Deferred that will give the result of the given task.
+ The task is polled until it completes."""
+ d = defer.Deferred()
+ reactor.callLater(0, self._poll_task, task, d)
+ return d
+
+ @utils.deferredToThread
+ def _poll_task(self, task, deferred):
+ """Poll the given XenAPI task, and fire the given Deferred if we
+ get a result."""
+ try:
+ #logging.debug('Polling task %s...', task)
+ status = self._conn.xenapi.task.get_status(task)
+ if status == 'pending':
+ reactor.callLater(FLAGS.xenapi_task_poll_interval,
+ self._poll_task, task, deferred)
+ elif status == 'success':
+ result = self._conn.xenapi.task.get_result(task)
+ logging.info('Task %s status: success. %s', task, result)
+ deferred.callback(_parse_xmlrpc_value(result))
+ else:
+ error_info = self._conn.xenapi.task.get_error_info(task)
+ logging.warn('Task %s status: %s. %s', task, status,
+ error_info)
+ deferred.errback(XenAPI.Failure(error_info))
+ #logging.debug('Polling task %s done.', task)
+ except Exception, exn:
+ logging.warn(exn)
+ deferred.errback(exn)
+
+ @utils.deferredToThread
+ def _call_xenapi(self, method, *args):
+ """Call the specified XenAPI method on a background thread. Returns
+ a Deferred for the result."""
+ f = self._conn.xenapi
+ for m in method.split('.'):
+ f = f.__getattr__(m)
+ return f(*args)
+
+ @utils.deferredToThread
+ def _async_call_plugin(self, plugin, fn, args):
+ """Call Async.host.call_plugin on a background thread. Returns a
+ Deferred with the task reference."""
+ return _unwrap_plugin_exceptions(
+ self._conn.xenapi.Async.host.call_plugin,
+ self._get_xenapi_host(), plugin, fn, args)
+
+ def _get_xenapi_host(self):
+ return self._conn.xenapi.session.get_this_host(self._conn.handle)
+
+
+def _unwrap_plugin_exceptions(func, *args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except XenAPI.Failure, exn:
+ logging.debug("Got exception: %s", exn)
+ if (len(exn.details) == 4 and
+ exn.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and
+ exn.details[2] == 'Failure'):
+ params = None
+ try:
+ params = eval(exn.details[3])
+ except:
+ raise exn
+ raise XenAPI.Failure(params)
+ else:
+ raise
+ except xmlrpclib.ProtocolError, exn:
+ logging.debug("Got exception: %s", exn)
+ raise
+
+
+def _parse_xmlrpc_value(val):
+ """Parse the given value as if it were an XML-RPC value. This is
+ sometimes used as the format for the task.result field."""
+ if not val:
+ return val
+ x = xmlrpclib.loads(
+ '<?xml version="1.0"?><methodResponse><params><param>' +
+ val +
+ '</param></params></methodResponse>')
+ return x[0][0]
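As an aside on _parse_xmlrpc_value: xapi task results arrive as bare XML-RPC
<value> fragments, so wrapping one in a methodResponse envelope lets xmlrpclib
decode it. A minimal standalone sketch (the UUID value below is invented for
illustration):

    import xmlrpclib

    def parse_xmlrpc_value(val):
        # Wrap the bare <value> fragment in a full methodResponse and
        # let xmlrpclib decode it; empty results pass through untouched.
        if not val:
            return val
        params, _method = xmlrpclib.loads(
            '<?xml version="1.0"?><methodResponse><params><param>'
            + val + '</param></params></methodResponse>')
        return params[0]

    # e.g. a task result carrying a VDI UUID:
    print parse_xmlrpc_value('<value>ab12cd34-0000</value>')  # ab12cd34-0000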
diff --git a/nova/volume/service.py b/nova/volume/service.py
index e12f675a7..be62f621d 100644
--- a/nova/volume/service.py
+++ b/nova/volume/service.py
@@ -22,12 +22,8 @@ destroying persistent storage volumes, ala EBS.
Currently uses Ata-over-Ethernet.
"""
-import glob
import logging
import os
-import shutil
-import socket
-import tempfile
from twisted.internet import defer
@@ -47,9 +43,6 @@ flags.DEFINE_string('volume_group', 'nova-volumes',
'Name for the VG that will contain exported volumes')
flags.DEFINE_string('aoe_eth_dev', 'eth0',
'Which device to export the volumes on')
-flags.DEFINE_string('storage_name',
- socket.gethostname(),
- 'name of this service')
flags.DEFINE_integer('first_shelf_id',
utils.last_octet(utils.get_my_ip()) * 10,
'AoE starting shelf_id for this service')
@@ -59,9 +52,9 @@ flags.DEFINE_integer('last_shelf_id',
flags.DEFINE_string('aoe_export_dir',
'/var/lib/vblade-persist/vblades',
'AoE directory where exports are created')
-flags.DEFINE_integer('slots_per_shelf',
+flags.DEFINE_integer('blades_per_shelf',
16,
- 'Number of AoE slots per shelf')
+ 'Number of AoE blades per shelf')
flags.DEFINE_string('storage_availability_zone',
'nova',
'availability zone of this service')
@@ -69,18 +62,21 @@ flags.DEFINE_boolean('fake_storage', False,
'Should we make real storage volumes to attach?')
-class NoMoreVolumes(exception.Error):
+class NoMoreBlades(exception.Error):
pass
+
def get_volume(volume_id):
""" Returns a redis-backed volume object """
volume_class = Volume
if FLAGS.fake_storage:
volume_class = FakeVolume
- if datastore.Redis.instance().sismember('volumes', volume_id):
- return volume_class(volume_id=volume_id)
+ vol = volume_class.lookup(volume_id)
+ if vol:
+ return vol
raise exception.Error("Volume does not exist")
+
class VolumeService(service.Service):
"""
There is one VolumeNode running on each host.
@@ -91,18 +87,9 @@ class VolumeService(service.Service):
super(VolumeService, self).__init__()
self.volume_class = Volume
if FLAGS.fake_storage:
- FLAGS.aoe_export_dir = tempfile.mkdtemp()
self.volume_class = FakeVolume
self._init_volume_group()
- def __del__(self):
- # TODO(josh): Get rid of this destructor, volumes destroy themselves
- if FLAGS.fake_storage:
- try:
- shutil.rmtree(FLAGS.aoe_export_dir)
- except Exception, err:
- pass
-
@defer.inlineCallbacks
@validate.rangetest(size=(0, 1000))
def create_volume(self, size, user_id, project_id):
@@ -113,8 +100,6 @@ class VolumeService(service.Service):
"""
logging.debug("Creating volume of size: %s" % (size))
vol = yield self.volume_class.create(size, user_id, project_id)
- datastore.Redis.instance().sadd('volumes', vol['volume_id'])
- datastore.Redis.instance().sadd('volumes:%s' % (FLAGS.storage_name), vol['volume_id'])
logging.debug("restarting exports")
yield self._restart_exports()
defer.returnValue(vol['volume_id'])
@@ -134,22 +119,22 @@ class VolumeService(service.Service):
def delete_volume(self, volume_id):
logging.debug("Deleting volume with id of: %s" % (volume_id))
vol = get_volume(volume_id)
- if vol['status'] == "attached":
+ if vol['attach_status'] == "attached":
raise exception.Error("Volume is still attached")
- if vol['node_name'] != FLAGS.storage_name:
+ if vol['node_name'] != FLAGS.node_name:
raise exception.Error("Volume is not local to this node")
yield vol.destroy()
- datastore.Redis.instance().srem('volumes', vol['volume_id'])
- datastore.Redis.instance().srem('volumes:%s' % (FLAGS.storage_name), vol['volume_id'])
defer.returnValue(True)
@defer.inlineCallbacks
def _restart_exports(self):
if FLAGS.fake_storage:
return
- yield process.simple_execute("sudo vblade-persist auto all")
- # NOTE(vish): this command sometimes sends output to stderr for warnings
- yield process.simple_execute("sudo vblade-persist start all", error_ok=1)
+ # NOTE(vish): these commands sometimes send output to stderr for warnings
+ yield process.simple_execute("sudo vblade-persist auto all",
+ terminate_on_stderr=False)
+ yield process.simple_execute("sudo vblade-persist start all",
+ terminate_on_stderr=False)
@defer.inlineCallbacks
def _init_volume_group(self):
@@ -161,6 +146,7 @@ class VolumeService(service.Service):
"sudo vgcreate %s %s" % (FLAGS.volume_group,
FLAGS.storage_dev))
+
class Volume(datastore.BasicModel):
def __init__(self, volume_id=None):
@@ -172,14 +158,15 @@ class Volume(datastore.BasicModel):
return self.volume_id
def default_state(self):
- return {"volume_id": self.volume_id}
+ return {"volume_id": self.volume_id,
+ "node_name": "unassigned"}
@classmethod
@defer.inlineCallbacks
def create(cls, size, user_id, project_id):
volume_id = utils.generate_uid('vol')
vol = cls(volume_id)
- vol['node_name'] = FLAGS.storage_name
+ vol['node_name'] = FLAGS.node_name
vol['size'] = size
vol['user_id'] = user_id
vol['project_id'] = project_id
@@ -225,14 +212,31 @@ class Volume(datastore.BasicModel):
self['attach_status'] = "detached"
self.save()
+ def save(self):
+ is_new = self.is_new_record()
+ super(Volume, self).save()
+ if is_new:
+ redis = datastore.Redis.instance()
+ key = self.__devices_key
+ # TODO(vish): these should be added by admin commands
+ more = redis.scard(self._redis_association_name("node",
+ self['node_name']))
+ if (not redis.exists(key) and not more):
+ for shelf_id in range(FLAGS.first_shelf_id,
+ FLAGS.last_shelf_id + 1):
+ for blade_id in range(FLAGS.blades_per_shelf):
+ redis.sadd(key, "%s.%s" % (shelf_id, blade_id))
+ self.associate_with("node", self['node_name'])
+
@defer.inlineCallbacks
def destroy(self):
- try:
- yield self._remove_export()
- except Exception as ex:
- logging.debug("Ingnoring failure to remove export %s" % ex)
- pass
+ yield self._remove_export()
yield self._delete_lv()
+ self.unassociate_with("node", self['node_name'])
+ if self.get('shelf_id', None) and self.get('blade_id', None):
+ redis = datastore.Redis.instance()
+ key = self.__devices_key
+ redis.sadd(key, "%s.%s" % (self['shelf_id'], self['blade_id']))
super(Volume, self).destroy()
@defer.inlineCallbacks
@@ -244,66 +248,75 @@ class Volume(datastore.BasicModel):
yield process.simple_execute(
"sudo lvcreate -L %s -n %s %s" % (sizestr,
self['volume_id'],
- FLAGS.volume_group))
+ FLAGS.volume_group),
+ terminate_on_stderr=False)
@defer.inlineCallbacks
def _delete_lv(self):
yield process.simple_execute(
"sudo lvremove -f %s/%s" % (FLAGS.volume_group,
- self['volume_id']))
+ self['volume_id']),
+ terminate_on_stderr=False)
+
+ @property
+ def __devices_key(self):
+ return 'volume_devices:%s' % FLAGS.node_name
@defer.inlineCallbacks
def _setup_export(self):
- (shelf_id, blade_id) = get_next_aoe_numbers()
+ redis = datastore.Redis.instance()
+ key = self.__devices_key
+ device = redis.spop(key)
+ if not device:
+ raise NoMoreBlades()
+ (shelf_id, blade_id) = device.split('.')
self['aoe_device'] = "e%s.%s" % (shelf_id, blade_id)
self['shelf_id'] = shelf_id
self['blade_id'] = blade_id
self.save()
- yield self._exec_export()
+ yield self._exec_setup_export()
@defer.inlineCallbacks
- def _exec_export(self):
+ def _exec_setup_export(self):
yield process.simple_execute(
"sudo vblade-persist setup %s %s %s /dev/%s/%s" %
(self['shelf_id'],
self['blade_id'],
FLAGS.aoe_eth_dev,
FLAGS.volume_group,
- self['volume_id']))
+ self['volume_id']),
+ terminate_on_stderr=False)
@defer.inlineCallbacks
def _remove_export(self):
+ if not self.get('shelf_id', None) or not self.get('blade_id', None):
+ defer.returnValue(False)
+ yield self._exec_remove_export()
+ defer.returnValue(True)
+
+ @defer.inlineCallbacks
+ def _exec_remove_export(self):
yield process.simple_execute(
"sudo vblade-persist stop %s %s" % (self['shelf_id'],
- self['blade_id']))
+ self['blade_id']),
+ terminate_on_stderr=False)
yield process.simple_execute(
"sudo vblade-persist destroy %s %s" % (self['shelf_id'],
- self['blade_id']))
+ self['blade_id']),
+ terminate_on_stderr=False)
class FakeVolume(Volume):
def _create_lv(self):
pass
- def _exec_export(self):
+ def _exec_setup_export(self):
fname = os.path.join(FLAGS.aoe_export_dir, self['aoe_device'])
f = file(fname, "w")
f.close()
- def _remove_export(self):
- pass
+ def _exec_remove_export(self):
+ os.unlink(os.path.join(FLAGS.aoe_export_dir, self['aoe_device']))
def _delete_lv(self):
pass
-
-def get_next_aoe_numbers():
- for shelf_id in xrange(FLAGS.first_shelf_id, FLAGS.last_shelf_id + 1):
- aoes = glob.glob("%s/e%s.*" % (FLAGS.aoe_export_dir, shelf_id))
- if not aoes:
- blade_id = 0
- else:
- blade_id = int(max([int(a.rpartition('.')[2]) for a in aoes])) + 1
- if blade_id < FLAGS.slots_per_shelf:
- logging.debug("Next shelf.blade is %s.%s", shelf_id, blade_id)
- return (shelf_id, blade_id)
- raise NoMoreVolumes()
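The allocation scheme that replaces get_next_aoe_numbers() keeps the free
"shelf.blade" slots in a Redis set: save() seeds the set once per node,
_setup_export() claims a slot atomically with SPOP, and destroy() returns it
with SADD. A minimal sketch of that lifecycle, assuming a local Redis and an
invented node name (uses the redis-py client already listed in
tools/pip-requires):

    import redis

    r = redis.Redis()
    key = 'volume_devices:testhost'  # hypothetical FLAGS.node_name

    # Seed the free list, as Volume.save() does for a brand-new node.
    for shelf_id in range(10, 12):        # stand-in for first/last_shelf_id
        for blade_id in range(16):        # stand-in for blades_per_shelf
            r.sadd(key, '%s.%s' % (shelf_id, blade_id))

    # Claim a slot atomically, as _setup_export() does.
    device = r.spop(key)
    if not device:
        raise Exception('NoMoreBlades')
    shelf_id, blade_id = device.split('.')
    print 'exporting as e%s.%s' % (shelf_id, blade_id)

    # Return the slot when the volume is destroyed.
    r.sadd(key, device)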
diff --git a/nova/wsgi.py b/nova/wsgi.py
index 4fd6e59e3..bec0a7b1c 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -29,6 +29,8 @@ import eventlet.wsgi
eventlet.patcher.monkey_patch(all=False, socket=True)
import routes
import routes.middleware
+import webob.dec
+import webob.exc
logging.getLogger("routes.middleware").addHandler(logging.StreamHandler())
@@ -41,6 +43,8 @@ def run_server(application, port):
class Application(object):
+# TODO(gundlach): I think we should toss this class, now that it has no
+# purpose.
"""Base WSGI application wrapper. Subclasses need to implement __call__."""
def __call__(self, environ, start_response):
@@ -79,95 +83,210 @@ class Application(object):
raise NotImplementedError("You must implement __call__")
-class Middleware(Application): # pylint: disable-msg=W0223
- """Base WSGI middleware wrapper. These classes require an
- application to be initialized that will be called next."""
+class Middleware(Application):
+ """
+ Base WSGI middleware wrapper. These classes require an application to be
+ initialized that will be called next. By default the middleware will
+ simply call its wrapped app, or you can override __call__ to customize its
+ behavior.
+ """
def __init__(self, application): # pylint: disable-msg=W0231
self.application = application
+ @webob.dec.wsgify
+ def __call__(self, req): # pylint: disable-msg=W0221
+ """Override to implement middleware behavior."""
+ return self.application
+
class Debug(Middleware):
- """Helper class that can be insertd into any WSGI application chain
+ """Helper class that can be inserted into any WSGI application chain
to get information about the request and response."""
- def __call__(self, environ, start_response):
- for key, value in environ.items():
+ @webob.dec.wsgify
+ def __call__(self, req):
+ print ("*" * 40) + " REQUEST ENVIRON"
+ for key, value in req.environ.items():
print key, "=", value
print
- wrapper = debug_start_response(start_response)
- return debug_print_body(self.application(environ, wrapper))
-
-
-def debug_start_response(start_response):
- """Wrap the start_response to capture when called."""
+ resp = req.get_response(self.application)
- def wrapper(status, headers, exc_info=None):
- """Print out all headers when start_response is called."""
- print status
- for (key, value) in headers:
+ print ("*" * 40) + " RESPONSE HEADERS"
+ for (key, value) in resp.headers.iteritems():
print key, "=", value
print
- start_response(status, headers, exc_info)
- return wrapper
+ resp.app_iter = self.print_generator(resp.app_iter)
+ return resp
-def debug_print_body(body):
- """Print the body of the response as it is sent back."""
-
- class Wrapper(object):
- """Iterate through all the body parts and print before returning."""
-
- def __iter__(self):
- for part in body:
- sys.stdout.write(part)
- sys.stdout.flush()
- yield part
- print
+ @staticmethod
+ def print_generator(app_iter):
+ """
+ Iterator that prints the contents of a wrapper string iterator
+ when iterated.
+ """
+ print ("*" * 40) + " BODY"
+ for part in app_iter:
+ sys.stdout.write(part)
+ sys.stdout.flush()
+ yield part
+ print
- return Wrapper()
+class Router(object):
+ """
+ WSGI middleware that maps incoming requests to WSGI apps.
+ """
-class ParsedRoutes(Middleware):
- """Processed parsed routes from routes.middleware.RoutesMiddleware
- and call either the controller if found or the default application
- otherwise."""
+ def __init__(self, mapper):
+ """
+ Create a router for the given routes.Mapper.
- def __call__(self, environ, start_response):
- if environ['routes.route'] is None:
- return self.application(environ, start_response)
- app = environ['wsgiorg.routing_args'][1]['controller']
- return app(environ, start_response)
+ Each route in `mapper` must specify a 'controller', which is a
+ WSGI app to call. You'll probably want to specify an 'action' as
+ well and have your controller be a wsgi.Controller, who will route
+ the request to the action method.
+ Examples:
+ mapper = routes.Mapper()
+ sc = ServerController()
-class Router(Middleware): # pylint: disable-msg=R0921
- """Wrapper to help setup routes.middleware.RoutesMiddleware."""
+ # Explicit mapping of one route to a controller+action
+ mapper.connect(None, "/svrlist", controller=sc, action="list")
- def __init__(self, application):
- self.map = routes.Mapper()
- self._build_map()
- application = ParsedRoutes(application)
- application = routes.middleware.RoutesMiddleware(application, self.map)
- super(Router, self).__init__(application)
+ # Actions are all implicitly defined
+ mapper.resource("server", "servers", controller=sc)
- def __call__(self, environ, start_response):
- return self.application(environ, start_response)
+ # Pointing to an arbitrary WSGI app. You can specify the
+ # {path_info:.*} parameter so the target app can be handed just that
+ # section of the URL.
+ mapper.connect(None, "/v1.0/{path_info:.*}", controller=BlogApp())
+ """
+ self.map = mapper
+ self._router = routes.middleware.RoutesMiddleware(self._dispatch,
+ self.map)
- def _build_map(self):
- """Method to create new connections for the routing map."""
- raise NotImplementedError("You must implement _build_map")
+ @webob.dec.wsgify
+ def __call__(self, req):
+ """
+ Route the incoming request to a controller based on self.map.
+ If no match, return a 404.
+ """
+ return self._router
- def _connect(self, *args, **kwargs):
- """Wrapper for the map.connect method."""
- self.map.connect(*args, **kwargs)
+ @staticmethod
+ @webob.dec.wsgify
+ def _dispatch(req):
+ """
+ Called by self._router after matching the incoming request to a route
+ and putting the information into req.environ. Either returns 404
+ or the routed WSGI app's response.
+ """
+ match = req.environ['wsgiorg.routing_args'][1]
+ if not match:
+ return webob.exc.HTTPNotFound()
+ app = match['controller']
+ return app
+
+
+class Controller(object):
+ """
+ WSGI app that reads routing information supplied by RoutesMiddleware
+ and calls the requested action method upon itself. All action methods
+ must, in addition to their normal parameters, accept a 'req' argument
+ which is the incoming webob.Request. They raise a webob.exc exception,
+ or return a dict which will be serialized by requested content type.
+ """
+
+ @webob.dec.wsgify
+ def __call__(self, req):
+ """
+ Call the method specified in req.environ by RoutesMiddleware.
+ """
+ arg_dict = req.environ['wsgiorg.routing_args'][1]
+ action = arg_dict['action']
+ method = getattr(self, action)
+ del arg_dict['controller']
+ del arg_dict['action']
+ arg_dict['req'] = req
+ result = method(**arg_dict)
+ if type(result) is dict:
+ return self._serialize(result, req)
+ else:
+ return result
+
+ def _serialize(self, data, request):
+ """
+ Serialize the given dict to the response type requested in request.
+ Uses self._serialization_metadata if it exists, which is a dict mapping
+ MIME types to information needed to serialize to that type.
+ """
+ _metadata = getattr(type(self), "_serialization_metadata", {})
+ serializer = Serializer(request.environ, _metadata)
+ return serializer.to_content_type(data)
-def route_args(application):
- """Decorator to make grabbing routing args more convenient."""
+class Serializer(object):
+ """
+ Serializes a dictionary to a Content Type specified by a WSGI environment.
+ """
- def wrapper(self, req):
- """Call application with req and parsed routing args from."""
- return application(self, req, req.environ['wsgiorg.routing_args'][1])
+ def __init__(self, environ, metadata=None):
+ """
+ Create a serializer based on the given WSGI environment.
+ 'metadata' is an optional dict mapping MIME types to information
+ needed to serialize a dictionary to that type.
+ """
+ self.environ = environ
+ self.metadata = metadata or {}
- return wrapper
+ def to_content_type(self, data):
+ """
+ Serialize a dictionary into a string. The format of the string
+ will be decided based on the Content Type requested in self.environ:
+ by Accept: header, or by URL suffix.
+ """
+ mimetype = 'application/xml'
+ # TODO(gundlach): determine mimetype from request
+
+ if mimetype == 'application/json':
+ import json
+ return json.dumps(data)
+ elif mimetype == 'application/xml':
+ metadata = self.metadata.get('application/xml', {})
+ # We expect data to contain a single key which is the XML root.
+ root_key = data.keys()[0]
+ from xml.dom import minidom
+ doc = minidom.Document()
+ node = self._to_xml_node(doc, metadata, root_key, data[root_key])
+ return node.toprettyxml(indent=' ')
+ else:
+ return repr(data)
+
+ def _to_xml_node(self, doc, metadata, nodename, data):
+ """Recursive method to convert data members to XML nodes."""
+ result = doc.createElement(nodename)
+ if type(data) is list:
+ singular = metadata.get('plurals', {}).get(nodename, None)
+ if singular is None:
+ if nodename.endswith('s'):
+ singular = nodename[:-1]
+ else:
+ singular = 'item'
+ for item in data:
+ node = self._to_xml_node(doc, metadata, singular, item)
+ result.appendChild(node)
+ elif type(data) is dict:
+ attrs = metadata.get('attributes', {}).get(nodename, {})
+ for k, v in data.items():
+ if k in attrs:
+ result.setAttribute(k, str(v))
+ else:
+ node = self._to_xml_node(doc, metadata, k, v)
+ result.appendChild(node)
+ else: # atom
+ node = doc.createTextNode(str(data))
+ result.appendChild(node)
+ return result
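To make the Serializer's XML rules concrete: keys named in
metadata['attributes'] are rendered as XML attributes, and list members are
wrapped in the singular given by metadata['plurals'] (or the node name minus a
trailing 's'). A hedged example with invented data, assuming nova.wsgi is
importable:

    from nova import wsgi

    metadata = {'application/xml': {
        'attributes': {'server': ['id', 'name']},
        'plurals': {'servers': 'server'}}}

    serializer = wsgi.Serializer({}, metadata)
    data = {'servers': [{'id': 1, 'name': 'vm1'}, {'id': 2, 'name': 'vm2'}]}
    print serializer.to_content_type(data)
    # Roughly:
    #   <servers>
    #     <server id="1" name="vm1"/>
    #     <server id="2" name="vm2"/>
    #   </servers>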
diff --git a/nova/wsgi_test.py b/nova/wsgi_test.py
new file mode 100644
index 000000000..786dc1bce
--- /dev/null
+++ b/nova/wsgi_test.py
@@ -0,0 +1,96 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test WSGI basics and provide some helper functions for other WSGI tests.
+"""
+
+import unittest
+
+import routes
+import webob
+
+from nova import wsgi
+
+
+class Test(unittest.TestCase):
+
+ def test_debug(self):
+
+ class Application(wsgi.Application):
+ """Dummy application to test debug."""
+
+ def __call__(self, environ, start_response):
+ start_response("200", [("X-Test", "checking")])
+ return ['Test result']
+
+ application = wsgi.Debug(Application())
+ result = webob.Request.blank('/').get_response(application)
+ self.assertEqual(result.body, "Test result")
+
+ def test_router(self):
+
+ class Application(wsgi.Application):
+ """Test application to call from router."""
+
+ def __call__(self, environ, start_response):
+ start_response("200", [])
+ return ['Router result']
+
+ class Router(wsgi.Router):
+ """Test router."""
+
+ def __init__(self):
+ mapper = routes.Mapper()
+ mapper.connect("/test", controller=Application())
+ super(Router, self).__init__(mapper)
+
+ result = webob.Request.blank('/test').get_response(Router())
+ self.assertEqual(result.body, "Router result")
+ result = webob.Request.blank('/bad').get_response(Router())
+ self.assertNotEqual(result.body, "Router result")
+
+ def test_controller(self):
+
+ class Controller(wsgi.Controller):
+ """Test controller to call from router."""
+ test = self
+
+ def show(self, req, id): # pylint: disable-msg=W0622,C0103
+ """Default action called for requests with an ID."""
+ self.test.assertEqual(req.path_info, '/tests/123')
+ self.test.assertEqual(id, '123')
+ return id
+
+ class Router(wsgi.Router):
+ """Test router."""
+
+ def __init__(self):
+ mapper = routes.Mapper()
+ mapper.resource("test", "tests", controller=Controller())
+ super(Router, self).__init__(mapper)
+
+ result = webob.Request.blank('/tests/123').get_response(Router())
+ self.assertEqual(result.body, "123")
+ result = webob.Request.blank('/test/123').get_response(Router())
+ self.assertNotEqual(result.body, "123")
+
+ def test_serializer(self):
+ # TODO(eday): Placeholder for serializer testing.
+ pass
diff --git a/plugins/xenapi/README b/plugins/xenapi/README
new file mode 100644
index 000000000..fbd471035
--- /dev/null
+++ b/plugins/xenapi/README
@@ -0,0 +1,6 @@
+This directory contains files that are required for XenAPI support. They
+should be installed in the XenServer / Xen Cloud Platform domain 0.
+
+After copying them into place, make the plugin executable:
+
+chmod u+x /etc/xapi.d/plugins/objectstore
diff --git a/plugins/xenapi/etc/xapi.d/plugins/objectstore b/plugins/xenapi/etc/xapi.d/plugins/objectstore
new file mode 100644
index 000000000..271e7337f
--- /dev/null
+++ b/plugins/xenapi/etc/xapi.d/plugins/objectstore
@@ -0,0 +1,231 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2010 Citrix Systems, Inc.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+#
+# XenAPI plugin for fetching images from nova-objectstore.
+#
+
+import base64
+import errno
+import hmac
+import os
+import os.path
+import sha
+import time
+import urlparse
+
+import XenAPIPlugin
+
+from pluginlib_nova import *
+configure_logging('objectstore')
+
+
+KERNEL_DIR = '/boot/guest'
+
+DOWNLOAD_CHUNK_SIZE = 2 * 1024 * 1024
+SECTOR_SIZE = 512
+MBR_SIZE_SECTORS = 63
+MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE
+
+
+def get_vdi(session, args):
+ src_url = exists(args, 'src_url')
+ username = exists(args, 'username')
+ password = exists(args, 'password')
+ add_partition = validate_bool(args, 'add_partition', 'false')
+
+ (proto, netloc, url_path, _, _, _) = urlparse.urlparse(src_url)
+
+ sr = find_sr(session)
+ if sr is None:
+ raise Exception('Cannot find SR to write VDI to')
+
+ virtual_size = \
+ get_content_length(proto, netloc, url_path, username, password)
+ if virtual_size < 0:
+ raise Exception('Cannot get VDI size')
+
+ vdi_size = virtual_size
+ if add_partition:
+ # Make room for MBR.
+ vdi_size += MBR_SIZE_BYTES
+
+ vdi = create_vdi(session, sr, src_url, vdi_size, False)
+ with_vdi_in_dom0(session, vdi, False,
+ lambda dev: get_vdi_(proto, netloc, url_path,
+ username, password, add_partition,
+ virtual_size, '/dev/%s' % dev))
+ return session.xenapi.VDI.get_uuid(vdi)
+
+
+def get_vdi_(proto, netloc, url_path, username, password, add_partition,
+ virtual_size, dest):
+
+ if add_partition:
+ write_partition(virtual_size, dest)
+
+ offset = add_partition and MBR_SIZE_BYTES or 0
+ get(proto, netloc, url_path, username, password, dest, offset)
+
+
+def write_partition(virtual_size, dest):
+ mbr_last = MBR_SIZE_SECTORS - 1
+ primary_first = MBR_SIZE_SECTORS
+ primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1
+
+ logging.debug('Writing partition table %d %d to %s...',
+ primary_first, primary_last, dest)
+
+ result = os.system('parted --script %s mklabel msdos' % dest)
+ if result != 0:
+ raise Exception('Failed to mklabel')
+ result = os.system('parted --script %s mkpart primary %ds %ds' %
+ (dest, primary_first, primary_last))
+ if result != 0:
+ raise Exception('Failed to mkpart')
+
+ logging.debug('Writing partition table %s done.', dest)
+
+
+def find_sr(session):
+ host = get_this_host(session)
+ srs = session.xenapi.SR.get_all()
+ for sr in srs:
+ sr_rec = session.xenapi.SR.get_record(sr)
+ if not ('i18n-key' in sr_rec['other_config'] and
+ sr_rec['other_config']['i18n-key'] == 'local-storage'):
+ continue
+ for pbd in sr_rec['PBDs']:
+ pbd_rec = session.xenapi.PBD.get_record(pbd)
+ if pbd_rec['host'] == host:
+ return sr
+ return None
+
+
+def get_kernel(session, args):
+ src_url = exists(args, 'src_url')
+ username = exists(args, 'username')
+ password = exists(args, 'password')
+
+ (proto, netloc, url_path, _, _, _) = urlparse.urlparse(src_url)
+
+ dest = os.path.join(KERNEL_DIR, url_path[1:])
+
+ # Paranoid check against people using ../ to do rude things.
+ if os.path.commonprefix([KERNEL_DIR, dest]) != KERNEL_DIR:
+ raise Exception('Illegal destination %s %s' % (url_path, dest))
+
+ dirname = os.path.dirname(dest)
+ try:
+ os.makedirs(dirname)
+ except os.error, e:
+ if e.errno != errno.EEXIST:
+ raise
+ if not os.path.isdir(dirname):
+ raise Exception('Cannot make directory %s' % dirname)
+
+ try:
+ os.remove(dest)
+ except OSError:
+ pass
+
+ get(proto, netloc, url_path, username, password, dest, 0)
+
+ return dest
+
+
+def get_content_length(proto, netloc, url_path, username, password):
+ headers = make_headers('HEAD', url_path, username, password)
+ return with_http_connection(
+ proto, netloc,
+ lambda conn: get_content_length_(url_path, headers, conn))
+
+
+def get_content_length_(url_path, headers, conn):
+ conn.request('HEAD', url_path, None, headers)
+ response = conn.getresponse()
+ if response.status != 200:
+ raise Exception('%d %s' % (response.status, response.reason))
+
+ return long(response.getheader('Content-Length', -1))
+
+
+def get(proto, netloc, url_path, username, password, dest, offset):
+ headers = make_headers('GET', url_path, username, password)
+ download(proto, netloc, url_path, headers, dest, offset)
+
+
+def make_headers(verb, url_path, username, password):
+ headers = {}
+ headers['Date'] = \
+ time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
+ headers['Authorization'] = \
+ 'AWS %s:%s' % (username,
+ s3_authorization(verb, url_path, password, headers))
+ return headers
+
+
+def s3_authorization(verb, path, password, headers):
+ sha1 = hmac.new(password, digestmod=sha)
+ sha1.update(plaintext(verb, path, headers))
+ return base64.encodestring(sha1.digest()).strip()
+
+
+def plaintext(verb, path, headers):
+ return '%s\n\n\n%s\n%s' % (verb,
+ "\n".join([headers[h] for h in headers]),
+ path)
+
+
+def download(proto, netloc, url_path, headers, dest, offset):
+ with_http_connection(
+ proto, netloc,
+ lambda conn: download_(url_path, dest, offset, headers, conn))
+
+
+def download_(url_path, dest, offset, headers, conn):
+ conn.request('GET', url_path, None, headers)
+ response = conn.getresponse()
+ if response.status != 200:
+ raise Exception('%d %s' % (response.status, response.reason))
+
+ length = response.getheader('Content-Length', -1)
+
+ with_file(
+ dest, 'a',
+ lambda dest_file: download_all(response, length, dest_file, offset))
+
+
+def download_all(response, length, dest_file, offset):
+ dest_file.seek(offset)
+ i = 0
+ while True:
+ buf = response.read(DOWNLOAD_CHUNK_SIZE)
+ if buf:
+ dest_file.write(buf)
+ else:
+ return
+ i += len(buf)
+ if length != -1 and i >= length:
+ return
+
+
+if __name__ == '__main__':
+ XenAPIPlugin.dispatch({'get_vdi': get_vdi,
+ 'get_kernel': get_kernel})
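For reference, the S3-style signing done by make_headers()/s3_authorization()
reduces to an HMAC-SHA1 over 'VERB\n\n\nDATE\npath' (the two blank lines stand
in for the unused Content-MD5 and Content-Type headers), base64-encoded into
an 'AWS access:signature' header. A condensed sketch with made-up credentials
and path:

    import base64
    import hmac
    import sha
    import time

    def sign_request(verb, path, access_key, secret_key):
        # Same canonical string as plaintext() above.
        date = time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime())
        mac = hmac.new(secret_key, digestmod=sha)
        mac.update('%s\n\n\n%s\n%s' % (verb, date, path))
        signature = base64.encodestring(mac.digest()).strip()
        return {'Date': date,
                'Authorization': 'AWS %s:%s' % (access_key, signature)}

    print sign_request('GET', '/image/ami-12345', 'access', 'secret')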
diff --git a/plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py b/plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py
new file mode 100755
index 000000000..2d323a016
--- /dev/null
+++ b/plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py
@@ -0,0 +1,216 @@
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+#
+# Helper functions for the Nova xapi plugins. In time, this will merge
+# with the pluginlib.py shipped with xapi, but for now, that file is not
+# very stable, so it's easiest just to have a copy of all the functions
+# that we need.
+#
+
+import httplib
+import logging
+import logging.handlers
+import re
+import time
+
+import XenAPI  # provides XenAPI.Failure, caught below
+
+
+##### Logging setup
+
+def configure_logging(name):
+ log = logging.getLogger()
+ log.setLevel(logging.DEBUG)
+ sysh = logging.handlers.SysLogHandler('/dev/log')
+ sysh.setLevel(logging.DEBUG)
+ formatter = logging.Formatter('%s: %%(levelname)-8s %%(message)s' % name)
+ sysh.setFormatter(formatter)
+ log.addHandler(sysh)
+
+
+##### Exceptions
+
+class PluginError(Exception):
+ """Base Exception class for all plugin errors."""
+ def __init__(self, *args):
+ Exception.__init__(self, *args)
+
+class ArgumentError(PluginError):
+ """Raised when required arguments are missing, argument values are invalid,
+ or incompatible arguments are given.
+ """
+ def __init__(self, *args):
+ PluginError.__init__(self, *args)
+
+
+##### Helpers
+
+def ignore_failure(func, *args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except XenAPI.Failure, e:
+ logging.error('Ignoring XenAPI.Failure %s', e)
+ return None
+
+
+##### Argument validation
+
+ARGUMENT_PATTERN = re.compile(r'^[a-zA-Z0-9_:\.\-,]+$')
+
+def validate_exists(args, key, default=None):
+ """Validates that a string argument to a RPC method call is given, and
+ matches the shell-safe regex, with an optional default value in case it
+ does not exist.
+
+ Returns the string.
+ """
+ if key in args:
+ if len(args[key]) == 0:
+ raise ArgumentError('Argument %r value %r is too short.' % (key, args[key]))
+ if not ARGUMENT_PATTERN.match(args[key]):
+ raise ArgumentError('Argument %r value %r contains invalid characters.' % (key, args[key]))
+ if args[key][0] == '-':
+ raise ArgumentError('Argument %r value %r starts with a hyphen.' % (key, args[key]))
+ return args[key]
+ elif default is not None:
+ return default
+ else:
+ raise ArgumentError('Argument %s is required.' % key)
+
+def validate_bool(args, key, default=None):
+ """Validates that a string argument to a RPC method call is a boolean string,
+ with an optional default value in case it does not exist.
+
+ Returns the python boolean value.
+ """
+ value = validate_exists(args, key, default)
+ if value.lower() == 'true':
+ return True
+ elif value.lower() == 'false':
+ return False
+ else:
+ raise ArgumentError("Argument %s may not take value %r. Valid values are ['true', 'false']." % (key, value))
+
+def exists(args, key):
+ """Validates that a freeform string argument to a RPC method call is given.
+ Returns the string.
+ """
+ if key in args:
+ return args[key]
+ else:
+ raise ArgumentError('Argument %s is required.' % key)
+
+def optional(args, key):
+ """If the given key is in args, return the corresponding value, otherwise
+ return None"""
+ return key in args and args[key] or None
+
+
+def get_this_host(session):
+ return session.xenapi.session.get_this_host(session.handle)
+
+
+def get_domain_0(session):
+ this_host_ref = get_this_host(session)
+ expr = 'field "is_control_domain" = "true" and field "resident_on" = "%s"' % this_host_ref
+ return session.xenapi.VM.get_all_records_where(expr).keys()[0]
+
+
+def create_vdi(session, sr_ref, name_label, virtual_size, read_only):
+ vdi_ref = session.xenapi.VDI.create(
+ { 'name_label': name_label,
+ 'name_description': '',
+ 'SR': sr_ref,
+ 'virtual_size': str(virtual_size),
+ 'type': 'User',
+ 'sharable': False,
+ 'read_only': read_only,
+ 'xenstore_data': {},
+ 'other_config': {},
+ 'sm_config': {},
+ 'tags': [] })
+ logging.debug('Created VDI %s (%s, %s, %s) on %s.', vdi_ref, name_label,
+ virtual_size, read_only, sr_ref)
+ return vdi_ref
+
+
+def with_vdi_in_dom0(session, vdi, read_only, f):
+ dom0 = get_domain_0(session)
+ vbd_rec = {}
+ vbd_rec['VM'] = dom0
+ vbd_rec['VDI'] = vdi
+ vbd_rec['userdevice'] = 'autodetect'
+ vbd_rec['bootable'] = False
+ vbd_rec['mode'] = read_only and 'RO' or 'RW'
+ vbd_rec['type'] = 'disk'
+ vbd_rec['unpluggable'] = True
+ vbd_rec['empty'] = False
+ vbd_rec['other_config'] = {}
+ vbd_rec['qos_algorithm_type'] = ''
+ vbd_rec['qos_algorithm_params'] = {}
+ vbd_rec['qos_supported_algorithms'] = []
+ logging.debug('Creating VBD for VDI %s ... ', vdi)
+ vbd = session.xenapi.VBD.create(vbd_rec)
+ logging.debug('Creating VBD for VDI %s done.', vdi)
+ try:
+ logging.debug('Plugging VBD %s ... ', vbd)
+ session.xenapi.VBD.plug(vbd)
+ logging.debug('Plugging VBD %s done.', vbd)
+ return f(session.xenapi.VBD.get_device(vbd))
+ finally:
+ logging.debug('Destroying VBD for VDI %s ... ', vdi)
+ vbd_unplug_with_retry(session, vbd)
+ ignore_failure(session.xenapi.VBD.destroy, vbd)
+ logging.debug('Destroying VBD for VDI %s done.', vdi)
+
+
+def vbd_unplug_with_retry(session, vbd):
+ """Call VBD.unplug on the given VBD, with a retry if we get
+ DEVICE_DETACH_REJECTED. For reasons which I don't understand, we're
+ seeing the device still in use, even when all processes using the device
+ should be dead."""
+ while True:
+ try:
+ session.xenapi.VBD.unplug(vbd)
+ logging.debug('VBD.unplug successful first time.')
+ return
+ except XenAPI.Failure, e:
+ if (len(e.details) > 0 and
+ e.details[0] == 'DEVICE_DETACH_REJECTED'):
+ logging.debug('VBD.unplug rejected: retrying...')
+ time.sleep(1)
+ elif (len(e.details) > 0 and
+ e.details[0] == 'DEVICE_ALREADY_DETACHED'):
+ logging.debug('VBD.unplug successful eventually.')
+ return
+ else:
+ logging.error('Ignoring XenAPI.Failure in VBD.unplug: %s', e)
+ return
+
+
+def with_http_connection(proto, netloc, f):
+ conn = (proto == 'https' and
+ httplib.HTTPSConnection(netloc) or
+ httplib.HTTPConnection(netloc))
+ try:
+ return f(conn)
+ finally:
+ conn.close()
+
+
+def with_file(dest_path, mode, f):
+ dest = open(dest_path, mode)
+ try:
+ return f(dest)
+ finally:
+ dest.close()
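As a quick illustration of the argument helpers: plugin args always arrive as
a dict of strings over the XenAPI plugin protocol, and the validators either
return a usable value or raise ArgumentError. The args below are invented:

    from pluginlib_nova import ArgumentError, exists, optional, validate_bool

    args = {'src_url': 'http://objectstore/image', 'add_partition': 'true'}

    print exists(args, 'src_url')                        # the raw URL
    print validate_bool(args, 'add_partition', 'false')  # True
    print optional(args, 'password')                     # None (key absent)

    try:
        exists(args, 'username')                         # missing -> error
    except ArgumentError, e:
        print 'rejected: %s' % e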
diff --git a/pylintrc b/pylintrc
index 53d02d6b2..6702ca895 100644
--- a/pylintrc
+++ b/pylintrc
@@ -1,19 +1,26 @@
[Messages Control]
-disable-msg=C0103
+# W0511: TODOs in code comments are fine.
+# W0142: *args and **kwargs are fine.
+disable-msg=W0511,W0142
[Basic]
-# Variables can be 1 to 31 characters long, with
-# lowercase and underscores
+# Variable names can be 1 to 31 characters long, with lowercase and underscores
variable-rgx=[a-z_][a-z0-9_]{0,30}$
+# Argument names can be 2 to 31 characters long, with lowercase and underscores
+argument-rgx=[a-z_][a-z0-9_]{1,30}$
+
# Method names should be at least 3 characters long
# and be lowercased with underscores
method-rgx=[a-z_][a-z0-9_]{2,50}$
-[MESSAGES CONTROL]
-# TODOs in code comments are fine...
-disable-msg=W0511
+# Module names matching nova-* are ok (files in bin/)
+module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(nova-[a-z0-9_-]+))$
+
+# Don't require docstrings on tests.
+no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$
[Design]
max-public-methods=100
min-public-methods=0
+max-args=6
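The relaxed module-rgx above is what lets pylint accept the hyphenated
executables in bin/ without renaming them. A quick, hypothetical check of the
pattern:

    import re

    module_rgx = re.compile(
        r'(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(nova-[a-z0-9_-]+))$')

    for name in ('nova-api', 'nova_manage', 'NovaAPI', 'Nova-API'):
        print name, bool(module_rgx.match(name))
    # nova-api, nova_manage and NovaAPI match; Nova-API does not.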
diff --git a/run_tests.py b/run_tests.py
index 7fe6e73ec..77aa9088a 100644
--- a/run_tests.py
+++ b/run_tests.py
@@ -38,11 +38,11 @@ Due to our use of multiprocessing we frequently get some ignorable
'Interrupted system call' exceptions after test completion.
"""
+
import __main__
import os
import sys
-
from twisted.scripts import trial as trial_script
from nova import datastore
@@ -59,18 +59,18 @@ from nova.tests.model_unittest import *
from nova.tests.network_unittest import *
from nova.tests.objectstore_unittest import *
from nova.tests.process_unittest import *
+from nova.tests.rpc_unittest import *
from nova.tests.validator_unittest import *
from nova.tests.volume_unittest import *
FLAGS = flags.FLAGS
-
flags.DEFINE_bool('flush_db', True,
'Flush the database before running fake tests')
-
flags.DEFINE_string('tests_stderr', 'run_tests.err.log',
- 'Path to where to pipe STDERR during test runs. '
- 'Default = "run_tests.err.log"')
+ 'Path to where to pipe STDERR during test runs.'
+ ' Default = "run_tests.err.log"')
+
if __name__ == '__main__':
OptionsClass = twistd.WrapTwistedOptions(trial_script.Options)
diff --git a/run_tests.sh b/run_tests.sh
index 9b2de7aea..6ea40d95e 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -1,13 +1,66 @@
-#!/bin/bash
+#!/bin/bash
+
+function usage {
+ echo "Usage: $0 [OPTION]..."
+ echo "Run Nova's test suite(s)"
+ echo ""
+ echo " -V, --virtual-env Always use virtualenv. Install automatically if not present"
+ echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment"
+ echo " -h, --help Print this usage message"
+ echo ""
+ echo "Note: with no options specified, the script will try to run the tests in a virtual environment,"
+ echo " If no virtualenv is found, the script will ask if you would like to create one. If you "
+ echo " prefer to run tests NOT in a virtual environment, simply pass the -N option."
+ exit
+}
+
+function process_options {
+ # Iterate over every argument; $1 alone would only ever see the first option.
+ for option in "$@"; do
+ process_option $option
+ done
+}
+
+function process_option {
+ option=$1
+ case $option in
+ -h|--help) usage;;
+ -V|--virtual-env) let always_venv=1; let never_venv=0;;
+ -N|--no-virtual-env) let always_venv=0; let never_venv=1;;
+ esac
+}
venv=.nova-venv
with_venv=tools/with_venv.sh
+always_venv=0
+never_venv=0
+options=("$@")
+
+process_options "${options[@]}"
+
+if [ $never_venv -eq 1 ]; then
+ # Just run the test suites in current environment
+ python run_tests.py
+ exit
+fi
if [ -e ${venv} ]; then
${with_venv} python run_tests.py $@
else
- echo "You need to install the Nova virtualenv before you can run this."
- echo ""
- echo "Please run tools/install_venv.py"
- exit 1
+ if [ $always_venv -eq 1 ]; then
+ # Automatically install the virtualenv
+ python tools/install_venv.py
+ else
+ echo -e "No virtual environment found...create one? (Y/n) \c"
+ read use_ve
+ if [ "x$use_ve" = "xY" ]; then
+ # Install the virtualenv and run the test suite in it
+ python tools/install_venv.py
+ else
+ python run_tests.py
+ exit
+ fi
+ fi
+ ${with_venv} python run_tests.py $@
fi
diff --git a/tools/install_venv.py b/tools/install_venv.py
index 0b35fc8e9..5d2369a96 100644
--- a/tools/install_venv.py
+++ b/tools/install_venv.py
@@ -1,3 +1,23 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 OpenStack, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
"""
Installation script for Nova's development virtualenv
"""
@@ -7,61 +27,79 @@ import subprocess
import sys
-ROOT = os.path.dirname(os.path.dirname(__file__))
+ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
VENV = os.path.join(ROOT, '.nova-venv')
PIP_REQUIRES = os.path.join(ROOT, 'tools', 'pip-requires')
TWISTED_NOVA='http://nova.openstack.org/Twisted-10.0.0Nova.tar.gz'
-
def die(message, *args):
print >>sys.stderr, message % args
sys.exit(1)
-def run_command(cmd, redirect_output=True, error_ok=False):
- # Useful for debugging:
- #print >>sys.stderr, ' '.join(cmd)
+def run_command(cmd, redirect_output=True, check_exit_code=True):
+ """
+ Runs a command in an out-of-process shell, returning the
+ output of that command. Working directory is ROOT.
+ """
if redirect_output:
stdout = subprocess.PIPE
else:
stdout = None
- proc = subprocess.Popen(cmd, stdout=stdout)
+ proc = subprocess.Popen(cmd, cwd=ROOT, stdout=stdout)
output = proc.communicate()[0]
- if not error_ok and proc.returncode != 0:
+ if check_exit_code and proc.returncode != 0:
die('Command "%s" failed.\n%s', ' '.join(cmd), output)
return output
-def check_dependencies():
- """Make sure pip and virtualenv are on the path."""
- print 'Checking for pip...',
- if not run_command(['which', 'pip']).strip():
- die('ERROR: pip not found.\n\nNova development requires pip,'
- ' please install it using your favorite package management tool')
- print 'done.'
+HAS_EASY_INSTALL = bool(run_command(['which', 'easy_install'], check_exit_code=False).strip())
+HAS_VIRTUALENV = bool(run_command(['which', 'virtualenv'], check_exit_code=False).strip())
- print 'Checking for virtualenv...',
- if not run_command(['which', 'virtualenv']).strip():
- die('ERROR: virtualenv not found.\n\nNova development requires virtualenv,'
- ' please install it using your favorite package management tool')
+
+def check_dependencies():
+ """Make sure virtualenv is in the path."""
+
+ if not HAS_VIRTUALENV:
+ print 'not found.'
+ # Try installing it via easy_install...
+ if HAS_EASY_INSTALL:
+ print 'Installing virtualenv via easy_install...',
+ run_command(['easy_install', 'virtualenv'])
+ print 'done.'
+ else:
+ die('ERROR: virtualenv not found.\n\nNova development requires virtualenv,'
+ ' please install it using your favorite package management tool')
print 'done.'
def create_virtualenv(venv=VENV):
+ """Creates the virtual environment and installs PIP only into the
+ virtual environment
+ """
print 'Creating venv...',
run_command(['virtualenv', '-q', '--no-site-packages', VENV])
print 'done.'
+ print 'Installing pip in virtualenv...',
+ if not run_command(['tools/with_venv.sh', 'easy_install', 'pip']).strip():
+ die("Failed to install pip.")
+ print 'done.'
def install_dependencies(venv=VENV):
print 'Installing dependencies with pip (this can take a while)...'
- run_command(['pip', 'install', '-E', venv, '-r', PIP_REQUIRES],
+ run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, '-r', PIP_REQUIRES],
redirect_output=False)
- run_command(['pip', 'install', '-E', venv, TWISTED_NOVA],
+ run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, TWISTED_NOVA],
redirect_output=False)
+ # Tell the virtual env how to "import nova"
+ pthfile = os.path.join(venv, "lib", "python2.6", "site-packages", "nova.pth")
+ f = open(pthfile, 'w')
+ f.write("%s\n" % ROOT)
+ f.close()
+
+
def print_help():
help = """
Nova development environment setup is complete.
diff --git a/tools/pip-requires b/tools/pip-requires
index 4eb47ca2b..13e8e5f45 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -1,14 +1,19 @@
+pep8==0.5.0
+pylint==0.19
IPy==0.70
M2Crypto==0.20.2
amqplib==0.6.1
anyjson==0.2.4
boto==2.0b1
carrot==0.10.5
+eventlet==0.9.10
lockfile==0.8
python-daemon==1.5.5
python-gflags==1.3
redis==2.0.0
+routes==1.12.3
tornado==1.0
+webob==0.9.8
wsgiref==0.1.2
zope.interface==3.6.1
mox==0.5.0