summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorVishvananda Ishaya <vishvananda@yahoo.com>2010-08-31 23:21:30 -0700
committerVishvananda Ishaya <vishvananda@yahoo.com>2010-08-31 23:21:30 -0700
commite7eb9443bc07a173b5885f634c80c616d9d59e9b (patch)
tree579aef2e8566adbf7d94ef82aa121120f3fdb37d
parentad7a20231a8fb11bf7c75f2e180735e2de450102 (diff)
parent975861fd0b8fe7c89ccb6a31b0d0c89948c18252 (diff)
merged orm branch
-rwxr-xr-xbin/nova-api8
-rwxr-xr-xbin/nova-api-new (renamed from bin/nova-rsapi)8
-rwxr-xr-xbin/nova-compute2
-rwxr-xr-xbin/nova-dhcpbridge53
-rwxr-xr-xbin/nova-import-canonical-imagestore10
-rwxr-xr-xbin/nova-instancemonitor5
-rwxr-xr-xbin/nova-manage9
-rwxr-xr-xbin/nova-network2
-rwxr-xr-xbin/nova-objectstore2
-rwxr-xr-xbin/nova-volume2
-rw-r--r--nova/adminclient.py7
-rw-r--r--nova/api/__init__.py (renamed from nova/network/exception.py)35
-rw-r--r--nova/api/ec2/__init__.py42
-rw-r--r--nova/api/rackspace/__init__.py83
-rw-r--r--nova/api/rackspace/_id_translator.py42
-rw-r--r--nova/api/rackspace/base.py30
-rw-r--r--nova/api/rackspace/flavors.py54
-rw-r--r--nova/api/rackspace/images.py70
-rw-r--r--nova/api/rackspace/notes.txt23
-rw-r--r--nova/api/rackspace/servers.py83
-rw-r--r--nova/api/rackspace/sharedipgroups.py18
-rw-r--r--nova/api/test.py61
-rw-r--r--nova/auth/fakeldap.py38
-rw-r--r--nova/auth/ldapdriver.py63
-rw-r--r--nova/auth/manager.py133
-rw-r--r--nova/auth/rbac.py40
-rw-r--r--nova/auth/signer.py57
-rw-r--r--nova/cloudpipe/api.py3
-rwxr-xr-xnova/cloudpipe/bootscript.sh4
-rw-r--r--nova/cloudpipe/pipelib.py2
-rw-r--r--nova/compute/disk.py4
-rw-r--r--nova/compute/instance_types.py14
-rw-r--r--nova/compute/manager.py199
-rw-r--r--nova/compute/model.py317
-rw-r--r--nova/compute/monitor.py35
-rw-r--r--nova/compute/service.py343
-rw-r--r--nova/crypto.py8
-rw-r--r--nova/datastore.old.py261
-rw-r--r--nova/datastore.py209
-rw-r--r--nova/db/__init__.py23
-rw-r--r--nova/db/api.py439
-rw-r--r--nova/db/sqlalchemy/__init__.py24
-rw-r--r--nova/db/sqlalchemy/api.py577
-rw-r--r--nova/db/sqlalchemy/models.py390
-rw-r--r--nova/db/sqlalchemy/session.py55
-rw-r--r--nova/endpoint/__init__.py32
-rw-r--r--nova/endpoint/admin.py4
-rwxr-xr-xnova/endpoint/api.py7
-rw-r--r--nova/endpoint/cloud.py546
-rw-r--r--nova/endpoint/images.py15
-rw-r--r--nova/endpoint/rackspace.py183
-rw-r--r--nova/exception.py8
-rw-r--r--nova/fakerabbit.py5
-rw-r--r--nova/flags.py69
-rw-r--r--nova/image/__init__.py0
-rw-r--r--nova/image/service.py90
-rw-r--r--nova/manager.py36
-rw-r--r--nova/network/linux_net.py280
-rw-r--r--nova/network/manager.py328
-rw-r--r--nova/network/model.py633
-rw-r--r--nova/network/service.py238
-rw-r--r--nova/network/vpn.py127
-rw-r--r--nova/objectstore/bucket.py1
-rw-r--r--nova/objectstore/handler.py171
-rw-r--r--nova/objectstore/image.py16
-rw-r--r--nova/objectstore/stored.py4
-rw-r--r--nova/process.py174
-rw-r--r--nova/rpc.py13
-rw-r--r--nova/server.py6
-rw-r--r--nova/service.py84
-rw-r--r--nova/test.py11
-rw-r--r--nova/tests/api_unittest.py83
-rw-r--r--nova/tests/auth_unittest.py1
-rw-r--r--nova/tests/cloud_unittest.py8
-rw-r--r--nova/tests/compute_unittest.py129
-rw-r--r--nova/tests/fake_flags.py16
-rw-r--r--nova/tests/model_unittest.py6
-rw-r--r--nova/tests/network_unittest.py316
-rw-r--r--nova/tests/process_unittest.py2
-rw-r--r--nova/tests/rpc_unittest.py2
-rw-r--r--nova/tests/service_unittest.py166
-rw-r--r--nova/tests/volume_unittest.py174
-rw-r--r--nova/twistd.py16
-rw-r--r--nova/utils.py79
-rw-r--r--nova/validate.py1
-rw-r--r--nova/virt/connection.py6
-rw-r--r--nova/virt/fake.py47
-rw-r--r--nova/virt/images.py10
-rw-r--r--nova/virt/libvirt.qemu.xml.template3
-rw-r--r--nova/virt/libvirt.uml.xml.template1
-rw-r--r--nova/virt/libvirt_conn.py177
-rw-r--r--nova/virt/xenapi.py206
-rw-r--r--nova/volume/driver.py105
-rw-r--r--nova/volume/manager.py121
-rw-r--r--nova/volume/service.py291
-rw-r--r--nova/wsgi.py243
-rw-r--r--nova/wsgi_test.py96
-rw-r--r--pylintrc19
-rw-r--r--run_tests.py12
-rwxr-xr-xrun_tests.sh60
-rw-r--r--setup.py2
-rw-r--r--tools/install_venv.py22
-rw-r--r--tools/pip-requires5
103 files changed, 5573 insertions, 3820 deletions
diff --git a/bin/nova-api b/bin/nova-api
index 13baf22a7..a3ad5a0e1 100755
--- a/bin/nova-api
+++ b/bin/nova-api
@@ -26,7 +26,6 @@ from tornado import httpserver
from tornado import ioloop
from nova import flags
-from nova import rpc
from nova import server
from nova import utils
from nova.endpoint import admin
@@ -43,14 +42,7 @@ def main(_argv):
'Admin': admin.AdminController()}
_app = api.APIServerApplication(controllers)
- conn = rpc.Connection.instance()
- consumer = rpc.AdapterConsumer(connection=conn,
- topic=FLAGS.cloud_topic,
- proxy=controllers['Cloud'])
-
io_inst = ioloop.IOLoop.instance()
- _injected = consumer.attach_to_tornado(io_inst)
-
http_server = httpserver.HTTPServer(_app)
http_server.listen(FLAGS.cc_port)
logging.debug('Started HTTP server on %s', FLAGS.cc_port)
diff --git a/bin/nova-rsapi b/bin/nova-api-new
index 026880d5a..fda42339c 100755
--- a/bin/nova-rsapi
+++ b/bin/nova-api-new
@@ -18,17 +18,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""
- Daemon for the Rackspace API endpoint.
+Nova API daemon.
"""
+from nova import api
from nova import flags
from nova import utils
from nova import wsgi
-from nova.endpoint import rackspace
FLAGS = flags.FLAGS
-flags.DEFINE_integer('cc_port', 8773, 'cloud controller port')
+flags.DEFINE_integer('api_port', 8773, 'API port')
if __name__ == '__main__':
utils.default_flagfile()
- wsgi.run_server(rackspace.API(), FLAGS.cc_port)
+ wsgi.run_server(api.API(), FLAGS.api_port)
diff --git a/bin/nova-compute b/bin/nova-compute
index e0c12354f..cf9de9bbf 100755
--- a/bin/nova-compute
+++ b/bin/nova-compute
@@ -29,4 +29,4 @@ if __name__ == '__main__':
twistd.serve(__file__)
if __name__ == '__builtin__':
- application = service.ComputeService.create()
+ application = service.ComputeService.create() # pylint: disable=C0103
diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index f70a4482c..c416d07a7 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -25,56 +25,62 @@ import logging
import os
import sys
-#TODO(joshua): there is concern that the user dnsmasq runs under will not
-# have nova in the path. This should be verified and if it is
-# not true the ugly line below can be removed
+# TODO(joshua): there is concern that the user dnsmasq runs under will not
+# have nova in the path. This should be verified and if it is
+# not true the ugly line below can be removed
sys.path.append(os.path.abspath(os.path.join(__file__, "../../")))
+from nova import db
from nova import flags
from nova import rpc
from nova import utils
+from nova import datastore # for redis_db flag
+from nova.auth import manager # for auth flags
from nova.network import linux_net
-from nova.network import model
-from nova.network import service
+from nova.network import manager # for network flags
FLAGS = flags.FLAGS
-def add_lease(_mac, ip, _hostname, _interface):
+def add_lease(_mac, ip_address, _hostname, _interface):
"""Set the IP that was assigned by the DHCP server."""
if FLAGS.fake_rabbit:
- service.VlanNetworkService().lease_ip(ip)
+ logging.debug("leasing ip")
+ network_manager = utils.import_object(FLAGS.network_manager)
+ network_manager.lease_fixed_ip(None, ip_address)
else:
rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name),
- {"method": "lease_ip",
- "args": {"fixed_ip": ip}})
+ {"method": "lease_fixed_ip",
+ "args": {"context": None,
+ "address": ip_address}})
-def old_lease(_mac, _ip, _hostname, _interface):
+def old_lease(_mac, _ip_address, _hostname, _interface):
"""Do nothing, just an old lease update."""
logging.debug("Adopted old lease or got a change of mac/hostname")
-def del_lease(_mac, ip, _hostname, _interface):
+def del_lease(_mac, ip_address, _hostname, _interface):
"""Called when a lease expires."""
if FLAGS.fake_rabbit:
- service.VlanNetworkService().release_ip(ip)
+ logging.debug("releasing ip")
+ network_manager = utils.import_object(FLAGS.network_manager)
+ network_manager.release_fixed_ip(None, ip_address)
else:
rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name),
- {"method": "release_ip",
- "args": {"fixed_ip": ip}})
+ {"method": "release_fixed_ip",
+ "args": {"context": None,
+ "address": ip_address}})
def init_leases(interface):
"""Get the list of hosts for an interface."""
- net = model.get_network_by_interface(interface)
- res = ""
- for address in net.assigned_objs:
- res += "%s\n" % linux_net.host_dhcp(address)
- return res
+ network_ref = db.network_get_by_bridge(None, interface)
+ return linux_net.get_dhcp_hosts(None, network_ref['id'])
def main():
+ global network_manager
"""Parse environment and arguments and call the approproate action."""
flagfile = os.environ.get('FLAGFILE', FLAGS.dhcpbridge_flagfile)
utils.default_flagfile(flagfile)
@@ -83,10 +89,17 @@ def main():
if int(os.environ.get('TESTING', '0')):
FLAGS.fake_rabbit = True
FLAGS.redis_db = 8
- FLAGS.network_size = 32
+ FLAGS.network_size = 16
FLAGS.connection_type = 'fake'
FLAGS.fake_network = True
FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
+ FLAGS.num_networks = 5
+ path = os.path.abspath(os.path.join(os.path.dirname(__file__),
+ '..',
+ '_trial_temp',
+ 'nova.sqlite'))
+ FLAGS.sql_connection = 'sqlite:///%s' % path
+ #FLAGS.sql_connection = 'mysql://root@localhost/test'
action = argv[1]
if action in ['add', 'del', 'old']:
mac = argv[2]
diff --git a/bin/nova-import-canonical-imagestore b/bin/nova-import-canonical-imagestore
index 5165109b2..2bc61cf0c 100755
--- a/bin/nova-import-canonical-imagestore
+++ b/bin/nova-import-canonical-imagestore
@@ -35,12 +35,12 @@ from nova.objectstore import image
FLAGS = flags.FLAGS
-api_url = 'https://imagestore.canonical.com/api/dashboard'
+API_URL = 'https://imagestore.canonical.com/api/dashboard'
def get_images():
"""Get a list of the images from the imagestore URL."""
- images = json.load(urllib2.urlopen(api_url))['images']
+ images = json.load(urllib2.urlopen(API_URL))['images']
images = [img for img in images if img['title'].find('amd64') > -1]
return images
@@ -56,21 +56,21 @@ def download(img):
for f in img['files']:
if f['kind'] == 'kernel':
dest = os.path.join(tempdir, 'kernel')
- subprocess.call(['curl', f['url'], '-o', dest])
+ subprocess.call(['curl', '--fail', f['url'], '-o', dest])
kernel_id = image.Image.add(dest,
description='kernel/' + img['title'], kernel=True)
for f in img['files']:
if f['kind'] == 'ramdisk':
dest = os.path.join(tempdir, 'ramdisk')
- subprocess.call(['curl', f['url'], '-o', dest])
+ subprocess.call(['curl', '--fail', f['url'], '-o', dest])
ramdisk_id = image.Image.add(dest,
description='ramdisk/' + img['title'], ramdisk=True)
for f in img['files']:
if f['kind'] == 'image':
dest = os.path.join(tempdir, 'image')
- subprocess.call(['curl', f['url'], '-o', dest])
+ subprocess.call(['curl', '--fail', f['url'], '-o', dest])
ramdisk_id = image.Image.add(dest,
description=img['title'], kernel=kernel_id, ramdisk=ramdisk_id)
diff --git a/bin/nova-instancemonitor b/bin/nova-instancemonitor
index 911fb6f42..fbac58889 100755
--- a/bin/nova-instancemonitor
+++ b/bin/nova-instancemonitor
@@ -35,9 +35,10 @@ if __name__ == '__main__':
if __name__ == '__builtin__':
logging.warn('Starting instance monitor')
- m = monitor.InstanceMonitor()
+ # pylint: disable-msg=C0103
+ monitor = monitor.InstanceMonitor()
# This is the parent service that twistd will be looking for when it
# parses this file, return it so that we can get it into globals below
application = service.Application('nova-instancemonitor')
- m.setServiceParent(application)
+ monitor.setServiceParent(application)
diff --git a/bin/nova-manage b/bin/nova-manage
index 071436b13..7f20531dc 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -56,7 +56,8 @@ class VpnCommands(object):
vpn = self._vpn_for(project.id)
if vpn:
command = "ping -c1 -w1 %s > /dev/null; echo $?"
- out, _err = utils.execute(command % vpn['private_dns_name'])
+ out, _err = utils.execute(command % vpn['private_dns_name'],
+ check_exit_code=False)
if out.strip() == '0':
net = 'up'
else:
@@ -211,7 +212,7 @@ class ProjectCommands(object):
f.write(zip_file)
-categories = [
+CATEGORIES = [
('user', UserCommands),
('project', ProjectCommands),
('role', RoleCommands),
@@ -258,11 +259,11 @@ def main():
if len(argv) < 1:
print script_name + " category action [<args>]"
print "Available categories:"
- for k, _ in categories:
+ for k, _ in CATEGORIES:
print "\t%s" % k
sys.exit(2)
category = argv.pop(0)
- matches = lazy_match(category, categories)
+ matches = lazy_match(category, CATEGORIES)
# instantiate the command group object
category, fn = matches[0]
command_object = fn()
diff --git a/bin/nova-network b/bin/nova-network
index ba9063f56..6434b6ec3 100755
--- a/bin/nova-network
+++ b/bin/nova-network
@@ -33,4 +33,4 @@ if __name__ == '__main__':
twistd.serve(__file__)
if __name__ == '__builtin__':
- application = service.type_to_class(FLAGS.network_type).create()
+ application = service.NetworkService.create() # pylint: disable-msg=C0103
diff --git a/bin/nova-objectstore b/bin/nova-objectstore
index 02f2bcb48..7cb718b6f 100755
--- a/bin/nova-objectstore
+++ b/bin/nova-objectstore
@@ -35,4 +35,4 @@ if __name__ == '__main__':
if __name__ == '__builtin__':
utils.default_flagfile()
- application = handler.get_application()
+ application = handler.get_application() # pylint: disable-msg=C0103
diff --git a/bin/nova-volume b/bin/nova-volume
index f7a8fad37..25b5871a3 100755
--- a/bin/nova-volume
+++ b/bin/nova-volume
@@ -29,4 +29,4 @@ if __name__ == '__main__':
twistd.serve(__file__)
if __name__ == '__builtin__':
- application = service.VolumeService.create()
+ application = service.VolumeService.create() # pylint: disable-msg=C0103
diff --git a/nova/adminclient.py b/nova/adminclient.py
index 242298a75..0ca32b1e5 100644
--- a/nova/adminclient.py
+++ b/nova/adminclient.py
@@ -20,6 +20,7 @@ Nova User API client library.
"""
import base64
+
import boto
from boto.ec2.regioninfo import RegionInfo
@@ -57,6 +58,7 @@ class UserInfo(object):
elif name == 'secretkey':
self.secretkey = str(value)
+
class UserRole(object):
"""
Information about a Nova user's role, as parsed through SAX.
@@ -79,6 +81,7 @@ class UserRole(object):
else:
setattr(self, name, str(value))
+
class ProjectInfo(object):
"""
Information about a Nova project, as parsed through SAX
@@ -114,12 +117,14 @@ class ProjectInfo(object):
else:
setattr(self, name, str(value))
+
class ProjectMember(object):
"""
Information about a Nova project member, as parsed through SAX.
Fields include:
memberId
"""
+
def __init__(self, connection=None):
self.connection = connection
self.memberId = None
@@ -135,6 +140,7 @@ class ProjectMember(object):
self.memberId = value
else:
setattr(self, name, str(value))
+
class HostInfo(object):
"""
@@ -163,6 +169,7 @@ class HostInfo(object):
def endElement(self, name, value, connection):
setattr(self, name, value)
+
class NovaAdminClient(object):
def __init__(self, clc_ip='127.0.0.1', region='nova', access_key='admin',
secret_key='admin', **kwargs):
diff --git a/nova/network/exception.py b/nova/api/__init__.py
index 8d7aa1498..b9b9e3988 100644
--- a/nova/network/exception.py
+++ b/nova/api/__init__.py
@@ -17,32 +17,21 @@
# under the License.
"""
-Exceptions for network errors.
+Root WSGI middleware for all API controllers.
"""
-from nova.exception import Error
+import routes
+from nova import wsgi
+from nova.api import ec2
+from nova.api import rackspace
-class NoMoreAddresses(Error):
- """No More Addresses are available in the network"""
- pass
+class API(wsgi.Router):
+ """Routes top-level requests to the appropriate controller."""
-class AddressNotAllocated(Error):
- """The specified address has not been allocated"""
- pass
-
-
-class AddressAlreadyAssociated(Error):
- """The specified address has already been associated"""
- pass
-
-
-class AddressNotAssociated(Error):
- """The specified address is not associated"""
- pass
-
-
-class NotValidNetworkSize(Error):
- """The network size is not valid"""
- pass
+ def __init__(self):
+ mapper = routes.Mapper()
+ mapper.connect("/v1.0/{path_info:.*}", controller=rackspace.API())
+ mapper.connect("/ec2/{path_info:.*}", controller=ec2.API())
+ super(API, self).__init__(mapper)
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
new file mode 100644
index 000000000..6eec0abf7
--- /dev/null
+++ b/nova/api/ec2/__init__.py
@@ -0,0 +1,42 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+WSGI middleware for EC2 API controllers.
+"""
+
+import routes
+import webob.dec
+
+from nova import wsgi
+
+
+class API(wsgi.Router):
+ """Routes EC2 requests to the appropriate controller."""
+
+ def __init__(self):
+ mapper = routes.Mapper()
+ mapper.connect(None, "{all:.*}", controller=self.dummy)
+ super(API, self).__init__(mapper)
+
+ @staticmethod
+ @webob.dec.wsgify
+ def dummy(req):
+ """Temporary dummy controller."""
+ msg = "dummy response -- please hook up __init__() to cloud.py instead"
+ return repr({'dummy': msg,
+ 'kwargs': repr(req.environ['wsgiorg.routing_args'][1])})
diff --git a/nova/api/rackspace/__init__.py b/nova/api/rackspace/__init__.py
new file mode 100644
index 000000000..b4d666d63
--- /dev/null
+++ b/nova/api/rackspace/__init__.py
@@ -0,0 +1,83 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+WSGI middleware for Rackspace API controllers.
+"""
+
+import json
+import time
+
+import routes
+import webob.dec
+import webob.exc
+
+from nova import flags
+from nova import wsgi
+from nova.api.rackspace import flavors
+from nova.api.rackspace import images
+from nova.api.rackspace import servers
+from nova.api.rackspace import sharedipgroups
+from nova.auth import manager
+
+
+class API(wsgi.Middleware):
+ """WSGI entry point for all Rackspace API requests."""
+
+ def __init__(self):
+ app = AuthMiddleware(APIRouter())
+ super(API, self).__init__(app)
+
+
+class AuthMiddleware(wsgi.Middleware):
+ """Authorize the rackspace API request or return an HTTP Forbidden."""
+
+ #TODO(gundlach): isn't this the old Nova API's auth? Should it be replaced
+ #with correct RS API auth?
+
+ @webob.dec.wsgify
+ def __call__(self, req):
+ context = {}
+ if "HTTP_X_AUTH_TOKEN" in req.environ:
+ context['user'] = manager.AuthManager().get_user_from_access_key(
+ req.environ['HTTP_X_AUTH_TOKEN'])
+ if context['user']:
+ context['project'] = manager.AuthManager().get_project(
+ context['user'].name)
+ if "user" not in context:
+ return webob.exc.HTTPForbidden()
+ req.environ['nova.context'] = context
+ return self.application
+
+
+class APIRouter(wsgi.Router):
+ """
+ Routes requests on the Rackspace API to the appropriate controller
+ and method.
+ """
+
+ def __init__(self):
+ mapper = routes.Mapper()
+ mapper.resource("server", "servers", controller=servers.Controller())
+ mapper.resource("image", "images", controller=images.Controller(),
+ collection={'detail': 'GET'})
+ mapper.resource("flavor", "flavors", controller=flavors.Controller(),
+ collection={'detail': 'GET'})
+ mapper.resource("sharedipgroup", "sharedipgroups",
+ controller=sharedipgroups.Controller())
+ super(APIRouter, self).__init__(mapper)
diff --git a/nova/api/rackspace/_id_translator.py b/nova/api/rackspace/_id_translator.py
new file mode 100644
index 000000000..aec5fb6a5
--- /dev/null
+++ b/nova/api/rackspace/_id_translator.py
@@ -0,0 +1,42 @@
+from nova import datastore
+
+class RackspaceAPIIdTranslator(object):
+ """
+ Converts Rackspace API ids to and from the id format for a given
+ strategy.
+ """
+
+ def __init__(self, id_type, service_name):
+ """
+ Creates a translator for ids of the given type (e.g. 'flavor'), for the
+ given storage service backend class name (e.g. 'LocalFlavorService').
+ """
+
+ self._store = datastore.Redis.instance()
+ key_prefix = "rsapi.idtranslator.%s.%s" % (id_type, service_name)
+ # Forward (strategy format -> RS format) and reverse translation keys
+ self._fwd_key = "%s.fwd" % key_prefix
+ self._rev_key = "%s.rev" % key_prefix
+
+ def to_rs_id(self, opaque_id):
+ """Convert an id from a strategy-specific one to a Rackspace one."""
+ result = self._store.hget(self._fwd_key, str(opaque_id))
+ if result: # we have a mapping from opaque to RS for this strategy
+ return int(result)
+ else:
+ # Store the mapping.
+ nextid = self._store.incr("%s.lastid" % self._fwd_key)
+ if self._store.hsetnx(self._fwd_key, str(opaque_id), nextid):
+ # If someone else didn't beat us to it, store the reverse
+ # mapping as well.
+ self._store.hset(self._rev_key, nextid, str(opaque_id))
+ return nextid
+ else:
+ # Someone beat us to it; use their number instead, and
+ # discard nextid (which is OK -- we don't require that
+ # every int id be used.)
+ return int(self._store.hget(self._fwd_key, str(opaque_id)))
+
+ def from_rs_id(self, strategy_name, rs_id):
+ """Convert a Rackspace id to a strategy-specific one."""
+ return self._store.hget(self._rev_key, rs_id)
diff --git a/nova/api/rackspace/base.py b/nova/api/rackspace/base.py
new file mode 100644
index 000000000..dd2c6543c
--- /dev/null
+++ b/nova/api/rackspace/base.py
@@ -0,0 +1,30 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import wsgi
+
+
+class Controller(wsgi.Controller):
+ """TODO(eday): Base controller for all rackspace controllers. What is this
+ for? Is this just Rackspace specific? """
+
+ @classmethod
+ def render(cls, instance):
+ if isinstance(instance, list):
+ return {cls.entity_name: cls.render(instance)}
+ else:
+ return {"TODO": "TODO"}
diff --git a/nova/api/rackspace/flavors.py b/nova/api/rackspace/flavors.py
new file mode 100644
index 000000000..60b35c939
--- /dev/null
+++ b/nova/api/rackspace/flavors.py
@@ -0,0 +1,54 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.api.rackspace import base
+from nova.compute import instance_types
+from webob import exc
+
+class Controller(base.Controller):
+ """Flavor controller for the Rackspace API."""
+
+ _serialization_metadata = {
+ 'application/xml': {
+ "attributes": {
+ "flavor": [ "id", "name", "ram", "disk" ]
+ }
+ }
+ }
+
+ def index(self, req):
+ """Return all flavors in brief."""
+ return dict(flavors=[dict(id=flavor['id'], name=flavor['name'])
+ for flavor in self.detail(req)['flavors']])
+
+ def detail(self, req):
+ """Return all flavors in detail."""
+ items = [self.show(req, id)['flavor'] for id in self._all_ids()]
+ return dict(flavors=items)
+
+ def show(self, req, id):
+ """Return data about the given flavor id."""
+ for name, val in instance_types.INSTANCE_TYPES.iteritems():
+ if val['flavorid'] == int(id):
+ item = dict(ram=val['memory_mb'], disk=val['local_gb'],
+ id=val['flavorid'], name=name)
+ return dict(flavor=item)
+ raise exc.HTTPNotFound()
+
+ def _all_ids(self):
+ """Return the list of all flavorids."""
+ return [i['flavorid'] for i in instance_types.INSTANCE_TYPES.values()]
diff --git a/nova/api/rackspace/images.py b/nova/api/rackspace/images.py
new file mode 100644
index 000000000..2f3e928b9
--- /dev/null
+++ b/nova/api/rackspace/images.py
@@ -0,0 +1,70 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import nova.image.service
+from nova.api.rackspace import base
+from nova.api.rackspace import _id_translator
+from webob import exc
+
+class Controller(base.Controller):
+
+ _serialization_metadata = {
+ 'application/xml': {
+ "attributes": {
+ "image": [ "id", "name", "updated", "created", "status",
+ "serverId", "progress" ]
+ }
+ }
+ }
+
+ def __init__(self):
+ self._service = nova.image.service.ImageService.load()
+ self._id_translator = _id_translator.RackspaceAPIIdTranslator(
+ "image", self._service.__class__.__name__)
+
+ def index(self, req):
+ """Return all public images in brief."""
+ return dict(images=[dict(id=img['id'], name=img['name'])
+ for img in self.detail(req)['images']])
+
+ def detail(self, req):
+ """Return all public images in detail."""
+ data = self._service.index()
+ for img in data:
+ img['id'] = self._id_translator.to_rs_id(img['id'])
+ return dict(images=data)
+
+ def show(self, req, id):
+ """Return data about the given image id."""
+ opaque_id = self._id_translator.from_rs_id(id)
+ img = self._service.show(opaque_id)
+ img['id'] = id
+ return dict(image=img)
+
+ def delete(self, req, id):
+ # Only public images are supported for now.
+ raise exc.HTTPNotFound()
+
+ def create(self, req):
+ # Only public images are supported for now, so a request to
+        # make a backup of a server cannot be supported.
+ raise exc.HTTPNotFound()
+
+ def update(self, req, id):
+ # Users may not modify public images, and that's all that
+ # we support for now.
+ raise exc.HTTPNotFound()
diff --git a/nova/api/rackspace/notes.txt b/nova/api/rackspace/notes.txt
new file mode 100644
index 000000000..e133bf5ea
--- /dev/null
+++ b/nova/api/rackspace/notes.txt
@@ -0,0 +1,23 @@
+We will need:
+
+ImageService
+a service that can do crud on image information. not user-specific. opaque
+image ids.
+
+GlanceImageService(ImageService):
+image ids are URIs.
+
+LocalImageService(ImageService):
+image ids are random strings.
+
+RackspaceAPITranslationStore:
+translates RS server/images/flavor/etc ids into formats required
+by a given ImageService strategy.
+
+api.rackspace.images.Controller:
+uses an ImageService strategy behind the scenes to do its fetching; it just
+converts int image id into a strategy-specific image id.
+
+who maintains the mapping from user to [images he owns]? nobody, because
+we have no way of enforcing access to his images, without kryptex which
+won't be in Austin.
diff --git a/nova/api/rackspace/servers.py b/nova/api/rackspace/servers.py
new file mode 100644
index 000000000..25d1fe9c8
--- /dev/null
+++ b/nova/api/rackspace/servers.py
@@ -0,0 +1,83 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import rpc
+from nova.compute import model as compute
+from nova.api.rackspace import base
+
+
+class Controller(base.Controller):
+ entity_name = 'servers'
+
+ def index(self, **kwargs):
+ instances = []
+ for inst in compute.InstanceDirectory().all:
+ instances.append(instance_details(inst))
+
+ def show(self, **kwargs):
+ instance_id = kwargs['id']
+ return compute.InstanceDirectory().get(instance_id)
+
+ def delete(self, **kwargs):
+ instance_id = kwargs['id']
+ instance = compute.InstanceDirectory().get(instance_id)
+ if not instance:
+ raise ServerNotFound("The requested server was not found")
+ instance.destroy()
+ return True
+
+ def create(self, **kwargs):
+ inst = self.build_server_instance(kwargs['server'])
+ rpc.cast(
+ FLAGS.compute_topic, {
+ "method": "run_instance",
+ "args": {"instance_id": inst.instance_id}})
+
+ def update(self, **kwargs):
+ instance_id = kwargs['id']
+ instance = compute.InstanceDirectory().get(instance_id)
+ if not instance:
+ raise ServerNotFound("The requested server was not found")
+ instance.update(kwargs['server'])
+ instance.save()
+
+ def build_server_instance(self, env):
+ """Build instance data structure and save it to the data store."""
+ reservation = utils.generate_uid('r')
+ ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
+ inst = self.instdir.new()
+ inst['name'] = env['server']['name']
+ inst['image_id'] = env['server']['imageId']
+ inst['instance_type'] = env['server']['flavorId']
+ inst['user_id'] = env['user']['id']
+ inst['project_id'] = env['project']['id']
+ inst['reservation_id'] = reservation
+ inst['launch_time'] = ltime
+ inst['mac_address'] = utils.generate_mac()
+ address = self.network.allocate_ip(
+ inst['user_id'],
+ inst['project_id'],
+ mac=inst['mac_address'])
+ inst['private_dns_name'] = str(address)
+ inst['bridge_name'] = network.BridgedNetwork.get_network_for_project(
+ inst['user_id'],
+ inst['project_id'],
+ 'default')['bridge_name']
+ # key_data, key_name, ami_launch_index
+ # TODO(todd): key data or root password
+ inst.save()
+ return inst
diff --git a/nova/api/rackspace/sharedipgroups.py b/nova/api/rackspace/sharedipgroups.py
new file mode 100644
index 000000000..986f11434
--- /dev/null
+++ b/nova/api/rackspace/sharedipgroups.py
@@ -0,0 +1,18 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+class Controller(object): pass
diff --git a/nova/api/test.py b/nova/api/test.py
new file mode 100644
index 000000000..51b114b8e
--- /dev/null
+++ b/nova/api/test.py
@@ -0,0 +1,61 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test for the root WSGI middleware for all API controllers.
+"""
+
+import unittest
+
+import stubout
+import webob
+import webob.dec
+
+from nova import api
+
+
+class Test(unittest.TestCase):
+
+ def setUp(self): # pylint: disable-msg=C0103
+ self.stubs = stubout.StubOutForTesting()
+
+ def tearDown(self): # pylint: disable-msg=C0103
+ self.stubs.UnsetAll()
+
+ def test_rackspace(self):
+ self.stubs.Set(api.rackspace, 'API', APIStub)
+ result = webob.Request.blank('/v1.0/cloud').get_response(api.API())
+ self.assertEqual(result.body, "/cloud")
+
+ def test_ec2(self):
+ self.stubs.Set(api.ec2, 'API', APIStub)
+ result = webob.Request.blank('/ec2/cloud').get_response(api.API())
+ self.assertEqual(result.body, "/cloud")
+
+ def test_not_found(self):
+ self.stubs.Set(api.ec2, 'API', APIStub)
+ self.stubs.Set(api.rackspace, 'API', APIStub)
+ result = webob.Request.blank('/test/cloud').get_response(api.API())
+ self.assertNotEqual(result.body, "/cloud")
+
+
+class APIStub(object):
+ """Class to verify request and mark it was called."""
+
+ @webob.dec.wsgify
+ def __call__(self, req):
+ return req.path_info
diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py
index b420924af..bfc3433c5 100644
--- a/nova/auth/fakeldap.py
+++ b/nova/auth/fakeldap.py
@@ -30,20 +30,23 @@ from nova import datastore
SCOPE_BASE = 0
SCOPE_ONELEVEL = 1 # not implemented
-SCOPE_SUBTREE = 2
+SCOPE_SUBTREE = 2
MOD_ADD = 0
MOD_DELETE = 1
-class NO_SUCH_OBJECT(Exception):
+class NO_SUCH_OBJECT(Exception): # pylint: disable-msg=C0103
+ """Duplicate exception class from real LDAP module."""
pass
-class OBJECT_CLASS_VIOLATION(Exception):
+class OBJECT_CLASS_VIOLATION(Exception): # pylint: disable-msg=C0103
+ """Duplicate exception class from real LDAP module."""
pass
-def initialize(uri):
+def initialize(_uri):
+ """Opens a fake connection with an LDAP server."""
return FakeLDAP()
@@ -68,7 +71,7 @@ def _match_query(query, attrs):
# cut off the ! and the nested parentheses
return not _match_query(query[2:-1], attrs)
- (k, sep, v) = inner.partition('=')
+ (k, _sep, v) = inner.partition('=')
return _match(k, v, attrs)
@@ -85,20 +88,20 @@ def _paren_groups(source):
if source[pos] == ')':
count -= 1
if count == 0:
- result.append(source[start:pos+1])
+ result.append(source[start:pos + 1])
return result
-def _match(k, v, attrs):
+def _match(key, value, attrs):
"""Match a given key and value against an attribute list."""
- if k not in attrs:
+ if key not in attrs:
return False
- if k != "objectclass":
- return v in attrs[k]
+ if key != "objectclass":
+ return value in attrs[key]
# it is an objectclass check, so check subclasses
- values = _subs(v)
- for value in values:
- if value in attrs[k]:
+ values = _subs(value)
+ for v in values:
+ if v in attrs[key]:
return True
return False
@@ -145,6 +148,7 @@ def _to_json(unencoded):
class FakeLDAP(object):
#TODO(vish): refactor this class to use a wrapper instead of accessing
# redis directly
+ """Fake LDAP connection."""
def simple_bind_s(self, dn, password):
"""This method is ignored, but provided for compatibility."""
@@ -207,6 +211,7 @@ class FakeLDAP(object):
# get the attributes from redis
attrs = redis.hgetall(key)
# turn the values from redis into lists
+ # pylint: disable-msg=E1103
attrs = dict([(k, _from_json(v))
for k, v in attrs.iteritems()])
# filter the objects by query
@@ -215,13 +220,12 @@ class FakeLDAP(object):
attrs = dict([(k, v) for k, v in attrs.iteritems()
if not fields or k in fields])
objects.append((key[len(self.__redis_prefix):], attrs))
+ # pylint: enable-msg=E1103
if objects == []:
raise NO_SUCH_OBJECT()
return objects
-
@property
- def __redis_prefix(self):
+ def __redis_prefix(self): # pylint: disable-msg=R0201
+ """Get the prefix to use for all redis keys."""
return 'ldap:'
-
-
diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py
index 453fa196c..74ba011b5 100644
--- a/nova/auth/ldapdriver.py
+++ b/nova/auth/ldapdriver.py
@@ -30,10 +30,11 @@ import sys
from nova import exception
from nova import flags
+
FLAGS = flags.FLAGS
flags.DEFINE_string('ldap_url', 'ldap://localhost',
'Point this at your ldap server')
-flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password')
+flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password')
flags.DEFINE_string('ldap_user_dn', 'cn=Manager,dc=example,dc=com',
'DN of admin user')
flags.DEFINE_string('ldap_user_unit', 'Users', 'OID for Users')
@@ -62,14 +63,18 @@ flags.DEFINE_string('ldap_developer',
# to define a set interface for AuthDrivers. I'm delaying
# creating this now because I'm expecting an auth refactor
# in which we may want to change the interface a bit more.
+
+
class LdapDriver(object):
"""Ldap Auth driver
Defines enter and exit and therefore supports the with/as syntax.
"""
+
def __init__(self):
"""Imports the LDAP module"""
self.ldap = __import__('ldap')
+ self.conn = None
def __enter__(self):
"""Creates the connection to LDAP"""
@@ -77,7 +82,7 @@ class LdapDriver(object):
self.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password)
return self
- def __exit__(self, type, value, traceback):
+ def __exit__(self, exc_type, exc_value, traceback):
"""Destroys the connection to LDAP"""
self.conn.unbind_s()
return False
@@ -122,11 +127,11 @@ class LdapDriver(object):
def get_projects(self, uid=None):
"""Retrieve list of projects"""
- filter = '(objectclass=novaProject)'
+ pattern = '(objectclass=novaProject)'
if uid:
- filter = "(&%s(member=%s))" % (filter, self.__uid_to_dn(uid))
+ pattern = "(&%s(member=%s))" % (pattern, self.__uid_to_dn(uid))
attrs = self.__find_objects(FLAGS.ldap_project_subtree,
- filter)
+ pattern)
return [self.__to_project(attr) for attr in attrs]
def create_user(self, name, access_key, secret_key, is_admin):
@@ -193,8 +198,7 @@ class LdapDriver(object):
('cn', [name]),
('description', [description]),
('projectManager', [manager_dn]),
- ('member', members)
- ]
+ ('member', members)]
self.conn.add_s('cn=%s,%s' % (name, FLAGS.ldap_project_subtree), attr)
return self.__to_project(dict(attr))
@@ -286,7 +290,6 @@ class LdapDriver(object):
def __key_pair_exists(self, uid, key_name):
"""Check if key pair exists"""
- return self.get_user(uid) != None
return self.get_key_pair(uid, key_name) != None
def __project_exists(self, project_id):
@@ -309,7 +312,7 @@ class LdapDriver(object):
except self.ldap.NO_SUCH_OBJECT:
return []
# just return the DNs
- return [dn for dn, attributes in res]
+ return [dn for dn, _attributes in res]
def __find_objects(self, dn, query=None, scope=None):
"""Find objects by query"""
@@ -345,7 +348,8 @@ class LdapDriver(object):
for key in keys:
self.delete_key_pair(uid, key['name'])
- def __role_to_dn(self, role, project_id=None):
+ @staticmethod
+ def __role_to_dn(role, project_id=None):
"""Convert role to corresponding dn"""
if project_id == None:
return FLAGS.__getitem__("ldap_%s" % role).value
@@ -355,7 +359,7 @@ class LdapDriver(object):
FLAGS.ldap_project_subtree)
def __create_group(self, group_dn, name, uid,
- description, member_uids = None):
+ description, member_uids=None):
"""Create a group"""
if self.__group_exists(group_dn):
raise exception.Duplicate("Group can't be created because "
@@ -374,8 +378,7 @@ class LdapDriver(object):
('objectclass', ['groupOfNames']),
('cn', [name]),
('description', [description]),
- ('member', members)
- ]
+ ('member', members)]
self.conn.add_s(group_dn, attr)
def __is_in_group(self, uid, group_dn):
@@ -401,9 +404,7 @@ class LdapDriver(object):
if self.__is_in_group(uid, group_dn):
raise exception.Duplicate("User %s is already a member of "
"the group %s" % (uid, group_dn))
- attr = [
- (self.ldap.MOD_ADD, 'member', self.__uid_to_dn(uid))
- ]
+ attr = [(self.ldap.MOD_ADD, 'member', self.__uid_to_dn(uid))]
self.conn.modify_s(group_dn, attr)
def __remove_from_group(self, uid, group_dn):
@@ -431,7 +432,7 @@ class LdapDriver(object):
self.conn.modify_s(group_dn, attr)
except self.ldap.OBJECT_CLASS_VIOLATION:
logging.debug("Attempted to remove the last member of a group. "
- "Deleting the group at %s instead." % group_dn )
+ "Deleting the group at %s instead.", group_dn)
self.__delete_group(group_dn)
def __remove_from_all(self, uid):
@@ -439,7 +440,6 @@ class LdapDriver(object):
if not self.__user_exists(uid):
raise exception.NotFound("User %s can't be removed from all "
"because the user doesn't exist" % (uid,))
- dn = self.__uid_to_dn(uid)
role_dns = self.__find_group_dns_with_member(
FLAGS.role_project_subtree, uid)
for role_dn in role_dns:
@@ -447,7 +447,7 @@ class LdapDriver(object):
project_dns = self.__find_group_dns_with_member(
FLAGS.ldap_project_subtree, uid)
for project_dn in project_dns:
- self.__safe_remove_from_group(uid, role_dn)
+ self.__safe_remove_from_group(uid, project_dn)
def __delete_group(self, group_dn):
"""Delete Group"""
@@ -460,7 +460,8 @@ class LdapDriver(object):
for role_dn in self.__find_role_dns(project_dn):
self.__delete_group(role_dn)
- def __to_user(self, attr):
+ @staticmethod
+ def __to_user(attr):
"""Convert ldap attributes to User object"""
if attr == None:
return None
@@ -469,10 +470,10 @@ class LdapDriver(object):
'name': attr['cn'][0],
'access': attr['accessKey'][0],
'secret': attr['secretKey'][0],
- 'admin': (attr['isAdmin'][0] == 'TRUE')
- }
+ 'admin': (attr['isAdmin'][0] == 'TRUE')}
- def __to_key_pair(self, owner, attr):
+ @staticmethod
+ def __to_key_pair(owner, attr):
"""Convert ldap attributes to KeyPair object"""
if attr == None:
return None
@@ -481,8 +482,7 @@ class LdapDriver(object):
'name': attr['cn'][0],
'owner_id': owner,
'public_key': attr['sshPublicKey'][0],
- 'fingerprint': attr['keyFingerprint'][0],
- }
+ 'fingerprint': attr['keyFingerprint'][0]}
def __to_project(self, attr):
"""Convert ldap attributes to Project object"""
@@ -494,21 +494,22 @@ class LdapDriver(object):
'name': attr['cn'][0],
'project_manager_id': self.__dn_to_uid(attr['projectManager'][0]),
'description': attr.get('description', [None])[0],
- 'member_ids': [self.__dn_to_uid(x) for x in member_dns]
- }
+ 'member_ids': [self.__dn_to_uid(x) for x in member_dns]}
- def __dn_to_uid(self, dn):
+ @staticmethod
+ def __dn_to_uid(dn):
"""Convert user dn to uid"""
return dn.split(',')[0].split('=')[1]
- def __uid_to_dn(self, dn):
+ @staticmethod
+ def __uid_to_dn(dn):
"""Convert uid to dn"""
return 'uid=%s,%s' % (dn, FLAGS.ldap_user_subtree)
class FakeLdapDriver(LdapDriver):
"""Fake Ldap Auth driver"""
- def __init__(self):
+
+ def __init__(self): # pylint: disable-msg=W0231
__import__('nova.auth.fakeldap')
self.ldap = sys.modules['nova.auth.fakeldap']
-
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index 064fd78bc..d5fbec7c5 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -23,21 +23,20 @@ Nova authentication management
import logging
import os
import shutil
-import string
+import string # pylint: disable-msg=W0402
import tempfile
import uuid
import zipfile
from nova import crypto
+from nova import db
from nova import exception
from nova import flags
from nova import utils
from nova.auth import signer
-from nova.network import vpn
FLAGS = flags.FLAGS
-
flags.DEFINE_list('allowed_roles',
['cloudadmin', 'itsec', 'sysadmin', 'netadmin', 'developer'],
'Allowed roles for project')
@@ -52,7 +51,6 @@ flags.DEFINE_list('superuser_roles', ['cloudadmin'],
flags.DEFINE_list('global_roles', ['cloudadmin', 'itsec'],
'Roles that apply to all projects')
-
flags.DEFINE_string('credentials_template',
utils.abspath('auth/novarc.template'),
'Template for creating users rc file')
@@ -67,15 +65,14 @@ flags.DEFINE_string('credential_cert_file', 'cert.pem',
'Filename of certificate in credentials zip')
flags.DEFINE_string('credential_rc_file', 'novarc',
'Filename of rc in credentials zip')
-
flags.DEFINE_string('credential_cert_subject',
'/C=US/ST=California/L=MountainView/O=AnsoLabs/'
'OU=NovaDev/CN=%s-%s',
'Subject for certificate for users')
-
flags.DEFINE_string('auth_driver', 'nova.auth.ldapdriver.FakeLdapDriver',
'Driver that auth manager uses')
+
class AuthBase(object):
"""Base class for objects relating to auth
@@ -83,6 +80,7 @@ class AuthBase(object):
an id member. They may optionally contain methods that delegate to
AuthManager, but should not implement logic themselves.
"""
+
@classmethod
def safe_id(cls, obj):
"""Safe get object id
@@ -100,6 +98,7 @@ class AuthBase(object):
class User(AuthBase):
"""Object representing a user"""
+
def __init__(self, id, name, access, secret, admin):
AuthBase.__init__(self)
self.id = id
@@ -161,6 +160,7 @@ class KeyPair(AuthBase):
Even though this object is named KeyPair, only the public key and
fingerprint is stored. The user's private key is not saved.
"""
+
def __init__(self, id, name, owner_id, public_key, fingerprint):
AuthBase.__init__(self)
self.id = id
@@ -179,6 +179,7 @@ class KeyPair(AuthBase):
class Project(AuthBase):
"""Represents a Project returned from the datastore"""
+
def __init__(self, id, name, project_manager_id, description, member_ids):
AuthBase.__init__(self)
self.id = id
@@ -193,12 +194,12 @@ class Project(AuthBase):
@property
def vpn_ip(self):
- ip, port = AuthManager().get_project_vpn_data(self)
+ ip, _port = AuthManager().get_project_vpn_data(self)
return ip
@property
def vpn_port(self):
- ip, port = AuthManager().get_project_vpn_data(self)
+ _ip, port = AuthManager().get_project_vpn_data(self)
return port
def has_manager(self, user):
@@ -220,12 +221,9 @@ class Project(AuthBase):
return AuthManager().get_credentials(user, self)
def __repr__(self):
- return "Project('%s', '%s', '%s', '%s', %s)" % (self.id,
- self.name,
- self.project_manager_id,
- self.description,
- self.member_ids)
-
+ return "Project('%s', '%s', '%s', '%s', %s)" % \
+ (self.id, self.name, self.project_manager_id, self.description,
+ self.member_ids)
class AuthManager(object):
@@ -239,7 +237,9 @@ class AuthManager(object):
AuthManager also manages associated data related to Auth objects that
need to be more accessible, such as vpn ips and ports.
"""
+
_instance = None
+
def __new__(cls, *args, **kwargs):
"""Returns the AuthManager singleton"""
if not cls._instance:
@@ -252,6 +252,7 @@ class AuthManager(object):
__init__ is run every time AuthManager() is called, so we only
reset the driver if it is not set or a new driver is specified.
"""
+ self.network_manager = utils.import_object(FLAGS.network_manager)
if driver or not getattr(self, 'driver', None):
self.driver = utils.import_class(driver or FLAGS.auth_driver)
@@ -295,7 +296,7 @@ class AuthManager(object):
@return: User and project that the request represents.
"""
# TODO(vish): check for valid timestamp
- (access_key, sep, project_id) = access.partition(':')
+ (access_key, _sep, project_id) = access.partition(':')
logging.info('Looking up user: %r', access_key)
user = self.get_user_from_access_key(access_key)
@@ -318,7 +319,8 @@ class AuthManager(object):
raise exception.NotFound('User %s is not a member of project %s' %
(user.id, project.id))
if check_type == 's3':
- expected_signature = signer.Signer(user.secret.encode()).s3_authorization(headers, verb, path)
+ sign = signer.Signer(user.secret.encode())
+ expected_signature = sign.s3_authorization(headers, verb, path)
logging.debug('user.secret: %s', user.secret)
logging.debug('expected_signature: %s', expected_signature)
logging.debug('signature: %s', signature)
@@ -463,7 +465,8 @@ class AuthManager(object):
with self.driver() as drv:
drv.remove_role(User.safe_id(user), role, Project.safe_id(project))
- def get_roles(self, project_roles=True):
+ @staticmethod
+ def get_roles(project_roles=True):
"""Get list of allowed roles"""
if project_roles:
return list(set(FLAGS.allowed_roles) - set(FLAGS.global_roles))
@@ -491,8 +494,8 @@ class AuthManager(object):
return []
return [Project(**project_dict) for project_dict in project_list]
- def create_project(self, name, manager_user,
- description=None, member_users=None):
+ def create_project(self, name, manager_user, description=None,
+ member_users=None, context=None):
"""Create a project
@type name: str
@@ -516,12 +519,19 @@ class AuthManager(object):
if member_users:
member_users = [User.safe_id(u) for u in member_users]
with self.driver() as drv:
- project_dict = drv.create_project(name,
- User.safe_id(manager_user),
- description,
- member_users)
+ project_dict = drv.create_project(name,
+ User.safe_id(manager_user),
+ description,
+ member_users)
if project_dict:
- return Project(**project_dict)
+ project = Project(**project_dict)
+ try:
+ self.network_manager.allocate_network(context,
+ project.id)
+ except:
+ drv.delete_project(project.id)
+ raise
+ return project
def add_to_project(self, user, project):
"""Add user to project"""
@@ -547,7 +557,8 @@ class AuthManager(object):
return drv.remove_from_project(User.safe_id(user),
Project.safe_id(project))
- def get_project_vpn_data(self, project):
+ @staticmethod
+ def get_project_vpn_data(project, context=None):
"""Gets vpn ip and port for project
@type project: Project or project_id
@@ -557,15 +568,26 @@ class AuthManager(object):
@return: A tuple containing (ip, port) or None, None if vpn has
not been allocated for user.
"""
- network_data = vpn.NetworkData.lookup(Project.safe_id(project))
- if not network_data:
+
+ network_ref = db.project_get_network(context,
+ Project.safe_id(project))
+
+ if not network_ref['vpn_public_port']:
raise exception.NotFound('project network data has not been set')
- return (network_data.ip, network_data.port)
+ return (network_ref['vpn_public_address'],
+ network_ref['vpn_public_port'])
- def delete_project(self, project):
+ def delete_project(self, project, context=None):
"""Deletes a project"""
+ try:
+ network_ref = db.project_get_network(context,
+ Project.safe_id(project))
+ db.network_destroy(context, network_ref['id'])
+ except:
+ logging.exception('Could not destroy network for %s',
+ project)
with self.driver() as drv:
- return drv.delete_project(Project.safe_id(project))
+ drv.delete_project(Project.safe_id(project))
def get_user(self, uid):
"""Retrieves a user by id"""
@@ -611,8 +633,10 @@ class AuthManager(object):
@rtype: User
@return: The new user.
"""
- if access == None: access = str(uuid.uuid4())
- if secret == None: secret = str(uuid.uuid4())
+ if access == None:
+ access = str(uuid.uuid4())
+ if secret == None:
+ secret = str(uuid.uuid4())
with self.driver() as drv:
user_dict = drv.create_user(name, access, secret, admin)
if user_dict:
@@ -654,10 +678,10 @@ class AuthManager(object):
def create_key_pair(self, user, key_name, public_key, fingerprint):
"""Creates a key pair for user"""
with self.driver() as drv:
- kp_dict = drv.create_key_pair(User.safe_id(user),
- key_name,
- public_key,
- fingerprint)
+ kp_dict = drv.create_key_pair(User.safe_id(user),
+ key_name,
+ public_key,
+ fingerprint)
if kp_dict:
return KeyPair(**kp_dict)
@@ -698,15 +722,15 @@ class AuthManager(object):
zippy.writestr(FLAGS.credential_key_file, private_key)
zippy.writestr(FLAGS.credential_cert_file, signed_cert)
- network_data = vpn.NetworkData.lookup(pid)
- if network_data:
- configfile = open(FLAGS.vpn_client_template,"r")
+ (vpn_ip, vpn_port) = self.get_project_vpn_data(project)
+ if vpn_ip:
+ configfile = open(FLAGS.vpn_client_template, "r")
s = string.Template(configfile.read())
configfile.close()
config = s.substitute(keyfile=FLAGS.credential_key_file,
certfile=FLAGS.credential_cert_file,
- ip=network_data.ip,
- port=network_data.port)
+ ip=vpn_ip,
+ port=vpn_port)
zippy.writestr(FLAGS.credential_vpn_file, config)
else:
logging.warn("No vpn data for project %s" %
@@ -715,10 +739,10 @@ class AuthManager(object):
zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(user.id))
zippy.close()
with open(zf, 'rb') as f:
- buffer = f.read()
+ read_buffer = f.read()
shutil.rmtree(tmpdir)
- return buffer
+ return read_buffer
def get_environment_rc(self, user, project=None):
"""Get credential zip for user in project"""
@@ -729,18 +753,18 @@ class AuthManager(object):
pid = Project.safe_id(project)
return self.__generate_rc(user.access, user.secret, pid)
- def __generate_rc(self, access, secret, pid):
+ @staticmethod
+ def __generate_rc(access, secret, pid):
"""Generate rc file for user"""
rc = open(FLAGS.credentials_template).read()
- rc = rc % { 'access': access,
- 'project': pid,
- 'secret': secret,
- 'ec2': FLAGS.ec2_url,
- 's3': 'http://%s:%s' % (FLAGS.s3_host, FLAGS.s3_port),
- 'nova': FLAGS.ca_file,
- 'cert': FLAGS.credential_cert_file,
- 'key': FLAGS.credential_key_file,
- }
+ rc = rc % {'access': access,
+ 'project': pid,
+ 'secret': secret,
+ 'ec2': FLAGS.ec2_url,
+ 's3': 'http://%s:%s' % (FLAGS.s3_host, FLAGS.s3_port),
+ 'nova': FLAGS.ca_file,
+ 'cert': FLAGS.credential_cert_file,
+ 'key': FLAGS.credential_key_file}
return rc
def _generate_x509_cert(self, uid, pid):
@@ -751,6 +775,7 @@ class AuthManager(object):
signed_cert = crypto.sign_csr(csr, pid)
return (private_key, signed_cert)
- def __cert_subject(self, uid):
+ @staticmethod
+ def __cert_subject(uid):
"""Helper to generate cert subject"""
return FLAGS.credential_cert_subject % (uid, utils.isotime())
diff --git a/nova/auth/rbac.py b/nova/auth/rbac.py
index 7fab9419f..d157f44b3 100644
--- a/nova/auth/rbac.py
+++ b/nova/auth/rbac.py
@@ -16,38 +16,54 @@
# License for the specific language governing permissions and limitations
# under the License.
+"""Role-based access control decorators for wrapping other
+methods with."""
+
from nova import exception
-from nova.auth import manager
def allow(*roles):
- def wrap(f):
- def wrapped_f(self, context, *args, **kwargs):
+ """Allow the given roles access to the wrapped function."""
+
+ def wrap(func): # pylint: disable-msg=C0111
+
+ def wrapped_func(self, context, *args,
+ **kwargs): # pylint: disable-msg=C0111
if context.user.is_superuser():
- return f(self, context, *args, **kwargs)
+ return func(self, context, *args, **kwargs)
for role in roles:
if __matches_role(context, role):
- return f(self, context, *args, **kwargs)
+ return func(self, context, *args, **kwargs)
raise exception.NotAuthorized()
- return wrapped_f
+
+ return wrapped_func
+
return wrap
+
def deny(*roles):
- def wrap(f):
- def wrapped_f(self, context, *args, **kwargs):
+ """Deny the given roles access to the wrapped function."""
+
+ def wrap(func): # pylint: disable-msg=C0111
+
+ def wrapped_func(self, context, *args,
+ **kwargs): # pylint: disable-msg=C0111
if context.user.is_superuser():
- return f(self, context, *args, **kwargs)
+ return func(self, context, *args, **kwargs)
for role in roles:
if __matches_role(context, role):
raise exception.NotAuthorized()
- return f(self, context, *args, **kwargs)
- return wrapped_f
+ return func(self, context, *args, **kwargs)
+
+ return wrapped_func
+
return wrap
+
def __matches_role(context, role):
+ """Check if a role is allowed."""
if role == 'all':
return True
if role == 'none':
return False
return context.project.has_role(context.user.id, role)
-
diff --git a/nova/auth/signer.py b/nova/auth/signer.py
index 634f22f0d..f7d29f534 100644
--- a/nova/auth/signer.py
+++ b/nova/auth/signer.py
@@ -48,13 +48,17 @@ import hashlib
import hmac
import logging
import urllib
-import boto # NOTE(vish): for new boto
-import boto.utils # NOTE(vish): for old boto
+
+# NOTE(vish): for new boto
+import boto
+# NOTE(vish): for old boto
+import boto.utils
from nova.exception import Error
+
class Signer(object):
- """ hacked up code from boto/connection.py """
+ """Hacked up code from boto/connection.py"""
def __init__(self, secret_key):
self.hmac = hmac.new(secret_key, digestmod=hashlib.sha1)
@@ -62,23 +66,27 @@ class Signer(object):
self.hmac_256 = hmac.new(secret_key, digestmod=hashlib.sha256)
def s3_authorization(self, headers, verb, path):
+ """Generate S3 authorization string."""
c_string = boto.utils.canonical_string(verb, path, headers)
- hmac = self.hmac.copy()
- hmac.update(c_string)
- b64_hmac = base64.encodestring(hmac.digest()).strip()
+ hmac_copy = self.hmac.copy()
+ hmac_copy.update(c_string)
+ b64_hmac = base64.encodestring(hmac_copy.digest()).strip()
return b64_hmac
def generate(self, params, verb, server_string, path):
+ """Generate auth string according to what SignatureVersion is given."""
if params['SignatureVersion'] == '0':
return self._calc_signature_0(params)
if params['SignatureVersion'] == '1':
return self._calc_signature_1(params)
if params['SignatureVersion'] == '2':
return self._calc_signature_2(params, verb, server_string, path)
- raise Error('Unknown Signature Version: %s' % self.SignatureVersion)
+ raise Error('Unknown Signature Version: %s' %
+ params['SignatureVersion'])
-
- def _get_utf8_value(self, value):
+ @staticmethod
+ def _get_utf8_value(value):
+ """Get the UTF8-encoded version of a value."""
if not isinstance(value, str) and not isinstance(value, unicode):
value = str(value)
if isinstance(value, unicode):
@@ -87,10 +95,11 @@ class Signer(object):
return value
def _calc_signature_0(self, params):
+ """Generate AWS signature version 0 string."""
s = params['Action'] + params['Timestamp']
self.hmac.update(s)
keys = params.keys()
- keys.sort(cmp = lambda x, y: cmp(x.lower(), y.lower()))
+ keys.sort(cmp=lambda x, y: cmp(x.lower(), y.lower()))
pairs = []
for key in keys:
val = self._get_utf8_value(params[key])
@@ -98,8 +107,9 @@ class Signer(object):
return base64.b64encode(self.hmac.digest())
def _calc_signature_1(self, params):
+ """Generate AWS signature version 1 string."""
keys = params.keys()
- keys.sort(cmp = lambda x, y: cmp(x.lower(), y.lower()))
+ keys.sort(cmp=lambda x, y: cmp(x.lower(), y.lower()))
pairs = []
for key in keys:
self.hmac.update(key)
@@ -109,29 +119,34 @@ class Signer(object):
return base64.b64encode(self.hmac.digest())
def _calc_signature_2(self, params, verb, server_string, path):
+ """Generate AWS signature version 2 string."""
logging.debug('using _calc_signature_2')
string_to_sign = '%s\n%s\n%s\n' % (verb, server_string, path)
if self.hmac_256:
- hmac = self.hmac_256
+ current_hmac = self.hmac_256
params['SignatureMethod'] = 'HmacSHA256'
else:
- hmac = self.hmac
+ current_hmac = self.hmac
params['SignatureMethod'] = 'HmacSHA1'
keys = params.keys()
keys.sort()
pairs = []
for key in keys:
val = self._get_utf8_value(params[key])
- pairs.append(urllib.quote(key, safe='') + '=' + urllib.quote(val, safe='-_~'))
+ val = urllib.quote(val, safe='-_~')
+ pairs.append(urllib.quote(key, safe='') + '=' + val)
qs = '&'.join(pairs)
- logging.debug('query string: %s' % qs)
+ logging.debug('query string: %s', qs)
string_to_sign += qs
- logging.debug('string_to_sign: %s' % string_to_sign)
- hmac.update(string_to_sign)
- b64 = base64.b64encode(hmac.digest())
- logging.debug('len(b64)=%d' % len(b64))
- logging.debug('base64 encoded digest: %s' % b64)
+ logging.debug('string_to_sign: %s', string_to_sign)
+ current_hmac.update(string_to_sign)
+ b64 = base64.b64encode(current_hmac.digest())
+ logging.debug('len(b64)=%d', len(b64))
+ logging.debug('base64 encoded digest: %s', b64)
return b64
+
if __name__ == '__main__':
- print Signer('foo').generate({"SignatureMethod": 'HmacSHA256', 'SignatureVersion': '2'}, "get", "server", "/foo")
+ print Signer('foo').generate({'SignatureMethod': 'HmacSHA256',
+ 'SignatureVersion': '2'},
+ 'get', 'server', '/foo')
diff --git a/nova/cloudpipe/api.py b/nova/cloudpipe/api.py
index 0bffe9aa3..56aa89834 100644
--- a/nova/cloudpipe/api.py
+++ b/nova/cloudpipe/api.py
@@ -21,9 +21,10 @@ Tornado REST API Request Handlers for CloudPipe
"""
import logging
-import tornado.web
import urllib
+import tornado.web
+
from nova import crypto
from nova.auth import manager
diff --git a/nova/cloudpipe/bootscript.sh b/nova/cloudpipe/bootscript.sh
index 82ec2012a..30d9ad102 100755
--- a/nova/cloudpipe/bootscript.sh
+++ b/nova/cloudpipe/bootscript.sh
@@ -44,8 +44,8 @@ CSRTEXT=$(python -c "import urllib; print urllib.quote('''$CSRTEXT''')")
# SIGN the csr and save as server.crt
# CURL fetch to the supervisor, POSTing the CSR text, saving the result as the CRT file
-curl $SUPERVISOR -d "cert=$CSRTEXT" > /etc/openvpn/server.crt
-curl $SUPERVISOR/getca/ > /etc/openvpn/ca.crt
+curl --fail $SUPERVISOR -d "cert=$CSRTEXT" > /etc/openvpn/server.crt
+curl --fail $SUPERVISOR/getca/ > /etc/openvpn/ca.crt
# Customize the server.conf.template
cd /etc/openvpn
diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py
index 5b0ed3471..2867bcb21 100644
--- a/nova/cloudpipe/pipelib.py
+++ b/nova/cloudpipe/pipelib.py
@@ -36,11 +36,11 @@ from nova.endpoint import api
FLAGS = flags.FLAGS
-
flags.DEFINE_string('boot_script_template',
utils.abspath('cloudpipe/bootscript.sh'),
'Template for script to run on cloudpipe instance boot')
+
class CloudPipe(object):
def __init__(self, cloud_controller):
self.controller = cloud_controller
diff --git a/nova/compute/disk.py b/nova/compute/disk.py
index 1ffcca685..c340c5a79 100644
--- a/nova/compute/disk.py
+++ b/nova/compute/disk.py
@@ -24,6 +24,7 @@ Includes injection of SSH PGP keys into authorized_keys file.
import logging
import os
import tempfile
+
from twisted.internet import defer
from nova import exception
@@ -84,6 +85,7 @@ def partition(infile, outfile, local_bytes=0, local_type='ext2', execute=None):
yield execute('dd if=%s of=%s bs=%d seek=%d conv=notrunc,fsync'
% (infile, outfile, sector_size, primary_first))
+
@defer.inlineCallbacks
def inject_data(image, key=None, net=None, partition=None, execute=None):
"""Injects a ssh key and optionally net data into a disk image.
@@ -137,6 +139,7 @@ def inject_data(image, key=None, net=None, partition=None, execute=None):
# remove loopback
yield execute('sudo losetup -d %s' % device)
+
@defer.inlineCallbacks
def _inject_key_into_fs(key, fs, execute=None):
sshdir = os.path.join(os.path.join(fs, 'root'), '.ssh')
@@ -146,6 +149,7 @@ def _inject_key_into_fs(key, fs, execute=None):
keyfile = os.path.join(sshdir, 'authorized_keys')
yield execute('sudo tee -a %s' % keyfile, '\n' + key.strip() + '\n')
+
@defer.inlineCallbacks
def _inject_net_into_fs(net, fs, execute=None):
netfile = os.path.join(os.path.join(os.path.join(
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index 439be3c7d..0102bae54 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -21,10 +21,10 @@
The built-in instance properties.
"""
-INSTANCE_TYPES = {}
-INSTANCE_TYPES['m1.tiny'] = {'memory_mb': 512, 'vcpus': 1, 'local_gb': 0}
-INSTANCE_TYPES['m1.small'] = {'memory_mb': 1024, 'vcpus': 1, 'local_gb': 10}
-INSTANCE_TYPES['m1.medium'] = {'memory_mb': 2048, 'vcpus': 2, 'local_gb': 10}
-INSTANCE_TYPES['m1.large'] = {'memory_mb': 4096, 'vcpus': 4, 'local_gb': 10}
-INSTANCE_TYPES['m1.xlarge'] = {'memory_mb': 8192, 'vcpus': 4, 'local_gb': 10}
-INSTANCE_TYPES['c1.medium'] = {'memory_mb': 2048, 'vcpus': 4, 'local_gb': 10}
+INSTANCE_TYPES = {
+ 'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
+ 'm1.small': dict(memory_mb=1024, vcpus=1, local_gb=10, flavorid=2),
+ 'm1.medium': dict(memory_mb=2048, vcpus=2, local_gb=10, flavorid=3),
+ 'm1.large': dict(memory_mb=4096, vcpus=4, local_gb=10, flavorid=4),
+ 'm1.xlarge': dict(memory_mb=8192, vcpus=4, local_gb=10, flavorid=5),
+ 'c1.medium': dict(memory_mb=2048, vcpus=4, local_gb=10, flavorid=6)}
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
new file mode 100644
index 000000000..c15c9e1f5
--- /dev/null
+++ b/nova/compute/manager.py
@@ -0,0 +1,199 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Handles all code relating to instances (guest vms)
+"""
+
+import base64
+import logging
+import os
+
+from twisted.internet import defer
+
+from nova import db
+from nova import exception
+from nova import flags
+from nova import process
+from nova import manager
+from nova import utils
+from nova.compute import power_state
+
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('instances_path', utils.abspath('../instances'),
+ 'where instances are stored on disk')
+flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection',
+ 'Driver to use for volume creation')
+
+
+class ComputeManager(manager.Manager):
+ """
+ Manages the running instances.
+ """
+ def __init__(self, compute_driver=None, *args, **kwargs):
+ """Load configuration options and connect to the hypervisor."""
+ # TODO(vish): sync driver creation logic with the rest of the system
+ if not compute_driver:
+ compute_driver = FLAGS.compute_driver
+ self.driver = utils.import_object(compute_driver)
+ self.network_manager = utils.import_object(FLAGS.network_manager)
+ super(ComputeManager, self).__init__(*args, **kwargs)
+
+ def _update_state(self, context, instance_id):
+ """Update the state of an instance from the driver info"""
+ # FIXME(ja): include other fields from state?
+ instance_ref = db.instance_get(context, instance_id)
+ state = self.driver.get_info(instance_ref.name)['state']
+ db.instance_state(context, instance_id, state)
+
+ @defer.inlineCallbacks
+ @exception.wrap_exception
+ def run_instance(self, context, instance_id, **_kwargs):
+ """Launch a new instance with specified options."""
+ instance_ref = db.instance_get(context, instance_id)
+ if instance_ref['str_id'] in self.driver.list_instances():
+ raise exception.Error("Instance has already been created")
+ logging.debug("Starting instance %s...", instance_id)
+ project_id = instance_ref['project_id']
+ self.network_manager.setup_compute_network(context, project_id)
+ db.instance_update(context,
+ instance_id,
+ {'host': FLAGS.host})
+
+ # TODO(vish) check to make sure the availability zone matches
+ db.instance_state(context,
+ instance_id,
+ power_state.NOSTATE,
+ 'spawning')
+
+ try:
+ yield self.driver.spawn(instance_ref)
+ except: # pylint: disable-msg=W0702
+ logging.exception("Failed to spawn instance %s",
+ instance_ref['name'])
+ db.instance_state(context, instance_id, power_state.SHUTDOWN)
+
+ self._update_state(context, instance_id)
+
+ @defer.inlineCallbacks
+ @exception.wrap_exception
+ def terminate_instance(self, context, instance_id):
+ """Terminate an instance on this machine."""
+ logging.debug("Got told to terminate instance %s", instance_id)
+ instance_ref = db.instance_get(context, instance_id)
+
+ # TODO(vish): move this logic to layer?
+ if instance_ref['state'] == power_state.SHUTOFF:
+ db.instance_destroy(context, instance_id)
+ raise exception.Error('trying to destroy already destroyed'
+ ' instance: %s' % instance_id)
+
+ db.instance_state(context,
+ instance_id,
+ power_state.NOSTATE,
+ 'shutting_down')
+ yield self.driver.destroy(instance_ref)
+
+ # TODO(ja): should we keep it in a terminated state for a bit?
+ db.instance_destroy(context, instance_id)
+
+ @defer.inlineCallbacks
+ @exception.wrap_exception
+ def reboot_instance(self, context, instance_id):
+ """Reboot an instance on this server."""
+ self._update_state(context, instance_id)
+ instance_ref = db.instance_get(context, instance_id)
+
+ if instance_ref['state'] != power_state.RUNNING:
+ raise exception.Error(
+ 'trying to reboot a non-running'
+ 'instance: %s (state: %s excepted: %s)' %
+ (instance_ref['str_id'],
+ instance_ref['state'],
+ power_state.RUNNING))
+
+ logging.debug('rebooting instance %s', instance_ref['name'])
+ db.instance_state(context,
+ instance_id,
+ power_state.NOSTATE,
+ 'rebooting')
+ yield self.driver.reboot(instance_ref)
+ self._update_state(context, instance_id)
+
+ @exception.wrap_exception
+ def get_console_output(self, context, instance_id):
+ """Send the console output for an instance."""
+ # TODO(vish): Move this into the driver layer
+
+ logging.debug("Getting console output for %s", (instance_id))
+ instance_ref = db.instance_get(context, instance_id)
+
+ if FLAGS.connection_type == 'libvirt':
+ fname = os.path.abspath(os.path.join(FLAGS.instances_path,
+ instance_ref['str_id'],
+ 'console.log'))
+ with open(fname, 'r') as f:
+ output = f.read()
+ else:
+ output = 'FAKE CONSOLE OUTPUT'
+
+ # TODO(termie): this stuff belongs in the API layer, no need to
+ # munge the data we send to ourselves
+ output = {"InstanceId": instance_id,
+ "Timestamp": "2",
+ "output": base64.b64encode(output)}
+ return output
+
+ @defer.inlineCallbacks
+ @exception.wrap_exception
+ def attach_volume(self, context, instance_id, volume_id, mountpoint):
+ """Attach a volume to an instance."""
+ # TODO(termie): check that instance_id exists
+ volume_ref = db.volume_get(context, volume_id)
+ yield self._init_aoe()
+ # TODO(vish): Move this into the driver layer
+ yield process.simple_execute(
+ "sudo virsh attach-disk %s /dev/etherd/%s %s" %
+ (instance_id,
+ volume_ref['aoe_device'],
+ mountpoint.rpartition('/dev/')[2]))
+ db.volume_attached(context, volume_id, instance_id, mountpoint)
+ defer.returnValue(True)
+
+ @defer.inlineCallbacks
+ @exception.wrap_exception
+ def detach_volume(self, context, instance_id, volume_id):
+ """Detach a volume from an instance."""
+ # despite the documentation, virsh detach-disk just wants the device
+ # name without the leading /dev/
+ # TODO(termie): check that instance_id exists
+ volume_ref = db.volume_get(context, volume_id)
+ target = volume_ref['mountpoint'].rpartition('/dev/')[2]
+ # TODO(vish): Move this into the driver layer
+ yield process.simple_execute(
+ "sudo virsh detach-disk %s %s " % (instance_id, target))
+ db.volume_detached(context, volume_id)
+ defer.returnValue(True)
+
+ @defer.inlineCallbacks
+ def _init_aoe(self):
+ """Discover aoe exported devices"""
+ # TODO(vish): these shell calls should move into volume manager.
+ yield process.simple_execute("sudo aoe-discover")
+ yield process.simple_execute("sudo aoe-stat")
diff --git a/nova/compute/model.py b/nova/compute/model.py
deleted file mode 100644
index cde76dc58..000000000
--- a/nova/compute/model.py
+++ /dev/null
@@ -1,317 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Datastore Model objects for Compute Instances, with
-InstanceDirectory manager.
-
-# Create a new instance?
->>> InstDir = InstanceDirectory()
->>> inst = InstDir.new()
->>> inst.destroy()
-True
->>> inst = InstDir['i-123']
->>> inst['ip'] = "192.168.0.3"
->>> inst['project_id'] = "projectA"
->>> inst.save()
-True
-
->>> InstDir['i-123']
-<Instance:i-123>
->>> InstDir.all.next()
-<Instance:i-123>
-
->>> inst.destroy()
-True
-"""
-
-import datetime
-import uuid
-
-from nova import datastore
-from nova import exception
-from nova import flags
-from nova import utils
-
-
-FLAGS = flags.FLAGS
-flags.DEFINE_integer('total_memory_mb', 1000,
- 'amount of memory a node has for VMs in MB')
-flags.DEFINE_integer('total_disk_gb', 1000,
- 'amount of disk space a node has for VMs in GB')
-
-# TODO(todd): Implement this at the class level for Instance
-class InstanceDirectory(object):
- """an api for interacting with the global state of instances"""
-
- def get(self, instance_id):
- """returns an instance object for a given id"""
- return Instance(instance_id)
-
- def __getitem__(self, item):
- return self.get(item)
-
- @datastore.absorb_connection_error
- def by_project(self, project):
- """returns a list of instance objects for a project"""
- for instance_id in datastore.Redis.instance().smembers('project:%s:instances' % project):
- yield Instance(instance_id)
-
- @datastore.absorb_connection_error
- def by_node(self, node):
- """returns a list of instances for a node"""
- for instance_id in datastore.Redis.instance().smembers('node:%s:instances' % node):
- yield Instance(instance_id)
-
- def by_ip(self, ip):
- """returns an instance object that is using the IP"""
- # NOTE(vish): The ip association should be just a single value, but
- # to maintain consistency it is using the standard
- # association and the ugly method for retrieving
- # the first item in the set below.
- result = datastore.Redis.instance().smembers('ip:%s:instances' % ip)
- if not result:
- return None
- return Instance(list(result)[0])
-
- def by_volume(self, volume_id):
- """returns the instance a volume is attached to"""
- pass
-
- @datastore.absorb_connection_error
- def exists(self, instance_id):
- return datastore.Redis.instance().sismember('instances', instance_id)
-
- @property
- @datastore.absorb_connection_error
- def all(self):
- """returns a list of all instances"""
- for instance_id in datastore.Redis.instance().smembers('instances'):
- yield Instance(instance_id)
-
- def new(self):
- """returns an empty Instance object, with ID"""
- instance_id = utils.generate_uid('i')
- return self.get(instance_id)
-
-
-class Instance(datastore.BasicModel):
- """Wrapper around stored properties of an instance"""
-
- def __init__(self, instance_id):
- """loads an instance from the datastore if exists"""
- # set instance data before super call since it uses default_state
- self.instance_id = instance_id
- super(Instance, self).__init__()
-
- def default_state(self):
- return {'state': 0,
- 'state_description': 'pending',
- 'instance_id': self.instance_id,
- 'node_name': 'unassigned',
- 'project_id': 'unassigned',
- 'user_id': 'unassigned',
- 'private_dns_name': 'unassigned'}
-
- @property
- def identifier(self):
- return self.instance_id
-
- @property
- def project(self):
- if self.state.get('project_id', None):
- return self.state['project_id']
- return self.state.get('owner_id', 'unassigned')
-
- @property
- def volumes(self):
- """returns a list of attached volumes"""
- pass
-
- @property
- def reservation(self):
- """Returns a reservation object"""
- pass
-
- def save(self):
- """Call into superclass to save object, then save associations"""
- # NOTE(todd): doesn't track migration between projects/nodes,
- # it just adds the first one
- is_new = self.is_new_record()
- node_set = (self.state['node_name'] != 'unassigned' and
- self.initial_state.get('node_name', 'unassigned')
- == 'unassigned')
- success = super(Instance, self).save()
- if success and is_new:
- self.associate_with("project", self.project)
- self.associate_with("ip", self.state['private_dns_name'])
- if success and node_set:
- self.associate_with("node", self.state['node_name'])
- return True
-
- def destroy(self):
- """Destroy associations, then destroy the object"""
- self.unassociate_with("project", self.project)
- self.unassociate_with("node", self.state['node_name'])
- self.unassociate_with("ip", self.state['private_dns_name'])
- return super(Instance, self).destroy()
-
-class Host(datastore.BasicModel):
- """A Host is the machine where a Daemon is running."""
-
- def __init__(self, hostname):
- """loads an instance from the datastore if exists"""
- # set instance data before super call since it uses default_state
- self.hostname = hostname
- super(Host, self).__init__()
-
- def default_state(self):
- return {"hostname": self.hostname}
-
- @property
- def identifier(self):
- return self.hostname
-
-
-class Daemon(datastore.BasicModel):
- """A Daemon is a job (compute, api, network, ...) that runs on a host."""
-
- def __init__(self, host_or_combined, binpath=None):
- """loads an instance from the datastore if exists"""
- # set instance data before super call since it uses default_state
- # since loading from datastore expects a combined key that
- # is equivilent to identifier, we need to expect that, while
- # maintaining meaningful semantics (2 arguments) when creating
- # from within other code like the bin/nova-* scripts
- if binpath:
- self.hostname = host_or_combined
- self.binary = binpath
- else:
- self.hostname, self.binary = host_or_combined.split(":")
- super(Daemon, self).__init__()
-
- def default_state(self):
- return {"hostname": self.hostname,
- "binary": self.binary,
- "total_memory_mb": FLAGS.total_memory_mb,
- "total_disk_gb": FLAGS.total_disk_gb,
- "updated_at": utils.isotime()
- }
-
- @property
- def identifier(self):
- return "%s:%s" % (self.hostname, self.binary)
-
- def save(self):
- """Call into superclass to save object, then save associations"""
- # NOTE(todd): this makes no attempt to destroy itsself,
- # so after termination a record w/ old timestmap remains
- success = super(Daemon, self).save()
- if success:
- self.associate_with("host", self.hostname)
- return True
-
- def destroy(self):
- """Destroy associations, then destroy the object"""
- self.unassociate_with("host", self.hostname)
- return super(Daemon, self).destroy()
-
- def heartbeat(self):
- self['updated_at'] = utils.isotime()
- return self.save()
-
- @classmethod
- def by_host(cls, hostname):
- for x in cls.associated_to("host", hostname):
- yield x
-
-class SessionToken(datastore.BasicModel):
- """This is a short-lived auth token that is passed through web requests"""
-
- def __init__(self, session_token):
- self.token = session_token
- self.default_ttl = FLAGS.auth_token_ttl
- super(SessionToken, self).__init__()
-
- @property
- def identifier(self):
- return self.token
-
- def default_state(self):
- now = datetime.datetime.utcnow()
- diff = datetime.timedelta(seconds=self.default_ttl)
- expires = now + diff
- return {'user': None, 'session_type': None, 'token': self.token,
- 'expiry': expires.strftime(utils.TIME_FORMAT)}
-
- def save(self):
- """Call into superclass to save object, then save associations"""
- if not self['user']:
- raise exception.Invalid("SessionToken requires a User association")
- success = super(SessionToken, self).save()
- if success:
- self.associate_with("user", self['user'])
- return True
-
- @classmethod
- def lookup(cls, key):
- token = super(SessionToken, cls).lookup(key)
- if token:
- expires_at = utils.parse_isotime(token['expiry'])
- if datetime.datetime.utcnow() >= expires_at:
- token.destroy()
- return None
- return token
-
- @classmethod
- def generate(cls, userid, session_type=None):
- """make a new token for the given user"""
- token = str(uuid.uuid4())
- while cls.lookup(token):
- token = str(uuid.uuid4())
- instance = cls(token)
- instance['user'] = userid
- instance['session_type'] = session_type
- instance.save()
- return instance
-
- def update_expiry(self, **kwargs):
- """updates the expirty attribute, but doesn't save"""
- if not kwargs:
- kwargs['seconds'] = self.default_ttl
- time = datetime.datetime.utcnow()
- diff = datetime.timedelta(**kwargs)
- expires = time + diff
- self['expiry'] = expires.strftime(utils.TIME_FORMAT)
-
- def is_expired(self):
- now = datetime.datetime.utcnow()
- expires = utils.parse_isotime(self['expiry'])
- return expires <= now
-
- def ttl(self):
- """number of seconds remaining before expiration"""
- now = datetime.datetime.utcnow()
- expires = utils.parse_isotime(self['expiry'])
- delta = expires - now
- return (delta.seconds + (delta.days * 24 * 3600))
-
-
-if __name__ == "__main__":
- import doctest
- doctest.testmod()
diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py
index 19e1a483d..268864900 100644
--- a/nova/compute/monitor.py
+++ b/nova/compute/monitor.py
@@ -24,14 +24,15 @@ Instance Monitoring:
in the object store.
"""
-import boto
-import boto.s3
import datetime
import logging
import os
-import rrdtool
import sys
import time
+
+import boto
+import boto.s3
+import rrdtool
from twisted.internet import defer
from twisted.internet import task
from twisted.application import service
@@ -41,13 +42,12 @@ from nova.virt import connection as virt_connection
FLAGS = flags.FLAGS
-flags.DEFINE_integer(
- 'monitoring_instances_delay', 5, 'Sleep time between updates')
-flags.DEFINE_integer(
- 'monitoring_instances_step', 300, 'Interval of RRD updates')
-flags.DEFINE_string(
- 'monitoring_rrd_path', '/var/nova/monitor/instances',
- 'Location of RRD files')
+flags.DEFINE_integer('monitoring_instances_delay', 5,
+ 'Sleep time between updates')
+flags.DEFINE_integer('monitoring_instances_step', 300,
+ 'Interval of RRD updates')
+flags.DEFINE_string('monitoring_rrd_path', '/var/nova/monitor/instances',
+ 'Location of RRD files')
RRD_VALUES = {
@@ -61,7 +61,7 @@ RRD_VALUES = {
'RRA:MAX:0.5:6:800',
'RRA:MAX:0.5:24:800',
'RRA:MAX:0.5:288:800',
- ],
+ ],
'net': [
'DS:rx:COUNTER:600:0:1250000',
'DS:tx:COUNTER:600:0:1250000',
@@ -73,7 +73,7 @@ RRD_VALUES = {
'RRA:MAX:0.5:6:800',
'RRA:MAX:0.5:24:800',
'RRA:MAX:0.5:288:800',
- ],
+ ],
'disk': [
'DS:rd:COUNTER:600:U:U',
'DS:wr:COUNTER:600:U:U',
@@ -85,12 +85,13 @@ RRD_VALUES = {
'RRA:MAX:0.5:6:800',
'RRA:MAX:0.5:24:800',
'RRA:MAX:0.5:444:800',
- ]
-}
+ ]
+ }
utcnow = datetime.datetime.utcnow
+
def update_rrd(instance, name, data):
"""
Updates the specified RRD file.
@@ -106,6 +107,7 @@ def update_rrd(instance, name, data):
'%d:%s' % (timestamp, data)
)
+
def init_rrd(instance, name):
"""
Initializes the specified RRD file.
@@ -124,6 +126,7 @@ def init_rrd(instance, name):
'--start', '0',
*RRD_VALUES[name]
)
+
def graph_cpu(instance, duration):
"""
@@ -148,6 +151,7 @@ def graph_cpu(instance, duration):
store_graph(instance.instance_id, filename)
+
def graph_net(instance, duration):
"""
Creates a graph of network usage for the specified instance and duration.
@@ -174,6 +178,7 @@ def graph_net(instance, duration):
)
store_graph(instance.instance_id, filename)
+
def graph_disk(instance, duration):
"""
@@ -202,6 +207,7 @@ def graph_disk(instance, duration):
store_graph(instance.instance_id, filename)
+
def store_graph(instance_id, filename):
"""
Transmits the specified graph file to internal object store on cloud
@@ -387,6 +393,7 @@ class InstanceMonitor(object, service.Service):
"""
Monitors the running instances of the current machine.
"""
+
def __init__(self):
"""
Initialize the monitoring loop.
diff --git a/nova/compute/service.py b/nova/compute/service.py
index 820116453..4df7e7171 100644
--- a/nova/compute/service.py
+++ b/nova/compute/service.py
@@ -17,350 +17,15 @@
# under the License.
"""
-Compute Service:
-
- Runs on each compute host, managing the
- hypervisor using the virt module.
-
+Compute service allows rpc calls to the compute manager and reports state
+to the database.
"""
-import base64
-import json
-import logging
-import os
-import sys
-from twisted.internet import defer
-from twisted.internet import task
-
-from nova import exception
-from nova import flags
-from nova import process
from nova import service
-from nova import utils
-from nova.compute import disk
-from nova.compute import model
-from nova.compute import power_state
-from nova.compute.instance_types import INSTANCE_TYPES
-from nova.network import service as network_service
-from nova.objectstore import image # for image_path flag
-from nova.virt import connection as virt_connection
-from nova.volume import service as volume_service
-
-
-FLAGS = flags.FLAGS
-flags.DEFINE_string('instances_path', utils.abspath('../instances'),
- 'where instances are stored on disk')
class ComputeService(service.Service):
"""
- Manages the running instances.
+ Compute Service automatically passes commands on to the Compute Manager
"""
- def __init__(self):
- """ load configuration options for this node and connect to the hypervisor"""
- super(ComputeService, self).__init__()
- self._instances = {}
- self._conn = virt_connection.get_connection()
- self.instdir = model.InstanceDirectory()
- # TODO(joshua): This needs to ensure system state, specifically: modprobe aoe
-
- def noop(self):
- """ simple test of an AMQP message call """
- return defer.succeed('PONG')
-
- def get_instance(self, instance_id):
- # inst = self.instdir.get(instance_id)
- # return inst
- if self.instdir.exists(instance_id):
- return Instance.fromName(self._conn, instance_id)
- return None
-
- @exception.wrap_exception
- def adopt_instances(self):
- """ if there are instances already running, adopt them """
- return defer.succeed(0)
- instance_names = self._conn.list_instances()
- for name in instance_names:
- try:
- new_inst = Instance.fromName(self._conn, name)
- new_inst.update_state()
- except:
- pass
- return defer.succeed(len(self._instances))
-
- @exception.wrap_exception
- def describe_instances(self):
- retval = {}
- for inst in self.instdir.by_node(FLAGS.node_name):
- retval[inst['instance_id']] = (
- Instance.fromName(self._conn, inst['instance_id']))
- return retval
-
- @defer.inlineCallbacks
- def report_state(self, nodename, daemon):
- # TODO(termie): make this pattern be more elegant. -todd
- try:
- record = model.Daemon(nodename, daemon)
- record.heartbeat()
- if getattr(self, "model_disconnected", False):
- self.model_disconnected = False
- logging.error("Recovered model server connection!")
-
- except model.ConnectionError, ex:
- if not getattr(self, "model_disconnected", False):
- self.model_disconnected = True
- logging.exception("model server went away")
- yield
-
- @exception.wrap_exception
- def run_instance(self, instance_id, **_kwargs):
- """ launch a new instance with specified options """
- logging.debug("Starting instance %s..." % (instance_id))
- inst = self.instdir.get(instance_id)
- # TODO: Get the real security group of launch in here
- security_group = "default"
- # NOTE(vish): passing network type allows us to express the
- # network without making a call to network to find
- # out which type of network to setup
- network_service.setup_compute_network(
- inst.get('network_type', 'vlan'),
- inst['user_id'],
- inst['project_id'],
- security_group)
-
- inst['node_name'] = FLAGS.node_name
- inst.save()
- # TODO(vish) check to make sure the availability zone matches
- new_inst = Instance(self._conn, name=instance_id, data=inst)
- logging.info("Instances current state is %s", new_inst.state)
- if new_inst.is_running():
- raise exception.Error("Instance is already running")
- new_inst.spawn()
-
- @exception.wrap_exception
- def terminate_instance(self, instance_id):
- """ terminate an instance on this machine """
- logging.debug("Got told to terminate instance %s" % instance_id)
- instance = self.get_instance(instance_id)
- # inst = self.instdir.get(instance_id)
- if not instance:
- raise exception.Error(
- 'trying to terminate unknown instance: %s' % instance_id)
- d = instance.destroy()
- # d.addCallback(lambda x: inst.destroy())
- return d
-
- @exception.wrap_exception
- def reboot_instance(self, instance_id):
- """ reboot an instance on this server
- KVM doesn't support reboot, so we terminate and restart """
- instance = self.get_instance(instance_id)
- if not instance:
- raise exception.Error(
- 'trying to reboot unknown instance: %s' % instance_id)
- return instance.reboot()
-
- @defer.inlineCallbacks
- @exception.wrap_exception
- def get_console_output(self, instance_id):
- """ send the console output for an instance """
- logging.debug("Getting console output for %s" % (instance_id))
- inst = self.instdir.get(instance_id)
- instance = self.get_instance(instance_id)
- if not instance:
- raise exception.Error(
- 'trying to get console log for unknown: %s' % instance_id)
- rv = yield instance.console_output()
- # TODO(termie): this stuff belongs in the API layer, no need to
- # munge the data we send to ourselves
- output = {"InstanceId" : instance_id,
- "Timestamp" : "2",
- "output" : base64.b64encode(rv)}
- defer.returnValue(output)
-
- @defer.inlineCallbacks
- @exception.wrap_exception
- def attach_volume(self, instance_id = None,
- volume_id = None, mountpoint = None):
- volume = volume_service.get_volume(volume_id)
- yield self._init_aoe()
- yield process.simple_execute(
- "sudo virsh attach-disk %s /dev/etherd/%s %s" %
- (instance_id,
- volume['aoe_device'],
- mountpoint.rpartition('/dev/')[2]))
- volume.finish_attach()
- defer.returnValue(True)
-
- @defer.inlineCallbacks
- def _init_aoe(self):
- yield process.simple_execute("sudo aoe-discover")
- yield process.simple_execute("sudo aoe-stat")
-
- @defer.inlineCallbacks
- @exception.wrap_exception
- def detach_volume(self, instance_id, volume_id):
- """ detach a volume from an instance """
- # despite the documentation, virsh detach-disk just wants the device
- # name without the leading /dev/
- volume = volume_service.get_volume(volume_id)
- target = volume['mountpoint'].rpartition('/dev/')[2]
- yield process.simple_execute(
- "sudo virsh detach-disk %s %s " % (instance_id, target))
- volume.finish_detach()
- defer.returnValue(True)
-
-
-class Group(object):
- def __init__(self, group_id):
- self.group_id = group_id
-
-
-class ProductCode(object):
- def __init__(self, product_code):
- self.product_code = product_code
-
-
-class Instance(object):
-
- NOSTATE = 0x00
- RUNNING = 0x01
- BLOCKED = 0x02
- PAUSED = 0x03
- SHUTDOWN = 0x04
- SHUTOFF = 0x05
- CRASHED = 0x06
-
- def __init__(self, conn, name, data):
- """ spawn an instance with a given name """
- self._conn = conn
- # TODO(vish): this can be removed after data has been updated
- # data doesn't seem to have a working iterator so in doesn't work
- if data.get('owner_id', None) is not None:
- data['user_id'] = data['owner_id']
- data['project_id'] = data['owner_id']
- self.datamodel = data
-
- size = data.get('instance_type', FLAGS.default_instance_type)
- if size not in INSTANCE_TYPES:
- raise exception.Error('invalid instance type: %s' % size)
-
- self.datamodel.update(INSTANCE_TYPES[size])
-
- self.datamodel['name'] = name
- self.datamodel['instance_id'] = name
- self.datamodel['basepath'] = data.get(
- 'basepath', os.path.abspath(
- os.path.join(FLAGS.instances_path, self.name)))
- self.datamodel['memory_kb'] = int(self.datamodel['memory_mb']) * 1024
- self.datamodel.setdefault('image_id', FLAGS.default_image)
- self.datamodel.setdefault('kernel_id', FLAGS.default_kernel)
- self.datamodel.setdefault('ramdisk_id', FLAGS.default_ramdisk)
- self.datamodel.setdefault('project_id', self.datamodel['user_id'])
- self.datamodel.setdefault('bridge_name', None)
- #self.datamodel.setdefault('key_data', None)
- #self.datamodel.setdefault('key_name', None)
- #self.datamodel.setdefault('addressing_type', None)
-
- # TODO(joshua) - The ugly non-flat ones
- self.datamodel['groups'] = data.get('security_group', 'default')
- # TODO(joshua): Support product codes somehow
- self.datamodel.setdefault('product_codes', None)
-
- self.datamodel.save()
- logging.debug("Finished init of Instance with id of %s" % name)
-
- @classmethod
- def fromName(cls, conn, name):
- """ use the saved data for reloading the instance """
- instdir = model.InstanceDirectory()
- instance = instdir.get(name)
- return cls(conn=conn, name=name, data=instance)
-
- def set_state(self, state_code, state_description=None):
- self.datamodel['state'] = state_code
- if not state_description:
- state_description = power_state.name(state_code)
- self.datamodel['state_description'] = state_description
- self.datamodel.save()
-
- @property
- def state(self):
- # it is a string in datamodel
- return int(self.datamodel['state'])
-
- @property
- def name(self):
- return self.datamodel['name']
-
- def is_pending(self):
- return (self.state == power_state.NOSTATE or self.state == 'pending')
-
- def is_destroyed(self):
- return self.state == power_state.SHUTOFF
-
- def is_running(self):
- logging.debug("Instance state is: %s" % self.state)
- return (self.state == power_state.RUNNING or self.state == 'running')
-
- def describe(self):
- return self.datamodel
-
- def info(self):
- result = self._conn.get_info(self.name)
- result['node_name'] = FLAGS.node_name
- return result
-
- def update_state(self):
- self.datamodel.update(self.info())
- self.set_state(self.state)
- self.datamodel.save() # Extra, but harmless
-
- @defer.inlineCallbacks
- @exception.wrap_exception
- def destroy(self):
- if self.is_destroyed():
- self.datamodel.destroy()
- raise exception.Error('trying to destroy already destroyed'
- ' instance: %s' % self.name)
-
- self.set_state(power_state.NOSTATE, 'shutting_down')
- yield self._conn.destroy(self)
- self.datamodel.destroy()
-
- @defer.inlineCallbacks
- @exception.wrap_exception
- def reboot(self):
- if not self.is_running():
- raise exception.Error(
- 'trying to reboot a non-running'
- 'instance: %s (state: %s)' % (self.name, self.state))
-
- logging.debug('rebooting instance %s' % self.name)
- self.set_state(power_state.NOSTATE, 'rebooting')
- yield self._conn.reboot(self)
- self.update_state()
-
- @defer.inlineCallbacks
- @exception.wrap_exception
- def spawn(self):
- self.set_state(power_state.NOSTATE, 'spawning')
- logging.debug("Starting spawn in Instance")
- try:
- yield self._conn.spawn(self)
- except Exception, ex:
- logging.debug(ex)
- self.set_state(power_state.SHUTDOWN)
- self.update_state()
-
- @exception.wrap_exception
- def console_output(self):
- # FIXME: Abstract this for Xen
- if FLAGS.connection_type == 'libvirt':
- fname = os.path.abspath(
- os.path.join(self.datamodel['basepath'], 'console.log'))
- with open(fname, 'r') as f:
- console = f.read()
- else:
- console = 'FAKE CONSOLE OUTPUT'
- return defer.succeed(console)
+ pass
diff --git a/nova/crypto.py b/nova/crypto.py
index cc84f5e45..b05548ea1 100644
--- a/nova/crypto.py
+++ b/nova/crypto.py
@@ -24,7 +24,6 @@ SSH keypairs and x509 certificates.
import base64
import hashlib
import logging
-import M2Crypto
import os
import shutil
import struct
@@ -32,6 +31,8 @@ import tempfile
import time
import utils
+import M2Crypto
+
from nova import exception
from nova import flags
@@ -42,11 +43,13 @@ flags.DEFINE_string('keys_path', utils.abspath('../keys'), 'Where we keep our ke
flags.DEFINE_string('ca_path', utils.abspath('../CA'), 'Where we keep our root CA')
flags.DEFINE_boolean('use_intermediate_ca', False, 'Should we use intermediate CAs for each project?')
+
def ca_path(project_id):
if project_id:
return "%s/INTER/%s/cacert.pem" % (FLAGS.ca_path, project_id)
return "%s/cacert.pem" % (FLAGS.ca_path)
+
def fetch_ca(project_id=None, chain=True):
if not FLAGS.use_intermediate_ca:
project_id = None
@@ -60,6 +63,7 @@ def fetch_ca(project_id=None, chain=True):
buffer += cafile.read()
return buffer
+
def generate_key_pair(bits=1024):
# what is the magic 65537?
@@ -109,6 +113,7 @@ def generate_x509_cert(subject, bits=1024):
shutil.rmtree(tmpdir)
return (private_key, csr)
+
def sign_csr(csr_text, intermediate=None):
if not FLAGS.use_intermediate_ca:
intermediate = None
@@ -122,6 +127,7 @@ def sign_csr(csr_text, intermediate=None):
os.chdir(start)
return _sign_csr(csr_text, user_ca)
+
def _sign_csr(csr_text, ca_folder):
tmpfolder = tempfile.mkdtemp()
csrfile = open("%s/inbound.csr" % (tmpfolder), "w")
diff --git a/nova/datastore.old.py b/nova/datastore.old.py
new file mode 100644
index 000000000..751c5eeeb
--- /dev/null
+++ b/nova/datastore.old.py
@@ -0,0 +1,261 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Datastore:
+
+Make sure that Redis is running, and your flags are set properly,
+before trying to run this.
+"""
+
+import logging
+
+from nova import exception
+from nova import flags
+from nova import utils
+
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('redis_host', '127.0.0.1',
+ 'Host that redis is running on.')
+flags.DEFINE_integer('redis_port', 6379,
+ 'Port that redis is running on.')
+flags.DEFINE_integer('redis_db', 0, 'Multiple DB keeps tests away')
+
+
+class Redis(object):
+ def __init__(self):
+ if hasattr(self.__class__, '_instance'):
+ raise Exception('Attempted to instantiate singleton')
+
+ @classmethod
+ def instance(cls):
+ if not hasattr(cls, '_instance'):
+ inst = redis.Redis(host=FLAGS.redis_host,
+ port=FLAGS.redis_port,
+ db=FLAGS.redis_db)
+ cls._instance = inst
+ return cls._instance
+
+
+class ConnectionError(exception.Error):
+ pass
+
+
+def absorb_connection_error(fn):
+ def _wrapper(*args, **kwargs):
+ try:
+ return fn(*args, **kwargs)
+ except redis.exceptions.ConnectionError, ce:
+ raise ConnectionError(str(ce))
+ return _wrapper
+
+
+class BasicModel(object):
+ """
+ All Redis-backed data derives from this class.
+
+ You MUST specify an identifier() property that returns a unique string
+ per instance.
+
+ You MUST have an initializer that takes a single argument that is a value
+ returned by identifier() to load a new class with.
+
+ You may want to specify a dictionary for default_state().
+
+ You may also specify override_type at the class left to use a key other
+ than __class__.__name__.
+
+ You override save and destroy calls to automatically build and destroy
+ associations.
+ """
+
+ override_type = None
+
+ @absorb_connection_error
+ def __init__(self):
+ state = Redis.instance().hgetall(self.__redis_key)
+ if state:
+ self.initial_state = state
+ self.state = dict(self.initial_state)
+ else:
+ self.initial_state = {}
+ self.state = self.default_state()
+
+
+ def default_state(self):
+ """You probably want to define this in your subclass"""
+ return {}
+
+ @classmethod
+ def _redis_name(cls):
+ return cls.override_type or cls.__name__.lower()
+
+ @classmethod
+ def lookup(cls, identifier):
+ rv = cls(identifier)
+ if rv.is_new_record():
+ return None
+ else:
+ return rv
+
+ @classmethod
+ @absorb_connection_error
+ def all(cls):
+ """yields all objects in the store"""
+ redis_set = cls._redis_set_name(cls.__name__)
+ for identifier in Redis.instance().smembers(redis_set):
+ yield cls(identifier)
+
+ @classmethod
+ def associated_to(cls, foreign_type, foreign_id):
+ for identifier in cls.associated_keys(foreign_type, foreign_id):
+ yield cls(identifier)
+
+ @classmethod
+ @absorb_connection_error
+ def associated_keys(cls, foreign_type, foreign_id):
+ redis_set = cls._redis_association_name(foreign_type, foreign_id)
+ return Redis.instance().smembers(redis_set) or []
+
+ @classmethod
+ def _redis_set_name(cls, kls_name):
+ # stupidly pluralize (for compatibility with previous codebase)
+ return kls_name.lower() + "s"
+
+ @classmethod
+ def _redis_association_name(cls, foreign_type, foreign_id):
+ return cls._redis_set_name("%s:%s:%s" %
+ (foreign_type, foreign_id, cls._redis_name()))
+
+ @property
+ def identifier(self):
+ """You DEFINITELY want to define this in your subclass"""
+ raise NotImplementedError("Your subclass should define identifier")
+
+ @property
+ def __redis_key(self):
+ return '%s:%s' % (self._redis_name(), self.identifier)
+
+ def __repr__(self):
+ return "<%s:%s>" % (self.__class__.__name__, self.identifier)
+
+ def keys(self):
+ return self.state.keys()
+
+ def copy(self):
+ copyDict = {}
+ for item in self.keys():
+ copyDict[item] = self[item]
+ return copyDict
+
+ def get(self, item, default):
+ return self.state.get(item, default)
+
+ def update(self, update_dict):
+ return self.state.update(update_dict)
+
+ def setdefault(self, item, default):
+ return self.state.setdefault(item, default)
+
+ def __contains__(self, item):
+ return item in self.state
+
+ def __getitem__(self, item):
+ return self.state[item]
+
+ def __setitem__(self, item, val):
+ self.state[item] = val
+ return self.state[item]
+
+ def __delitem__(self, item):
+ """We don't support this"""
+ raise Exception("Silly monkey, models NEED all their properties.")
+
+ def is_new_record(self):
+ return self.initial_state == {}
+
+ @absorb_connection_error
+ def add_to_index(self):
+ """Each instance of Foo has its id tracked in the set named Foos"""
+ set_name = self.__class__._redis_set_name(self.__class__.__name__)
+ Redis.instance().sadd(set_name, self.identifier)
+
+ @absorb_connection_error
+ def remove_from_index(self):
+ """Remove id of this instance from the set tracking ids of this type"""
+ set_name = self.__class__._redis_set_name(self.__class__.__name__)
+ Redis.instance().srem(set_name, self.identifier)
+
+ @absorb_connection_error
+ def associate_with(self, foreign_type, foreign_id):
+ """Add this class id into the set foreign_type:foreign_id:this_types"""
+ # note the extra 's' on the end is for plurality
+ # to match the old data without requiring a migration of any sort
+ self.add_associated_model_to_its_set(foreign_type, foreign_id)
+ redis_set = self.__class__._redis_association_name(foreign_type,
+ foreign_id)
+ Redis.instance().sadd(redis_set, self.identifier)
+
+ @absorb_connection_error
+ def unassociate_with(self, foreign_type, foreign_id):
+ """Delete from foreign_type:foreign_id:this_types set"""
+ redis_set = self.__class__._redis_association_name(foreign_type,
+ foreign_id)
+ Redis.instance().srem(redis_set, self.identifier)
+
+ def add_associated_model_to_its_set(self, model_type, model_id):
+ """
+ When associating an X to a Y, save Y for newer timestamp, etc, and to
+ make sure to save it if Y is a new record.
+ If the model_type isn't found as a usable class, ignore it, this can
+ happen when associating to things stored in LDAP (user, project, ...).
+ """
+ table = globals()
+ klsname = model_type.capitalize()
+ if table.has_key(klsname):
+ model_class = table[klsname]
+ model_inst = model_class(model_id)
+ model_inst.save()
+
+ @absorb_connection_error
+ def save(self):
+ """
+ update the directory with the state from this model
+ also add it to the index of items of the same type
+ then set the initial_state = state so new changes are tracked
+ """
+ # TODO(ja): implement hmset in redis-py and use it
+ # instead of multiple calls to hset
+ if self.is_new_record():
+ self["create_time"] = utils.isotime()
+ for key, val in self.state.iteritems():
+ Redis.instance().hset(self.__redis_key, key, val)
+ self.add_to_index()
+ self.initial_state = dict(self.state)
+ return True
+
+ @absorb_connection_error
+ def destroy(self):
+ """deletes all related records from datastore."""
+ logging.info("Destroying datamodel for %s %s",
+ self.__class__.__name__, self.identifier)
+ Redis.instance().delete(self.__redis_key)
+ self.remove_from_index()
+ return True
+
diff --git a/nova/datastore.py b/nova/datastore.py
index 5dc6ed107..8e2519429 100644
--- a/nova/datastore.py
+++ b/nova/datastore.py
@@ -26,10 +26,7 @@ before trying to run this.
import logging
import redis
-from nova import exception
from nova import flags
-from nova import utils
-
FLAGS = flags.FLAGS
flags.DEFINE_string('redis_host', '127.0.0.1',
@@ -54,209 +51,3 @@ class Redis(object):
return cls._instance
-class ConnectionError(exception.Error):
- pass
-
-
-def absorb_connection_error(fn):
- def _wrapper(*args, **kwargs):
- try:
- return fn(*args, **kwargs)
- except redis.exceptions.ConnectionError, ce:
- raise ConnectionError(str(ce))
- return _wrapper
-
-
-class BasicModel(object):
- """
- All Redis-backed data derives from this class.
-
- You MUST specify an identifier() property that returns a unique string
- per instance.
-
- You MUST have an initializer that takes a single argument that is a value
- returned by identifier() to load a new class with.
-
- You may want to specify a dictionary for default_state().
-
- You may also specify override_type at the class left to use a key other
- than __class__.__name__.
-
- You override save and destroy calls to automatically build and destroy
- associations.
- """
-
- override_type = None
-
- @absorb_connection_error
- def __init__(self):
- state = Redis.instance().hgetall(self.__redis_key)
- if state:
- self.initial_state = state
- self.state = dict(self.initial_state)
- else:
- self.initial_state = {}
- self.state = self.default_state()
-
-
- def default_state(self):
- """You probably want to define this in your subclass"""
- return {}
-
- @classmethod
- def _redis_name(cls):
- return cls.override_type or cls.__name__.lower()
-
- @classmethod
- def lookup(cls, identifier):
- rv = cls(identifier)
- if rv.is_new_record():
- return None
- else:
- return rv
-
- @classmethod
- @absorb_connection_error
- def all(cls):
- """yields all objects in the store"""
- redis_set = cls._redis_set_name(cls.__name__)
- for identifier in Redis.instance().smembers(redis_set):
- yield cls(identifier)
-
- @classmethod
- def associated_to(cls, foreign_type, foreign_id):
- for identifier in cls.associated_keys(foreign_type, foreign_id):
- yield cls(identifier)
-
- @classmethod
- @absorb_connection_error
- def associated_keys(cls, foreign_type, foreign_id):
- redis_set = cls._redis_association_name(foreign_type, foreign_id)
- return Redis.instance().smembers(redis_set) or []
-
- @classmethod
- def _redis_set_name(cls, kls_name):
- # stupidly pluralize (for compatiblity with previous codebase)
- return kls_name.lower() + "s"
-
- @classmethod
- def _redis_association_name(cls, foreign_type, foreign_id):
- return cls._redis_set_name("%s:%s:%s" %
- (foreign_type, foreign_id, cls._redis_name()))
-
- @property
- def identifier(self):
- """You DEFINITELY want to define this in your subclass"""
- raise NotImplementedError("Your subclass should define identifier")
-
- @property
- def __redis_key(self):
- return '%s:%s' % (self._redis_name(), self.identifier)
-
- def __repr__(self):
- return "<%s:%s>" % (self.__class__.__name__, self.identifier)
-
- def keys(self):
- return self.state.keys()
-
- def copy(self):
- copyDict = {}
- for item in self.keys():
- copyDict[item] = self[item]
- return copyDict
-
- def get(self, item, default):
- return self.state.get(item, default)
-
- def update(self, update_dict):
- return self.state.update(update_dict)
-
- def setdefault(self, item, default):
- return self.state.setdefault(item, default)
-
- def __contains__(self, item):
- return item in self.state
-
- def __getitem__(self, item):
- return self.state[item]
-
- def __setitem__(self, item, val):
- self.state[item] = val
- return self.state[item]
-
- def __delitem__(self, item):
- """We don't support this"""
- raise Exception("Silly monkey, models NEED all their properties.")
-
- def is_new_record(self):
- return self.initial_state == {}
-
- @absorb_connection_error
- def add_to_index(self):
- """Each insance of Foo has its id tracked int the set named Foos"""
- set_name = self.__class__._redis_set_name(self.__class__.__name__)
- Redis.instance().sadd(set_name, self.identifier)
-
- @absorb_connection_error
- def remove_from_index(self):
- """Remove id of this instance from the set tracking ids of this type"""
- set_name = self.__class__._redis_set_name(self.__class__.__name__)
- Redis.instance().srem(set_name, self.identifier)
-
- @absorb_connection_error
- def associate_with(self, foreign_type, foreign_id):
- """Add this class id into the set foreign_type:foreign_id:this_types"""
- # note the extra 's' on the end is for plurality
- # to match the old data without requiring a migration of any sort
- self.add_associated_model_to_its_set(foreign_type, foreign_id)
- redis_set = self.__class__._redis_association_name(foreign_type,
- foreign_id)
- Redis.instance().sadd(redis_set, self.identifier)
-
- @absorb_connection_error
- def unassociate_with(self, foreign_type, foreign_id):
- """Delete from foreign_type:foreign_id:this_types set"""
- redis_set = self.__class__._redis_association_name(foreign_type,
- foreign_id)
- Redis.instance().srem(redis_set, self.identifier)
-
- def add_associated_model_to_its_set(self, model_type, model_id):
- """
- When associating an X to a Y, save Y for newer timestamp, etc, and to
- make sure to save it if Y is a new record.
- If the model_type isn't found as a usable class, ignore it, this can
- happen when associating to things stored in LDAP (user, project, ...).
- """
- table = globals()
- klsname = model_type.capitalize()
- if table.has_key(klsname):
- model_class = table[klsname]
- model_inst = model_class(model_id)
- model_inst.save()
-
- @absorb_connection_error
- def save(self):
- """
- update the directory with the state from this model
- also add it to the index of items of the same type
- then set the initial_state = state so new changes are tracked
- """
- # TODO(ja): implement hmset in redis-py and use it
- # instead of multiple calls to hset
- if self.is_new_record():
- self["create_time"] = utils.isotime()
- for key, val in self.state.iteritems():
- Redis.instance().hset(self.__redis_key, key, val)
- self.add_to_index()
- self.initial_state = dict(self.state)
- return True
-
- @absorb_connection_error
- def destroy(self):
- """deletes all related records from datastore."""
- logging.info("Destroying datamodel for %s %s",
- self.__class__.__name__, self.identifier)
- Redis.instance().delete(self.__redis_key)
- self.remove_from_index()
- return True
-
diff --git a/nova/db/__init__.py b/nova/db/__init__.py
new file mode 100644
index 000000000..054b7ac94
--- /dev/null
+++ b/nova/db/__init__.py
@@ -0,0 +1,23 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+DB abstraction for Nova
+"""
+
+from nova.db.api import *
diff --git a/nova/db/api.py b/nova/db/api.py
new file mode 100644
index 000000000..6cb49b7e4
--- /dev/null
+++ b/nova/db/api.py
@@ -0,0 +1,439 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Defines interface for DB access
+"""
+
+from nova import exception
+from nova import flags
+from nova import utils
+
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('db_backend', 'sqlalchemy',
+ 'The backend to use for db')
+
+
+IMPL = utils.LazyPluggable(FLAGS['db_backend'],
+ sqlalchemy='nova.db.sqlalchemy.api')
+
+
+# TODO(vish): where should these exceptions go?
+class NoMoreAddresses(exception.Error):
+ """No more available addresses"""
+ pass
+
+
+class NoMoreBlades(exception.Error):
+ """No more available blades"""
+ pass
+
+
+class NoMoreNetworks(exception.Error):
+ """No more available networks"""
+ pass
+
+
+###################
+
+
+def daemon_get(context, daemon_id):
+ """Get a daemon or raise if it does not exist."""
+ return IMPL.daemon_get(context, daemon_id)
+
+
+def daemon_get_by_args(context, host, binary):
+ """Get the state of a daemon by node name and binary."""
+ return IMPL.daemon_get_by_args(context, host, binary)
+
+
+def daemon_create(context, values):
+ """Create a daemon from the values dictionary."""
+ return IMPL.daemon_create(context, values)
+
+
+def daemon_update(context, daemon_id, values):
+ """Set the given properties on a daemon and update it.
+
+ Raises NotFound if daemon does not exist.
+
+ """
+ return IMPL.daemon_update(context, daemon_id, values)
+
+
+###################
+
+
+def floating_ip_allocate_address(context, host, project_id):
+ """Allocate free floating ip and return the address.
+
+ Raises if one is not available.
+ """
+ return IMPL.floating_ip_allocate_address(context, host, project_id)
+
+
+def floating_ip_create(context, address, host):
+ """Create a floating ip for a given address on the specified host."""
+ return IMPL.floating_ip_create(context, address, host)
+
+
+def floating_ip_disassociate(context, address):
+ """Disassociate a floating ip from a fixed ip by address.
+
+ Returns the address of the existing fixed ip.
+ """
+ return IMPL.floating_ip_disassociate(context, address)
+
+
+def floating_ip_deallocate(context, address):
+ """Deallocate a floating ip by address"""
+ return IMPL.floating_ip_deallocate(context, address)
+
+
+def floating_ip_fixed_ip_associate(context, floating_address, fixed_address):
+ """Associate a floating ip to a fixed_ip by address."""
+ return IMPL.floating_ip_fixed_ip_associate(context,
+ floating_address,
+ fixed_address)
+
+
+def floating_ip_get_by_address(context, address):
+ """Get a floating ip by address or raise if it doesn't exist."""
+ return IMPL.floating_ip_get_by_address(context, address)
+
+
+def floating_ip_get_instance(context, address):
+ """Get an instance for a floating ip by address."""
+ return IMPL.floating_ip_get_instance(context, address)
+
+
+####################
+
+
+def fixed_ip_allocate(context, network_id):
+ """Allocate free fixed ip and return the address.
+
+ Raises if one is not available.
+ """
+ return IMPL.fixed_ip_allocate(context, network_id)
+
+
+def fixed_ip_create(context, values):
+ """Create a fixed ip from the values dictionary."""
+ return IMPL.fixed_ip_create(context, values)
+
+
+def fixed_ip_deallocate(context, address):
+ """Deallocate a fixed ip by address."""
+ return IMPL.fixed_ip_deallocate(context, address)
+
+
+def fixed_ip_get_by_address(context, address):
+ """Get a fixed ip by address or raise if it does not exist."""
+ return IMPL.fixed_ip_get_by_address(context, address)
+
+
+def fixed_ip_get_instance(context, address):
+ """Get an instance for a fixed ip by address."""
+ return IMPL.fixed_ip_get_instance(context, address)
+
+
+def fixed_ip_get_network(context, address):
+ """Get a network for a fixed ip by address."""
+ return IMPL.fixed_ip_get_network(context, address)
+
+
+def fixed_ip_instance_associate(context, address, instance_id):
+ """Associate a fixed ip to an instance by address."""
+ return IMPL.fixed_ip_instance_associate(context, address, instance_id)
+
+
+def fixed_ip_instance_disassociate(context, address):
+ """Disassociate a fixed ip from an instance by address."""
+ return IMPL.fixed_ip_instance_disassociate(context, address)
+
+
+def fixed_ip_update(context, address, values):
+ """Create a fixed ip from the values dictionary."""
+ return IMPL.fixed_ip_update(context, address, values)
+
+
+####################
+
+
+def instance_create(context, values):
+ """Create an instance from the values dictionary."""
+ return IMPL.instance_create(context, values)
+
+
+def instance_destroy(context, instance_id):
+ """Destroy the instance or raise if it does not exist."""
+ return IMPL.instance_destroy(context, instance_id)
+
+
+def instance_get(context, instance_id):
+ """Get an instance or raise if it does not exist."""
+ return IMPL.instance_get(context, instance_id)
+
+
+def instance_get_all(context):
+ """Get all instances."""
+ return IMPL.instance_get_all(context)
+
+
+def instance_get_by_project(context, project_id):
+ """Get all instance belonging to a project."""
+ return IMPL.instance_get_by_project(context, project_id)
+
+
+def instance_get_by_reservation(context, reservation_id):
+ """Get all instance belonging to a reservation."""
+ return IMPL.instance_get_by_reservation(context, reservation_id)
+
+
+def instance_get_fixed_address(context, instance_id):
+ """Get the fixed ip address of an instance."""
+ return IMPL.instance_get_fixed_address(context, instance_id)
+
+
+def instance_get_floating_address(context, instance_id):
+ """Get the first floating ip address of an instance."""
+ return IMPL.instance_get_floating_address(context, instance_id)
+
+
+def instance_get_by_str(context, str_id):
+ """Get an instance by string id."""
+ return IMPL.instance_get_by_str(context, str_id)
+
+
+def instance_get_host(context, instance_id):
+ """Get the host that the instance is running on."""
+ return IMPL.instance_get_host(context, instance_id)
+
+
+def instance_is_vpn(context, instance_id):
+ """True if instance is a vpn."""
+ return IMPL.instance_is_vpn(context, instance_id)
+
+
+def instance_state(context, instance_id, state, description=None):
+ """Set the state of an instance."""
+ return IMPL.instance_state(context, instance_id, state, description)
+
+
+def instance_update(context, instance_id, values):
+ """Set the given properties on an instance and update it.
+
+ Raises NotFound if instance does not exist.
+
+ """
+ return IMPL.instance_update(context, instance_id, values)
+
+
+####################
+
+
+def network_count(context):
+ """Return the number of networks."""
+ return IMPL.network_count(context)
+
+
+def network_count_allocated_ips(context, network_id):
+ """Return the number of allocated non-reserved ips in the network."""
+ return IMPL.network_count_allocated_ips(context, network_id)
+
+
+def network_count_available_ips(context, network_id):
+ """Return the number of available ips in the network."""
+ return IMPL.network_count_available_ips(context, network_id)
+
+
+def network_count_reserved_ips(context, network_id):
+ """Return the number of reserved ips in the network."""
+ return IMPL.network_count_reserved_ips(context, network_id)
+
+
+def network_create(context, values):
+ """Create a network from the values dictionary."""
+ return IMPL.network_create(context, values)
+
+
+def network_create_fixed_ips(context, network_id, num_vpn_clients):
+ """Create the ips for the network, reserving specified ips."""
+ return IMPL.network_create_fixed_ips(context, network_id, num_vpn_clients)
+
+
+def network_destroy(context, network_id):
+ """Destroy the network or raise if it does not exist."""
+ return IMPL.network_destroy(context, network_id)
+
+
+def network_get(context, network_id):
+ """Get a network or raise if it does not exist."""
+ return IMPL.network_get(context, network_id)
+
+
+# pylint: disable-msg=C0103
+def network_get_associated_fixed_ips(context, network_id):
+ """Get all network's ips that have been associated."""
+ return IMPL.network_get_associated_fixed_ips(context, network_id)
+
+
+def network_get_by_bridge(context, bridge):
+ """Get a network or raise if it does not exist."""
+ return IMPL.network_get_by_bridge(context, bridge)
+
+
+def network_get_host(context, network_id):
+ """Get host assigned to network or raise"""
+ return IMPL.network_get_host(context, network_id)
+
+
+def network_get_index(context, network_id):
+ """Get non-conflicting index for network"""
+ return IMPL.network_get_index(context, network_id)
+
+
+def network_get_vpn_ip(context, network_id):
+ """Get the vpn ip for the network"""
+ return IMPL.network_get_vpn_ip(context, network_id)
+
+
+def network_index_count(context):
+ """Return count of network indexes"""
+ return IMPL.network_index_count(context)
+
+
+def network_index_create(context, values):
+ """Create a network index from the values dict"""
+ return IMPL.network_index_create(context, values)
+
+
+def network_set_cidr(context, network_id, cidr):
+ """Set the Classless Inter-Domain Routing (CIDR) for the network"""
+ return IMPL.network_set_cidr(context, network_id, cidr)
+
+
+def network_set_host(context, network_id, host_id):
+ """Safely set the host for network"""
+ return IMPL.network_set_host(context, network_id, host_id)
+
+
+def network_update(context, network_id, values):
+ """Set the given properties on a network and update it.
+
+ Raises NotFound if network does not exist.
+
+ """
+ return IMPL.network_update(context, network_id, values)
+
+
+###################
+
+
+def project_get_network(context, project_id):
+ """Return the network associated with the project."""
+ return IMPL.project_get_network(context, project_id)
+
+
+###################
+
+
+def queue_get_for(context, topic, physical_node_id):
+ """Return a channel to send a message to a node with a topic."""
+ return IMPL.queue_get_for(context, topic, physical_node_id)
+
+
+###################
+
+
+def export_device_count(context):
+ """Return count of export devices."""
+ return IMPL.export_device_count(context)
+
+
+def export_device_create(context, values):
+ """Create an export_device from the values dictionary."""
+ return IMPL.export_device_create(context, values)
+
+
+###################
+
+
+def volume_allocate_shelf_and_blade(context, volume_id):
+ """Atomically allocate a free shelf and blade from the pool."""
+ return IMPL.volume_allocate_shelf_and_blade(context, volume_id)
+
+
+def volume_attached(context, volume_id, instance_id, mountpoint):
+ """Ensure that a volume is set as attached."""
+ return IMPL.volume_attached(context, volume_id, instance_id, mountpoint)
+
+
+def volume_create(context, values):
+ """Create a volume from the values dictionary."""
+ return IMPL.volume_create(context, values)
+
+
+def volume_destroy(context, volume_id):
+ """Destroy the volume or raise if it does not exist."""
+ return IMPL.volume_destroy(context, volume_id)
+
+
+def volume_detached(context, volume_id):
+ """Ensure that a volume is set as detached."""
+ return IMPL.volume_detached(context, volume_id)
+
+
+def volume_get(context, volume_id):
+ """Get a volume or raise if it does not exist."""
+ return IMPL.volume_get(context, volume_id)
+
+
+def volume_get_all(context):
+ """Get all volumes."""
+ return IMPL.volume_get_all(context)
+
+
+def volume_get_by_project(context, project_id):
+ """Get all volumes belonging to a project."""
+ return IMPL.volume_get_by_project(context, project_id)
+
+
+def volume_get_by_str(context, str_id):
+ """Get a volume by string id."""
+ return IMPL.volume_get_by_str(context, str_id)
+
+
+def volume_get_host(context, volume_id):
+ """Get the host that the volume is running on."""
+ return IMPL.volume_get_host(context, volume_id)
+
+
+def volume_get_shelf_and_blade(context, volume_id):
+ """Get the shelf and blade allocated to the volume."""
+ return IMPL.volume_get_shelf_and_blade(context, volume_id)
+
+
+def volume_update(context, volume_id, values):
+ """Set the given properties on an volume and update it.
+
+ Raises NotFound if volume does not exist.
+
+ """
+ return IMPL.volume_update(context, volume_id, values)
diff --git a/nova/db/sqlalchemy/__init__.py b/nova/db/sqlalchemy/__init__.py
new file mode 100644
index 000000000..3288ebd20
--- /dev/null
+++ b/nova/db/sqlalchemy/__init__.py
@@ -0,0 +1,24 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+SQLAlchemy database backend
+"""
+from nova.db.sqlalchemy import models
+
+models.register_models()
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
new file mode 100644
index 000000000..5d98ee5bf
--- /dev/null
+++ b/nova/db/sqlalchemy/api.py
@@ -0,0 +1,577 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Implementation of SQLAlchemy backend
+"""
+
+from nova import db
+from nova import exception
+from nova import flags
+from nova.db.sqlalchemy import models
+from nova.db.sqlalchemy.session import managed_session
+from sqlalchemy import or_
+
+FLAGS = flags.FLAGS
+
+# NOTE(vish): disabling docstring pylint because the docstrings are
+# in the interface definition
+# pylint: disable-msg=C0111
+
+###################
+
+
+def daemon_get(_context, daemon_id):
+ return models.Daemon.find(daemon_id)
+
+
+def daemon_get_by_args(_context, host, binary):
+ return models.Daemon.find_by_args(host, binary)
+
+
+def daemon_create(_context, values):
+ daemon_ref = models.Daemon()
+ for (key, value) in values.iteritems():
+ daemon_ref[key] = value
+ daemon_ref.save()
+ return daemon_ref.id
+
+
+def daemon_update(_context, daemon_id, values):
+ daemon_ref = daemon_get(_context, daemon_id)
+ for (key, value) in values.iteritems():
+ daemon_ref[key] = value
+ daemon_ref.save()
+
+
+###################
+
+
+def floating_ip_allocate_address(_context, host, project_id):
+ with managed_session(autocommit=False) as session:
+ floating_ip_ref = session.query(models.FloatingIp) \
+ .filter_by(host=host) \
+ .filter_by(fixed_ip_id=None) \
+ .filter_by(deleted=False) \
+ .with_lockmode('update') \
+ .first()
+ # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
+ # then this has concurrency issues
+ if not floating_ip_ref:
+ raise db.NoMoreAddresses()
+ floating_ip_ref['project_id'] = project_id
+ session.add(floating_ip_ref)
+ session.commit()
+ return floating_ip_ref['address']
+
+
+def floating_ip_create(_context, address, host):
+ floating_ip_ref = models.FloatingIp()
+ floating_ip_ref['address'] = address
+ floating_ip_ref['host'] = host
+ floating_ip_ref.save()
+ return floating_ip_ref
+
+
+def floating_ip_fixed_ip_associate(_context, floating_address, fixed_address):
+ with managed_session(autocommit=False) as session:
+ floating_ip_ref = models.FloatingIp.find_by_str(floating_address,
+ session=session)
+ fixed_ip_ref = models.FixedIp.find_by_str(fixed_address,
+ session=session)
+ floating_ip_ref.fixed_ip = fixed_ip_ref
+ floating_ip_ref.save(session=session)
+ session.commit()
+
+
+def floating_ip_disassociate(_context, address):
+ with managed_session(autocommit=False) as session:
+ floating_ip_ref = models.FloatingIp.find_by_str(address,
+ session=session)
+ fixed_ip_ref = floating_ip_ref.fixed_ip
+ if fixed_ip_ref:
+ fixed_ip_address = fixed_ip_ref['address']
+ else:
+ fixed_ip_address = None
+ floating_ip_ref.fixed_ip = None
+ floating_ip_ref.save(session=session)
+ session.commit()
+ return fixed_ip_address
+
+
+def floating_ip_deallocate(_context, address):
+ with managed_session(autocommit=False) as session:
+ floating_ip_ref = models.FloatingIp.find_by_str(address,
+ session=session)
+ floating_ip_ref['project_id'] = None
+ floating_ip_ref.save(session=session)
+
+
+def floating_ip_get_by_address(_context, address):
+ return models.FloatingIp.find_by_str(address)
+
+
+def floating_ip_get_instance(_context, address):
+ with managed_session() as session:
+ floating_ip_ref = models.FloatingIp.find_by_str(address,
+ session=session)
+ return floating_ip_ref.fixed_ip.instance
+
+
+###################
+
+
+def fixed_ip_allocate(_context, network_id):
+ with managed_session(autocommit=False) as session:
+ network_or_none = or_(models.FixedIp.network_id == network_id,
+ models.FixedIp.network_id == None)
+ fixed_ip_ref = session.query(models.FixedIp) \
+ .filter(network_or_none) \
+ .filter_by(reserved=False) \
+ .filter_by(allocated=False) \
+ .filter_by(leased=False) \
+ .filter_by(deleted=False) \
+ .with_lockmode('update') \
+ .first()
+ # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
+ # then this has concurrency issues
+ if not fixed_ip_ref:
+ raise db.NoMoreAddresses()
+ if not fixed_ip_ref.network:
+ fixed_ip_ref.network = models.Network.find(network_id)
+ fixed_ip_ref['allocated'] = True
+ session.add(fixed_ip_ref)
+ session.commit()
+ return fixed_ip_ref['address']
+
+
+def fixed_ip_create(_context, values):
+ fixed_ip_ref = models.FixedIp()
+ for (key, value) in values.iteritems():
+ fixed_ip_ref[key] = value
+ fixed_ip_ref.save()
+ return fixed_ip_ref['address']
+
+
+def fixed_ip_get_by_address(_context, address):
+ return models.FixedIp.find_by_str(address)
+
+
+def fixed_ip_get_instance(_context, address):
+ with managed_session() as session:
+ return models.FixedIp.find_by_str(address, session=session).instance
+
+
+def fixed_ip_get_network(_context, address):
+ with managed_session() as session:
+ return models.FixedIp.find_by_str(address, session=session).network
+
+
+def fixed_ip_deallocate(_context, address):
+ fixed_ip_ref = fixed_ip_get_by_address(_context, address)
+ fixed_ip_ref['allocated'] = False
+ fixed_ip_ref.save()
+
+
+def fixed_ip_instance_associate(_context, address, instance_id):
+ with managed_session(autocommit=False) as session:
+ fixed_ip_ref = models.FixedIp.find_by_str(address, session=session)
+ instance_ref = models.Instance.find(instance_id, session=session)
+ fixed_ip_ref.instance = instance_ref
+ fixed_ip_ref.save(session=session)
+ session.commit()
+
+
+def fixed_ip_instance_disassociate(_context, address):
+ with managed_session(autocommit=False) as session:
+ fixed_ip_ref = models.FixedIp.find_by_str(address, session=session)
+ fixed_ip_ref.instance = None
+ fixed_ip_ref.save(session=session)
+ session.commit()
+
+
+def fixed_ip_update(_context, address, values):
+ fixed_ip_ref = fixed_ip_get_by_address(_context, address)
+ for (key, value) in values.iteritems():
+ fixed_ip_ref[key] = value
+ fixed_ip_ref.save()
+
+
+###################
+
+
+def instance_create(_context, values):
+ instance_ref = models.Instance()
+ for (key, value) in values.iteritems():
+ instance_ref[key] = value
+ instance_ref.save()
+ return instance_ref.id
+
+
+def instance_destroy(_context, instance_id):
+ instance_ref = instance_get(_context, instance_id)
+ instance_ref.delete()
+
+
+def instance_get(_context, instance_id):
+ return models.Instance.find(instance_id)
+
+
+def instance_get_all(_context):
+ return models.Instance.all()
+
+
+def instance_get_by_project(_context, project_id):
+ with managed_session() as session:
+ return session.query(models.Instance) \
+ .filter_by(project_id=project_id) \
+ .filter_by(deleted=False) \
+ .all()
+
+
+def instance_get_by_reservation(_context, reservation_id):
+ with managed_session() as session:
+ return session.query(models.Instance) \
+ .filter_by(reservation_id=reservation_id) \
+ .filter_by(deleted=False) \
+ .all()
+
+
+def instance_get_by_str(_context, str_id):
+ return models.Instance.find_by_str(str_id)
+
+
+def instance_get_fixed_address(_context, instance_id):
+ with managed_session() as session:
+ instance_ref = models.Instance.find(instance_id, session=session)
+ if not instance_ref.fixed_ip:
+ return None
+ return instance_ref.fixed_ip['address']
+
+
+def instance_get_floating_address(_context, instance_id):
+ with managed_session() as session:
+ instance_ref = models.Instance.find(instance_id, session=session)
+ if not instance_ref.fixed_ip:
+ return None
+ if not instance_ref.fixed_ip.floating_ips:
+ return None
+ # NOTE(vish): this just returns the first floating ip
+ return instance_ref.fixed_ip.floating_ips[0]['address']
+
+
+def instance_get_host(_context, instance_id):
+ instance_ref = instance_get(_context, instance_id)
+ return instance_ref['host']
+
+
+def instance_is_vpn(_context, instance_id):
+ instance_ref = instance_get(_context, instance_id)
+ return instance_ref['image_id'] == FLAGS.vpn_image_id
+
+
+def instance_state(_context, instance_id, state, description=None):
+ instance_ref = instance_get(_context, instance_id)
+ instance_ref.set_state(state, description)
+
+
+def instance_update(_context, instance_id, values):
+ instance_ref = instance_get(_context, instance_id)
+ for (key, value) in values.iteritems():
+ instance_ref[key] = value
+ instance_ref.save()
+
+
+###################
+
+
+def network_count(_context):
+ return models.Network.count()
+
+
+def network_count_allocated_ips(_context, network_id):
+ with managed_session() as session:
+ return session.query(models.FixedIp) \
+ .filter_by(network_id=network_id) \
+ .filter_by(allocated=True) \
+ .filter_by(deleted=False) \
+ .count()
+
+
+def network_count_available_ips(_context, network_id):
+ with managed_session() as session:
+ return session.query(models.FixedIp) \
+ .filter_by(network_id=network_id) \
+ .filter_by(allocated=False) \
+ .filter_by(reserved=False) \
+ .filter_by(deleted=False) \
+ .count()
+
+
+def network_count_reserved_ips(_context, network_id):
+ with managed_session() as session:
+ return session.query(models.FixedIp) \
+ .filter_by(network_id=network_id) \
+ .filter_by(reserved=True) \
+ .filter_by(deleted=False) \
+ .count()
+
+
+def network_create(_context, values):
+ network_ref = models.Network()
+ for (key, value) in values.iteritems():
+ network_ref[key] = value
+ network_ref.save()
+ return network_ref
+
+
+def network_destroy(_context, network_id):
+ with managed_session(autocommit=False) as session:
+ # TODO(vish): do we have to use sql here?
+ session.execute('update networks set deleted=1 where id=:id',
+ {'id': network_id})
+ session.execute('update fixed_ips set deleted=1 where network_id=:id',
+ {'id': network_id})
+ session.execute('update floating_ips set deleted=1 '
+ 'where fixed_ip_id in '
+ '(select id from fixed_ips '
+ 'where network_id=:id)',
+ {'id': network_id})
+ session.execute('update network_indexes set network_id=NULL '
+ 'where network_id=:id',
+ {'id': network_id})
+ session.commit()
+
+
+def network_get(_context, network_id):
+ return models.Network.find(network_id)
+
+
+# pylint: disable-msg=C0103
+def network_get_associated_fixed_ips(_context, network_id):
+ with managed_session() as session:
+ return session.query(models.FixedIp) \
+ .filter_by(network_id=network_id) \
+ .filter(models.FixedIp.instance_id != None) \
+ .filter_by(deleted=False) \
+ .all()
+
+
+def network_get_by_bridge(_context, bridge):
+ with managed_session() as session:
+ rv = session.query(models.Network) \
+ .filter_by(bridge=bridge) \
+ .filter_by(deleted=False) \
+ .first()
+ if not rv:
+ raise exception.NotFound('No network for bridge %s' % bridge)
+ return rv
+
+
+def network_get_host(_context, network_id):
+ network_ref = network_get(_context, network_id)
+ return network_ref['host']
+
+
+def network_get_index(_context, network_id):
+ with managed_session(autocommit=False) as session:
+ network_index = session.query(models.NetworkIndex) \
+ .filter_by(network_id=None) \
+ .filter_by(deleted=False) \
+ .with_lockmode('update') \
+ .first()
+ if not network_index:
+ raise db.NoMoreNetworks()
+ network_index['network'] = models.Network.find(network_id,
+ session=session)
+ session.add(network_index)
+ session.commit()
+ return network_index['index']
+
+
+def network_index_count(_context):
+ return models.NetworkIndex.count()
+
+
+def network_index_create(_context, values):
+ network_index_ref = models.NetworkIndex()
+ for (key, value) in values.iteritems():
+ network_index_ref[key] = value
+ network_index_ref.save()
+
+
+def network_set_host(_context, network_id, host_id):
+ with managed_session(autocommit=False) as session:
+ network = session.query(models.Network) \
+ .filter_by(id=network_id) \
+ .filter_by(deleted=False) \
+ .with_lockmode('update') \
+ .first()
+ if not network:
+ raise exception.NotFound("Couldn't find network with %s" %
+ network_id)
+ # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
+ # then this has concurrency issues
+ if network.host:
+ session.commit()
+ return network['host']
+ network['host'] = host_id
+ session.add(network)
+ session.commit()
+ return network['host']
+
+
+def network_update(_context, network_id, values):
+ network_ref = network_get(_context, network_id)
+ for (key, value) in values.iteritems():
+ network_ref[key] = value
+ network_ref.save()
+
+
+###################
+
+
+def project_get_network(_context, project_id):
+ with managed_session() as session:
+ rv = session.query(models.Network) \
+ .filter_by(project_id=project_id) \
+ .filter_by(deleted=False) \
+ .first()
+ if not rv:
+ raise exception.NotFound('No network for project: %s' % project_id)
+ return rv
+
+
+###################
+
+
+def queue_get_for(_context, topic, physical_node_id):
+ # FIXME(ja): this should be servername?
+ return "%s.%s" % (topic, physical_node_id)
+
+###################
+
+
+def export_device_count(_context):
+ return models.ExportDevice.count()
+
+
+def export_device_create(_context, values):
+ export_device_ref = models.ExportDevice()
+ for (key, value) in values.iteritems():
+ export_device_ref[key] = value
+ export_device_ref.save()
+ return export_device_ref
+
+
+###################
+
+
+def volume_allocate_shelf_and_blade(_context, volume_id):
+ with managed_session(autocommit=False) as session:
+ export_device = session.query(models.ExportDevice) \
+ .filter_by(volume=None) \
+ .filter_by(deleted=False) \
+ .with_lockmode('update') \
+ .first()
+ # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
+ # then this has concurrency issues
+ if not export_device:
+ raise db.NoMoreBlades()
+ export_device.volume_id = volume_id
+ session.add(export_device)
+ session.commit()
+ return (export_device.shelf_id, export_device.blade_id)
+
+
+def volume_attached(_context, volume_id, instance_id, mountpoint):
+ volume_ref = volume_get(_context, volume_id)
+ volume_ref.instance_id = instance_id
+ volume_ref['status'] = 'in-use'
+ volume_ref['mountpoint'] = mountpoint
+ volume_ref['attach_status'] = 'attached'
+ volume_ref.save()
+
+
+def volume_create(_context, values):
+ volume_ref = models.Volume()
+ for (key, value) in values.iteritems():
+ volume_ref[key] = value
+ volume_ref.save()
+ return volume_ref
+
+
+def volume_destroy(_context, volume_id):
+ with managed_session(autocommit=False) as session:
+ # TODO(vish): do we have to use sql here?
+ session.execute('update volumes set deleted=1 where id=:id',
+ {'id': volume_id})
+ session.execute('update export_devices set volume_id=NULL '
+ 'where volume_id=:id',
+ {'id': volume_id})
+ session.commit()
+
+
+def volume_detached(_context, volume_id):
+ volume_ref = volume_get(_context, volume_id)
+ volume_ref['instance_id'] = None
+ volume_ref['mountpoint'] = None
+ volume_ref['status'] = 'available'
+ volume_ref['attach_status'] = 'detached'
+ volume_ref.save()
+
+
+def volume_get(_context, volume_id):
+ return models.Volume.find(volume_id)
+
+
+def volume_get_all(_context):
+ return models.Volume.all()
+
+
+def volume_get_by_project(_context, project_id):
+ with managed_session() as session:
+ return session.query(models.Volume) \
+ .filter_by(project_id=project_id) \
+ .filter_by(deleted=False) \
+ .all()
+
+
+def volume_get_by_str(_context, str_id):
+ return models.Volume.find_by_str(str_id)
+
+
+def volume_get_host(_context, volume_id):
+ volume_ref = volume_get(_context, volume_id)
+ return volume_ref['host']
+
+
+def volume_get_shelf_and_blade(_context, volume_id):
+ with managed_session() as session:
+ export_device = session.query(models.ExportDevice) \
+ .filter_by(volume_id=volume_id) \
+ .first()
+ if not export_device:
+ raise exception.NotFound()
+ return (export_device.shelf_id, export_device.blade_id)
+
+
+def volume_update(_context, volume_id, values):
+ volume_ref = volume_get(_context, volume_id)
+ for (key, value) in values.iteritems():
+ volume_ref[key] = value
+ volume_ref.save()
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
new file mode 100644
index 000000000..9e15614f7
--- /dev/null
+++ b/nova/db/sqlalchemy/models.py
@@ -0,0 +1,390 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+SQLAlchemy models for nova data
+"""
+
+# TODO(vish): clean up these imports
+from sqlalchemy.orm import relationship, backref, validates, exc
+from sqlalchemy import Column, Integer, String
+from sqlalchemy import ForeignKey, DateTime, Boolean, Text
+from sqlalchemy.ext.declarative import declarative_base
+
+from nova.db.sqlalchemy.session import managed_session
+
+from nova import auth
+from nova import exception
+from nova import flags
+
+FLAGS = flags.FLAGS
+
+BASE = declarative_base()
+
+
+class NovaBase(object):
+ """Base class for Nova Models"""
+ __table_args__ = {'mysql_engine': 'InnoDB'}
+ __table_initialized__ = False
+ __prefix__ = 'none'
+ created_at = Column(DateTime)
+ updated_at = Column(DateTime)
+ deleted = Column(Boolean, default=False)
+
+ @classmethod
+ def all(cls, session=None):
+ """Get all objects of this type"""
+ if session:
+ return session.query(cls) \
+ .filter_by(deleted=False) \
+ .all()
+ else:
+ with managed_session() as sess:
+ return cls.all(session=sess)
+
+ @classmethod
+ def count(cls, session=None):
+ """Count objects of this type"""
+ if session:
+ return session.query(cls) \
+ .filter_by(deleted=False) \
+ .count()
+ else:
+ with managed_session() as sess:
+ return cls.count(session=sess)
+
+ @classmethod
+ def find(cls, obj_id, session=None):
+ """Find object by id"""
+ if session:
+ try:
+ return session.query(cls) \
+ .filter_by(id=obj_id) \
+ .filter_by(deleted=False) \
+ .one()
+ except exc.NoResultFound:
+ raise exception.NotFound("No model for id %s" % obj_id)
+ else:
+ with managed_session() as sess:
+ return cls.find(obj_id, session=sess)
+
+ @classmethod
+ def find_by_str(cls, str_id, session=None):
+ """Find object by str_id"""
+ int_id = int(str_id.rpartition('-')[2])
+ return cls.find(int_id, session=session)
+
+ @property
+ def str_id(self):
+ """Get string id of object (generally prefix + '-' + id)"""
+ return "%s-%s" % (self.__prefix__, self.id)
+
+ def save(self, session=None):
+ """Save this object"""
+ if session:
+ session.add(self)
+ session.flush()
+ else:
+ with managed_session() as sess:
+ self.save(session=sess)
+
+ def delete(self, session=None):
+ """Delete this object"""
+ self.deleted = True
+ self.save(session=session)
+
+ def __setitem__(self, key, value):
+ setattr(self, key, value)
+
+ def __getitem__(self, key):
+ return getattr(self, key)
+
+
+class Image(BASE, NovaBase):
+ """Represents an image in the datastore"""
+ __tablename__ = 'images'
+ __prefix__ = 'ami'
+ id = Column(Integer, primary_key=True)
+ user_id = Column(String(255))
+ project_id = Column(String(255))
+ image_type = Column(String(255))
+ public = Column(Boolean, default=False)
+ state = Column(String(255))
+ location = Column(String(255))
+ arch = Column(String(255))
+ default_kernel_id = Column(String(255))
+ default_ramdisk_id = Column(String(255))
+
+ @validates('image_type')
+ def validate_image_type(self, key, image_type):
+ assert(image_type in ['machine', 'kernel', 'ramdisk', 'raw'])
+
+ @validates('state')
+ def validate_state(self, key, state):
+ assert(state in ['available', 'pending', 'disabled'])
+
+ @validates('default_kernel_id')
+ def validate_kernel_id(self, key, val):
+ if val != 'machine':
+ assert(val is None)
+
+ @validates('default_ramdisk_id')
+ def validate_ramdisk_id(self, key, val):
+ if val != 'machine':
+ assert(val is None)
+
+
+class Host(BASE, NovaBase):
+ """Represents a host where services are running"""
+ __tablename__ = 'hosts'
+ id = Column(String(255), primary_key=True)
+
+
+class Daemon(BASE, NovaBase):
+ """Represents a running service on a host"""
+ __tablename__ = 'daemons'
+ id = Column(Integer, primary_key=True)
+ host = Column(String(255), ForeignKey('hosts.id'))
+ binary = Column(String(255))
+ report_count = Column(Integer, nullable=False, default=0)
+
+ @classmethod
+ def find_by_args(cls, host, binary, session=None):
+ if session:
+ try:
+ return session.query(cls) \
+ .filter_by(host=host) \
+ .filter_by(binary=binary) \
+ .filter_by(deleted=False) \
+ .one()
+ except exc.NoResultFound:
+ raise exception.NotFound("No model for %s, %s" % (host,
+ binary))
+ else:
+ with managed_session() as sess:
+ return cls.find_by_args(host, binary, session=sess)
+
+
+class Instance(BASE, NovaBase):
+ """Represents a guest vm"""
+ __tablename__ = 'instances'
+ __prefix__ = 'i'
+ id = Column(Integer, primary_key=True)
+
+ user_id = Column(String(255))
+ project_id = Column(String(255))
+
+ @property
+ def user(self):
+ return auth.manager.AuthManager().get_user(self.user_id)
+
+ @property
+ def project(self):
+ return auth.manager.AuthManager().get_project(self.project_id)
+
+ @property
+ def name(self):
+ return self.str_id
+
+ image_id = Column(Integer, ForeignKey('images.id'), nullable=True)
+ kernel_id = Column(Integer, ForeignKey('images.id'), nullable=True)
+ ramdisk_id = Column(Integer, ForeignKey('images.id'), nullable=True)
+# ramdisk = relationship(Ramdisk, backref=backref('instances', order_by=id))
+# kernel = relationship(Kernel, backref=backref('instances', order_by=id))
+# project = relationship(Project, backref=backref('instances', order_by=id))
+
+ launch_index = Column(Integer)
+ key_name = Column(String(255))
+ key_data = Column(Text)
+ security_group = Column(String(255))
+
+ state = Column(Integer)
+ state_description = Column(String(255))
+
+ hostname = Column(String(255))
+ host = Column(String(255), ForeignKey('hosts.id'))
+
+ instance_type = Column(Integer)
+
+ user_data = Column(Text)
+
+ reservation_id = Column(String(255))
+ mac_address = Column(String(255))
+
+ def set_state(self, state_code, state_description=None):
+ """Set the code and description of an instance"""
+ # TODO(devcamcar): Move this out of models and into driver
+ from nova.compute import power_state
+ self.state = state_code
+ if not state_description:
+ state_description = power_state.name(state_code)
+ self.state_description = state_description
+ self.save()
+
+ # TODO(vish): see Ewan's email about state improvements, probably
+ # should be in a driver base class or some such
+ # vmstate_state = running, halted, suspended, paused
+ # power_state = what we have
+ # task_state = transitory and may trigger power state transition
+
+ #@validates('state')
+ #def validate_state(self, key, state):
+ # assert(state in ['nostate', 'running', 'blocked', 'paused',
+ # 'shutdown', 'shutoff', 'crashed'])
+
+
+class Volume(BASE, NovaBase):
+ """Represents a block storage device that can be attached to a vm"""
+ __tablename__ = 'volumes'
+ __prefix__ = 'vol'
+ id = Column(Integer, primary_key=True)
+
+ user_id = Column(String(255))
+ project_id = Column(String(255))
+
+ host = Column(String(255), ForeignKey('hosts.id'))
+ size = Column(Integer)
+ availability_zone = Column(String(255)) # TODO(vish): foreign key?
+ instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True)
+ mountpoint = Column(String(255))
+ attach_time = Column(String(255)) # TODO(vish): datetime
+ status = Column(String(255)) # TODO(vish): enum?
+ attach_status = Column(String(255)) # TODO(vish): enum
+
+
+class ExportDevice(BASE, NovaBase):
+ """Represents a shelf and blade that a volume can be exported on"""
+ __tablename__ = 'export_devices'
+ id = Column(Integer, primary_key=True)
+ shelf_id = Column(Integer)
+ blade_id = Column(Integer)
+ volume_id = Column(Integer, ForeignKey('volumes.id'), nullable=True)
+ volume = relationship(Volume, backref=backref('export_device',
+ uselist=False))
+
+
+class Network(BASE, NovaBase):
+ """Represents a network"""
+ __tablename__ = 'networks'
+ id = Column(Integer, primary_key=True)
+
+ injected = Column(Boolean, default=False)
+ cidr = Column(String(255))
+ netmask = Column(String(255))
+ bridge = Column(String(255))
+ gateway = Column(String(255))
+ broadcast = Column(String(255))
+ dns = Column(String(255))
+
+ vlan = Column(Integer)
+ vpn_public_address = Column(String(255))
+ vpn_public_port = Column(Integer)
+ vpn_private_address = Column(String(255))
+ dhcp_start = Column(String(255))
+
+ project_id = Column(String(255))
+ host = Column(String(255), ForeignKey('hosts.id'))
+
+
+class NetworkIndex(BASE, NovaBase):
+ """Represents a unique offset for a network
+
+ Currently vlan number, vpn port, and fixed ip ranges are keyed off of
+ this index. These may ultimately need to be converted to separate
+ pools.
+ """
+ __tablename__ = 'network_indexes'
+ id = Column(Integer, primary_key=True)
+ index = Column(Integer)
+ network_id = Column(Integer, ForeignKey('networks.id'), nullable=True)
+ network = relationship(Network, backref=backref('network_index',
+ uselist=False))
+
+
+# TODO(vish): can these both come from the same baseclass?
+class FixedIp(BASE, NovaBase):
+ """Represents a fixed ip for an instance"""
+ __tablename__ = 'fixed_ips'
+ id = Column(Integer, primary_key=True)
+ address = Column(String(255))
+ network_id = Column(Integer, ForeignKey('networks.id'), nullable=True)
+ network = relationship(Network, backref=backref('fixed_ips'))
+ instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True)
+ instance = relationship(Instance, backref=backref('fixed_ip',
+ uselist=False))
+ allocated = Column(Boolean, default=False)
+ leased = Column(Boolean, default=False)
+ reserved = Column(Boolean, default=False)
+
+ @property
+ def str_id(self):
+ return self.address
+
+ @classmethod
+ def find_by_str(cls, str_id, session=None):
+ if session:
+ try:
+ return session.query(cls) \
+ .filter_by(address=str_id) \
+ .filter_by(deleted=False) \
+ .one()
+ except exc.NoResultFound:
+ raise exception.NotFound("No model for address %s" % str_id)
+ else:
+ with managed_session() as sess:
+ return cls.find_by_str(str_id, session=sess)
+
+
+class FloatingIp(BASE, NovaBase):
+ """Represents a floating ip that dynamically forwards to a fixed ip"""
+ __tablename__ = 'floating_ips'
+ id = Column(Integer, primary_key=True)
+ address = Column(String(255))
+ fixed_ip_id = Column(Integer, ForeignKey('fixed_ips.id'), nullable=True)
+ fixed_ip = relationship(FixedIp, backref=backref('floating_ips'))
+
+ project_id = Column(String(255))
+ host = Column(String(255), ForeignKey('hosts.id'))
+
+ @property
+ def str_id(self):
+ return self.address
+
+ @classmethod
+ def find_by_str(cls, str_id, session=None):
+ if session:
+ try:
+ return session.query(cls) \
+ .filter_by(address=str_id) \
+ .filter_by(deleted=False) \
+ .one()
+ except exc.NoResultFound:
+ raise exception.NotFound("No model for address %s" % str_id)
+ else:
+ with managed_session() as sess:
+ return cls.find_by_str(str_id, session=sess)
+
+
+def register_models():
+ """Register Models and create metadata"""
+ from sqlalchemy import create_engine
+ models = (Image, Host, Daemon, Instance, Volume, ExportDevice,
+ FixedIp, FloatingIp, Network, NetworkIndex)
+ engine = create_engine(FLAGS.sql_connection, echo=False)
+ for model in models:
+ model.metadata.create_all(engine)
diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py
new file mode 100644
index 000000000..70e3212e1
--- /dev/null
+++ b/nova/db/sqlalchemy/session.py
@@ -0,0 +1,55 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Session Handling for SQLAlchemy backend
+"""
+
+import logging
+
+from sqlalchemy import create_engine
+from sqlalchemy.orm import create_session
+
+from nova import flags
+
+FLAGS = flags.FLAGS
+
+
+def managed_session(autocommit=True):
+ """Helper method to grab session manager"""
+ return SessionExecutionManager(autocommit=autocommit)
+
+
+class SessionExecutionManager:
+ """Session manager supporting with .. as syntax"""
+ _engine = None
+ _session = None
+
+ def __init__(self, autocommit):
+ if not self._engine:
+ self._engine = create_engine(FLAGS.sql_connection, echo=False)
+ self._session = create_session(bind=self._engine,
+ autocommit=autocommit)
+
+ def __enter__(self):
+ return self._session
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ if exc_type:
+ logging.exception("Rolling back due to failed transaction")
+ self._session.rollback()
+ self._session.close()
diff --git a/nova/endpoint/__init__.py b/nova/endpoint/__init__.py
index 753685149..e69de29bb 100644
--- a/nova/endpoint/__init__.py
+++ b/nova/endpoint/__init__.py
@@ -1,32 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-:mod:`nova.endpoint` -- Main NOVA Api endpoints
-=====================================================
-
-.. automodule:: nova.endpoint
- :platform: Unix
- :synopsis: REST APIs for all nova functions
-.. moduleauthor:: Jesse Andrews <jesse@ansolabs.com>
-.. moduleauthor:: Devin Carlen <devin.carlen@gmail.com>
-.. moduleauthor:: Vishvananda Ishaya <vishvananda@yahoo.com>
-.. moduleauthor:: Joshua McKenty <joshua@cognition.ca>
-.. moduleauthor:: Manish Singh <yosh@gimp.org>
-.. moduleauthor:: Andy Smith <andy@anarkystic.com>
-"""
diff --git a/nova/endpoint/admin.py b/nova/endpoint/admin.py
index 4f4824fca..d6f622755 100644
--- a/nova/endpoint/admin.py
+++ b/nova/endpoint/admin.py
@@ -37,6 +37,7 @@ def user_dict(user, base64_file=None):
else:
return {}
+
def project_dict(project):
"""Convert the project object to a result dict"""
if project:
@@ -47,6 +48,7 @@ def project_dict(project):
else:
return {}
+
def host_dict(host):
"""Convert a host model object to a result dict"""
if host:
@@ -54,6 +56,7 @@ def host_dict(host):
else:
return {}
+
def admin_only(target):
"""Decorator for admin-only API calls"""
def wrapper(*args, **kwargs):
@@ -66,6 +69,7 @@ def admin_only(target):
return wrapper
+
class AdminController(object):
"""
API Controller for users, hosts, nodes, and workers.
diff --git a/nova/endpoint/api.py b/nova/endpoint/api.py
index 78a18b9ea..40be00bb7 100755
--- a/nova/endpoint/api.py
+++ b/nova/endpoint/api.py
@@ -25,12 +25,13 @@ import logging
import multiprocessing
import random
import re
-import tornado.web
-from twisted.internet import defer
import urllib
# TODO(termie): replace minidom with etree
from xml.dom import minidom
+import tornado.web
+from twisted.internet import defer
+
from nova import crypto
from nova import exception
from nova import flags
@@ -43,6 +44,7 @@ from nova.endpoint import cloud
FLAGS = flags.FLAGS
flags.DEFINE_integer('cc_port', 8773, 'cloud controller port')
+
_log = logging.getLogger("api")
_log.setLevel(logging.DEBUG)
@@ -227,6 +229,7 @@ class MetadataRequestHandler(tornado.web.RequestHandler):
self.print_data(data)
self.finish()
+
class APIRequestHandler(tornado.web.RequestHandler):
def get(self, controller_name):
self.execute(controller_name)
diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py
index 43edd3575..4e86145db 100644
--- a/nova/endpoint/cloud.py
+++ b/nova/endpoint/cloud.py
@@ -26,27 +26,22 @@ import base64
import logging
import os
import time
+
from twisted.internet import defer
-from nova import datastore
+from nova import db
from nova import exception
from nova import flags
from nova import rpc
from nova import utils
from nova.auth import rbac
from nova.auth import manager
-from nova.compute import model
from nova.compute.instance_types import INSTANCE_TYPES
from nova.endpoint import images
-from nova.network import service as network_service
-from nova.network import model as network_model
-from nova.volume import service
FLAGS = flags.FLAGS
-flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on')
-
def _gen_key(user_id, key_name):
""" Tuck this into AuthManager """
@@ -64,26 +59,16 @@ class CloudController(object):
sent to the other nodes.
"""
def __init__(self):
- self.instdir = model.InstanceDirectory()
+ self.network_manager = utils.import_object(FLAGS.network_manager)
self.setup()
- @property
- def instances(self):
- """ All instances in the system, as dicts """
- return self.instdir.all
-
- @property
- def volumes(self):
- """ returns a list of all volumes """
- for volume_id in datastore.Redis.instance().smembers("volumes"):
- volume = service.get_volume(volume_id)
- yield volume
-
def __str__(self):
return 'CloudController'
def setup(self):
""" Ensure the keychains and folders exist. """
+ # FIXME(ja): this should be moved to a nova-manage command,
+ # if not setup throw exceptions instead of running
# Create keys folder, if it doesn't exist
if not os.path.exists(FLAGS.keys_path):
os.makedirs(FLAGS.keys_path)
@@ -92,27 +77,23 @@ class CloudController(object):
if not os.path.exists(root_ca_path):
start = os.getcwd()
os.chdir(FLAGS.ca_path)
+ # TODO: Do this with M2Crypto instead
utils.runthis("Generating root CA: %s", "sh genrootca.sh")
os.chdir(start)
- # TODO: Do this with M2Crypto instead
-
- def get_instance_by_ip(self, ip):
- return self.instdir.by_ip(ip)
def _get_mpi_data(self, project_id):
result = {}
- for instance in self.instdir.all:
- if instance['project_id'] == project_id:
- line = '%s slots=%d' % (instance['private_dns_name'],
- INSTANCE_TYPES[instance['instance_type']]['vcpus'])
- if instance['key_name'] in result:
- result[instance['key_name']].append(line)
- else:
- result[instance['key_name']] = [line]
+ for instance in db.instance_get_by_project(project_id):
+ line = '%s slots=%d' % (instance.fixed_ip['str_id'],
+ INSTANCE_TYPES[instance['instance_type']]['vcpus'])
+ if instance['key_name'] in result:
+ result[instance['key_name']].append(line)
+ else:
+ result[instance['key_name']] = [line]
return result
def get_metadata(self, ipaddress):
- i = self.get_instance_by_ip(ipaddress)
+ i = db.fixed_ip_get_instance(ipaddress)
if i is None:
return None
mpi = self._get_mpi_data(i['project_id'])
@@ -125,12 +106,7 @@ class CloudController(object):
}
else:
keys = ''
-
- address_record = network_model.FixedIp(i['private_dns_name'])
- if address_record:
- hostname = address_record['hostname']
- else:
- hostname = 'ip-%s' % i['private_dns_name'].replace('.', '-')
+ hostname = i['hostname']
data = {
'user-data': base64.b64decode(i['user_data']),
'meta-data': {
@@ -252,40 +228,38 @@ class CloudController(object):
@rbac.allow('projectmanager', 'sysadmin')
def get_console_output(self, context, instance_id, **kwargs):
# instance_id is passed in as a list of instances
- instance = self._get_instance(context, instance_id[0])
- return rpc.call('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
- {"method": "get_console_output",
- "args": {"instance_id": instance_id[0]}})
-
- def _get_user_id(self, context):
- if context and context.user:
- return context.user.id
- else:
- return None
+ instance_ref = db.instance_get_by_str(context, instance_id[0])
+ return rpc.call('%s.%s' % (FLAGS.compute_topic,
+ instance_ref['host']),
+ {"method": "get_console_output",
+ "args": {"context": None,
+ "instance_id": instance_ref['id']}})
@rbac.allow('projectmanager', 'sysadmin')
def describe_volumes(self, context, **kwargs):
- volumes = []
- for volume in self.volumes:
- if context.user.is_admin() or volume['project_id'] == context.project.id:
- v = self.format_volume(context, volume)
- volumes.append(v)
- return defer.succeed({'volumeSet': volumes})
-
- def format_volume(self, context, volume):
+ if context.user.is_admin():
+ volumes = db.volume_get_all(context)
+ else:
+ volumes = db.volume_get_by_project(context, context.project.id)
+
+ volumes = [self._format_volume(context, v) for v in volumes]
+
+ return {'volumeSet': volumes}
+
+ def _format_volume(self, context, volume):
v = {}
- v['volumeId'] = volume['volume_id']
+ v['volumeId'] = volume['str_id']
v['status'] = volume['status']
v['size'] = volume['size']
v['availabilityZone'] = volume['availability_zone']
- v['createTime'] = volume['create_time']
+ # v['createTime'] = volume['create_time']
if context.user.is_admin():
v['status'] = '%s (%s, %s, %s, %s)' % (
- volume.get('status', None),
- volume.get('user_id', None),
- volume.get('node_name', None),
- volume.get('instance_id', ''),
- volume.get('mountpoint', ''))
+ volume['status'],
+ volume['user_id'],
+ 'host',
+ volume['instance_id'],
+ volume['mountpoint'])
if volume['attach_status'] == 'attached':
v['attachmentSet'] = [{'attachTime': volume['attach_time'],
'deleteOnTermination': volume['delete_on_termination'],
@@ -298,96 +272,72 @@ class CloudController(object):
return v
@rbac.allow('projectmanager', 'sysadmin')
- @defer.inlineCallbacks
def create_volume(self, context, size, **kwargs):
- # TODO(vish): refactor this to create the volume object here and tell service to create it
- result = yield rpc.call(FLAGS.volume_topic, {"method": "create_volume",
- "args": {"size": size,
- "user_id": context.user.id,
- "project_id": context.project.id}})
- # NOTE(vish): rpc returned value is in the result key in the dictionary
- volume = self._get_volume(context, result)
- defer.returnValue({'volumeSet': [self.format_volume(context, volume)]})
-
- def _get_address(self, context, public_ip):
- # FIXME(vish) this should move into network.py
- address = network_model.ElasticIp.lookup(public_ip)
- if address and (context.user.is_admin() or address['project_id'] == context.project.id):
- return address
- raise exception.NotFound("Address at ip %s not found" % public_ip)
-
- def _get_image(self, context, image_id):
- """passes in context because
- objectstore does its own authorization"""
- result = images.list(context, [image_id])
- if not result:
- raise exception.NotFound('Image %s could not be found' % image_id)
- image = result[0]
- return image
-
- def _get_instance(self, context, instance_id):
- for instance in self.instdir.all:
- if instance['instance_id'] == instance_id:
- if context.user.is_admin() or instance['project_id'] == context.project.id:
- return instance
- raise exception.NotFound('Instance %s could not be found' % instance_id)
-
- def _get_volume(self, context, volume_id):
- volume = service.get_volume(volume_id)
- if context.user.is_admin() or volume['project_id'] == context.project.id:
- return volume
- raise exception.NotFound('Volume %s could not be found' % volume_id)
+ vol = {}
+ vol['size'] = size
+ vol['user_id'] = context.user.id
+ vol['project_id'] = context.project.id
+ vol['availability_zone'] = FLAGS.storage_availability_zone
+ vol['status'] = "creating"
+ vol['attach_status'] = "detached"
+ volume_ref = db.volume_create(context, vol)
+
+ rpc.cast(FLAGS.volume_topic, {"method": "create_volume",
+ "args": {"context": None,
+ "volume_id": volume_ref['id']}})
+
+ return {'volumeSet': [self._format_volume(context, volume_ref)]}
+
@rbac.allow('projectmanager', 'sysadmin')
def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
- volume = self._get_volume(context, volume_id)
- if volume['status'] == "attached":
+ volume_ref = db.volume_get_by_str(context, volume_id)
+ # TODO(vish): abstract status checking?
+ if volume_ref['status'] == "attached":
raise exception.ApiError("Volume is already attached")
- # TODO(vish): looping through all volumes is slow. We should probably maintain an index
- for vol in self.volumes:
- if vol['instance_id'] == instance_id and vol['mountpoint'] == device:
- raise exception.ApiError("Volume %s is already attached to %s" % (vol['volume_id'], vol['mountpoint']))
- volume.start_attach(instance_id, device)
- instance = self._get_instance(context, instance_id)
- compute_node = instance['node_name']
- rpc.cast('%s.%s' % (FLAGS.compute_topic, compute_node),
+ #volume.start_attach(instance_id, device)
+ instance_ref = db.instance_get_by_str(context, instance_id)
+ host = db.instance_get_host(context, instance_ref['id'])
+ rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "attach_volume",
- "args": {"volume_id": volume_id,
- "instance_id": instance_id,
- "mountpoint": device}})
- return defer.succeed({'attachTime': volume['attach_time'],
- 'device': volume['mountpoint'],
- 'instanceId': instance_id,
+ "args": {"context": None,
+ "volume_id": volume_ref['id'],
+ "instance_id": instance_ref['id'],
+ "mountpoint": device}})
+ return defer.succeed({'attachTime': volume_ref['attach_time'],
+ 'device': volume_ref['mountpoint'],
+ 'instanceId': instance_ref['id_str'],
'requestId': context.request_id,
- 'status': volume['attach_status'],
- 'volumeId': volume_id})
-
+ 'status': volume_ref['attach_status'],
+ 'volumeId': volume_ref['id']})
@rbac.allow('projectmanager', 'sysadmin')
def detach_volume(self, context, volume_id, **kwargs):
- volume = self._get_volume(context, volume_id)
- instance_id = volume.get('instance_id', None)
- if not instance_id:
+ volume_ref = db.volume_get_by_str(context, volume_id)
+ instance_ref = db.volume_get_instance(context, volume_ref['id'])
+ if not instance_ref:
raise exception.Error("Volume isn't attached to anything!")
- if volume['status'] == "available":
+ # TODO(vish): abstract status checking?
+ if volume_ref['status'] == "available":
raise exception.Error("Volume is already detached")
try:
- volume.start_detach()
- instance = self._get_instance(context, instance_id)
- rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
+ #volume.start_detach()
+ host = db.instance_get_host(context, instance_ref['id'])
+ rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "detach_volume",
- "args": {"instance_id": instance_id,
- "volume_id": volume_id}})
+ "args": {"context": None,
+ "instance_id": instance_ref['id'],
+ "volume_id": volume_ref['id']}})
except exception.NotFound:
# If the instance doesn't exist anymore,
# then we need to call detach blind
- volume.finish_detach()
- return defer.succeed({'attachTime': volume['attach_time'],
- 'device': volume['mountpoint'],
- 'instanceId': instance_id,
+ db.volume_detached(context)
+ return defer.succeed({'attachTime': volume_ref['attach_time'],
+ 'device': volume_ref['mountpoint'],
+ 'instanceId': instance_ref['id_str'],
'requestId': context.request_id,
- 'status': volume['attach_status'],
- 'volumeId': volume_id})
+ 'status': volume_ref['attach_status'],
+ 'volumeId': volume_ref['id']})
def _convert_to_set(self, lst, label):
if lst == None or lst == []:
@@ -408,52 +358,52 @@ class CloudController(object):
assert len(i) == 1
return i[0]
- def _format_instances(self, context, reservation_id = None):
+ def _format_instances(self, context, reservation_id=None):
reservations = {}
- if context.user.is_admin():
- instgenerator = self.instdir.all
+ if reservation_id:
+ instances = db.instance_get_by_reservation(context, reservation_id)
else:
- instgenerator = self.instdir.by_project(context.project.id)
- for instance in instgenerator:
- res_id = instance.get('reservation_id', 'Unknown')
- if reservation_id != None and reservation_id != res_id:
- continue
+ if not context.user.is_admin():
+ instances = db.instance_get_all(context)
+ else:
+ instances = db.instance_get_by_project(context, context.project.id)
+ for instance in instances:
if not context.user.is_admin():
if instance['image_id'] == FLAGS.vpn_image_id:
continue
i = {}
- i['instance_id'] = instance.get('instance_id', None)
- i['image_id'] = instance.get('image_id', None)
- i['instance_state'] = {
- 'code': instance.get('state', 0),
- 'name': instance.get('state_description', 'pending')
+ i['instanceId'] = instance['str_id']
+ i['imageId'] = instance['image_id']
+ i['instanceState'] = {
+ 'code': instance['state'],
+ 'name': instance['state_description']
}
- i['public_dns_name'] = network_model.get_public_ip_for_instance(
- i['instance_id'])
- i['private_dns_name'] = instance.get('private_dns_name', None)
+ floating_addr = db.instance_get_floating_address(context,
+ instance['id'])
+ i['public_dns_name'] = floating_addr
+ fixed_addr = db.instance_get_fixed_address(context,
+ instance['id'])
+ i['private_dns_name'] = fixed_addr
if not i['public_dns_name']:
i['public_dns_name'] = i['private_dns_name']
- i['dns_name'] = instance.get('dns_name', None)
- i['key_name'] = instance.get('key_name', None)
+ i['dns_name'] = None
+ i['key_name'] = instance.key_name
if context.user.is_admin():
i['key_name'] = '%s (%s, %s)' % (i['key_name'],
- instance.get('project_id', None),
- instance.get('node_name', ''))
- i['product_codes_set'] = self._convert_to_set(
- instance.get('product_codes', None), 'product_code')
- i['instance_type'] = instance.get('instance_type', None)
- i['launch_time'] = instance.get('launch_time', None)
- i['ami_launch_index'] = instance.get('ami_launch_index',
- None)
- if not reservations.has_key(res_id):
+ instance.project_id,
+ 'host') # FIXME
+ i['product_codes_set'] = self._convert_to_set([], 'product_codes')
+ i['instance_type'] = instance.instance_type
+ i['launch_time'] = instance.created_at
+ i['ami_launch_index'] = instance.launch_index
+ if not reservations.has_key(instance['reservation_id']):
r = {}
- r['reservation_id'] = res_id
- r['owner_id'] = instance.get('project_id', None)
- r['group_set'] = self._convert_to_set(
- instance.get('groups', None), 'group_id')
+ r['reservation_id'] = instance['reservation_id']
+ r['owner_id'] = instance.project_id
+ r['group_set'] = self._convert_to_set([], 'groups')
r['instances_set'] = []
- reservations[res_id] = r
- reservations[res_id]['instances_set'].append(i)
+ reservations[instance['reservation_id']] = r
+ reservations[instance['reservation_id']]['instances_set'].append(i)
return list(reservations.values())
@@ -463,20 +413,23 @@ class CloudController(object):
def format_addresses(self, context):
addresses = []
- for address in network_model.ElasticIp.all():
- # TODO(vish): implement a by_project iterator for addresses
- if (context.user.is_admin() or
- address['project_id'] == context.project.id):
- address_rv = {
- 'public_ip': address['address'],
- 'instance_id': address.get('instance_id', 'free')
- }
- if context.user.is_admin():
- address_rv['instance_id'] = "%s (%s, %s)" % (
- address['instance_id'],
- address['user_id'],
- address['project_id'],
- )
+ if context.user.is_admin():
+ iterator = db.floating_ip_get_all(context)
+ else:
+ iterator = db.floating_ip_get_by_project(context,
+ context.project.id)
+ for floating_ip_ref in iterator:
+ address = floating_ip_ref['id_str']
+ instance_ref = db.floating_ip_get_instance(address)
+ address_rv = {
+ 'public_ip': address,
+ 'instance_id': instance_ref['id_str']
+ }
+ if context.user.is_admin():
+ address_rv['instance_id'] = "%s (%s)" % (
+ address_rv['instance_id'],
+ floating_ip_ref['project_id'],
+ )
addresses.append(address_rv)
return {'addressesSet': addresses}
@@ -485,8 +438,8 @@ class CloudController(object):
def allocate_address(self, context, **kwargs):
network_topic = yield self._get_network_topic(context)
public_ip = yield rpc.call(network_topic,
- {"method": "allocate_elastic_ip",
- "args": {"user_id": context.user.id,
+ {"method": "allocate_floating_ip",
+ "args": {"context": None,
"project_id": context.project.id}})
defer.returnValue({'addressSet': [{'publicIp': public_ip}]})
@@ -494,56 +447,63 @@ class CloudController(object):
@defer.inlineCallbacks
def release_address(self, context, public_ip, **kwargs):
# NOTE(vish): Should we make sure this works?
+ floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
network_topic = yield self._get_network_topic(context)
rpc.cast(network_topic,
- {"method": "deallocate_elastic_ip",
- "args": {"elastic_ip": public_ip}})
+ {"method": "deallocate_floating_ip",
+ "args": {"context": None,
+ "floating_ip": floating_ip_ref['str_id']}})
defer.returnValue({'releaseResponse': ["Address released."]})
@rbac.allow('netadmin')
@defer.inlineCallbacks
def associate_address(self, context, instance_id, public_ip, **kwargs):
- instance = self._get_instance(context, instance_id)
- address = self._get_address(context, public_ip)
+ instance_ref = db.instance_get_by_str(context, instance_id)
+ fixed_ip_ref = db.fixed_ip_get_by_instance(context, instance_ref['id'])
+ floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
network_topic = yield self._get_network_topic(context)
rpc.cast(network_topic,
- {"method": "associate_elastic_ip",
- "args": {"elastic_ip": address['address'],
- "fixed_ip": instance['private_dns_name'],
- "instance_id": instance['instance_id']}})
+ {"method": "associate_floating_ip",
+ "args": {"context": None,
+ "floating_ip": floating_ip_ref['str_id'],
+ "fixed_ip": fixed_ip_ref['str_id'],
+ "instance_id": instance_ref['id']}})
defer.returnValue({'associateResponse': ["Address associated."]})
@rbac.allow('netadmin')
@defer.inlineCallbacks
def disassociate_address(self, context, public_ip, **kwargs):
- address = self._get_address(context, public_ip)
+ floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
network_topic = yield self._get_network_topic(context)
rpc.cast(network_topic,
- {"method": "disassociate_elastic_ip",
- "args": {"elastic_ip": address['address']}})
+ {"method": "disassociate_floating_ip",
+ "args": {"context": None,
+ "floating_ip": floating_ip_ref['str_id']}})
defer.returnValue({'disassociateResponse': ["Address disassociated."]})
@defer.inlineCallbacks
def _get_network_topic(self, context):
"""Retrieves the network host for a project"""
- host = network_service.get_host_for_project(context.project.id)
+ network_ref = db.project_get_network(context, context.project.id)
+ host = db.network_get_host(context, network_ref['id'])
if not host:
host = yield rpc.call(FLAGS.network_topic,
{"method": "set_network_host",
- "args": {"user_id": context.user.id,
+ "args": {"context": None,
"project_id": context.project.id}})
- defer.returnValue('%s.%s' %(FLAGS.network_topic, host))
+ defer.returnValue(db.queue_get_for(context, FLAGS.network_topic, host))
@rbac.allow('projectmanager', 'sysadmin')
@defer.inlineCallbacks
def run_instances(self, context, **kwargs):
# make sure user can access the image
# vpn image is private so it doesn't show up on lists
- if kwargs['image_id'] != FLAGS.vpn_image_id:
- image = self._get_image(context, kwargs['image_id'])
+ vpn = kwargs['image_id'] == FLAGS.vpn_image_id
- # FIXME(ja): if image is cloudpipe, this breaks
+ if not vpn:
+ image = images.get(context, kwargs['image_id'])
+ # FIXME(ja): if image is vpn, this breaks
# get defaults from imagestore
image_id = image['imageId']
kernel_id = image.get('kernelId', FLAGS.default_kernel)
@@ -554,11 +514,10 @@ class CloudController(object):
ramdisk_id = kwargs.get('ramdisk_id', ramdisk_id)
# make sure we have access to kernel and ramdisk
- self._get_image(context, kernel_id)
- self._get_image(context, ramdisk_id)
+ images.get(context, kernel_id)
+ images.get(context, ramdisk_id)
logging.debug("Going to run instances...")
- reservation_id = utils.generate_uid('r')
launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
key_data = None
if kwargs.has_key('key_name'):
@@ -567,107 +526,122 @@ class CloudController(object):
raise exception.ApiError('Key Pair %s not found' %
kwargs['key_name'])
key_data = key_pair.public_key
- network_topic = yield self._get_network_topic(context)
+
# TODO: Get the real security group of launch in here
security_group = "default"
+
+ reservation_id = utils.generate_uid('r')
+ base_options = {}
+ base_options['image_id'] = image_id
+ base_options['kernel_id'] = kernel_id
+ base_options['ramdisk_id'] = ramdisk_id
+ base_options['reservation_id'] = reservation_id
+ base_options['key_data'] = key_data
+ base_options['key_name'] = kwargs.get('key_name', None)
+ base_options['user_id'] = context.user.id
+ base_options['project_id'] = context.project.id
+ base_options['user_data'] = kwargs.get('user_data', '')
+ base_options['instance_type'] = kwargs.get('instance_type', 'm1.small')
+ base_options['security_group'] = security_group
+
for num in range(int(kwargs['max_count'])):
- is_vpn = False
- if image_id == FLAGS.vpn_image_id:
- is_vpn = True
- inst = self.instdir.new()
- allocate_data = yield rpc.call(network_topic,
- {"method": "allocate_fixed_ip",
- "args": {"user_id": context.user.id,
- "project_id": context.project.id,
- "security_group": security_group,
- "is_vpn": is_vpn,
- "hostname": inst.instance_id}})
- inst['image_id'] = image_id
- inst['kernel_id'] = kernel_id
- inst['ramdisk_id'] = ramdisk_id
- inst['user_data'] = kwargs.get('user_data', '')
- inst['instance_type'] = kwargs.get('instance_type', 'm1.small')
- inst['reservation_id'] = reservation_id
- inst['launch_time'] = launch_time
- inst['key_data'] = key_data or ''
- inst['key_name'] = kwargs.get('key_name', '')
- inst['user_id'] = context.user.id
- inst['project_id'] = context.project.id
- inst['ami_launch_index'] = num
- inst['security_group'] = security_group
- inst['hostname'] = inst.instance_id
- for (key, value) in allocate_data.iteritems():
- inst[key] = value
-
- inst.save()
+ inst_id = db.instance_create(context, base_options)
+
+ inst = {}
+ inst['mac_address'] = utils.generate_mac()
+ inst['launch_index'] = num
+ inst['hostname'] = inst_id
+ db.instance_update(context, inst_id, inst)
+ address = self.network_manager.allocate_fixed_ip(context,
+ inst_id,
+ vpn)
+
+ # TODO(vish): This probably should be done in the scheduler
+ # network is setup when host is assigned
+ network_topic = yield self._get_network_topic(context)
+ rpc.call(network_topic,
+ {"method": "setup_fixed_ip",
+ "args": {"context": None,
+ "address": address}})
+
rpc.cast(FLAGS.scheduler_topic,
- {"method": "run_instance",
- "args": {"instance_id": inst.instance_id}})
- logging.debug("Casting to node for %s's instance with IP of %s" %
- (context.user.name, inst['private_dns_name']))
- # TODO: Make Network figure out the network name from ip.
- defer.returnValue(self._format_run_instances(context, reservation_id))
+ {"method": "run_instance",
+ "args": {"context": None,
+ "instance_id": inst_id}})
+ logging.debug("Casting to scheduler for %s/%s's instance %s" %
+ (context.project.name, context.user.name, inst_id))
+ defer.returnValue(self._format_run_instances(context,
+ reservation_id))
+
@rbac.allow('projectmanager', 'sysadmin')
@defer.inlineCallbacks
def terminate_instances(self, context, instance_id, **kwargs):
logging.debug("Going to start terminating instances")
- network_topic = yield self._get_network_topic(context)
- for i in instance_id:
- logging.debug("Going to try and terminate %s" % i)
+ # network_topic = yield self._get_network_topic(context)
+ for id_str in instance_id:
+ logging.debug("Going to try and terminate %s" % id_str)
try:
- instance = self._get_instance(context, i)
+ instance_ref = db.instance_get_by_str(context, id_str)
except exception.NotFound:
logging.warning("Instance %s was not found during terminate"
- % i)
+ % id_str)
continue
- elastic_ip = network_model.get_public_ip_for_instance(i)
- if elastic_ip:
- logging.debug("Disassociating address %s" % elastic_ip)
- # NOTE(vish): Right now we don't really care if the ip is
- # disassociated. We may need to worry about
- # checking this later. Perhaps in the scheduler?
- rpc.cast(network_topic,
- {"method": "disassociate_elastic_ip",
- "args": {"elastic_ip": elastic_ip}})
- fixed_ip = instance.get('private_dns_name', None)
- if fixed_ip:
- logging.debug("Deallocating address %s" % fixed_ip)
+ # FIXME(ja): where should network deallocate occur?
+ address = db.instance_get_floating_address(context,
+ instance_ref['id'])
+ if address:
+ logging.debug("Disassociating address %s" % address)
# NOTE(vish): Right now we don't really care if the ip is
- # actually removed. We may need to worry about
+ # disassociated. We may need to worry about
# checking this later. Perhaps in the scheduler?
+ network_topic = yield self._get_network_topic(context)
rpc.cast(network_topic,
- {"method": "deallocate_fixed_ip",
- "args": {"fixed_ip": fixed_ip}})
-
- if instance.get('node_name', 'unassigned') != 'unassigned':
- # NOTE(joshua?): It's also internal default
- rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
+ {"method": "disassociate_floating_ip",
+ "args": {"context": None,
+ "address": address}})
+
+ address = db.instance_get_fixed_address(context,
+ instance_ref['id'])
+ if address:
+ logging.debug("Deallocating address %s" % address)
+ # NOTE(vish): Currently, nothing needs to be done on the
+ # network node until release. If this changes,
+ # we will need to cast here.
+ db.fixed_ip_deallocate(context, address)
+
+ host = db.instance_get_host(context, instance_ref['id'])
+ if host:
+ rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "terminate_instance",
- "args": {"instance_id": i}})
+ "args": {"context": None,
+ "instance_id": instance_ref['id']}})
else:
- instance.destroy()
+ db.instance_destroy(context, instance_ref['id'])
defer.returnValue(True)
@rbac.allow('projectmanager', 'sysadmin')
def reboot_instances(self, context, instance_id, **kwargs):
"""instance_id is a list of instance ids"""
- for i in instance_id:
- instance = self._get_instance(context, i)
- rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
- {"method": "reboot_instance",
- "args": {"instance_id": i}})
+ for id_str in instance_id:
+ instance_ref = db.instance_get_by_str(context, id_str)
+ host = db.instance_get_host(context, instance_ref['id'])
+ rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "reboot_instance",
+ "args": {"context": None,
+ "instance_id": instance_ref['id']}})
return defer.succeed(True)
@rbac.allow('projectmanager', 'sysadmin')
def delete_volume(self, context, volume_id, **kwargs):
# TODO: return error if not authorized
- volume = self._get_volume(context, volume_id)
- volume_node = volume['node_name']
- rpc.cast('%s.%s' % (FLAGS.volume_topic, volume_node),
+ volume_ref = db.volume_get_by_str(context, volume_id)
+ host = db.volume_get_host(context, volume_ref['id'])
+ rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "delete_volume",
- "args": {"volume_id": volume_id}})
+ "args": {"context": None,
+ "volume_id": volume_id}})
return defer.succeed(True)
@rbac.allow('all')
@@ -718,23 +692,3 @@ class CloudController(object):
raise exception.ApiError('operation_type must be add or remove')
result = images.modify(context, image_id, operation_type)
return defer.succeed(result)
-
- def update_state(self, topic, value):
- """ accepts status reports from the queue and consolidates them """
- # TODO(jmc): if an instance has disappeared from
- # the node, call instance_death
- if topic == "instances":
- return defer.succeed(True)
- aggregate_state = getattr(self, topic)
- node_name = value.keys()[0]
- items = value[node_name]
-
- logging.debug("Updating %s state for %s" % (topic, node_name))
-
- for item_id in items.keys():
- if (aggregate_state.has_key('pending') and
- aggregate_state['pending'].has_key(item_id)):
- del aggregate_state['pending'][item_id]
- aggregate_state[node_name] = items
-
- return defer.succeed(True)
diff --git a/nova/endpoint/images.py b/nova/endpoint/images.py
index fe7cb5d11..f72c277a0 100644
--- a/nova/endpoint/images.py
+++ b/nova/endpoint/images.py
@@ -21,10 +21,12 @@ Proxy AMI-related calls from the cloud controller, to the running
objectstore daemon.
"""
-import boto.s3.connection
import json
import urllib
+import boto.s3.connection
+
+from nova import exception
from nova import flags
from nova import utils
from nova.auth import manager
@@ -32,6 +34,7 @@ from nova.auth import manager
FLAGS = flags.FLAGS
+
def modify(context, image_id, operation):
conn(context).make_request(
method='POST',
@@ -68,6 +71,15 @@ def list(context, filter_list=[]):
return [i for i in result if i['imageId'] in filter_list]
return result
+def get(context, image_id):
+ """return a image object if the context has permissions"""
+ result = list(context, [image_id])
+ if not result:
+ raise exception.NotFound('Image %s could not be found' % image_id)
+ image = result[0]
+ return image
+
+
def deregister(context, image_id):
""" unregister an image """
conn(context).make_request(
@@ -75,6 +87,7 @@ def deregister(context, image_id):
bucket='_images',
query_args=qs({'image_id': image_id}))
+
def conn(context):
access = manager.AuthManager().get_access_key(context.user,
context.project)
diff --git a/nova/endpoint/rackspace.py b/nova/endpoint/rackspace.py
deleted file mode 100644
index 75b828e91..000000000
--- a/nova/endpoint/rackspace.py
+++ /dev/null
@@ -1,183 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Rackspace API Endpoint
-"""
-
-import json
-import time
-
-import webob.dec
-import webob.exc
-
-from nova import flags
-from nova import rpc
-from nova import utils
-from nova import wsgi
-from nova.auth import manager
-from nova.compute import model as compute
-from nova.network import model as network
-
-
-FLAGS = flags.FLAGS
-flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on')
-
-
-class API(wsgi.Middleware):
- """Entry point for all requests."""
-
- def __init__(self):
- super(API, self).__init__(Router(webob.exc.HTTPNotFound()))
-
- def __call__(self, environ, start_response):
- context = {}
- if "HTTP_X_AUTH_TOKEN" in environ:
- context['user'] = manager.AuthManager().get_user_from_access_key(
- environ['HTTP_X_AUTH_TOKEN'])
- if context['user']:
- context['project'] = manager.AuthManager().get_project(
- context['user'].name)
- if "user" not in context:
- return webob.exc.HTTPForbidden()(environ, start_response)
- environ['nova.context'] = context
- return self.application(environ, start_response)
-
-
-class Router(wsgi.Router):
- """Route requests to the next WSGI application."""
-
- def _build_map(self):
- """Build routing map for authentication and cloud."""
- self._connect("/v1.0", controller=AuthenticationAPI())
- cloud = CloudServerAPI()
- self._connect("/servers", controller=cloud.launch_server,
- conditions={"method": ["POST"]})
- self._connect("/servers/{server_id}", controller=cloud.delete_server,
- conditions={'method': ["DELETE"]})
- self._connect("/servers", controller=cloud)
-
-
-class AuthenticationAPI(wsgi.Application):
- """Handle all authorization requests through WSGI applications."""
-
- @webob.dec.wsgify
- def __call__(self, req): # pylint: disable-msg=W0221
- # TODO(todd): make a actual session with a unique token
- # just pass the auth key back through for now
- res = webob.Response()
- res.status = '204 No Content'
- res.headers.add('X-Server-Management-Url', req.host_url)
- res.headers.add('X-Storage-Url', req.host_url)
- res.headers.add('X-CDN-Managment-Url', req.host_url)
- res.headers.add('X-Auth-Token', req.headers['X-Auth-Key'])
- return res
-
-
-class CloudServerAPI(wsgi.Application):
- """Handle all server requests through WSGI applications."""
-
- def __init__(self):
- super(CloudServerAPI, self).__init__()
- self.instdir = compute.InstanceDirectory()
- self.network = network.PublicNetworkController()
-
- @webob.dec.wsgify
- def __call__(self, req): # pylint: disable-msg=W0221
- value = {"servers": []}
- for inst in self.instdir.all:
- value["servers"].append(self.instance_details(inst))
- return json.dumps(value)
-
- def instance_details(self, inst): # pylint: disable-msg=R0201
- """Build the data structure to represent details for an instance."""
- return {
- "id": inst.get("instance_id", None),
- "imageId": inst.get("image_id", None),
- "flavorId": inst.get("instacne_type", None),
- "hostId": inst.get("node_name", None),
- "status": inst.get("state", "pending"),
- "addresses": {
- "public": [network.get_public_ip_for_instance(
- inst.get("instance_id", None))],
- "private": [inst.get("private_dns_name", None)]},
-
- # implemented only by Rackspace, not AWS
- "name": inst.get("name", "Not-Specified"),
-
- # not supported
- "progress": "Not-Supported",
- "metadata": {
- "Server Label": "Not-Supported",
- "Image Version": "Not-Supported"}}
-
- @webob.dec.wsgify
- def launch_server(self, req):
- """Launch a new instance."""
- data = json.loads(req.body)
- inst = self.build_server_instance(data, req.environ['nova.context'])
- rpc.cast(
- FLAGS.compute_topic, {
- "method": "run_instance",
- "args": {"instance_id": inst.instance_id}})
-
- return json.dumps({"server": self.instance_details(inst)})
-
- def build_server_instance(self, env, context):
- """Build instance data structure and save it to the data store."""
- reservation = utils.generate_uid('r')
- ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
- inst = self.instdir.new()
- inst['name'] = env['server']['name']
- inst['image_id'] = env['server']['imageId']
- inst['instance_type'] = env['server']['flavorId']
- inst['user_id'] = context['user'].id
- inst['project_id'] = context['project'].id
- inst['reservation_id'] = reservation
- inst['launch_time'] = ltime
- inst['mac_address'] = utils.generate_mac()
- address = self.network.allocate_ip(
- inst['user_id'],
- inst['project_id'],
- mac=inst['mac_address'])
- inst['private_dns_name'] = str(address)
- inst['bridge_name'] = network.BridgedNetwork.get_network_for_project(
- inst['user_id'],
- inst['project_id'],
- 'default')['bridge_name']
- # key_data, key_name, ami_launch_index
- # TODO(todd): key data or root password
- inst.save()
- return inst
-
- @webob.dec.wsgify
- @wsgi.route_args
- def delete_server(self, req, route_args): # pylint: disable-msg=R0201
- """Delete an instance."""
- owner_hostname = None
- instance = compute.Instance.lookup(route_args['server_id'])
- if instance:
- owner_hostname = instance["node_name"]
- if not owner_hostname:
- return webob.exc.HTTPNotFound("Did not find image, or it was "
- "not in a running state.")
- rpc_transport = "%s:%s" % (FLAGS.compute_topic, owner_hostname)
- rpc.cast(rpc_transport,
- {"method": "reboot_instance",
- "args": {"instance_id": route_args['server_id']}})
- req.status = "202 Accepted"
diff --git a/nova/exception.py b/nova/exception.py
index 52497a19e..29bcb17f8 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -25,31 +25,39 @@ import logging
import sys
import traceback
+
class Error(Exception):
def __init__(self, message=None):
super(Error, self).__init__(message)
+
class ApiError(Error):
def __init__(self, message='Unknown', code='Unknown'):
self.message = message
self.code = code
super(ApiError, self).__init__('%s: %s'% (code, message))
+
class NotFound(Error):
pass
+
class Duplicate(Error):
pass
+
class NotAuthorized(Error):
pass
+
class NotEmpty(Error):
pass
+
class Invalid(Error):
pass
+
def wrap_exception(f):
def _wrap(*args, **kw):
try:
diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py
index 689194513..068025249 100644
--- a/nova/fakerabbit.py
+++ b/nova/fakerabbit.py
@@ -16,12 +16,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-""" Based a bit on the carrot.backeds.queue backend... but a lot better """
+"""Based a bit on the carrot.backeds.queue backend... but a lot better."""
-from carrot.backends import base
import logging
import Queue as queue
+from carrot.backends import base
+
class Message(base.BaseMessage):
pass
diff --git a/nova/flags.py b/nova/flags.py
index be1e1184a..aa9648843 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -22,6 +22,7 @@ where they're used.
"""
import getopt
+import os
import socket
import sys
@@ -34,7 +35,7 @@ class FlagValues(gflags.FlagValues):
Unknown flags will be ignored when parsing the command line, but the
command line will be kept so that it can be replayed if new flags are
defined after the initial parsing.
-
+
"""
def __init__(self):
@@ -50,7 +51,7 @@ class FlagValues(gflags.FlagValues):
# leftover args at the end
sneaky_unparsed_args = {"value": None}
original_argv = list(argv)
-
+
if self.IsGnuGetOpt():
orig_getopt = getattr(getopt, 'gnu_getopt')
orig_name = 'gnu_getopt'
@@ -74,14 +75,14 @@ class FlagValues(gflags.FlagValues):
unparsed_args = sneaky_unparsed_args['value']
if unparsed_args:
if self.IsGnuGetOpt():
- args = argv[:1] + unparsed
+ args = argv[:1] + unparsed_args
else:
args = argv[:1] + original_argv[-len(unparsed_args):]
else:
args = argv[:1]
finally:
setattr(getopt, orig_name, orig_getopt)
-
+
# Store the arguments for later, we'll need them for new flags
# added at runtime
self.__dict__['__stored_argv'] = original_argv
@@ -92,7 +93,7 @@ class FlagValues(gflags.FlagValues):
def SetDirty(self, name):
"""Mark a flag as dirty so that accessing it will case a reparse."""
self.__dict__['__dirty'].append(name)
-
+
def IsDirty(self, name):
return name in self.__dict__['__dirty']
@@ -113,12 +114,12 @@ class FlagValues(gflags.FlagValues):
for k in self.__dict__['__dirty']:
setattr(self, k, getattr(new_flags, k))
self.ClearDirty()
-
+
def __setitem__(self, name, flag):
gflags.FlagValues.__setitem__(self, name, flag)
if self.WasAlreadyParsed():
self.SetDirty(name)
-
+
def __getitem__(self, name):
if self.IsDirty(name):
self.ParseNewFlags()
@@ -141,6 +142,7 @@ def _wrapper(func):
return _wrapped
+DEFINE = _wrapper(gflags.DEFINE)
DEFINE_string = _wrapper(gflags.DEFINE_string)
DEFINE_integer = _wrapper(gflags.DEFINE_integer)
DEFINE_bool = _wrapper(gflags.DEFINE_bool)
@@ -168,7 +170,6 @@ def DECLARE(name, module_string, flag_values=FLAGS):
DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake')
DEFINE_integer('s3_port', 3333, 's3 port')
DEFINE_string('s3_host', '127.0.0.1', 's3 host')
-#DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on')
DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on')
DEFINE_string('scheduler_topic', 'scheduler', 'the topic scheduler nodes listen on')
DEFINE_string('volume_topic', 'volume', 'the topic volume nodes listen on')
@@ -176,29 +177,25 @@ DEFINE_string('network_topic', 'network', 'the topic network nodes listen on')
DEFINE_bool('verbose', False, 'show debug output')
DEFINE_boolean('fake_rabbit', False, 'use a fake rabbit')
-DEFINE_bool('fake_network', False, 'should we use fake network devices and addresses')
+DEFINE_bool('fake_network', False,
+ 'should we use fake network devices and addresses')
DEFINE_string('rabbit_host', 'localhost', 'rabbit host')
DEFINE_integer('rabbit_port', 5672, 'rabbit port')
DEFINE_string('rabbit_userid', 'guest', 'rabbit userid')
DEFINE_string('rabbit_password', 'guest', 'rabbit password')
DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host')
DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to')
-DEFINE_string('ec2_url',
- 'http://127.0.0.1:8773/services/Cloud',
- 'Url to ec2 api server')
-
-DEFINE_string('default_image',
- 'ami-11111',
- 'default image to use, testing only')
-DEFINE_string('default_kernel',
- 'aki-11111',
- 'default kernel to use, testing only')
-DEFINE_string('default_ramdisk',
- 'ari-11111',
- 'default ramdisk to use, testing only')
-DEFINE_string('default_instance_type',
- 'm1.small',
- 'default instance type to use, testing only')
+DEFINE_string('ec2_url', 'http://127.0.0.1:8773/services/Cloud',
+ 'Url to ec2 api server')
+
+DEFINE_string('default_image', 'ami-11111',
+ 'default image to use, testing only')
+DEFINE_string('default_kernel', 'aki-11111',
+ 'default kernel to use, testing only')
+DEFINE_string('default_ramdisk', 'ari-11111',
+ 'default ramdisk to use, testing only')
+DEFINE_string('default_instance_type', 'm1.small',
+ 'default instance type to use, testing only')
DEFINE_string('vpn_image_id', 'ami-CLOUDPIPE', 'AMI for cloudpipe vpn server')
DEFINE_string('vpn_key_suffix',
@@ -208,10 +205,20 @@ DEFINE_string('vpn_key_suffix',
DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger')
# UNUSED
-DEFINE_string('node_availability_zone',
- 'nova',
- 'availability zone of this node')
-DEFINE_string('node_name',
- socket.gethostname(),
- 'name of this node')
+DEFINE_string('node_availability_zone', 'nova',
+ 'availability zone of this node')
+DEFINE_string('host', socket.gethostname(),
+ 'name of this node')
+
+DEFINE_string('sql_connection',
+ 'sqlite:///%s/nova.sqlite' % os.path.abspath("./"),
+ 'connection string for sql database')
+
+DEFINE_string('compute_manager', 'nova.compute.manager.ComputeManager',
+ 'Manager for compute')
+DEFINE_string('network_manager', 'nova.network.manager.VlanManager',
+ 'Manager for network')
+DEFINE_string('volume_manager', 'nova.volume.manager.AOEManager',
+ 'Manager for volume')
+
diff --git a/nova/image/__init__.py b/nova/image/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/nova/image/__init__.py
diff --git a/nova/image/service.py b/nova/image/service.py
new file mode 100644
index 000000000..1a7a258b7
--- /dev/null
+++ b/nova/image/service.py
@@ -0,0 +1,90 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import cPickle as pickle
+import os.path
+import random
+import string
+
+class ImageService(object):
+ """Provides storage and retrieval of disk image objects."""
+
+ @staticmethod
+ def load():
+ """Factory method to return image service."""
+ #TODO(gundlach): read from config.
+ class_ = LocalImageService
+ return class_()
+
+ def index(self):
+ """
+        Return a list of image-data dicts, one per stored image.
+ """
+
+ def show(self, id):
+ """
+ Returns a dict containing image data for the given opaque image id.
+ """
+
+
+class GlanceImageService(ImageService):
+ """Provides storage and retrieval of disk image objects within Glance."""
+ # TODO(gundlach): once Glance has an API, build this.
+ pass
+
+
+class LocalImageService(ImageService):
+ """Image service storing images to local disk."""
+
+ def __init__(self):
+ self._path = "/tmp/nova/images"
+ try:
+ os.makedirs(self._path)
+ except OSError: # exists
+ pass
+
+ def _path_to(self, image_id=''):
+ return os.path.join(self._path, image_id)
+
+ def _ids(self):
+ """The list of all image ids."""
+ return os.listdir(self._path)
+
+ def index(self):
+ return [ self.show(id) for id in self._ids() ]
+
+ def show(self, id):
+ return pickle.load(open(self._path_to(id)))
+
+ def create(self, data):
+ """
+ Store the image data and return the new image id.
+ """
+ id = ''.join(random.choice(string.letters) for _ in range(20))
+ data['id'] = id
+ self.update(id, data)
+ return id
+
+ def update(self, image_id, data):
+ """Replace the contents of the given image with the new data."""
+ pickle.dump(data, open(self._path_to(image_id), 'w'))
+
+ def delete(self, image_id):
+ """
+ Delete the given image. Raises OSError if the image does not exist.
+ """
+ os.unlink(self._path_to(image_id))
diff --git a/nova/manager.py b/nova/manager.py
new file mode 100644
index 000000000..4cc27f05b
--- /dev/null
+++ b/nova/manager.py
@@ -0,0 +1,36 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Base class for managers of different parts of the system
+"""
+
+from nova import utils
+from nova import flags
+
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('db_driver', 'nova.db.api',
+                    'driver to use for database access')
+
+
+class Manager(object):
+ """DB driver is injected in the init method"""
+ def __init__(self, db_driver=None):
+ if not db_driver:
+ db_driver = FLAGS.db_driver
+ self.db = utils.import_object(db_driver) # pylint: disable-msg=C0103
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index 15050adaf..1506e85ad 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -18,116 +18,123 @@ Implements vlans, bridges, and iptables rules using linux utilities.
"""
import logging
-import signal
import os
+import signal
-# todo(ja): does the definition of network_path belong here?
+# TODO(ja): does the definition of network_path belong here?
+from nova import db
from nova import flags
from nova import utils
-FLAGS = flags.FLAGS
+FLAGS = flags.FLAGS
flags.DEFINE_string('dhcpbridge_flagfile',
'/etc/nova/nova-dhcpbridge.conf',
'location of flagfile for dhcpbridge')
-
-def execute(cmd, addl_env=None):
- """Wrapper around utils.execute for fake_network"""
- if FLAGS.fake_network:
- logging.debug("FAKE NET: %s", cmd)
- return "fake", 0
- else:
- return utils.execute(cmd, addl_env=addl_env)
-
-
-def runthis(desc, cmd):
- """Wrapper around utils.runthis for fake_network"""
- if FLAGS.fake_network:
- return execute(cmd)
- else:
- return utils.runthis(desc, cmd)
-
-
-def device_exists(device):
- """Check if ethernet device exists"""
- (_out, err) = execute("ifconfig %s" % device)
- return not err
-
-
-def confirm_rule(cmd):
- """Delete and re-add iptables rule"""
- execute("sudo iptables --delete %s" % (cmd))
- execute("sudo iptables -I %s" % (cmd))
-
-
-def remove_rule(cmd):
- """Remove iptables rule"""
- execute("sudo iptables --delete %s" % (cmd))
-
-
-def bind_public_ip(public_ip, interface):
- """Bind ip to an interface"""
- runthis("Binding IP to interface: %s",
- "sudo ip addr add %s dev %s" % (public_ip, interface))
-
-
-def unbind_public_ip(public_ip, interface):
- """Unbind a public ip from an interface"""
- runthis("Binding IP to interface: %s",
- "sudo ip addr del %s dev %s" % (public_ip, interface))
-
-
-def vlan_create(net):
- """Create a vlan on on a bridge device unless vlan already exists"""
- if not device_exists("vlan%s" % net['vlan']):
- logging.debug("Starting VLAN inteface for %s network", (net['vlan']))
- execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD")
- execute("sudo vconfig add %s %s" % (FLAGS.bridge_dev, net['vlan']))
- execute("sudo ifconfig vlan%s up" % (net['vlan']))
-
-
-def bridge_create(net):
- """Create a bridge on a vlan unless it already exists"""
- if not device_exists(net['bridge_name']):
- logging.debug("Starting Bridge inteface for %s network", (net['vlan']))
- execute("sudo brctl addbr %s" % (net['bridge_name']))
- execute("sudo brctl setfd %s 0" % (net.bridge_name))
- # execute("sudo brctl setageing %s 10" % (net.bridge_name))
- execute("sudo brctl stp %s off" % (net['bridge_name']))
- execute("sudo brctl addif %s vlan%s" % (net['bridge_name'],
- net['vlan']))
- if net.bridge_gets_ip:
- execute("sudo ifconfig %s %s broadcast %s netmask %s up" % \
- (net['bridge_name'], net.gateway, net.broadcast, net.netmask))
- confirm_rule("FORWARD --in-interface %s -j ACCEPT" %
- (net['bridge_name']))
+flags.DEFINE_string('networks_path', utils.abspath('../networks'),
+ 'Location to keep network config files')
+flags.DEFINE_string('public_interface', 'vlan1',
+ 'Interface for public IP addresses')
+flags.DEFINE_string('bridge_dev', 'eth0',
+ 'network device for bridges')
+
+
+def bind_floating_ip(floating_ip):
+ """Bind ip to public interface"""
+ _execute("sudo ip addr add %s dev %s" % (floating_ip,
+ FLAGS.public_interface))
+
+
+def unbind_floating_ip(floating_ip):
+ """Unbind a public ip from public interface"""
+ _execute("sudo ip addr del %s dev %s" % (floating_ip,
+ FLAGS.public_interface))
+
+
+def ensure_vlan_forward(public_ip, port, private_ip):
+ """Sets up forwarding rules for vlan"""
+ _confirm_rule("FORWARD -d %s -p udp --dport 1194 -j ACCEPT" % private_ip)
+ _confirm_rule(
+ "PREROUTING -t nat -d %s -p udp --dport %s -j DNAT --to %s:1194"
+ % (public_ip, port, private_ip))
+
+
+DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)]
+
+
+def ensure_floating_forward(floating_ip, fixed_ip):
+ """Ensure floating ip forwarding rule"""
+ _confirm_rule("PREROUTING -t nat -d %s -j DNAT --to %s"
+ % (floating_ip, fixed_ip))
+ _confirm_rule("POSTROUTING -t nat -s %s -j SNAT --to %s"
+ % (fixed_ip, floating_ip))
+ # TODO(joshua): Get these from the secgroup datastore entries
+ _confirm_rule("FORWARD -d %s -p icmp -j ACCEPT"
+ % (fixed_ip))
+ for (protocol, port) in DEFAULT_PORTS:
+ _confirm_rule(
+ "FORWARD -d %s -p %s --dport %s -j ACCEPT"
+ % (fixed_ip, protocol, port))
+
+
+def remove_floating_forward(floating_ip, fixed_ip):
+ """Remove forwarding for floating ip"""
+ _remove_rule("PREROUTING -t nat -d %s -j DNAT --to %s"
+ % (floating_ip, fixed_ip))
+ _remove_rule("POSTROUTING -t nat -s %s -j SNAT --to %s"
+ % (fixed_ip, floating_ip))
+ _remove_rule("FORWARD -d %s -p icmp -j ACCEPT"
+ % (fixed_ip))
+ for (protocol, port) in DEFAULT_PORTS:
+ _remove_rule("FORWARD -d %s -p %s --dport %s -j ACCEPT"
+ % (fixed_ip, protocol, port))
+
+
+def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
+ """Create a vlan and bridge unless they already exist"""
+ interface = ensure_vlan(vlan_num)
+ ensure_bridge(bridge, interface, net_attrs)
+
+
+def ensure_vlan(vlan_num):
+ """Create a vlan unless it already exists"""
+ interface = "vlan%s" % vlan_num
+ if not _device_exists(interface):
+ logging.debug("Starting VLAN inteface %s", interface)
+ _execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD")
+ _execute("sudo vconfig add %s %s" % (FLAGS.bridge_dev, vlan_num))
+ _execute("sudo ifconfig %s up" % interface)
+ return interface
+
+
+def ensure_bridge(bridge, interface, net_attrs=None):
+ """Create a bridge unless it already exists"""
+ if not _device_exists(bridge):
+ logging.debug("Starting Bridge inteface for %s", interface)
+ _execute("sudo brctl addbr %s" % bridge)
+ _execute("sudo brctl setfd %s 0" % bridge)
+ # _execute("sudo brctl setageing %s 10" % bridge)
+ _execute("sudo brctl stp %s off" % bridge)
+ _execute("sudo brctl addif %s %s" % (bridge, interface))
+ if net_attrs:
+ _execute("sudo ifconfig %s %s broadcast %s netmask %s up" % \
+ (bridge,
+ net_attrs['gateway'],
+ net_attrs['broadcast'],
+ net_attrs['netmask']))
+ _confirm_rule("FORWARD --in-interface %s -j ACCEPT" % bridge)
else:
- execute("sudo ifconfig %s up" % net['bridge_name'])
+ _execute("sudo ifconfig %s up" % bridge)
-def _dnsmasq_cmd(net):
- """Builds dnsmasq command"""
- cmd = ['sudo -E dnsmasq',
- ' --strict-order',
- ' --bind-interfaces',
- ' --conf-file=',
- ' --pid-file=%s' % dhcp_file(net['vlan'], 'pid'),
- ' --listen-address=%s' % net.dhcp_listen_address,
- ' --except-interface=lo',
- ' --dhcp-range=%s,static,120s' % net.dhcp_range_start,
- ' --dhcp-hostsfile=%s' % dhcp_file(net['vlan'], 'conf'),
- ' --dhcp-script=%s' % bin_file('nova-dhcpbridge'),
- ' --leasefile-ro']
- return ''.join(cmd)
-
-
-def host_dhcp(address):
- """Return a host string for an address object"""
- return "%s,%s.novalocal,%s" % (address['mac'],
- address['hostname'],
- address.address)
+def get_dhcp_hosts(context, network_id):
+ """Get a string containing a network's hosts config in dnsmasq format"""
+ hosts = []
+ for fixed_ip in db.network_get_associated_fixed_ips(context, network_id):
+ hosts.append(_host_dhcp(fixed_ip['str_id']))
+ return '\n'.join(hosts)
# TODO(ja): if the system has restarted or pid numbers have wrapped
@@ -135,17 +142,17 @@ def host_dhcp(address):
# dnsmasq. As well, sending a HUP only reloads the hostfile,
# so any configuration options (like dchp-range, vlan, ...)
# aren't reloaded
-def start_dnsmasq(network):
+def update_dhcp(context, network_id):
"""(Re)starts a dnsmasq server for a given network
if a dnsmasq instance is already running then send a HUP
signal causing it to reload, otherwise spawn a new instance
"""
- with open(dhcp_file(network['vlan'], 'conf'), 'w') as f:
- for address in network.assigned_objs:
- f.write("%s\n" % host_dhcp(address))
+ network_ref = db.network_get(context, network_id)
+ with open(_dhcp_file(network_ref['vlan'], 'conf'), 'w') as f:
+ f.write(get_dhcp_hosts(context, network_id))
- pid = dnsmasq_pid_for(network)
+ pid = _dnsmasq_pid_for(network_ref['vlan'])
# if dnsmasq is already running, then tell it to reload
if pid:
@@ -154,38 +161,89 @@ def start_dnsmasq(network):
try:
os.kill(pid, signal.SIGHUP)
return
- except Exception as exc: # pylint: disable=W0703
+ except Exception as exc: # pylint: disable-msg=W0703
logging.debug("Hupping dnsmasq threw %s", exc)
# FLAGFILE and DNSMASQ_INTERFACE in env
env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile,
- 'DNSMASQ_INTERFACE': network['bridge_name']}
- execute(_dnsmasq_cmd(network), addl_env=env)
+ 'DNSMASQ_INTERFACE': network_ref['bridge']}
+ command = _dnsmasq_cmd(network_ref)
+ _execute(command, addl_env=env)
+
+
+def _host_dhcp(address):
+ """Return a host string for an address"""
+ instance_ref = db.fixed_ip_get_instance(None, address)
+ return "%s,%s.novalocal,%s" % (instance_ref['mac_address'],
+ instance_ref['hostname'],
+ address)
+
+
+def _execute(cmd, *args, **kwargs):
+ """Wrapper around utils._execute for fake_network"""
+ if FLAGS.fake_network:
+ logging.debug("FAKE NET: %s", cmd)
+ return "fake", 0
+ else:
+ return utils.execute(cmd, *args, **kwargs)
+
+
+def _device_exists(device):
+ """Check if ethernet device exists"""
+ (_out, err) = _execute("ifconfig %s" % device, check_exit_code=False)
+ return not err
+
+
+def _confirm_rule(cmd):
+ """Delete and re-add iptables rule"""
+ _execute("sudo iptables --delete %s" % (cmd), check_exit_code=False)
+ _execute("sudo iptables -I %s" % (cmd))
+
+
+def _remove_rule(cmd):
+ """Remove iptables rule"""
+ _execute("sudo iptables --delete %s" % (cmd))
+
+
+def _dnsmasq_cmd(net):
+ """Builds dnsmasq command"""
+ cmd = ['sudo -E dnsmasq',
+ ' --strict-order',
+ ' --bind-interfaces',
+ ' --conf-file=',
+ ' --pid-file=%s' % _dhcp_file(net['vlan'], 'pid'),
+ ' --listen-address=%s' % net['gateway'],
+ ' --except-interface=lo',
+ ' --dhcp-range=%s,static,120s' % net['dhcp_start'],
+ ' --dhcp-hostsfile=%s' % _dhcp_file(net['vlan'], 'conf'),
+ ' --dhcp-script=%s' % _bin_file('nova-dhcpbridge'),
+ ' --leasefile-ro']
+ return ''.join(cmd)
-def stop_dnsmasq(network):
+def _stop_dnsmasq(network):
"""Stops the dnsmasq instance for a given network"""
- pid = dnsmasq_pid_for(network)
+ pid = _dnsmasq_pid_for(network)
if pid:
try:
os.kill(pid, signal.SIGTERM)
- except Exception as exc: # pylint: disable=W0703
+ except Exception as exc: # pylint: disable-msg=W0703
logging.debug("Killing dnsmasq threw %s", exc)
-def dhcp_file(vlan, kind):
+def _dhcp_file(vlan, kind):
"""Return path to a pid, leases or conf file for a vlan"""
return os.path.abspath("%s/nova-%s.%s" % (FLAGS.networks_path, vlan, kind))
-def bin_file(script):
+def _bin_file(script):
"""Return the absolute path to scipt in the bin directory"""
return os.path.abspath(os.path.join(__file__, "../../../bin", script))
-def dnsmasq_pid_for(network):
+def _dnsmasq_pid_for(vlan):
"""Returns he pid for prior dnsmasq instance for a vlan
Returns None if no pid file exists
@@ -193,7 +251,7 @@ def dnsmasq_pid_for(network):
If machine has rebooted pid might be incorrect (caller should check)
"""
- pid_file = dhcp_file(network['vlan'], 'pid')
+ pid_file = _dhcp_file(vlan, 'pid')
if os.path.exists(pid_file):
with open(pid_file, 'r') as f:
diff --git a/nova/network/manager.py b/nova/network/manager.py
new file mode 100644
index 000000000..dbb8e66da
--- /dev/null
+++ b/nova/network/manager.py
@@ -0,0 +1,328 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Network Hosts are responsible for allocating ips and setting up network
+"""
+
+import logging
+import math
+
+import IPy
+
+from nova import db
+from nova import exception
+from nova import flags
+from nova import manager
+from nova import utils
+
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('flat_network_bridge', 'br100',
+ 'Bridge for simple network instances')
+flags.DEFINE_list('flat_network_ips',
+ ['192.168.0.2', '192.168.0.3', '192.168.0.4'],
+ 'Available ips for simple network')
+flags.DEFINE_string('flat_network_network', '192.168.0.0',
+ 'Network for simple network')
+flags.DEFINE_string('flat_network_netmask', '255.255.255.0',
+ 'Netmask for simple network')
+flags.DEFINE_string('flat_network_gateway', '192.168.0.1',
+ 'Gateway for simple network')
+flags.DEFINE_string('flat_network_broadcast', '192.168.0.255',
+ 'Broadcast for simple network')
+flags.DEFINE_string('flat_network_dns', '8.8.4.4',
+ 'Dns for simple network')
+flags.DEFINE_integer('vlan_start', 100, 'First VLAN for private networks')
+flags.DEFINE_integer('num_networks', 1000, 'Number of networks to support')
+flags.DEFINE_string('vpn_ip', utils.get_my_ip(),
+ 'Public IP for the cloudpipe VPN servers')
+flags.DEFINE_integer('vpn_start', 1000, 'First Vpn port for private networks')
+flags.DEFINE_integer('network_size', 256,
+ 'Number of addresses in each private subnet')
+flags.DEFINE_string('public_range', '4.4.4.0/24', 'Public IP address block')
+flags.DEFINE_string('private_range', '10.0.0.0/8', 'Private IP address block')
+flags.DEFINE_integer('cnt_vpn_clients', 5,
+ 'Number of addresses reserved for vpn clients')
+flags.DEFINE_string('network_driver', 'nova.network.linux_net',
+ 'Driver to use for network creation')
+
+
+class AddressAlreadyAllocated(exception.Error):
+ """Address was already allocated"""
+ pass
+
+
+class AddressNotAllocated(exception.Error):
+ """Address has not been allocated"""
+ pass
+
+
+class NetworkManager(manager.Manager):
+ """Implements common network manager functionality
+
+ This class must be subclassed.
+ """
+ def __init__(self, network_driver=None, *args, **kwargs):
+ if not network_driver:
+ network_driver = FLAGS.network_driver
+ self.driver = utils.import_object(network_driver)
+ super(NetworkManager, self).__init__(*args, **kwargs)
+
+ def set_network_host(self, context, project_id):
+ """Safely sets the host of the projects network"""
+ logging.debug("setting network host")
+ network_ref = self.db.project_get_network(context, project_id)
+ # TODO(vish): can we minimize db access by just getting the
+ # id here instead of the ref?
+ network_id = network_ref['id']
+ host = self.db.network_set_host(context,
+ network_id,
+ FLAGS.host)
+ self._on_set_network_host(context, network_id)
+ return host
+
+ def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
+ """Gets a fixed ip from the pool"""
+ raise NotImplementedError()
+
+ def setup_fixed_ip(self, context, address):
+ """Sets up rules for fixed ip"""
+ raise NotImplementedError()
+
+ def _on_set_network_host(self, context, network_id):
+ """Called when this host becomes the host for a project"""
+ raise NotImplementedError()
+
+ def setup_compute_network(self, context, project_id):
+ """Sets up matching network for compute hosts"""
+ raise NotImplementedError()
+
+ def allocate_floating_ip(self, context, project_id):
+ """Gets a floating ip from the pool"""
+ # TODO(vish): add floating ips through manage command
+ return self.db.floating_ip_allocate_address(context,
+ FLAGS.host,
+ project_id)
+
+ def associate_floating_ip(self, context, floating_address, fixed_address):
+ """Associates a floating ip to a fixed ip"""
+ self.db.floating_ip_fixed_ip_associate(context,
+ floating_address,
+ fixed_address)
+ self.driver.bind_floating_ip(floating_address)
+ self.driver.ensure_floating_forward(floating_address, fixed_address)
+
+ def disassociate_floating_ip(self, context, floating_address):
+ """Disassociates a floating ip"""
+ fixed_address = self.db.floating_ip_disassociate(context,
+ floating_address)
+ self.driver.unbind_floating_ip(floating_address)
+ self.driver.remove_floating_forward(floating_address, fixed_address)
+
+ def deallocate_floating_ip(self, context, floating_address):
+ """Returns a floating ip to the pool"""
+ self.db.floating_ip_deallocate(context, floating_address)
+
+ @property
+ def _bottom_reserved_ips(self): # pylint: disable-msg=R0201
+ """Number of reserved ips at the bottom of the range"""
+ return 2 # network, gateway
+
+ @property
+ def _top_reserved_ips(self): # pylint: disable-msg=R0201
+ """Number of reserved ips at the top of the range"""
+ return 1 # broadcast
+
+ def _create_fixed_ips(self, context, network_id):
+ """Create all fixed ips for network"""
+ network_ref = self.db.network_get(context, network_id)
+ # NOTE(vish): should these be properties of the network as opposed
+ # to properties of the manager class?
+ bottom_reserved = self._bottom_reserved_ips
+ top_reserved = self._top_reserved_ips
+ project_net = IPy.IP(network_ref['cidr'])
+ num_ips = len(project_net)
+ for index in range(num_ips):
+ address = str(project_net[index])
+ if index < bottom_reserved or num_ips - index < top_reserved:
+ reserved = True
+ else:
+ reserved = False
+ self.db.fixed_ip_create(context, {'network_id': network_id,
+ 'address': address,
+ 'reserved': reserved})
+
+
+class FlatManager(NetworkManager):
+ """Basic network where no vlans are used"""
+
+ def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
+ """Gets a fixed ip from the pool"""
+ network_ref = self.db.project_get_network(context, context.project.id)
+ address = self.db.fixed_ip_allocate(context, network_ref['id'])
+ self.db.fixed_ip_instance_associate(context, address, instance_id)
+ return address
+
+ def setup_compute_network(self, context, project_id):
+ """Network is created manually"""
+ pass
+
+ def setup_fixed_ip(self, context, address):
+ """Currently no setup"""
+ pass
+
+ def _on_set_network_host(self, context, network_id):
+ """Called when this host becomes the host for a project"""
+ # NOTE(vish): should there be two types of network objects
+ # in the datastore?
+ net = {}
+ net['injected'] = True
+ net['network_str'] = FLAGS.flat_network_network
+ net['netmask'] = FLAGS.flat_network_netmask
+ net['bridge'] = FLAGS.flat_network_bridge
+ net['gateway'] = FLAGS.flat_network_gateway
+ net['broadcast'] = FLAGS.flat_network_broadcast
+ net['dns'] = FLAGS.flat_network_dns
+ self.db.network_update(context, network_id, net)
+ # NOTE(vish): Right now we are putting all of the fixed ips in
+ # one large pool, but ultimately it may be better to
+ # have each network manager have its own network that
+ # it is responsible for and its own pool of ips.
+ for address in FLAGS.flat_network_ips:
+ self.db.fixed_ip_create(context, {'address': address})
+
+
+class VlanManager(NetworkManager):
+ """Vlan network with dhcp"""
+ def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
+ """Gets a fixed ip from the pool"""
+ network_ref = self.db.project_get_network(context, context.project.id)
+ if kwargs.get('vpn', None):
+ address = self._allocate_vpn_ip(context, network_ref['id'])
+ else:
+ address = self.db.fixed_ip_allocate(context,
+ network_ref['id'])
+ self.db.fixed_ip_instance_associate(context, address, instance_id)
+ return address
+
+ def setup_fixed_ip(self, context, address):
+ """Sets forwarding rules and dhcp for fixed ip"""
+ fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
+ network_ref = self.db.fixed_ip_get_network(context, address)
+ if self.db.instance_is_vpn(context, fixed_ip_ref['instance_id']):
+ self.driver.ensure_vlan_forward(network_ref['vpn_public_address'],
+ network_ref['vpn_public_port'],
+ network_ref['vpn_private_address'])
+ self.driver.update_dhcp(context, network_ref['id'])
+
+ def lease_fixed_ip(self, context, address):
+ """Called by dhcp-bridge when ip is leased"""
+ logging.debug("Leasing IP %s", address)
+ fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
+ if not fixed_ip_ref['allocated']:
+ raise AddressNotAllocated(address)
+ self.db.fixed_ip_update(context,
+ fixed_ip_ref['str_id'],
+ {'leased': True})
+
+ def release_fixed_ip(self, context, address):
+ """Called by dhcp-bridge when ip is released"""
+ logging.debug("Releasing IP %s", address)
+ self.db.fixed_ip_update(context, address, {'allocated': False,
+ 'leased': False})
+ self.db.fixed_ip_instance_disassociate(context, address)
+
+ def allocate_network(self, context, project_id):
+ """Set up the network"""
+ self._ensure_indexes(context)
+ network_ref = db.network_create(context, {'project_id': project_id})
+ network_id = network_ref['id']
+ private_net = IPy.IP(FLAGS.private_range)
+ index = db.network_get_index(context, network_id)
+ vlan = FLAGS.vlan_start + index
+ start = index * FLAGS.network_size
+ significant_bits = 32 - int(math.log(FLAGS.network_size, 2))
+ cidr = "%s/%s" % (private_net[start], significant_bits)
+ project_net = IPy.IP(cidr)
+ net = {}
+ net['cidr'] = cidr
+ # NOTE(vish): we could turn these into properties
+ net['netmask'] = str(project_net.netmask())
+ net['gateway'] = str(project_net[1])
+ net['broadcast'] = str(project_net.broadcast())
+ net['vpn_private_address'] = str(project_net[2])
+ net['dhcp_start'] = str(project_net[3])
+ net['vlan'] = vlan
+ net['bridge'] = 'br%s' % vlan
+ net['vpn_public_address'] = FLAGS.vpn_ip
+ net['vpn_public_port'] = FLAGS.vpn_start + index
+ db.network_update(context, network_id, net)
+ self._create_fixed_ips(context, network_id)
+ return network_id
+
+ def setup_compute_network(self, context, project_id):
+ """Sets up matching network for compute hosts"""
+ network_ref = self.db.project_get_network(context, project_id)
+ self.driver.ensure_vlan_bridge(network_ref['vlan'],
+ network_ref['bridge'])
+
+ def restart_nets(self):
+ """Ensure the network for each user is enabled"""
+ # TODO(vish): Implement this
+ pass
+
+ @staticmethod
+ def _allocate_vpn_ip(context, network_id):
+ """Allocate vpn ip for network"""
+ # TODO(vish): There is a possible concurrency issue here.
+ network_ref = db.network_get(context, network_id)
+ address = network_ref['vpn_private_address']
+ fixed_ip_ref = db.fixed_ip_get_by_address(context, address)
+ # TODO(vish): Should this be fixed_ip_is_allocated?
+ if fixed_ip_ref['allocated']:
+ raise AddressAlreadyAllocated()
+ db.fixed_ip_update(context, fixed_ip_ref['id'], {'allocated': True})
+ return fixed_ip_ref['str_id']
+
+ def _ensure_indexes(self, context):
+ """Ensure the indexes for the network exist
+
+ This could use a manage command instead of keying off of a flag"""
+ if not self.db.network_index_count(context):
+ for index in range(FLAGS.num_networks):
+ self.db.network_index_create(context, {'index': index})
+
+ def _on_set_network_host(self, context, network_id):
+ """Called when this host becomes the host for a project"""
+ network_ref = self.db.network_get(context, network_id)
+ self.driver.ensure_vlan_bridge(network_ref['vlan'],
+ network_ref['bridge'],
+ network_ref)
+
+ @property
+ def _bottom_reserved_ips(self):
+ """Number of reserved ips at the bottom of the range"""
+ return super(VlanManager, self)._bottom_reserved_ips + 1 # vpn server
+
+ @property
+ def _top_reserved_ips(self):
+ """Number of reserved ips at the top of the range"""
+ parent_reserved = super(VlanManager, self)._top_reserved_ips
+ return parent_reserved + FLAGS.cnt_vpn_clients
+
diff --git a/nova/network/model.py b/nova/network/model.py
deleted file mode 100644
index 1a958b564..000000000
--- a/nova/network/model.py
+++ /dev/null
@@ -1,633 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Model Classes for network control, including VLANs, DHCP, and IP allocation.
-"""
-
-import IPy
-import logging
-import os
-import time
-
-from nova import datastore
-from nova import exception as nova_exception
-from nova import flags
-from nova import utils
-from nova.auth import manager
-from nova.network import exception
-from nova.network import linux_net
-
-
-FLAGS = flags.FLAGS
-flags.DEFINE_string('networks_path', utils.abspath('../networks'),
- 'Location to keep network config files')
-flags.DEFINE_integer('public_vlan', 1, 'VLAN for public IP addresses')
-flags.DEFINE_string('public_interface', 'vlan1',
- 'Interface for public IP addresses')
-flags.DEFINE_string('bridge_dev', 'eth1',
- 'network device for bridges')
-flags.DEFINE_integer('vlan_start', 100, 'First VLAN for private networks')
-flags.DEFINE_integer('vlan_end', 4093, 'Last VLAN for private networks')
-flags.DEFINE_integer('network_size', 256,
- 'Number of addresses in each private subnet')
-flags.DEFINE_string('public_range', '4.4.4.0/24', 'Public IP address block')
-flags.DEFINE_string('private_range', '10.0.0.0/8', 'Private IP address block')
-flags.DEFINE_integer('cnt_vpn_clients', 5,
- 'Number of addresses reserved for vpn clients')
-flags.DEFINE_integer('cloudpipe_start_port', 12000,
- 'Starting port for mapped CloudPipe external ports')
-
-logging.getLogger().setLevel(logging.DEBUG)
-
-
-class Vlan(datastore.BasicModel):
- """Tracks vlans assigned to project it the datastore"""
- def __init__(self, project, vlan): # pylint: disable=W0231
- """
- Since we don't want to try and find a vlan by its identifier,
- but by a project id, we don't call super-init.
- """
- self.project_id = project
- self.vlan_id = vlan
-
- @property
- def identifier(self):
- """Datastore identifier"""
- return "%s:%s" % (self.project_id, self.vlan_id)
-
- @classmethod
- def create(cls, project, vlan):
- """Create a Vlan object"""
- instance = cls(project, vlan)
- instance.save()
- return instance
-
- @classmethod
- @datastore.absorb_connection_error
- def lookup(cls, project):
- """Returns object by project if it exists in datastore or None"""
- set_name = cls._redis_set_name(cls.__name__)
- vlan = datastore.Redis.instance().hget(set_name, project)
- if vlan:
- return cls(project, vlan)
- else:
- return None
-
- @classmethod
- @datastore.absorb_connection_error
- def dict_by_project(cls):
- """A hash of project:vlan"""
- set_name = cls._redis_set_name(cls.__name__)
- return datastore.Redis.instance().hgetall(set_name) or {}
-
- @classmethod
- @datastore.absorb_connection_error
- def dict_by_vlan(cls):
- """A hash of vlan:project"""
- set_name = cls._redis_set_name(cls.__name__)
- retvals = {}
- hashset = datastore.Redis.instance().hgetall(set_name) or {}
- for (key, val) in hashset.iteritems():
- retvals[val] = key
- return retvals
-
- @classmethod
- @datastore.absorb_connection_error
- def all(cls):
- set_name = cls._redis_set_name(cls.__name__)
- elements = datastore.Redis.instance().hgetall(set_name)
- for project in elements:
- yield cls(project, elements[project])
-
- @datastore.absorb_connection_error
- def save(self):
- """
- Vlan saves state into a giant hash named "vlans", with keys of
- project_id and value of vlan number. Therefore, we skip the
- default way of saving into "vlan:ID" and adding to a set of "vlans".
- """
- set_name = self._redis_set_name(self.__class__.__name__)
- datastore.Redis.instance().hset(set_name,
- self.project_id,
- self.vlan_id)
-
- @datastore.absorb_connection_error
- def destroy(self):
- """Removes the object from the datastore"""
- set_name = self._redis_set_name(self.__class__.__name__)
- datastore.Redis.instance().hdel(set_name, self.project_id)
-
- def subnet(self):
- """Returns a string containing the subnet"""
- vlan = int(self.vlan_id)
- network = IPy.IP(FLAGS.private_range)
- start = (vlan - FLAGS.vlan_start) * FLAGS.network_size
- # minus one for the gateway.
- return "%s-%s" % (network[start],
- network[start + FLAGS.network_size - 1])
-
-
-class FixedIp(datastore.BasicModel):
- """Represents a fixed ip in the datastore"""
-
- def __init__(self, address):
- self.address = address
- super(FixedIp, self).__init__()
-
- @property
- def identifier(self):
- return self.address
-
- # NOTE(vish): address states allocated, leased, deallocated
- def default_state(self):
- return {'address': self.address,
- 'state': 'none'}
-
- @classmethod
- # pylint: disable=R0913
- def create(cls, user_id, project_id, address, mac, hostname, network_id):
- """Creates an FixedIp object"""
- addr = cls(address)
- addr['user_id'] = user_id
- addr['project_id'] = project_id
- addr['mac'] = mac
- if hostname is None:
- hostname = "ip-%s" % address.replace('.', '-')
- addr['hostname'] = hostname
- addr['network_id'] = network_id
- addr['state'] = 'allocated'
- addr.save()
- return addr
-
- def save(self):
- is_new = self.is_new_record()
- success = super(FixedIp, self).save()
- if success and is_new:
- self.associate_with("network", self['network_id'])
-
- def destroy(self):
- self.unassociate_with("network", self['network_id'])
- super(FixedIp, self).destroy()
-
-
-class ElasticIp(FixedIp):
- """Represents an elastic ip in the datastore"""
- override_type = "address"
-
- def default_state(self):
- return {'address': self.address,
- 'instance_id': 'available',
- 'private_ip': 'available'}
-
-
-# CLEANUP:
-# TODO(ja): does vlanpool "keeper" need to know the min/max -
-# shouldn't FLAGS always win?
-class BaseNetwork(datastore.BasicModel):
- """Implements basic logic for allocating ips in a network"""
- override_type = 'network'
- address_class = FixedIp
-
- @property
- def identifier(self):
- """Datastore identifier"""
- return self.network_id
-
- def default_state(self):
- """Default values for new objects"""
- return {'network_id': self.network_id, 'network_str': self.network_str}
-
- @classmethod
- # pylint: disable=R0913
- def create(cls, user_id, project_id, security_group, vlan, network_str):
- """Create a BaseNetwork object"""
- network_id = "%s:%s" % (project_id, security_group)
- net = cls(network_id, network_str)
- net['user_id'] = user_id
- net['project_id'] = project_id
- net["vlan"] = vlan
- net["bridge_name"] = "br%s" % vlan
- net.save()
- return net
-
- def __init__(self, network_id, network_str=None):
- self.network_id = network_id
- self.network_str = network_str
- super(BaseNetwork, self).__init__()
- self.save()
-
- @property
- def network(self):
- """Returns a string representing the network"""
- return IPy.IP(self['network_str'])
-
- @property
- def netmask(self):
- """Returns the netmask of this network"""
- return self.network.netmask()
-
- @property
- def gateway(self):
- """Returns the network gateway address"""
- return self.network[1]
-
- @property
- def broadcast(self):
- """Returns the network broadcast address"""
- return self.network.broadcast()
-
- @property
- def bridge_name(self):
- """Returns the bridge associated with this network"""
- return "br%s" % (self["vlan"])
-
- @property
- def user(self):
- """Returns the user associated with this network"""
- return manager.AuthManager().get_user(self['user_id'])
-
- @property
- def project(self):
- """Returns the project associated with this network"""
- return manager.AuthManager().get_project(self['project_id'])
-
- # pylint: disable=R0913
- def _add_host(self, user_id, project_id, ip_address, mac, hostname):
- """Add a host to the datastore"""
- self.address_class.create(user_id, project_id, ip_address,
- mac, hostname, self.identifier)
-
- def _rem_host(self, ip_address):
- """Remove a host from the datastore"""
- self.address_class(ip_address).destroy()
-
- @property
- def assigned(self):
- """Returns a list of all assigned addresses"""
- return self.address_class.associated_keys('network', self.identifier)
-
- @property
- def assigned_objs(self):
- """Returns a list of all assigned addresses as objects"""
- return self.address_class.associated_to('network', self.identifier)
-
- def get_address(self, ip_address):
- """Returns a specific ip as an object"""
- if ip_address in self.assigned:
- return self.address_class(ip_address)
- return None
-
- @property
- def available(self):
- """Returns a list of all available addresses in the network"""
- for idx in range(self.num_bottom_reserved_ips,
- len(self.network) - self.num_top_reserved_ips):
- address = str(self.network[idx])
- if not address in self.assigned:
- yield address
-
- @property
- def num_bottom_reserved_ips(self):
- """Returns number of ips reserved at the bottom of the range"""
- return 2 # Network, Gateway
-
- @property
- def num_top_reserved_ips(self):
- """Returns number of ips reserved at the top of the range"""
- return 1 # Broadcast
-
- def allocate_ip(self, user_id, project_id, mac, hostname=None):
- """Allocates an ip to a mac address"""
- for address in self.available:
- logging.debug("Allocating IP %s to %s", address, project_id)
- self._add_host(user_id, project_id, address, mac, hostname)
- self.express(address=address)
- return address
- raise exception.NoMoreAddresses("Project %s with network %s" %
- (project_id, str(self.network)))
-
- def lease_ip(self, ip_str):
- """Called when DHCP lease is activated"""
- if not ip_str in self.assigned:
- raise exception.AddressNotAllocated()
- address = self.get_address(ip_str)
- if address:
- logging.debug("Leasing allocated IP %s", ip_str)
- address['state'] = 'leased'
- address.save()
-
- def release_ip(self, ip_str):
- """Called when DHCP lease expires
-
- Removes the ip from the assigned list"""
- if not ip_str in self.assigned:
- raise exception.AddressNotAllocated()
- logging.debug("Releasing IP %s", ip_str)
- self._rem_host(ip_str)
- self.deexpress(address=ip_str)
-
- def deallocate_ip(self, ip_str):
- """Deallocates an allocated ip"""
- if not ip_str in self.assigned:
- raise exception.AddressNotAllocated()
- address = self.get_address(ip_str)
- if address:
- if address['state'] != 'leased':
- # NOTE(vish): address hasn't been leased, so release it
- self.release_ip(ip_str)
- else:
- logging.debug("Deallocating allocated IP %s", ip_str)
- address['state'] == 'deallocated'
- address.save()
-
- def express(self, address=None):
- """Set up network. Implemented in subclasses"""
- pass
-
- def deexpress(self, address=None):
- """Tear down network. Implemented in subclasses"""
- pass
-
-
-class BridgedNetwork(BaseNetwork):
- """
- Virtual Network that can express itself to create a vlan and
- a bridge (with or without an IP address/netmask/gateway)
-
- properties:
- bridge_name - string (example value: br42)
- vlan - integer (example value: 42)
- bridge_dev - string (example: eth0)
- bridge_gets_ip - boolean used during bridge creation
-
- if bridge_gets_ip then network address for bridge uses the properties:
- gateway
- broadcast
- netmask
- """
-
- bridge_gets_ip = False
- override_type = 'network'
-
- @classmethod
- def get_network_for_project(cls,
- user_id,
- project_id,
- security_group='default'):
- """Returns network for a given project"""
- vlan = get_vlan_for_project(project_id)
- network_str = vlan.subnet()
- return cls.create(user_id, project_id, security_group, vlan.vlan_id,
- network_str)
-
- def __init__(self, *args, **kwargs):
- super(BridgedNetwork, self).__init__(*args, **kwargs)
- self['bridge_dev'] = FLAGS.bridge_dev
- self.save()
-
- def express(self, address=None):
- super(BridgedNetwork, self).express(address=address)
- linux_net.vlan_create(self)
- linux_net.bridge_create(self)
-
-
-class DHCPNetwork(BridgedNetwork):
- """Network supporting DHCP"""
- bridge_gets_ip = True
- override_type = 'network'
-
- def __init__(self, *args, **kwargs):
- super(DHCPNetwork, self).__init__(*args, **kwargs)
- if not(os.path.exists(FLAGS.networks_path)):
- os.makedirs(FLAGS.networks_path)
-
- @property
- def num_bottom_reserved_ips(self):
- # For cloudpipe
- return super(DHCPNetwork, self).num_bottom_reserved_ips + 1
-
- @property
- def num_top_reserved_ips(self):
- return super(DHCPNetwork, self).num_top_reserved_ips + \
- FLAGS.cnt_vpn_clients
-
- @property
- def dhcp_listen_address(self):
- """Address where dhcp server should listen"""
- return self.gateway
-
- @property
- def dhcp_range_start(self):
- """Starting address dhcp server should use"""
- return self.network[self.num_bottom_reserved_ips]
-
- def express(self, address=None):
- super(DHCPNetwork, self).express(address=address)
- if len(self.assigned) > 0:
- logging.debug("Starting dnsmasq server for network with vlan %s",
- self['vlan'])
- linux_net.start_dnsmasq(self)
- else:
- logging.debug("Not launching dnsmasq: no hosts.")
- self.express_vpn()
-
- def allocate_vpn_ip(self, user_id, project_id, mac, hostname=None):
- """Allocates the reserved ip to a vpn instance"""
- address = str(self.network[2])
- self._add_host(user_id, project_id, address, mac, hostname)
- self.express(address=address)
- return address
-
- def express_vpn(self):
- """Sets up routing rules for vpn"""
- private_ip = str(self.network[2])
- linux_net.confirm_rule("FORWARD -d %s -p udp --dport 1194 -j ACCEPT"
- % (private_ip, ))
- linux_net.confirm_rule(
- "PREROUTING -t nat -d %s -p udp --dport %s -j DNAT --to %s:1194"
- % (self.project.vpn_ip, self.project.vpn_port, private_ip))
-
- def deexpress(self, address=None):
- # if this is the last address, stop dns
- super(DHCPNetwork, self).deexpress(address=address)
- if len(self.assigned) == 0:
- linux_net.stop_dnsmasq(self)
- else:
- linux_net.start_dnsmasq(self)
-
-DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)]
-
-
-class PublicNetworkController(BaseNetwork):
- """Handles elastic ips"""
- override_type = 'network'
- address_class = ElasticIp
-
- def __init__(self, *args, **kwargs):
- network_id = "public:default"
- super(PublicNetworkController, self).__init__(network_id,
- FLAGS.public_range, *args, **kwargs)
- self['user_id'] = "public"
- self['project_id'] = "public"
- self["create_time"] = time.strftime('%Y-%m-%dT%H:%M:%SZ',
- time.gmtime())
- self["vlan"] = FLAGS.public_vlan
- self.save()
- self.express()
-
- def deallocate_ip(self, ip_str):
- # NOTE(vish): cleanup is now done on release by the parent class
- self.release_ip(ip_str)
-
- def associate_address(self, public_ip, private_ip, instance_id):
- """Associates a public ip to a private ip and instance id"""
- if not public_ip in self.assigned:
- raise exception.AddressNotAllocated()
- # TODO(josh): Keep an index going both ways
- for addr in self.assigned_objs:
- if addr.get('private_ip', None) == private_ip:
- raise exception.AddressAlreadyAssociated()
- addr = self.get_address(public_ip)
- if addr.get('private_ip', 'available') != 'available':
- raise exception.AddressAlreadyAssociated()
- addr['private_ip'] = private_ip
- addr['instance_id'] = instance_id
- addr.save()
- self.express(address=public_ip)
-
- def disassociate_address(self, public_ip):
- """Disassociates a public ip with its private ip"""
- if not public_ip in self.assigned:
- raise exception.AddressNotAllocated()
- addr = self.get_address(public_ip)
- if addr.get('private_ip', 'available') == 'available':
- raise exception.AddressNotAssociated()
- self.deexpress(address=public_ip)
- addr['private_ip'] = 'available'
- addr['instance_id'] = 'available'
- addr.save()
-
- def express(self, address=None):
- if address:
- if not address in self.assigned:
- raise exception.AddressNotAllocated()
- addresses = [self.get_address(address)]
- else:
- addresses = self.assigned_objs
- for addr in addresses:
- if addr.get('private_ip', 'available') == 'available':
- continue
- public_ip = addr['address']
- private_ip = addr['private_ip']
- linux_net.bind_public_ip(public_ip, FLAGS.public_interface)
- linux_net.confirm_rule("PREROUTING -t nat -d %s -j DNAT --to %s"
- % (public_ip, private_ip))
- linux_net.confirm_rule("POSTROUTING -t nat -s %s -j SNAT --to %s"
- % (private_ip, public_ip))
- # TODO(joshua): Get these from the secgroup datastore entries
- linux_net.confirm_rule("FORWARD -d %s -p icmp -j ACCEPT"
- % (private_ip))
- for (protocol, port) in DEFAULT_PORTS:
- linux_net.confirm_rule(
- "FORWARD -d %s -p %s --dport %s -j ACCEPT"
- % (private_ip, protocol, port))
-
- def deexpress(self, address=None):
- addr = self.get_address(address)
- private_ip = addr['private_ip']
- linux_net.unbind_public_ip(address, FLAGS.public_interface)
- linux_net.remove_rule("PREROUTING -t nat -d %s -j DNAT --to %s"
- % (address, private_ip))
- linux_net.remove_rule("POSTROUTING -t nat -s %s -j SNAT --to %s"
- % (private_ip, address))
- linux_net.remove_rule("FORWARD -d %s -p icmp -j ACCEPT"
- % (private_ip))
- for (protocol, port) in DEFAULT_PORTS:
- linux_net.remove_rule("FORWARD -d %s -p %s --dport %s -j ACCEPT"
- % (private_ip, protocol, port))
-
-
-# FIXME(todd): does this present a race condition, or is there some
-# piece of architecture that mitigates it (only one queue
-# listener per net)?
-def get_vlan_for_project(project_id):
- """Allocate vlan IDs to individual users"""
- vlan = Vlan.lookup(project_id)
- if vlan:
- return vlan
- known_vlans = Vlan.dict_by_vlan()
- for vnum in range(FLAGS.vlan_start, FLAGS.vlan_end):
- vstr = str(vnum)
- if not vstr in known_vlans:
- return Vlan.create(project_id, vnum)
- old_project_id = known_vlans[vstr]
- if not manager.AuthManager().get_project(old_project_id):
- vlan = Vlan.lookup(old_project_id)
- if vlan:
- # NOTE(todd): This doesn't check for vlan id match, because
- # it seems to be assumed that vlan<=>project is
- # always a 1:1 mapping. It could be made way
- # sexier if it didn't fight against the way
- # BasicModel worked and used associate_with
- # to build connections to projects.
- # NOTE(josh): This is here because we want to make sure we
- # don't orphan any VLANs. It is basically
- # garbage collection for after projects abandoned
- # their reference.
- vlan.destroy()
- vlan.project_id = project_id
- vlan.save()
- return vlan
- else:
- return Vlan.create(project_id, vnum)
- raise exception.AddressNotAllocated("Out of VLANs")
-
-
-def get_project_network(project_id, security_group='default'):
- """Gets a project's private network, allocating one if needed"""
- project = manager.AuthManager().get_project(project_id)
- if not project:
- raise nova_exception.NotFound("Project %s doesn't exist." % project_id)
- manager_id = project.project_manager_id
- return DHCPNetwork.get_network_for_project(manager_id,
- project.id,
- security_group)
-
-
-def get_network_by_address(address):
- """Gets the network for a given private ip"""
- address_record = FixedIp.lookup(address)
- if not address_record:
- raise exception.AddressNotAllocated()
- return get_project_network(address_record['project_id'])
-
-
-def get_network_by_interface(iface, security_group='default'):
- """Gets the network for a given interface"""
- vlan = iface.rpartition("br")[2]
- project_id = Vlan.dict_by_vlan().get(vlan)
- return get_project_network(project_id, security_group)
-
-
-def get_public_ip_for_instance(instance_id):
- """Gets the public ip for a given instance"""
- # FIXME(josh): this should be a lookup - iteration won't scale
- for address_record in ElasticIp.all():
- if address_record.get('instance_id', 'available') == instance_id:
- return address_record['address']
diff --git a/nova/network/service.py b/nova/network/service.py
index 625f20dd4..28f017a27 100644
--- a/nova/network/service.py
+++ b/nova/network/service.py
@@ -17,241 +17,15 @@
# under the License.
"""
-Network Hosts are responsible for allocating ips and setting up network
+Network service allows rpc calls to the network manager and reports state
+to the database.
"""
-from nova import datastore
-from nova import flags
from nova import service
-from nova import utils
-from nova.auth import manager
-from nova.exception import NotFound
-from nova.network import exception
-from nova.network import model
-from nova.network import vpn
-FLAGS = flags.FLAGS
-flags.DEFINE_string('network_type',
- 'flat',
- 'Service Class for Networking')
-flags.DEFINE_string('flat_network_bridge', 'br100',
- 'Bridge for simple network instances')
-flags.DEFINE_list('flat_network_ips',
- ['192.168.0.2', '192.168.0.3', '192.168.0.4'],
- 'Available ips for simple network')
-flags.DEFINE_string('flat_network_network', '192.168.0.0',
- 'Network for simple network')
-flags.DEFINE_string('flat_network_netmask', '255.255.255.0',
- 'Netmask for simple network')
-flags.DEFINE_string('flat_network_gateway', '192.168.0.1',
- 'Broadcast for simple network')
-flags.DEFINE_string('flat_network_broadcast', '192.168.0.255',
- 'Broadcast for simple network')
-flags.DEFINE_string('flat_network_dns', '8.8.4.4',
- 'Dns for simple network')
-
-
-def type_to_class(network_type):
- """Convert a network_type string into an actual Python class"""
- if network_type == 'flat':
- return FlatNetworkService
- elif network_type == 'vlan':
- return VlanNetworkService
- raise NotFound("Couldn't find %s network type" % network_type)
-
-
-def setup_compute_network(network_type, user_id, project_id, security_group):
- """Sets up the network on a compute host"""
- srv = type_to_class(network_type)
- srv.setup_compute_network(network_type,
- user_id,
- project_id,
- security_group)
-
-
-def get_host_for_project(project_id):
- """Get host allocated to project from datastore"""
- redis = datastore.Redis.instance()
- return redis.get(_host_key(project_id))
-
-
-def _host_key(project_id):
- """Returns redis host key for network"""
- return "networkhost:%s" % project_id
-
-
-class BaseNetworkService(service.Service):
- """Implements common network service functionality
-
- This class must be subclassed.
+class NetworkService(service.Service):
"""
- def __init__(self, *args, **kwargs):
- self.network = model.PublicNetworkController()
- super(BaseNetworkService, self).__init__(*args, **kwargs)
-
- def set_network_host(self, user_id, project_id, *args, **kwargs):
- """Safely sets the host of the projects network"""
- redis = datastore.Redis.instance()
- key = _host_key(project_id)
- if redis.setnx(key, FLAGS.node_name):
- self._on_set_network_host(user_id, project_id,
- security_group='default',
- *args, **kwargs)
- return FLAGS.node_name
- else:
- return redis.get(key)
-
- def allocate_fixed_ip(self, user_id, project_id,
- security_group='default',
- *args, **kwargs):
- """Subclass implements getting fixed ip from the pool"""
- raise NotImplementedError()
-
- def deallocate_fixed_ip(self, fixed_ip, *args, **kwargs):
- """Subclass implements return of ip to the pool"""
- raise NotImplementedError()
-
- def _on_set_network_host(self, user_id, project_id,
- *args, **kwargs):
- """Called when this host becomes the host for a project"""
- pass
-
- @classmethod
- def setup_compute_network(cls, user_id, project_id, security_group,
- *args, **kwargs):
- """Sets up matching network for compute hosts"""
- raise NotImplementedError()
-
- def allocate_elastic_ip(self, user_id, project_id):
- """Gets a elastic ip from the pool"""
- # NOTE(vish): Replicating earlier decision to use 'public' as
- # mac address name, although this should probably
- # be done inside of the PublicNetworkController
- return self.network.allocate_ip(user_id, project_id, 'public')
-
- def associate_elastic_ip(self, elastic_ip, fixed_ip, instance_id):
- """Associates an elastic ip to a fixed ip"""
- self.network.associate_address(elastic_ip, fixed_ip, instance_id)
-
- def disassociate_elastic_ip(self, elastic_ip):
- """Disassociates a elastic ip"""
- self.network.disassociate_address(elastic_ip)
-
- def deallocate_elastic_ip(self, elastic_ip):
- """Returns a elastic ip to the pool"""
- self.network.deallocate_ip(elastic_ip)
-
-
-class FlatNetworkService(BaseNetworkService):
- """Basic network where no vlans are used"""
-
- @classmethod
- def setup_compute_network(cls, user_id, project_id, security_group,
- *args, **kwargs):
- """Network is created manually"""
- pass
-
- def allocate_fixed_ip(self,
- user_id,
- project_id,
- security_group='default',
- *args, **kwargs):
- """Gets a fixed ip from the pool
-
- Flat network just grabs the next available ip from the pool
- """
- # NOTE(vish): Some automation could be done here. For example,
- # creating the flat_network_bridge and setting up
- # a gateway. This is all done manually atm.
- redis = datastore.Redis.instance()
- if not redis.exists('ips') and not len(redis.keys('instances:*')):
- for fixed_ip in FLAGS.flat_network_ips:
- redis.sadd('ips', fixed_ip)
- fixed_ip = redis.spop('ips')
- if not fixed_ip:
- raise exception.NoMoreAddresses()
- # TODO(vish): some sort of dns handling for hostname should
- # probably be done here.
- return {'inject_network': True,
- 'network_type': FLAGS.network_type,
- 'mac_address': utils.generate_mac(),
- 'private_dns_name': str(fixed_ip),
- 'bridge_name': FLAGS.flat_network_bridge,
- 'network_network': FLAGS.flat_network_network,
- 'network_netmask': FLAGS.flat_network_netmask,
- 'network_gateway': FLAGS.flat_network_gateway,
- 'network_broadcast': FLAGS.flat_network_broadcast,
- 'network_dns': FLAGS.flat_network_dns}
-
- def deallocate_fixed_ip(self, fixed_ip, *args, **kwargs):
- """Returns an ip to the pool"""
- datastore.Redis.instance().sadd('ips', fixed_ip)
-
-
-class VlanNetworkService(BaseNetworkService):
- """Vlan network with dhcp"""
- # NOTE(vish): A lot of the interactions with network/model.py can be
- # simplified and improved. Also there it may be useful
- # to support vlans separately from dhcp, instead of having
- # both of them together in this class.
- # pylint: disable=W0221
- def allocate_fixed_ip(self,
- user_id,
- project_id,
- security_group='default',
- is_vpn=False,
- hostname=None,
- *args, **kwargs):
- """Gets a fixed ip from the pool"""
- mac = utils.generate_mac()
- net = model.get_project_network(project_id)
- if is_vpn:
- fixed_ip = net.allocate_vpn_ip(user_id,
- project_id,
- mac,
- hostname)
- else:
- fixed_ip = net.allocate_ip(user_id,
- project_id,
- mac,
- hostname)
- return {'network_type': FLAGS.network_type,
- 'bridge_name': net['bridge_name'],
- 'mac_address': mac,
- 'private_dns_name': fixed_ip}
-
- def deallocate_fixed_ip(self, fixed_ip,
- *args, **kwargs):
- """Returns an ip to the pool"""
- return model.get_network_by_address(fixed_ip).deallocate_ip(fixed_ip)
-
- def lease_ip(self, fixed_ip):
- """Called by bridge when ip is leased"""
- return model.get_network_by_address(fixed_ip).lease_ip(fixed_ip)
-
- def release_ip(self, fixed_ip):
- """Called by bridge when ip is released"""
- return model.get_network_by_address(fixed_ip).release_ip(fixed_ip)
-
- def restart_nets(self):
- """Ensure the network for each user is enabled"""
- for project in manager.AuthManager().get_projects():
- model.get_project_network(project.id).express()
-
- def _on_set_network_host(self, user_id, project_id,
- *args, **kwargs):
- """Called when this host becomes the host for a project"""
- vpn.NetworkData.create(project_id)
-
- @classmethod
- def setup_compute_network(cls, user_id, project_id, security_group,
- *args, **kwargs):
- """Sets up matching network for compute hosts"""
- # NOTE(vish): Use BridgedNetwork instead of DHCPNetwork because
- # we don't want to run dnsmasq on the client machines
- net = model.BridgedNetwork.get_network_for_project(
- user_id,
- project_id,
- security_group)
- net.express()
+ Network Service automatically passes commands on to the Network Manager
+ """
+ pass
diff --git a/nova/network/vpn.py b/nova/network/vpn.py
deleted file mode 100644
index a0e2a7fa1..000000000
--- a/nova/network/vpn.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Network Data for projects"""
-
-from nova import datastore
-from nova import exception
-from nova import flags
-from nova import utils
-
-FLAGS = flags.FLAGS
-
-
-flags.DEFINE_string('vpn_ip', utils.get_my_ip(),
- 'Public IP for the cloudpipe VPN servers')
-flags.DEFINE_integer('vpn_start_port', 1000,
- 'Start port for the cloudpipe VPN servers')
-flags.DEFINE_integer('vpn_end_port', 2000,
- 'End port for the cloudpipe VPN servers')
-
-
-class NoMorePorts(exception.Error):
- """No ports available to allocate for the given ip"""
- pass
-
-
-class NetworkData(datastore.BasicModel):
- """Manages network host, and vpn ip and port for projects"""
- def __init__(self, project_id):
- self.project_id = project_id
- super(NetworkData, self).__init__()
-
- @property
- def identifier(self):
- """Identifier used for key in redis"""
- return self.project_id
-
- @classmethod
- def create(cls, project_id):
- """Creates a vpn for project
-
- This method finds a free ip and port and stores the associated
- values in the datastore.
- """
- # TODO(vish): will we ever need multiiple ips per host?
- port = cls.find_free_port_for_ip(FLAGS.vpn_ip)
- network_data = cls(project_id)
- # save ip for project
- network_data['host'] = FLAGS.node_name
- network_data['project'] = project_id
- network_data['ip'] = FLAGS.vpn_ip
- network_data['port'] = port
- network_data.save()
- return network_data
-
- @classmethod
- def find_free_port_for_ip(cls, vpn_ip):
- """Finds a free port for a given ip from the redis set"""
- # TODO(vish): these redis commands should be generalized and
- # placed into a base class. Conceptually, it is
- # similar to an association, but we are just
- # storing a set of values instead of keys that
- # should be turned into objects.
- cls._ensure_set_exists(vpn_ip)
-
- port = datastore.Redis.instance().spop(cls._redis_ports_key(vpn_ip))
- if not port:
- raise NoMorePorts()
- return port
-
- @classmethod
- def _redis_ports_key(cls, vpn_ip):
- """Key that ports are stored under in redis"""
- return 'ip:%s:ports' % vpn_ip
-
- @classmethod
- def _ensure_set_exists(cls, vpn_ip):
- """Creates the set of ports for the ip if it doesn't already exist"""
- # TODO(vish): these ports should be allocated through an admin
- # command instead of a flag
- redis = datastore.Redis.instance()
- if (not redis.exists(cls._redis_ports_key(vpn_ip)) and
- not redis.exists(cls._redis_association_name('ip', vpn_ip))):
- for i in range(FLAGS.vpn_start_port, FLAGS.vpn_end_port + 1):
- redis.sadd(cls._redis_ports_key(vpn_ip), i)
-
- @classmethod
- def num_ports_for_ip(cls, vpn_ip):
- """Calculates the number of free ports for a given ip"""
- cls._ensure_set_exists(vpn_ip)
- return datastore.Redis.instance().scard('ip:%s:ports' % vpn_ip)
-
- @property
- def ip(self): # pylint: disable=C0103
- """The ip assigned to the project"""
- return self['ip']
-
- @property
- def port(self):
- """The port assigned to the project"""
- return int(self['port'])
-
- def save(self):
- """Saves the association to the given ip"""
- self.associate_with('ip', self.ip)
- super(NetworkData, self).save()
-
- def destroy(self):
- """Cleans up datastore and adds port back to pool"""
- self.unassociate_with('ip', self.ip)
- datastore.Redis.instance().sadd('ip:%s:ports' % self.ip, self.port)
- super(NetworkData, self).destroy()
diff --git a/nova/objectstore/bucket.py b/nova/objectstore/bucket.py
index b42a96233..c2b412dd7 100644
--- a/nova/objectstore/bucket.py
+++ b/nova/objectstore/bucket.py
@@ -36,6 +36,7 @@ FLAGS = flags.FLAGS
flags.DEFINE_string('buckets_path', utils.abspath('../buckets'),
'path to s3 buckets')
+
class Bucket(object):
def __init__(self, name):
self.name = name
diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py
index dfe1918e3..5c3dc286b 100644
--- a/nova/objectstore/handler.py
+++ b/nova/objectstore/handler.py
@@ -38,17 +38,19 @@ S3 client with this module::
"""
import datetime
-import logging
import json
+import logging
import multiprocessing
import os
-from tornado import escape
import urllib
-from twisted.application import internet, service
-from twisted.web.resource import Resource
-from twisted.web import server, static, error
-
+from tornado import escape
+from twisted.application import internet
+from twisted.application import service
+from twisted.web import error
+from twisted.web import resource
+from twisted.web import server
+from twisted.web import static
from nova import exception
from nova import flags
@@ -60,7 +62,9 @@ from nova.objectstore import image
FLAGS = flags.FLAGS
+
def render_xml(request, value):
+ """Writes value as XML string to request"""
assert isinstance(value, dict) and len(value) == 1
request.setHeader("Content-Type", "application/xml; charset=UTF-8")
@@ -72,12 +76,16 @@ def render_xml(request, value):
request.write('</' + escape.utf8(name) + '>')
request.finish()
+
def finish(request, content=None):
+ """Finalizer method for request"""
if content:
request.write(content)
request.finish()
+
def _render_parts(value, write_cb):
+ """Helper method to render different Python objects to XML"""
if isinstance(value, basestring):
write_cb(escape.xhtml_escape(value))
elif isinstance(value, int) or isinstance(value, long):
@@ -95,38 +103,52 @@ def _render_parts(value, write_cb):
else:
raise Exception("Unknown S3 value type %r", value)
+
def get_argument(request, key, default_value):
+ """Returns the request's value at key, or default_value
+ if not found
+ """
if key in request.args:
return request.args[key][0]
return default_value
+
def get_context(request):
+ """Returns the supplied request's context object"""
try:
# Authorization Header format: 'AWS <access>:<secret>'
authorization_header = request.getHeader('Authorization')
if not authorization_header:
raise exception.NotAuthorized
- access, sep, secret = authorization_header.split(' ')[1].rpartition(':')
- (user, project) = manager.AuthManager().authenticate(access,
- secret,
- {},
- request.method,
- request.getRequestHostname(),
- request.uri,
- headers=request.getAllHeaders(),
- check_type='s3')
+ auth_header_value = authorization_header.split(' ')[1]
+ access, _ignored, secret = auth_header_value.rpartition(':')
+ am = manager.AuthManager()
+ (user, project) = am.authenticate(access,
+ secret,
+ {},
+ request.method,
+ request.getRequestHostname(),
+ request.uri,
+ headers=request.getAllHeaders(),
+ check_type='s3')
return api.APIRequestContext(None, user, project)
except exception.Error as ex:
- logging.debug("Authentication Failure: %s" % ex)
+ logging.debug("Authentication Failure: %s", ex)
raise exception.NotAuthorized
-class ErrorHandlingResource(Resource):
- """Maps exceptions to 404 / 401 codes. Won't work for exceptions thrown after NOT_DONE_YET is returned."""
- # TODO(unassigned) (calling-all-twisted-experts): This needs to be plugged in to the right place in twisted...
- # This doesn't look like it's the right place (consider exceptions in getChild; or after NOT_DONE_YET is returned
+class ErrorHandlingResource(resource.Resource):
+ """Maps exceptions to 404 / 401 codes. Won't work for
+ exceptions thrown after NOT_DONE_YET is returned.
+ """
+ # TODO(unassigned) (calling-all-twisted-experts): This needs to be
+ # plugged in to the right place in twisted...
+ # This doesn't look like it's the right place
+ # (consider exceptions in getChild; or after
+ # NOT_DONE_YET is returned
def render(self, request):
+ """Renders the response as XML"""
try:
- return Resource.render(self, request)
+ return resource.Resource.render(self, request)
except exception.NotFound:
request.setResponseCode(404)
return ''
@@ -134,9 +156,14 @@ class ErrorHandlingResource(Resource):
request.setResponseCode(403)
return ''
+
class S3(ErrorHandlingResource):
"""Implementation of an S3-like storage server based on local files."""
- def getChild(self, name, request):
+ def __init__(self):
+ ErrorHandlingResource.__init__(self)
+
+ def getChild(self, name, request): # pylint: disable-msg=C0103
+ """Returns either the image or bucket resource"""
request.context = get_context(request)
if name == '':
return self
@@ -145,32 +172,40 @@ class S3(ErrorHandlingResource):
else:
return BucketResource(name)
- def render_GET(self, request):
+ def render_GET(self, request): # pylint: disable-msg=R0201
+ """Renders the GET request for a list of buckets as XML"""
logging.debug('List of buckets requested')
- buckets = [b for b in bucket.Bucket.all() if b.is_authorized(request.context)]
+ buckets = [b for b in bucket.Bucket.all() \
+ if b.is_authorized(request.context)]
render_xml(request, {"ListAllMyBucketsResult": {
"Buckets": {"Bucket": [b.metadata for b in buckets]},
}})
return server.NOT_DONE_YET
+
class BucketResource(ErrorHandlingResource):
+ """A web resource containing an S3-like bucket"""
def __init__(self, name):
- Resource.__init__(self)
+ resource.Resource.__init__(self)
self.name = name
def getChild(self, name, request):
+ """Returns the bucket resource itself, or the object resource
+ the bucket contains if a name is supplied
+ """
if name == '':
return self
else:
return ObjectResource(bucket.Bucket(self.name), name)
def render_GET(self, request):
- logging.debug("List keys for bucket %s" % (self.name))
+ """Returns the keys for the bucket resource"""
+ logging.debug("List keys for bucket %s", self.name)
try:
bucket_object = bucket.Bucket(self.name)
- except exception.NotFound, e:
+ except exception.NotFound:
return error.NoResource(message="No such bucket").render(request)
if not bucket_object.is_authorized(request.context):
@@ -181,19 +216,26 @@ class BucketResource(ErrorHandlingResource):
max_keys = int(get_argument(request, "max-keys", 1000))
terse = int(get_argument(request, "terse", 0))
- results = bucket_object.list_keys(prefix=prefix, marker=marker, max_keys=max_keys, terse=terse)
+ results = bucket_object.list_keys(prefix=prefix,
+ marker=marker,
+ max_keys=max_keys,
+ terse=terse)
render_xml(request, {"ListBucketResult": results})
return server.NOT_DONE_YET
def render_PUT(self, request):
- logging.debug("Creating bucket %s" % (self.name))
- logging.debug("calling bucket.Bucket.create(%r, %r)" % (self.name, request.context))
+ """Creates the bucket resource"""
+ logging.debug("Creating bucket %s", self.name)
+ logging.debug("calling bucket.Bucket.create(%r, %r)",
+ self.name,
+ request.context)
bucket.Bucket.create(self.name, request.context)
request.finish()
return server.NOT_DONE_YET
def render_DELETE(self, request):
- logging.debug("Deleting bucket %s" % (self.name))
+ """Deletes the bucket resource"""
+ logging.debug("Deleting bucket %s", self.name)
bucket_object = bucket.Bucket(self.name)
if not bucket_object.is_authorized(request.context):
@@ -205,25 +247,37 @@ class BucketResource(ErrorHandlingResource):
class ObjectResource(ErrorHandlingResource):
+ """The resource returned from a bucket"""
def __init__(self, bucket, name):
- Resource.__init__(self)
+ resource.Resource.__init__(self)
self.bucket = bucket
self.name = name
def render_GET(self, request):
- logging.debug("Getting object: %s / %s" % (self.bucket.name, self.name))
+ """Returns the object
+
+ Raises NotAuthorized if user in request context is not
+ authorized to delete the object.
+ """
+ logging.debug("Getting object: %s / %s", self.bucket.name, self.name)
if not self.bucket.is_authorized(request.context):
raise exception.NotAuthorized
obj = self.bucket[urllib.unquote(self.name)]
request.setHeader("Content-Type", "application/unknown")
- request.setHeader("Last-Modified", datetime.datetime.utcfromtimestamp(obj.mtime))
+ request.setHeader("Last-Modified",
+ datetime.datetime.utcfromtimestamp(obj.mtime))
request.setHeader("Etag", '"' + obj.md5 + '"')
return static.File(obj.path).render_GET(request)
def render_PUT(self, request):
- logging.debug("Putting object: %s / %s" % (self.bucket.name, self.name))
+ """Modifies/inserts the object and returns a result code
+
+ Raises NotAuthorized if user in request context is not
+ authorized to delete the object.
+ """
+ logging.debug("Putting object: %s / %s", self.bucket.name, self.name)
if not self.bucket.is_authorized(request.context):
raise exception.NotAuthorized
@@ -236,7 +290,15 @@ class ObjectResource(ErrorHandlingResource):
return server.NOT_DONE_YET
def render_DELETE(self, request):
- logging.debug("Deleting object: %s / %s" % (self.bucket.name, self.name))
+ """Deletes the object and returns a result code
+
+ Raises NotAuthorized if user in request context is not
+ authorized to delete the object.
+ """
+
+ logging.debug("Deleting object: %s / %s",
+ self.bucket.name,
+ self.name)
if not self.bucket.is_authorized(request.context):
raise exception.NotAuthorized
@@ -245,24 +307,31 @@ class ObjectResource(ErrorHandlingResource):
request.setResponseCode(204)
return ''
+
class ImageResource(ErrorHandlingResource):
+ """A web resource representing a single image"""
isLeaf = True
def __init__(self, name):
- Resource.__init__(self)
+ resource.Resource.__init__(self)
self.img = image.Image(name)
def render_GET(self, request):
- return static.File(self.img.image_path, defaultType='application/octet-stream').render_GET(request)
-
-class ImagesResource(Resource):
- def getChild(self, name, request):
+ """Returns the image file"""
+ return static.File(self.img.image_path,
+ defaultType='application/octet-stream'
+ ).render_GET(request)
+
+class ImagesResource(resource.Resource):
+ """A web resource representing a list of images"""
+ def getChild(self, name, _request):
+ """Returns itself or an ImageResource if no name given"""
if name == '':
return self
else:
return ImageResource(name)
- def render_GET(self, request):
+ def render_GET(self, request): # pylint: disable-msg=R0201
""" returns a json listing of all images
that a user has permissions to see """
@@ -289,7 +358,7 @@ class ImagesResource(Resource):
request.finish()
return server.NOT_DONE_YET
- def render_PUT(self, request):
+ def render_PUT(self, request): # pylint: disable-msg=R0201
""" create a new registered image """
image_id = get_argument(request, 'image_id', u'')
@@ -301,7 +370,6 @@ class ImagesResource(Resource):
raise exception.NotAuthorized
bucket_object = bucket.Bucket(image_location.split("/")[0])
- manifest = image_location[len(image_location.split('/')[0])+1:]
if not bucket_object.is_authorized(request.context):
raise exception.NotAuthorized
@@ -311,8 +379,8 @@ class ImagesResource(Resource):
p.start()
return ''
- def render_POST(self, request):
- """ update image attributes: public/private """
+ def render_POST(self, request): # pylint: disable-msg=R0201
+ """Update image attributes: public/private"""
image_id = get_argument(request, 'image_id', u'')
operation = get_argument(request, 'operation', u'')
@@ -326,8 +394,8 @@ class ImagesResource(Resource):
return ''
- def render_DELETE(self, request):
- """ delete a registered image """
+ def render_DELETE(self, request): # pylint: disable-msg=R0201
+ """Delete a registered image"""
image_id = get_argument(request, "image_id", u"")
image_object = image.Image(image_id)
@@ -339,14 +407,21 @@ class ImagesResource(Resource):
request.setResponseCode(204)
return ''
+
def get_site():
+ """Support for WSGI-like interfaces"""
root = S3()
site = server.Site(root)
return site
+
def get_application():
+ """Support WSGI-like interfaces"""
factory = get_site()
application = service.Application("objectstore")
+ # Disabled because of lack of proper introspection in Twisted
+ # or possibly different versions of twisted?
+ # pylint: disable-msg=E1101
objectStoreService = internet.TCPServer(FLAGS.s3_port, factory)
objectStoreService.setServiceParent(application)
return application
diff --git a/nova/objectstore/image.py b/nova/objectstore/image.py
index 861eb364f..f3c02a425 100644
--- a/nova/objectstore/image.py
+++ b/nova/objectstore/image.py
@@ -42,6 +42,7 @@ FLAGS = flags.FLAGS
flags.DEFINE_string('images_path', utils.abspath('../images'),
'path to decrypted images')
+
class Image(object):
def __init__(self, image_id):
self.image_id = image_id
@@ -231,13 +232,22 @@ class Image(object):
@staticmethod
def decrypt_image(encrypted_filename, encrypted_key, encrypted_iv, cloud_private_key, decrypted_filename):
- key, err = utils.execute('openssl rsautl -decrypt -inkey %s' % cloud_private_key, encrypted_key)
+ key, err = utils.execute(
+ 'openssl rsautl -decrypt -inkey %s' % cloud_private_key,
+ process_input=encrypted_key,
+ check_exit_code=False)
if err:
raise exception.Error("Failed to decrypt private key: %s" % err)
- iv, err = utils.execute('openssl rsautl -decrypt -inkey %s' % cloud_private_key, encrypted_iv)
+ iv, err = utils.execute(
+ 'openssl rsautl -decrypt -inkey %s' % cloud_private_key,
+ process_input=encrypted_iv,
+ check_exit_code=False)
if err:
raise exception.Error("Failed to decrypt initialization vector: %s" % err)
- out, err = utils.execute('openssl enc -d -aes-128-cbc -in %s -K %s -iv %s -out %s' % (encrypted_filename, key, iv, decrypted_filename))
+ _out, err = utils.execute(
+ 'openssl enc -d -aes-128-cbc -in %s -K %s -iv %s -out %s'
+ % (encrypted_filename, key, iv, decrypted_filename),
+ check_exit_code=False)
if err:
raise exception.Error("Failed to decrypt image file %s : %s" % (encrypted_filename, err))
diff --git a/nova/objectstore/stored.py b/nova/objectstore/stored.py
index 81c047b22..9829194cb 100644
--- a/nova/objectstore/stored.py
+++ b/nova/objectstore/stored.py
@@ -23,7 +23,7 @@ Properties of an object stored within a bucket.
import os
import nova.crypto
-from nova.exception import NotFound, NotAuthorized
+from nova import exception
class Object(object):
@@ -33,7 +33,7 @@ class Object(object):
self.key = key
self.path = bucket._object_path(key)
if not os.path.isfile(self.path):
- raise NotFound
+ raise exception.NotFound
def __repr__(self):
return "<Object %s/%s>" % (self.bucket, self.key)
diff --git a/nova/process.py b/nova/process.py
index 2dc56372f..425d9f162 100644
--- a/nova/process.py
+++ b/nova/process.py
@@ -2,6 +2,7 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2010 FathomDB Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -20,16 +21,12 @@
Process pool, still buggy right now.
"""
-import logging
-import multiprocessing
import StringIO
+
from twisted.internet import defer
from twisted.internet import error
-from twisted.internet import process
from twisted.internet import protocol
from twisted.internet import reactor
-from twisted.internet import threads
-from twisted.python import failure
from nova import flags
@@ -54,111 +51,100 @@ class UnexpectedErrorOutput(IOError):
IOError.__init__(self, "got stdout: %r\nstderr: %r" % (stdout, stderr))
-# NOTE(termie): this too
-class _BackRelay(protocol.ProcessProtocol):
+# This is based on _BackRelay from twisted.internet.utils, but modified to
+# capture both stdout and stderr, without odd stderr handling, and also to
+# handle stdin
+class BackRelayWithInput(protocol.ProcessProtocol):
"""
Trivial protocol for communicating with a process and turning its output
into the result of a L{Deferred}.
@ivar deferred: A L{Deferred} which will be called back with all of stdout
- and, if C{errortoo} is true, all of stderr as well (mixed together in
- one string). If C{errortoo} is false and any bytes are received over
- stderr, this will fire with an L{_UnexpectedErrorOutput} instance and
- the attribute will be set to C{None}.
-
- @ivar onProcessEnded: If C{errortoo} is false and bytes are received over
- stderr, this attribute will refer to a L{Deferred} which will be called
- back when the process ends. This C{Deferred} is also associated with
- the L{_UnexpectedErrorOutput} which C{deferred} fires with earlier in
- this case so that users can determine when the process has actually
- ended, in addition to knowing when bytes have been received via stderr.
+ and all of stderr as well (as a tuple). If C{terminate_on_stderr} is true
+ and any bytes are received over stderr, this will fire with an
+ L{_UnexpectedErrorOutput} instance and the attribute will be set to
+ C{None}.
+
+ @ivar onProcessEnded: If C{terminate_on_stderr} is false and bytes are
+ received over stderr, this attribute will refer to a L{Deferred} which
+ will be called back when the process ends. This C{Deferred} is also
+ associated with the L{_UnexpectedErrorOutput} which C{deferred} fires
+ with earlier in this case so that users can determine when the process
+ has actually ended, in addition to knowing when bytes have been received
+ via stderr.
"""
- def __init__(self, deferred, errortoo=0):
+ def __init__(self, deferred, started_deferred=None,
+ terminate_on_stderr=False, check_exit_code=True,
+ process_input=None):
self.deferred = deferred
- self.s = StringIO.StringIO()
- if errortoo:
- self.errReceived = self.errReceivedIsGood
- else:
- self.errReceived = self.errReceivedIsBad
-
- def errReceivedIsBad(self, text):
- if self.deferred is not None:
- self.onProcessEnded = defer.Deferred()
- err = UnexpectedErrorOutput(text, self.onProcessEnded)
- self.deferred.errback(failure.Failure(err))
+ self.stdout = StringIO.StringIO()
+ self.stderr = StringIO.StringIO()
+ self.started_deferred = started_deferred
+ self.terminate_on_stderr = terminate_on_stderr
+ self.check_exit_code = check_exit_code
+ self.process_input = process_input
+ self.on_process_ended = None
+
+ def errReceived(self, text):
+ self.stderr.write(text)
+ if self.terminate_on_stderr and (self.deferred is not None):
+ self.on_process_ended = defer.Deferred()
+ self.deferred.errback(UnexpectedErrorOutput(
+ stdout=self.stdout.getvalue(),
+ stderr=self.stderr.getvalue()))
self.deferred = None
self.transport.loseConnection()
- def errReceivedIsGood(self, text):
- self.s.write(text)
-
def outReceived(self, text):
- self.s.write(text)
-
- def processEnded(self, reason):
- if self.deferred is not None:
- self.deferred.callback(self.s.getvalue())
- elif self.onProcessEnded is not None:
- self.onProcessEnded.errback(reason)
-
-
-class BackRelayWithInput(_BackRelay):
- def __init__(self, deferred, startedDeferred=None, error_ok=0,
- input=None):
- # Twisted doesn't use new-style classes in most places :(
- _BackRelay.__init__(self, deferred, errortoo=error_ok)
- self.error_ok = error_ok
- self.input = input
- self.stderr = StringIO.StringIO()
- self.startedDeferred = startedDeferred
-
- def errReceivedIsBad(self, text):
- self.stderr.write(text)
- self.transport.loseConnection()
-
- def errReceivedIsGood(self, text):
- self.stderr.write(text)
-
- def connectionMade(self):
- if self.startedDeferred:
- self.startedDeferred.callback(self)
- if self.input:
- self.transport.write(self.input)
- self.transport.closeStdin()
+ self.stdout.write(text)
def processEnded(self, reason):
if self.deferred is not None:
- stdout, stderr = self.s.getvalue(), self.stderr.getvalue()
+ stdout, stderr = self.stdout.getvalue(), self.stderr.getvalue()
try:
- # NOTE(termie): current behavior means if error_ok is True
- # we won't throw an error even if the process
- # exited with a non-0 status, so you can't be
- # okay with stderr output and not with bad exit
- # codes.
- if not self.error_ok:
+ if self.check_exit_code:
reason.trap(error.ProcessDone)
self.deferred.callback((stdout, stderr))
except:
+ # NOTE(justinsb): This logic is a little suspicious to me...
+ # If the callback throws an exception, then errback will be
+ # called also. However, this is what the unit tests test for...
self.deferred.errback(UnexpectedErrorOutput(stdout, stderr))
+ elif self.on_process_ended is not None:
+ self.on_process_ended.errback(reason)
-def getProcessOutput(executable, args=None, env=None, path=None, reactor=None,
- error_ok=0, input=None, startedDeferred=None):
- if reactor is None:
- from twisted.internet import reactor
+ def connectionMade(self):
+ if self.started_deferred:
+ self.started_deferred.callback(self)
+ if self.process_input:
+ self.transport.write(self.process_input)
+ self.transport.closeStdin()
+
+def get_process_output(executable, args=None, env=None, path=None,
+ process_reactor=None, check_exit_code=True,
+ process_input=None, started_deferred=None,
+ terminate_on_stderr=False):
+ if process_reactor is None:
+ process_reactor = reactor
args = args and args or ()
env = env and env and {}
- d = defer.Deferred()
- p = BackRelayWithInput(
- d, startedDeferred=startedDeferred, error_ok=error_ok, input=input)
+ deferred = defer.Deferred()
+ process_handler = BackRelayWithInput(
+ deferred,
+ started_deferred=started_deferred,
+ check_exit_code=check_exit_code,
+ process_input=process_input,
+ terminate_on_stderr=terminate_on_stderr)
# NOTE(vish): commands come in as unicode, but self.executes needs
# strings or process.spawn raises a deprecation warning
executable = str(executable)
if not args is None:
args = [str(x) for x in args]
- reactor.spawnProcess(p, executable, (executable,)+tuple(args), env, path)
- return d
+ process_reactor.spawnProcess( process_handler, executable,
+ (executable,)+tuple(args), env, path)
+ return deferred
class ProcessPool(object):
@@ -184,26 +170,27 @@ class ProcessPool(object):
return self.execute(executable, args, **kw)
def execute(self, *args, **kw):
- d = self._pool.acquire()
+ deferred = self._pool.acquire()
- def _associateProcess(proto):
- d.process = proto.transport
+ def _associate_process(proto):
+ deferred.process = proto.transport
return proto.transport
started = defer.Deferred()
- started.addCallback(_associateProcess)
- kw.setdefault('startedDeferred', started)
+ started.addCallback(_associate_process)
+ kw.setdefault('started_deferred', started)
- d.process = None
- d.started = started
+ deferred.process = None
+ deferred.started = started
- d.addCallback(lambda _: getProcessOutput(*args, **kw))
- d.addBoth(self._release)
- return d
+ deferred.addCallback(lambda _: get_process_output(*args, **kw))
+ deferred.addBoth(self._release)
+ return deferred
- def _release(self, rv=None):
+ def _release(self, retval=None):
self._pool.release()
- return rv
+ return retval
+
class SharedPool(object):
_instance = None
@@ -213,5 +200,6 @@ class SharedPool(object):
def __getattr__(self, key):
return getattr(self._instance, key)
+
def simple_execute(cmd, **kwargs):
return SharedPool().simple_execute(cmd, **kwargs)
diff --git a/nova/rpc.py b/nova/rpc.py
index 4ac546c2a..84a9b5590 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -21,12 +21,13 @@ AMQP-based RPC. Queues have consumers and publishers.
No fan-out support yet.
"""
-from carrot import connection as carrot_connection
-from carrot import messaging
import json
import logging
import sys
import uuid
+
+from carrot import connection as carrot_connection
+from carrot import messaging
from twisted.internet import defer
from twisted.internet import task
@@ -58,7 +59,7 @@ class Connection(carrot_connection.BrokerConnection):
params['backend_cls'] = fakerabbit.Backend
# NOTE(vish): magic is fun!
- # pylint: disable=W0142
+ # pylint: disable-msg=W0142
cls._instance = cls(**params)
return cls._instance
@@ -103,7 +104,7 @@ class Consumer(messaging.Consumer):
if self.failed_connection:
# NOTE(vish): conn is defined in the parent class, we can
# recreate it as long as we create the backend too
- # pylint: disable=W0201
+ # pylint: disable-msg=W0201
self.conn = Connection.recreate()
self.backend = self.conn.create_backend()
super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks)
@@ -113,7 +114,7 @@ class Consumer(messaging.Consumer):
# NOTE(vish): This is catching all errors because we really don't
# exceptions to be logged 10 times a second if some
# persistent failure occurs.
- except Exception: # pylint: disable=W0703
+ except Exception: # pylint: disable-msg=W0703
if not self.failed_connection:
logging.exception("Failed to fetch message from queue")
self.failed_connection = True
@@ -177,7 +178,7 @@ class AdapterConsumer(TopicConsumer):
node_func = getattr(self.proxy, str(method))
node_args = dict((str(k), v) for k, v in args.iteritems())
# NOTE(vish): magic is fun!
- # pylint: disable=W0142
+ # pylint: disable-msg=W0142
d = defer.maybeDeferred(node_func, **node_args)
if msg_id:
d.addCallback(lambda rval: msg_reply(msg_id, rval, None))
diff --git a/nova/server.py b/nova/server.py
index 96550f078..c6b60e090 100644
--- a/nova/server.py
+++ b/nova/server.py
@@ -44,6 +44,8 @@ flags.DEFINE_bool('use_syslog', True, 'output to syslog when daemonizing')
flags.DEFINE_string('logfile', None, 'log file to output to')
flags.DEFINE_string('pidfile', None, 'pid file to output to')
flags.DEFINE_string('working_directory', './', 'working directory...')
+flags.DEFINE_integer('uid', os.getuid(), 'uid under which to run')
+flags.DEFINE_integer('gid', os.getgid(), 'gid under which to run')
def stop(pidfile):
@@ -135,6 +137,8 @@ def daemonize(args, name, main):
threaded=False),
stdin=stdin,
stdout=stdout,
- stderr=stderr
+ stderr=stderr,
+ uid=FLAGS.uid,
+ gid=FLAGS.gid
):
main(args)
diff --git a/nova/service.py b/nova/service.py
index 96281bc6b..a6df7335b 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -28,52 +28,72 @@ from twisted.internet import defer
from twisted.internet import task
from twisted.application import service
-from nova import datastore
+from nova import db
+from nova import exception
from nova import flags
from nova import rpc
-from nova.compute import model
+from nova import utils
FLAGS = flags.FLAGS
-
flags.DEFINE_integer('report_interval', 10,
'seconds between nodes reporting state to cloud',
lower_bound=1)
+
class Service(object, service.Service):
- """Base class for workers that run on hosts"""
+ """Base class for workers that run on hosts."""
+
+ def __init__(self, manager, *args, **kwargs):
+ self.manager = manager
+ self.model_disconnected = False
+ super(Service, self).__init__(*args, **kwargs)
+
+ def __getattr__(self, key):
+ try:
+ return super(Service, self).__getattr__(key)
+ except AttributeError:
+ return getattr(self.manager, key)
@classmethod
def create(cls,
- report_interval=None, # defaults to flag
- bin_name=None, # defaults to basename of executable
- topic=None): # defaults to basename - "nova-" part
- """Instantiates class and passes back application object"""
+ report_interval=None,
+ bin_name=None,
+ topic=None,
+ manager=None):
+ """Instantiates class and passes back application object.
+
+ Args:
+ report_interval, defaults to flag
+ bin_name, defaults to basename of executable
+ topic, defaults to bin_name - "nova-" part
+ manager, defaults to FLAGS.<topic>_manager
+ """
if not report_interval:
- # NOTE(vish): set here because if it is set to flag in the
- # parameter list, it wrongly uses the default
report_interval = FLAGS.report_interval
+
# NOTE(vish): magic to automatically determine bin_name and topic
if not bin_name:
bin_name = os.path.basename(inspect.stack()[-1][1])
if not topic:
topic = bin_name.rpartition("nova-")[2]
- logging.warn("Starting %s node" % topic)
- node_instance = cls()
-
+ if not manager:
+ manager = FLAGS.get('%s_manager' % topic, None)
+ manager_ref = utils.import_object(manager)
+ logging.warn("Starting %s node", topic)
+ service_ref = cls(manager_ref)
conn = rpc.Connection.instance()
consumer_all = rpc.AdapterConsumer(
connection=conn,
topic='%s' % topic,
- proxy=node_instance)
-
+ proxy=service_ref)
consumer_node = rpc.AdapterConsumer(
connection=conn,
- topic='%s.%s' % (topic, FLAGS.node_name),
- proxy=node_instance)
+ topic='%s.%s' % (topic, FLAGS.host),
+ proxy=service_ref)
- pulse = task.LoopingCall(node_instance.report_state,
- FLAGS.node_name,
+ pulse = task.LoopingCall(service_ref.report_state,
+ FLAGS.host,
bin_name)
pulse.start(interval=report_interval, now=False)
@@ -81,22 +101,34 @@ class Service(object, service.Service):
consumer_node.attach_to_twisted()
# This is the parent service that twistd will be looking for when it
- # parses this file, return it so that we can get it into globals below
+ # parses this file, return it so that we can get it into globals.
application = service.Application(bin_name)
- node_instance.setServiceParent(application)
+ service_ref.setServiceParent(application)
return application
@defer.inlineCallbacks
- def report_state(self, nodename, daemon):
- # TODO(termie): make this pattern be more elegant. -todd
+ def report_state(self, host, binary, context=None):
+ """Update the state of this daemon in the datastore."""
try:
- record = model.Daemon(nodename, daemon)
- record.heartbeat()
+ try:
+ daemon_ref = db.daemon_get_by_args(context, host, binary)
+ daemon_id = daemon_ref['id']
+ except exception.NotFound:
+ daemon_id = db.daemon_create(context, {'host': host,
+ 'binary': binary,
+ 'report_count': 0})
+ daemon_ref = db.daemon_get(context, daemon_id)
+ db.daemon_update(context,
+ daemon_id,
+ {'report_count': daemon_ref['report_count'] + 1})
+
+ # TODO(termie): make this pattern be more elegant.
if getattr(self, "model_disconnected", False):
self.model_disconnected = False
logging.error("Recovered model server connection!")
- except datastore.ConnectionError, ex:
+ # TODO(vish): this should probably only catch connection errors
+ except: # pylint: disable-msg=W0702
if not getattr(self, "model_disconnected", False):
self.model_disconnected = True
logging.exception("model server went away")
diff --git a/nova/test.py b/nova/test.py
index c7e08734f..4eb5c1c53 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -22,11 +22,11 @@ Allows overriding of flags for use of fakes,
and some black magic for inline callbacks.
"""
-import mox
-import stubout
import sys
import time
+import mox
+import stubout
from tornado import ioloop
from twisted.internet import defer
from twisted.trial import unittest
@@ -39,6 +39,12 @@ FLAGS = flags.FLAGS
flags.DEFINE_bool('fake_tests', True,
'should we use everything for testing')
+from sqlalchemy import create_engine
+from sqlalchemy.ext.declarative import declarative_base
+
+engine = create_engine('sqlite:///:memory:', echo=True)
+Base = declarative_base()
+Base.metadata.create_all(engine)
def skip_if_fake(func):
"""Decorator that skips a test if running in fake mode"""
@@ -91,7 +97,6 @@ class TrialTestCase(unittest.TestCase):
setattr(FLAGS, k, v)
-
class BaseTestCase(TrialTestCase):
# TODO(jaypipes): Can this be moved into the TrialTestCase class?
"""Base test case class for all unit tests."""
diff --git a/nova/tests/api_unittest.py b/nova/tests/api_unittest.py
index 9d072866c..462d1b295 100644
--- a/nova/tests/api_unittest.py
+++ b/nova/tests/api_unittest.py
@@ -16,6 +16,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+"""Unit tests for the API endpoint"""
+
import boto
from boto.ec2 import regioninfo
import httplib
@@ -38,7 +40,15 @@ FLAGS = flags.FLAGS
# circuit boto calls and feed them into our tornado handlers,
# it's pretty damn circuitous so apologies if you have to fix
# a bug in it
-def boto_to_tornado(method, path, headers, data, host, connection=None):
+# NOTE(jaypipes) The pylint disables here are for R0913 (too many args) which
+# isn't controllable since boto's HTTPRequest needs that many
+# args, and for the version-differentiated import of tornado's
+# httputil.
+# NOTE(jaypipes): The disable-msg=E1101 and E1103 below is because pylint is
+# unable to introspect the deferred's return value properly
+
+def boto_to_tornado(method, path, headers, data, # pylint: disable-msg=R0913
+ host, connection=None):
""" translate boto requests into tornado requests
connection should be a FakeTornadoHttpConnection instance
@@ -46,7 +56,7 @@ def boto_to_tornado(method, path, headers, data, host, connection=None):
try:
headers = httpserver.HTTPHeaders()
except AttributeError:
- from tornado import httputil
+ from tornado import httputil # pylint: disable-msg=E0611
headers = httputil.HTTPHeaders()
for k, v in headers.iteritems():
headers[k] = v
@@ -61,57 +71,64 @@ def boto_to_tornado(method, path, headers, data, host, connection=None):
return req
-def raw_to_httpresponse(s):
- """ translate a raw tornado http response into an httplib.HTTPResponse """
- sock = FakeHttplibSocket(s)
+def raw_to_httpresponse(response_string):
+ """translate a raw tornado http response into an httplib.HTTPResponse"""
+ sock = FakeHttplibSocket(response_string)
resp = httplib.HTTPResponse(sock)
resp.begin()
return resp
class FakeHttplibSocket(object):
- """ a fake socket implementation for httplib.HTTPResponse, trivial """
- def __init__(self, s):
- self.fp = StringIO.StringIO(s)
+ """a fake socket implementation for httplib.HTTPResponse, trivial"""
+ def __init__(self, response_string):
+ self._buffer = StringIO.StringIO(response_string)
- def makefile(self, mode, other):
- return self.fp
+ def makefile(self, _mode, _other):
+ """Returns the socket's internal buffer"""
+ return self._buffer
class FakeTornadoStream(object):
- """ a fake stream to satisfy tornado's assumptions, trivial """
- def set_close_callback(self, f):
+ """a fake stream to satisfy tornado's assumptions, trivial"""
+ def set_close_callback(self, _func):
+ """Dummy callback for stream"""
pass
class FakeTornadoConnection(object):
- """ a fake connection object for tornado to pass to its handlers
+ """A fake connection object for tornado to pass to its handlers
web requests are expected to write to this as they get data and call
finish when they are done with the request, we buffer the writes and
kick off a callback when it is done so that we can feed the result back
into boto.
"""
- def __init__(self, d):
- self.d = d
+ def __init__(self, deferred):
+ self._deferred = deferred
self._buffer = StringIO.StringIO()
def write(self, chunk):
+ """Writes a chunk of data to the internal buffer"""
self._buffer.write(chunk)
def finish(self):
- s = self._buffer.getvalue()
- self.d.callback(s)
+ """Finalizes the connection and returns the buffered data via the
+ deferred callback.
+ """
+ data = self._buffer.getvalue()
+ self._deferred.callback(data)
xheaders = None
@property
- def stream(self):
+ def stream(self): # pylint: disable-msg=R0201
+ """Required property for interfacing with tornado"""
return FakeTornadoStream()
class FakeHttplibConnection(object):
- """ a fake httplib.HTTPConnection for boto to use
+ """A fake httplib.HTTPConnection for boto to use
requests made via this connection actually get translated and routed into
our tornado app, we then wait for the response and turn it back into
@@ -123,7 +140,9 @@ class FakeHttplibConnection(object):
self.deferred = defer.Deferred()
def request(self, method, path, data, headers):
- req = boto_to_tornado
+ """Creates a connection to a fake tornado and sets
+ up a deferred request with the supplied data and
+ headers"""
conn = FakeTornadoConnection(self.deferred)
request = boto_to_tornado(connection=conn,
method=method,
@@ -131,12 +150,16 @@ class FakeHttplibConnection(object):
headers=headers,
data=data,
host=self.host)
- handler = self.app(request)
+ self.app(request)
self.deferred.addCallback(raw_to_httpresponse)
def getresponse(self):
+ """A bit of deferred magic for catching the response
+ from the previously deferred request"""
@defer.inlineCallbacks
def _waiter():
+ """Callback that simply yields the deferred's
+ return value."""
result = yield self.deferred
defer.returnValue(result)
d = _waiter()
@@ -144,14 +167,16 @@ class FakeHttplibConnection(object):
# this deferred has already been called by the time
# we get here, we are going to cheat and return
# the result of the callback
- return d.result
+ return d.result # pylint: disable-msg=E1101
def close(self):
+ """Required for compatibility with boto/tornado"""
pass
class ApiEc2TestCase(test.BaseTestCase):
- def setUp(self):
+ """Unit test for the cloud controller on an EC2 API"""
+ def setUp(self): # pylint: disable-msg=C0103,C0111
super(ApiEc2TestCase, self).setUp()
self.manager = manager.AuthManager()
@@ -171,12 +196,16 @@ class ApiEc2TestCase(test.BaseTestCase):
self.mox.StubOutWithMock(self.ec2, 'new_http_connection')
def expect_http(self, host=None, is_secure=False):
+ """Returns a new EC2 connection"""
http = FakeHttplibConnection(
self.app, '%s:%d' % (self.host, FLAGS.cc_port), False)
+ # pylint: disable-msg=E1103
self.ec2.new_http_connection(host, is_secure).AndReturn(http)
return http
def test_describe_instances(self):
+ """Test that, after creating a user and a project, the describe
+ instances call to the API works properly"""
self.expect_http()
self.mox.ReplayAll()
user = self.manager.create_user('fake', 'fake', 'fake')
@@ -187,14 +216,18 @@ class ApiEc2TestCase(test.BaseTestCase):
def test_get_all_key_pairs(self):
+ """Test that, after creating a user and project and generating
+ a key pair, that the API call to list key pairs works properly"""
self.expect_http()
self.mox.ReplayAll()
- keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd") for x in range(random.randint(4, 8)))
+ keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \
+ for x in range(random.randint(4, 8)))
user = self.manager.create_user('fake', 'fake', 'fake')
project = self.manager.create_project('fake', 'fake', 'fake')
self.manager.generate_key_pair(user.id, keyname)
rv = self.ec2.get_all_key_pairs()
- self.assertTrue(filter(lambda k: k.name == keyname, rv))
+ results = [k for k in rv if k.name == keyname]
+ self.assertEquals(len(results), 1)
self.manager.delete_project(project)
self.manager.delete_user(user)
diff --git a/nova/tests/auth_unittest.py b/nova/tests/auth_unittest.py
index 0b404bfdc..59a81818c 100644
--- a/nova/tests/auth_unittest.py
+++ b/nova/tests/auth_unittest.py
@@ -32,7 +32,6 @@ FLAGS = flags.FLAGS
class AuthTestCase(test.BaseTestCase):
- flush_db = False
def setUp(self):
super(AuthTestCase, self).setUp()
self.flags(connection_type='fake',
diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py
index 3501771cc..e6796e3da 100644
--- a/nova/tests/cloud_unittest.py
+++ b/nova/tests/cloud_unittest.py
@@ -27,8 +27,8 @@ from xml.etree import ElementTree
from nova import flags
from nova import rpc
from nova import test
+from nova import utils
from nova.auth import manager
-from nova.compute import service
from nova.endpoint import api
from nova.endpoint import cloud
@@ -47,13 +47,9 @@ class CloudTestCase(test.BaseTestCase):
# set up our cloud
self.cloud = cloud.CloudController()
- self.cloud_consumer = rpc.AdapterConsumer(connection=self.conn,
- topic=FLAGS.cloud_topic,
- proxy=self.cloud)
- self.injected.append(self.cloud_consumer.attach_to_tornado(self.ioloop))
# set up a service
- self.compute = service.ComputeService()
+ self.compute = utils.import_class(FLAGS.compute_manager)
self.compute_consumer = rpc.AdapterConsumer(connection=self.conn,
topic=FLAGS.compute_topic,
proxy=self.compute)
diff --git a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py
index da0f82e3a..867b572f3 100644
--- a/nova/tests/compute_unittest.py
+++ b/nova/tests/compute_unittest.py
@@ -15,113 +15,96 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-
+"""
+Tests For Compute
+"""
import logging
-import time
+
from twisted.internet import defer
-from xml.etree import ElementTree
+from nova import db
from nova import exception
from nova import flags
from nova import test
from nova import utils
-from nova.compute import model
-from nova.compute import service
+from nova.auth import manager
FLAGS = flags.FLAGS
-class InstanceXmlTestCase(test.TrialTestCase):
- # @defer.inlineCallbacks
- def test_serialization(self):
- # TODO: Reimplement this, it doesn't make sense in redis-land
- return
-
- # instance_id = 'foo'
- # first_node = node.Node()
- # inst = yield first_node.run_instance(instance_id)
- #
- # # force the state so that we can verify that it changes
- # inst._s['state'] = node.Instance.NOSTATE
- # xml = inst.toXml()
- # self.assert_(ElementTree.parse(StringIO.StringIO(xml)))
- #
- # second_node = node.Node()
- # new_inst = node.Instance.fromXml(second_node._conn, pool=second_node._pool, xml=xml)
- # self.assertEqual(new_inst.state, node.Instance.RUNNING)
- # rv = yield first_node.terminate_instance(instance_id)
-
-
-class ComputeConnectionTestCase(test.TrialTestCase):
- def setUp(self):
+class ComputeTestCase(test.TrialTestCase):
+ """Test case for compute"""
+ def setUp(self): # pylint: disable-msg=C0103
logging.getLogger().setLevel(logging.DEBUG)
- super(ComputeConnectionTestCase, self).setUp()
+ super(ComputeTestCase, self).setUp()
self.flags(connection_type='fake',
fake_storage=True)
- self.compute = service.ComputeService()
-
- def create_instance(self):
- instdir = model.InstanceDirectory()
- inst = instdir.new()
- # TODO(ja): add ami, ari, aki, user_data
+ self.compute = utils.import_object(FLAGS.compute_manager)
+ self.manager = manager.AuthManager()
+ self.user = self.manager.create_user('fake', 'fake', 'fake')
+ self.project = self.manager.create_project('fake', 'fake', 'fake')
+ self.context = None
+
+ def tearDown(self): # pylint: disable-msg=C0103
+ self.manager.delete_user(self.user)
+ self.manager.delete_project(self.project)
+
+ def _create_instance(self):
+ """Create a test instance"""
+ inst = {}
+ inst['image_id'] = 'ami-test'
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
- inst['user_id'] = 'fake'
- inst['project_id'] = 'fake'
+ inst['user_id'] = self.user.id
+ inst['project_id'] = self.project.id
inst['instance_type'] = 'm1.tiny'
- inst['node_name'] = FLAGS.node_name
inst['mac_address'] = utils.generate_mac()
inst['ami_launch_index'] = 0
- inst.save()
- return inst['instance_id']
+ return db.instance_create(None, inst)
@defer.inlineCallbacks
- def test_run_describe_terminate(self):
- instance_id = self.create_instance()
+ def test_run_terminate(self):
+ """Make sure it is possible to run and terminate instance"""
+ instance_id = self._create_instance()
- rv = yield self.compute.run_instance(instance_id)
+ yield self.compute.run_instance(self.context, instance_id)
- rv = yield self.compute.describe_instances()
- logging.info("Running instances: %s", rv)
- self.assertEqual(rv[instance_id].name, instance_id)
+ instances = db.instance_get_all(None)
+ logging.info("Running instances: %s", instances)
+ self.assertEqual(len(instances), 1)
- rv = yield self.compute.terminate_instance(instance_id)
+ yield self.compute.terminate_instance(self.context, instance_id)
- rv = yield self.compute.describe_instances()
- logging.info("After terminating instances: %s", rv)
- self.assertEqual(rv, {})
+ instances = db.instance_get_all(None)
+ logging.info("After terminating instances: %s", instances)
+ self.assertEqual(len(instances), 0)
@defer.inlineCallbacks
def test_reboot(self):
- instance_id = self.create_instance()
- rv = yield self.compute.run_instance(instance_id)
-
- rv = yield self.compute.describe_instances()
- self.assertEqual(rv[instance_id].name, instance_id)
-
- yield self.compute.reboot_instance(instance_id)
-
- rv = yield self.compute.describe_instances()
- self.assertEqual(rv[instance_id].name, instance_id)
- rv = yield self.compute.terminate_instance(instance_id)
+ """Ensure instance can be rebooted"""
+ instance_id = self._create_instance()
+ yield self.compute.run_instance(self.context, instance_id)
+ yield self.compute.reboot_instance(self.context, instance_id)
+ yield self.compute.terminate_instance(self.context, instance_id)
@defer.inlineCallbacks
def test_console_output(self):
- instance_id = self.create_instance()
- rv = yield self.compute.run_instance(instance_id)
+ """Make sure we can get console output from instance"""
+ instance_id = self._create_instance()
+ yield self.compute.run_instance(self.context, instance_id)
- console = yield self.compute.get_console_output(instance_id)
+ console = yield self.compute.get_console_output(self.context,
+ instance_id)
self.assert_(console)
- rv = yield self.compute.terminate_instance(instance_id)
+ yield self.compute.terminate_instance(self.context, instance_id)
@defer.inlineCallbacks
def test_run_instance_existing(self):
- instance_id = self.create_instance()
- rv = yield self.compute.run_instance(instance_id)
-
- rv = yield self.compute.describe_instances()
- self.assertEqual(rv[instance_id].name, instance_id)
-
- self.assertRaises(exception.Error, self.compute.run_instance, instance_id)
- rv = yield self.compute.terminate_instance(instance_id)
+ """Ensure failure when running an instance that already exists"""
+ instance_id = self._create_instance()
+ yield self.compute.run_instance(self.context, instance_id)
+ self.assertFailure(self.compute.run_instance(self.context,
+ instance_id),
+ exception.Error)
+ yield self.compute.terminate_instance(self.context, instance_id)
diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py
index a7310fb26..3114912ba 100644
--- a/nova/tests/fake_flags.py
+++ b/nova/tests/fake_flags.py
@@ -20,9 +20,21 @@ from nova import flags
FLAGS = flags.FLAGS
-FLAGS.connection_type = 'fake'
+flags.DECLARE('fake_storage', 'nova.volume.manager')
FLAGS.fake_storage = True
+FLAGS.connection_type = 'fake'
FLAGS.fake_rabbit = True
-FLAGS.fake_network = True
FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
+flags.DECLARE('network_size', 'nova.network.manager')
+flags.DECLARE('num_networks', 'nova.network.manager')
+flags.DECLARE('fake_network', 'nova.network.manager')
+FLAGS.network_size = 16
+FLAGS.num_networks = 5
+FLAGS.fake_network = True
+flags.DECLARE('num_shelves', 'nova.volume.manager')
+flags.DECLARE('blades_per_shelf', 'nova.volume.manager')
+FLAGS.num_shelves = 2
+FLAGS.blades_per_shelf = 4
FLAGS.verbose = True
+FLAGS.sql_connection = 'sqlite:///nova.sqlite'
+#FLAGS.sql_connection = 'mysql://root@localhost/test'
diff --git a/nova/tests/model_unittest.py b/nova/tests/model_unittest.py
index dc2441c24..130516c66 100644
--- a/nova/tests/model_unittest.py
+++ b/nova/tests/model_unittest.py
@@ -108,14 +108,14 @@ class ModelTestCase(test.TrialTestCase):
self.assertEqual(x.identifier, 'i-test')
def test_instance_associates_node(self):
- """create, then check that it is listed for the node_name"""
+ """create, then check that it is listed for the host"""
instance = self.create_instance()
found = False
- for x in model.InstanceDirectory().by_node(FLAGS.node_name):
+ for x in model.InstanceDirectory().by_node(FLAGS.host):
if x.identifier == 'i-test':
found = True
self.assertFalse(found)
- instance['node_name'] = 'test_node'
+ instance['host'] = 'test_node'
instance.save()
for x in model.InstanceDirectory().by_node('test_node'):
if x.identifier == 'i-test':
diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py
index 039509809..f3124c1ba 100644
--- a/nova/tests/network_unittest.py
+++ b/nova/tests/network_unittest.py
@@ -22,21 +22,19 @@ import IPy
import os
import logging
+from nova import db
+from nova import exception
from nova import flags
from nova import test
from nova import utils
from nova.auth import manager
-from nova.network import model
-from nova.network import service
-from nova.network import vpn
-from nova.network.exception import NoMoreAddresses
FLAGS = flags.FLAGS
class NetworkTestCase(test.TrialTestCase):
"""Test cases for network code"""
- def setUp(self): # pylint: disable=C0103
+ def setUp(self): # pylint: disable-msg=C0103
super(NetworkTestCase, self).setUp()
# NOTE(vish): if you change these flags, make sure to change the
# flags in the corresponding section in nova-dhcpbridge
@@ -44,167 +42,164 @@ class NetworkTestCase(test.TrialTestCase):
fake_storage=True,
fake_network=True,
auth_driver='nova.auth.ldapdriver.FakeLdapDriver',
- network_size=32)
+ network_size=16,
+ num_networks=5)
logging.getLogger().setLevel(logging.DEBUG)
self.manager = manager.AuthManager()
self.user = self.manager.create_user('netuser', 'netuser', 'netuser')
self.projects = []
- self.projects.append(self.manager.create_project('netuser',
- 'netuser',
- 'netuser'))
- for i in range(0, 6):
+ self.network = utils.import_object(FLAGS.network_manager)
+ self.context = None
+ for i in range(5):
name = 'project%s' % i
self.projects.append(self.manager.create_project(name,
'netuser',
name))
- vpn.NetworkData.create(self.projects[i].id)
- self.service = service.VlanNetworkService()
-
- def tearDown(self): # pylint: disable=C0103
+ # create the necessary network data for the project
+ self.network.set_network_host(self.context, self.projects[i].id)
+ instance_id = db.instance_create(None,
+ {'mac_address': utils.generate_mac()})
+ self.instance_id = instance_id
+ instance_id = db.instance_create(None,
+ {'mac_address': utils.generate_mac()})
+ self.instance2_id = instance_id
+
+ def tearDown(self): # pylint: disable-msg=C0103
super(NetworkTestCase, self).tearDown()
+ # TODO(termie): this should really be instantiating clean datastores
+ # in between runs, one failure kills all the tests
+ db.instance_destroy(None, self.instance_id)
+ db.instance_destroy(None, self.instance2_id)
for project in self.projects:
self.manager.delete_project(project)
self.manager.delete_user(self.user)
- def test_public_network_allocation(self):
+ def _create_address(self, project_num, instance_id=None):
+ """Create an address in given project num"""
+ net = db.project_get_network(None, self.projects[project_num].id)
+ address = db.fixed_ip_allocate(None, net['id'])
+ if instance_id is None:
+ instance_id = self.instance_id
+ db.fixed_ip_instance_associate(None, address, instance_id)
+ return address
+
+ def test_public_network_association(self):
"""Makes sure that we can allocaate a public ip"""
+ # TODO(vish): better way of adding floating ips
pubnet = IPy.IP(flags.FLAGS.public_range)
- address = self.service.allocate_elastic_ip(self.user.id,
- self.projects[0].id)
- self.assertTrue(IPy.IP(address) in pubnet)
+ ip_str = str(pubnet[0])
+ try:
+ db.floating_ip_get_by_address(None, ip_str)
+ except exception.NotFound:
+ db.floating_ip_create(None, ip_str, FLAGS.host)
+ float_addr = self.network.allocate_floating_ip(self.context,
+ self.projects[0].id)
+ fix_addr = self._create_address(0)
+ self.assertEqual(float_addr, str(pubnet[0]))
+ self.network.associate_floating_ip(self.context, float_addr, fix_addr)
+ address = db.instance_get_floating_address(None, self.instance_id)
+ self.assertEqual(address, float_addr)
+ self.network.disassociate_floating_ip(self.context, float_addr)
+ address = db.instance_get_floating_address(None, self.instance_id)
+ self.assertEqual(address, None)
+ self.network.deallocate_floating_ip(self.context, float_addr)
+ db.fixed_ip_deallocate(None, fix_addr)
def test_allocate_deallocate_fixed_ip(self):
"""Makes sure that we can allocate and deallocate a fixed ip"""
- result = self.service.allocate_fixed_ip(
- self.user.id, self.projects[0].id)
- address = result['private_dns_name']
- mac = result['mac_address']
- net = model.get_project_network(self.projects[0].id, "default")
- self.assertEqual(True, is_in_project(address, self.projects[0].id))
- hostname = "test-host"
- issue_ip(mac, address, hostname, net.bridge_name)
- self.service.deallocate_fixed_ip(address)
+ address = self._create_address(0)
+ self.assertTrue(is_allocated_in_project(address, self.projects[0].id))
+ lease_ip(address)
+ db.fixed_ip_deallocate(None, address)
# Doesn't go away until it's dhcp released
- self.assertEqual(True, is_in_project(address, self.projects[0].id))
+ self.assertTrue(is_allocated_in_project(address, self.projects[0].id))
- release_ip(mac, address, hostname, net.bridge_name)
- self.assertEqual(False, is_in_project(address, self.projects[0].id))
+ release_ip(address)
+ self.assertFalse(is_allocated_in_project(address, self.projects[0].id))
def test_side_effects(self):
"""Ensures allocating and releasing has no side effects"""
- hostname = "side-effect-host"
- result = self.service.allocate_fixed_ip(self.user.id,
- self.projects[0].id)
- mac = result['mac_address']
- address = result['private_dns_name']
- result = self.service.allocate_fixed_ip(self.user,
- self.projects[1].id)
- secondmac = result['mac_address']
- secondaddress = result['private_dns_name']
-
- net = model.get_project_network(self.projects[0].id, "default")
- secondnet = model.get_project_network(self.projects[1].id, "default")
-
- self.assertEqual(True, is_in_project(address, self.projects[0].id))
- self.assertEqual(True, is_in_project(secondaddress,
- self.projects[1].id))
- self.assertEqual(False, is_in_project(address, self.projects[1].id))
+ address = self._create_address(0)
+ address2 = self._create_address(1, self.instance2_id)
+
+ self.assertTrue(is_allocated_in_project(address, self.projects[0].id))
+ self.assertTrue(is_allocated_in_project(address2, self.projects[1].id))
+ self.assertFalse(is_allocated_in_project(address, self.projects[1].id))
# Addresses are allocated before they're issued
- issue_ip(mac, address, hostname, net.bridge_name)
- issue_ip(secondmac, secondaddress, hostname, secondnet.bridge_name)
+ lease_ip(address)
+ lease_ip(address2)
- self.service.deallocate_fixed_ip(address)
- release_ip(mac, address, hostname, net.bridge_name)
- self.assertEqual(False, is_in_project(address, self.projects[0].id))
+ db.fixed_ip_deallocate(None, address)
+ release_ip(address)
+ self.assertFalse(is_allocated_in_project(address, self.projects[0].id))
# First address release shouldn't affect the second
- self.assertEqual(True, is_in_project(secondaddress,
- self.projects[1].id))
+ self.assertTrue(is_allocated_in_project(address2, self.projects[1].id))
- self.service.deallocate_fixed_ip(secondaddress)
- release_ip(secondmac, secondaddress, hostname, secondnet.bridge_name)
- self.assertEqual(False, is_in_project(secondaddress,
- self.projects[1].id))
+ db.fixed_ip_deallocate(None, address2)
+ release_ip(address2)
+ self.assertFalse(is_allocated_in_project(address2,
+ self.projects[1].id))
def test_subnet_edge(self):
"""Makes sure that private ips don't overlap"""
- result = self.service.allocate_fixed_ip(self.user.id,
- self.projects[0].id)
- firstaddress = result['private_dns_name']
- hostname = "toomany-hosts"
+ first = self._create_address(0)
+ lease_ip(first)
for i in range(1, 5):
- project_id = self.projects[i].id
- result = self.service.allocate_fixed_ip(
- self.user, project_id)
- mac = result['mac_address']
- address = result['private_dns_name']
- result = self.service.allocate_fixed_ip(
- self.user, project_id)
- mac2 = result['mac_address']
- address2 = result['private_dns_name']
- result = self.service.allocate_fixed_ip(
- self.user, project_id)
- mac3 = result['mac_address']
- address3 = result['private_dns_name']
- net = model.get_project_network(project_id, "default")
- issue_ip(mac, address, hostname, net.bridge_name)
- issue_ip(mac2, address2, hostname, net.bridge_name)
- issue_ip(mac3, address3, hostname, net.bridge_name)
- self.assertEqual(False, is_in_project(address,
- self.projects[0].id))
- self.assertEqual(False, is_in_project(address2,
- self.projects[0].id))
- self.assertEqual(False, is_in_project(address3,
- self.projects[0].id))
- self.service.deallocate_fixed_ip(address)
- self.service.deallocate_fixed_ip(address2)
- self.service.deallocate_fixed_ip(address3)
- release_ip(mac, address, hostname, net.bridge_name)
- release_ip(mac2, address2, hostname, net.bridge_name)
- release_ip(mac3, address3, hostname, net.bridge_name)
- net = model.get_project_network(self.projects[0].id, "default")
- self.service.deallocate_fixed_ip(firstaddress)
- release_ip(mac, firstaddress, hostname, net.bridge_name)
+ address = self._create_address(i)
+ address2 = self._create_address(i)
+ address3 = self._create_address(i)
+ lease_ip(address)
+ lease_ip(address2)
+ lease_ip(address3)
+ self.assertFalse(is_allocated_in_project(address,
+ self.projects[0].id))
+ self.assertFalse(is_allocated_in_project(address2,
+ self.projects[0].id))
+ self.assertFalse(is_allocated_in_project(address3,
+ self.projects[0].id))
+ db.fixed_ip_deallocate(None, address)
+ db.fixed_ip_deallocate(None, address2)
+ db.fixed_ip_deallocate(None, address3)
+ release_ip(address)
+ release_ip(address2)
+ release_ip(address3)
+ release_ip(first)
+ db.fixed_ip_deallocate(None, first)
def test_vpn_ip_and_port_looks_valid(self):
"""Ensure the vpn ip and port are reasonable"""
self.assert_(self.projects[0].vpn_ip)
- self.assert_(self.projects[0].vpn_port >= FLAGS.vpn_start_port)
- self.assert_(self.projects[0].vpn_port <= FLAGS.vpn_end_port)
-
- def test_too_many_vpns(self):
- """Ensure error is raised if we run out of vpn ports"""
- vpns = []
- for i in xrange(vpn.NetworkData.num_ports_for_ip(FLAGS.vpn_ip)):
- vpns.append(vpn.NetworkData.create("vpnuser%s" % i))
- self.assertRaises(vpn.NoMorePorts, vpn.NetworkData.create, "boom")
- for network_datum in vpns:
- network_datum.destroy()
+ self.assert_(self.projects[0].vpn_port >= FLAGS.vpn_start)
+ self.assert_(self.projects[0].vpn_port <= FLAGS.vpn_start +
+ FLAGS.num_networks)
+
+ def test_too_many_networks(self):
+ """Ensure error is raised if we run out of networks"""
+ projects = []
+ networks_left = FLAGS.num_networks - db.network_count(None)
+ for i in range(networks_left):
+ project = self.manager.create_project('many%s' % i, self.user)
+ projects.append(project)
+ self.assertRaises(db.NoMoreNetworks,
+ self.manager.create_project,
+ 'boom',
+ self.user)
+ for project in projects:
+ self.manager.delete_project(project)
def test_ips_are_reused(self):
"""Makes sure that ip addresses that are deallocated get reused"""
- result = self.service.allocate_fixed_ip(
- self.user.id, self.projects[0].id)
- mac = result['mac_address']
- address = result['private_dns_name']
-
- hostname = "reuse-host"
- net = model.get_project_network(self.projects[0].id, "default")
-
- issue_ip(mac, address, hostname, net.bridge_name)
- self.service.deallocate_fixed_ip(address)
- release_ip(mac, address, hostname, net.bridge_name)
-
- result = self.service.allocate_fixed_ip(
- self.user, self.projects[0].id)
- secondmac = result['mac_address']
- secondaddress = result['private_dns_name']
- self.assertEqual(address, secondaddress)
- issue_ip(secondmac, secondaddress, hostname, net.bridge_name)
- self.service.deallocate_fixed_ip(secondaddress)
- release_ip(secondmac, secondaddress, hostname, net.bridge_name)
+ address = self._create_address(0)
+ lease_ip(address)
+ db.fixed_ip_deallocate(None, address)
+ release_ip(address)
+
+ address2 = self._create_address(0)
+ self.assertEqual(address, address2)
+ db.fixed_ip_deallocate(None, address2)
def test_available_ips(self):
"""Make sure the number of available ips for the network is correct
@@ -217,44 +212,47 @@ class NetworkTestCase(test.TrialTestCase):
There are ips reserved at the bottom and top of the range.
services (network, gateway, CloudPipe, broadcast)
"""
- net = model.get_project_network(self.projects[0].id, "default")
- num_preallocated_ips = len(net.assigned)
+ network = db.project_get_network(None, self.projects[0].id)
net_size = flags.FLAGS.network_size
- num_available_ips = net_size - (net.num_bottom_reserved_ips +
- num_preallocated_ips +
- net.num_top_reserved_ips)
- self.assertEqual(num_available_ips, len(list(net.available)))
+ total_ips = (db.network_count_available_ips(None, network['id']) +
+ db.network_count_reserved_ips(None, network['id']) +
+ db.network_count_allocated_ips(None, network['id']))
+ self.assertEqual(total_ips, net_size)
def test_too_many_addresses(self):
"""Test for a NoMoreAddresses exception when all fixed ips are used.
"""
- net = model.get_project_network(self.projects[0].id, "default")
-
- hostname = "toomany-hosts"
- macs = {}
- addresses = {}
- # Number of availaible ips is len of the available list
- num_available_ips = len(list(net.available))
+ network = db.project_get_network(None, self.projects[0].id)
+ num_available_ips = db.network_count_available_ips(None,
+ network['id'])
+ addresses = []
for i in range(num_available_ips):
- result = self.service.allocate_fixed_ip(self.user.id,
- self.projects[0].id)
- macs[i] = result['mac_address']
- addresses[i] = result['private_dns_name']
- issue_ip(macs[i], addresses[i], hostname, net.bridge_name)
+ address = self._create_address(0)
+ addresses.append(address)
+ lease_ip(address)
- self.assertEqual(len(list(net.available)), 0)
- self.assertRaises(NoMoreAddresses, self.service.allocate_fixed_ip,
- self.user.id, self.projects[0].id)
+ self.assertEqual(db.network_count_available_ips(None,
+ network['id']), 0)
+ self.assertRaises(db.NoMoreAddresses,
+ db.fixed_ip_allocate,
+ None,
+ network['id'])
for i in range(len(addresses)):
- self.service.deallocate_fixed_ip(addresses[i])
- release_ip(macs[i], addresses[i], hostname, net.bridge_name)
- self.assertEqual(len(list(net.available)), num_available_ips)
+ db.fixed_ip_deallocate(None, addresses[i])
+ release_ip(addresses[i])
+ self.assertEqual(db.network_count_available_ips(None,
+ network['id']),
+ num_available_ips)
-def is_in_project(address, project_id):
+def is_allocated_in_project(address, project_id):
"""Returns true if address is in specified project"""
- return address in model.get_project_network(project_id).assigned
+ project_net = db.project_get_network(None, project_id)
+ network = db.fixed_ip_get_network(None, address)
+ instance = db.fixed_ip_get_instance(None, address)
+ # instance exists until release
+ return instance is not None and network['id'] == project_net['id']
def binpath(script):
@@ -262,22 +260,22 @@ def binpath(script):
return os.path.abspath(os.path.join(__file__, "../../../bin", script))
-def issue_ip(mac, private_ip, hostname, interface):
+def lease_ip(private_ip):
"""Run add command on dhcpbridge"""
- cmd = "%s add %s %s %s" % (binpath('nova-dhcpbridge'),
- mac, private_ip, hostname)
- env = {'DNSMASQ_INTERFACE': interface,
+ network_ref = db.fixed_ip_get_network(None, private_ip)
+ cmd = "%s add fake %s fake" % (binpath('nova-dhcpbridge'), private_ip)
+ env = {'DNSMASQ_INTERFACE': network_ref['bridge'],
'TESTING': '1',
'FLAGFILE': FLAGS.dhcpbridge_flagfile}
(out, err) = utils.execute(cmd, addl_env=env)
logging.debug("ISSUE_IP: %s, %s ", out, err)
-def release_ip(mac, private_ip, hostname, interface):
+def release_ip(private_ip):
"""Run del command on dhcpbridge"""
- cmd = "%s del %s %s %s" % (binpath('nova-dhcpbridge'),
- mac, private_ip, hostname)
- env = {'DNSMASQ_INTERFACE': interface,
+ network_ref = db.fixed_ip_get_network(None, private_ip)
+ cmd = "%s del fake %s fake" % (binpath('nova-dhcpbridge'), private_ip)
+ env = {'DNSMASQ_INTERFACE': network_ref['bridge'],
'TESTING': '1',
'FLAGFILE': FLAGS.dhcpbridge_flagfile}
(out, err) = utils.execute(cmd, addl_env=env)
diff --git a/nova/tests/process_unittest.py b/nova/tests/process_unittest.py
index 75187e1fc..25c60c616 100644
--- a/nova/tests/process_unittest.py
+++ b/nova/tests/process_unittest.py
@@ -48,7 +48,7 @@ class ProcessTestCase(test.TrialTestCase):
def test_execute_stderr(self):
pool = process.ProcessPool(2)
- d = pool.simple_execute('cat BAD_FILE', error_ok=1)
+ d = pool.simple_execute('cat BAD_FILE', check_exit_code=False)
def _check(rv):
self.assertEqual(rv[0], '')
self.assert_('No such file' in rv[1])
diff --git a/nova/tests/rpc_unittest.py b/nova/tests/rpc_unittest.py
index 764a97416..e12a28fbc 100644
--- a/nova/tests/rpc_unittest.py
+++ b/nova/tests/rpc_unittest.py
@@ -32,7 +32,7 @@ FLAGS = flags.FLAGS
class RpcTestCase(test.BaseTestCase):
"""Test cases for rpc"""
- def setUp(self): # pylint: disable=C0103
+ def setUp(self): # pylint: disable-msg=C0103
super(RpcTestCase, self).setUp()
self.conn = rpc.Connection.instance()
self.receiver = TestReceiver()
diff --git a/nova/tests/service_unittest.py b/nova/tests/service_unittest.py
new file mode 100644
index 000000000..318abe645
--- /dev/null
+++ b/nova/tests/service_unittest.py
@@ -0,0 +1,166 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Unit Tests for remote procedure calls using queue
+"""
+
+import mox
+
+from nova import exception
+from nova import flags
+from nova import rpc
+from nova import test
+from nova import service
+from nova import manager
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string("fake_manager", "nova.tests.service_unittest.FakeManager",
+ "Manager for testing")
+
+
+class FakeManager(manager.Manager):
+ """Fake manager for tests"""
+ pass
+
+
+class ServiceTestCase(test.BaseTestCase):
+ """Test cases for the service module"""
+
+ def setUp(self): # pylint: disable-msg=C0103
+ super(ServiceTestCase, self).setUp()
+ self.mox.StubOutWithMock(service, 'db')
+
+ def test_create(self):
+ self.mox.StubOutWithMock(rpc,
+ 'AdapterConsumer',
+ use_mock_anything=True)
+ self.mox.StubOutWithMock(
+ service.task, 'LoopingCall', use_mock_anything=True)
+ rpc.AdapterConsumer(connection=mox.IgnoreArg(),
+ topic='fake',
+ proxy=mox.IsA(service.Service)).AndReturn(
+ rpc.AdapterConsumer)
+
+ rpc.AdapterConsumer(connection=mox.IgnoreArg(),
+ topic='fake.%s' % FLAGS.host,
+ proxy=mox.IsA(service.Service)).AndReturn(
+ rpc.AdapterConsumer)
+
+ # Stub out looping call a bit needlessly since we don't have an easy
+ # way to cancel it (yet) when the test finishes
+ service.task.LoopingCall(
+ mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
+ service.task.LoopingCall)
+ service.task.LoopingCall.start(interval=mox.IgnoreArg(),
+ now=mox.IgnoreArg())
+
+ rpc.AdapterConsumer.attach_to_twisted()
+ rpc.AdapterConsumer.attach_to_twisted()
+ self.mox.ReplayAll()
+
+ app = service.Service.create(bin_name='nova-fake')
+ self.assert_(app)
+
+ # We're testing sort of weird behavior in how report_state decides
+ # whether it is disconnected: it looks for a variable on itself called
+ # 'model_disconnected', and report_state doesn't really do much, so
+ # these are mostly just for coverage
+ def test_report_state(self):
+ host = 'foo'
+ binary = 'bar'
+ daemon_ref = {'host': host,
+ 'binary': binary,
+ 'report_count': 0,
+ 'id': 1}
+ service.db.__getattr__('report_state')
+ service.db.daemon_get_by_args(None,
+ host,
+ binary).AndReturn(daemon_ref)
+ service.db.daemon_update(None, daemon_ref['id'],
+ mox.ContainsKeyValue('report_count', 1))
+
+ self.mox.ReplayAll()
+ s = service.Service()
+ rv = yield s.report_state(host, binary)
+
+ def test_report_state_no_daemon(self):
+ host = 'foo'
+ binary = 'bar'
+ daemon_create = {'host': host,
+ 'binary': binary,
+ 'report_count': 0}
+ daemon_ref = {'host': host,
+ 'binary': binary,
+ 'report_count': 0,
+ 'id': 1}
+
+ service.db.__getattr__('report_state')
+ service.db.daemon_get_by_args(None,
+ host,
+ binary).AndRaise(exception.NotFound())
+ service.db.daemon_create(None,
+ daemon_create).AndReturn(daemon_ref['id'])
+ service.db.daemon_get(None, daemon_ref['id']).AndReturn(daemon_ref)
+ service.db.daemon_update(None, daemon_ref['id'],
+ mox.ContainsKeyValue('report_count', 1))
+
+ self.mox.ReplayAll()
+ s = service.Service()
+ rv = yield s.report_state(host, binary)
+
+ def test_report_state_newly_disconnected(self):
+ host = 'foo'
+ binary = 'bar'
+ daemon_ref = {'host': host,
+ 'binary': binary,
+ 'report_count': 0,
+ 'id': 1}
+
+ service.db.__getattr__('report_state')
+ service.db.daemon_get_by_args(None,
+ host,
+ binary).AndRaise(Exception())
+
+ self.mox.ReplayAll()
+ s = service.Service()
+ rv = yield s.report_state(host, binary)
+
+ self.assert_(s.model_disconnected)
+
+ def test_report_state_newly_connected(self):
+ host = 'foo'
+ binary = 'bar'
+ daemon_ref = {'host': host,
+ 'binary': binary,
+ 'report_count': 0,
+ 'id': 1}
+
+ service.db.__getattr__('report_state')
+ service.db.daemon_get_by_args(None,
+ host,
+ binary).AndReturn(daemon_ref)
+ service.db.daemon_update(None, daemon_ref['id'],
+ mox.ContainsKeyValue('report_count', 1))
+
+ self.mox.ReplayAll()
+ s = service.Service()
+ s.model_disconnected = True
+ rv = yield s.report_state(host, binary)
+
+ self.assert_(not s.model_disconnected)
diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py
index 2a07afe69..0df0c20d6 100644
--- a/nova/tests/volume_unittest.py
+++ b/nova/tests/volume_unittest.py
@@ -15,139 +15,149 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-
+"""
+Tests for Volume Code
+"""
import logging
-import shutil
-import tempfile
from twisted.internet import defer
-from nova import compute
from nova import exception
+from nova import db
from nova import flags
from nova import test
-from nova.volume import service as volume_service
-
+from nova import utils
FLAGS = flags.FLAGS
class VolumeTestCase(test.TrialTestCase):
- def setUp(self):
+ """Test Case for volumes"""
+ def setUp(self): # pylint: disable-msg=C0103
logging.getLogger().setLevel(logging.DEBUG)
super(VolumeTestCase, self).setUp()
- self.compute = compute.service.ComputeService()
- self.volume = None
- self.tempdir = tempfile.mkdtemp()
+ self.compute = utils.import_object(FLAGS.compute_manager)
self.flags(connection_type='fake',
- fake_storage=True,
- aoe_export_dir=self.tempdir)
- self.volume = volume_service.VolumeService()
-
- def tearDown(self):
- shutil.rmtree(self.tempdir)
+ fake_storage=True)
+ self.volume = utils.import_object(FLAGS.volume_manager)
+ self.context = None
+
+ @staticmethod
+ def _create_volume(size='0'):
+ """Create a volume object"""
+ vol = {}
+ vol['size'] = size
+ vol['user_id'] = 'fake'
+ vol['project_id'] = 'fake'
+ vol['availability_zone'] = FLAGS.storage_availability_zone
+ vol['status'] = "creating"
+ vol['attach_status'] = "detached"
+ return db.volume_create(None, vol)['id']
@defer.inlineCallbacks
- def test_run_create_volume(self):
- vol_size = '0'
- user_id = 'fake'
- project_id = 'fake'
- volume_id = yield self.volume.create_volume(vol_size, user_id, project_id)
- # TODO(termie): get_volume returns differently than create_volume
- self.assertEqual(volume_id,
- volume_service.get_volume(volume_id)['volume_id'])
-
- rv = self.volume.delete_volume(volume_id)
- self.assertRaises(exception.Error, volume_service.get_volume, volume_id)
+ def test_create_delete_volume(self):
+ """Test volume can be created and deleted"""
+ volume_id = self._create_volume()
+ yield self.volume.create_volume(self.context, volume_id)
+ self.assertEqual(volume_id, db.volume_get(None, volume_id).id)
+
+ yield self.volume.delete_volume(self.context, volume_id)
+ self.assertRaises(exception.NotFound,
+ db.volume_get,
+ None,
+ volume_id)
@defer.inlineCallbacks
def test_too_big_volume(self):
- vol_size = '1001'
- user_id = 'fake'
- project_id = 'fake'
+ """Ensure failure if a too large of a volume is requested"""
+ # FIXME(vish): validation needs to move into the data layer in
+ # volume_create
+ defer.returnValue(True)
try:
- yield self.volume.create_volume(vol_size, user_id, project_id)
+ volume_id = self._create_volume('1001')
+ yield self.volume.create_volume(self.context, volume_id)
self.fail("Should have thrown TypeError")
except TypeError:
pass
@defer.inlineCallbacks
def test_too_many_volumes(self):
- vol_size = '1'
- user_id = 'fake'
- project_id = 'fake'
- num_shelves = FLAGS.last_shelf_id - FLAGS.first_shelf_id + 1
- total_slots = FLAGS.blades_per_shelf * num_shelves
+ """Ensure that NoMoreBlades is raised when we run out of volumes"""
vols = []
- from nova import datastore
- redis = datastore.Redis.instance()
- for i in xrange(total_slots):
- vid = yield self.volume.create_volume(vol_size, user_id, project_id)
- vols.append(vid)
- self.assertFailure(self.volume.create_volume(vol_size,
- user_id,
- project_id),
- volume_service.NoMoreBlades)
- for id in vols:
- yield self.volume.delete_volume(id)
+ total_slots = FLAGS.num_shelves * FLAGS.blades_per_shelf
+ for _index in xrange(total_slots):
+ volume_id = self._create_volume()
+ yield self.volume.create_volume(self.context, volume_id)
+ vols.append(volume_id)
+ volume_id = self._create_volume()
+ self.assertFailure(self.volume.create_volume(self.context,
+ volume_id),
+ db.NoMoreBlades)
+ db.volume_destroy(None, volume_id)
+ for volume_id in vols:
+ yield self.volume.delete_volume(self.context, volume_id)
@defer.inlineCallbacks
def test_run_attach_detach_volume(self):
- # Create one volume and one compute to test with
+ """Make sure volume can be attached and detached from instance"""
instance_id = "storage-test"
- vol_size = "5"
- user_id = "fake"
- project_id = 'fake'
mountpoint = "/dev/sdf"
- volume_id = yield self.volume.create_volume(vol_size, user_id, project_id)
- volume_obj = volume_service.get_volume(volume_id)
- volume_obj.start_attach(instance_id, mountpoint)
+ volume_id = self._create_volume()
+ yield self.volume.create_volume(self.context, volume_id)
if FLAGS.fake_tests:
- volume_obj.finish_attach()
+ db.volume_attached(None, volume_id, instance_id, mountpoint)
else:
- rv = yield self.compute.attach_volume(instance_id,
- volume_id,
- mountpoint)
- self.assertEqual(volume_obj['status'], "in-use")
- self.assertEqual(volume_obj['attach_status'], "attached")
- self.assertEqual(volume_obj['instance_id'], instance_id)
- self.assertEqual(volume_obj['mountpoint'], mountpoint)
-
- self.assertFailure(self.volume.delete_volume(volume_id), exception.Error)
- volume_obj.start_detach()
+ yield self.compute.attach_volume(instance_id,
+ volume_id,
+ mountpoint)
+ vol = db.volume_get(None, volume_id)
+ self.assertEqual(vol['status'], "in-use")
+ self.assertEqual(vol['attach_status'], "attached")
+ self.assertEqual(vol['instance_id'], instance_id)
+ self.assertEqual(vol['mountpoint'], mountpoint)
+
+ self.assertFailure(self.volume.delete_volume(self.context, volume_id),
+ exception.Error)
if FLAGS.fake_tests:
- volume_obj.finish_detach()
+ db.volume_detached(None, volume_id)
else:
- rv = yield self.volume.detach_volume(instance_id,
- volume_id)
- volume_obj = volume_service.get_volume(volume_id)
- self.assertEqual(volume_obj['status'], "available")
+ yield self.compute.detach_volume(instance_id,
+ volume_id)
+ vol = db.volume_get(None, volume_id)
+ self.assertEqual(vol['status'], "available")
- rv = self.volume.delete_volume(volume_id)
+ yield self.volume.delete_volume(self.context, volume_id)
self.assertRaises(exception.Error,
- volume_service.get_volume,
+ db.volume_get,
+ None,
volume_id)
@defer.inlineCallbacks
- def test_multiple_volume_race_condition(self):
- vol_size = "5"
- user_id = "fake"
- project_id = 'fake'
+ def test_concurrent_volumes_get_different_blades(self):
+ """Ensure multiple concurrent volumes get different blades"""
+ volume_ids = []
shelf_blades = []
+
def _check(volume_id):
- vol = volume_service.get_volume(volume_id)
- shelf_blade = '%s.%s' % (vol['shelf_id'], vol['blade_id'])
+ """Make sure blades aren't duplicated"""
+ volume_ids.append(volume_id)
+ (shelf_id, blade_id) = db.volume_get_shelf_and_blade(None,
+ volume_id)
+ shelf_blade = '%s.%s' % (shelf_id, blade_id)
self.assert_(shelf_blade not in shelf_blades)
shelf_blades.append(shelf_blade)
- logging.debug("got %s" % shelf_blade)
- vol.destroy()
+ logging.debug("Blade %s allocated", shelf_blade)
deferreds = []
- for i in range(5):
- d = self.volume.create_volume(vol_size, user_id, project_id)
+ total_slots = FLAGS.num_shelves * FLAGS.blades_per_shelf
+ for _index in xrange(total_slots):
+ volume_id = self._create_volume()
+ d = self.volume.create_volume(self.context, volume_id)
d.addCallback(_check)
d.addErrback(self.fail)
deferreds.append(d)
yield defer.DeferredList(deferreds)
+ for volume_id in volume_ids:
+ self.volume.delete_volume(self.context, volume_id)
def test_multi_node(self):
# TODO(termie): Figure out how to test with two nodes,
diff --git a/nova/twistd.py b/nova/twistd.py
index 8de322aa5..9511c231c 100644
--- a/nova/twistd.py
+++ b/nova/twistd.py
@@ -21,6 +21,7 @@ Twisted daemon helpers, specifically to parse out gFlags from twisted flags,
manage pid files and support syslogging.
"""
+import gflags
import logging
import os
import signal
@@ -49,6 +50,14 @@ class TwistdServerOptions(ServerOptions):
return
+class FlagParser(object):
+ def __init__(self, parser):
+ self.parser = parser
+
+ def Parse(self, s):
+ return self.parser(s)
+
+
def WrapTwistedOptions(wrapped):
class TwistedOptionsToFlags(wrapped):
subCommands = None
@@ -79,7 +88,12 @@ def WrapTwistedOptions(wrapped):
reflect.accumulateClassList(self.__class__, 'optParameters', twistd_params)
for param in twistd_params:
key = param[0].replace('-', '_')
- flags.DEFINE_string(key, param[2], str(param[-1]))
+ if len(param) > 4:
+ flags.DEFINE(FlagParser(param[4]),
+ key, param[2], str(param[3]),
+ serializer=gflags.ArgumentSerializer())
+ else:
+ flags.DEFINE_string(key, param[2], str(param[3]))
def _absorbHandlers(self):
twistd_handlers = {}
diff --git a/nova/utils.py b/nova/utils.py
index 63db080f1..536d722bb 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -20,7 +20,7 @@
System-level utilities and helper functions.
"""
-from datetime import datetime, timedelta
+import datetime
import inspect
import logging
import os
@@ -29,12 +29,16 @@ import subprocess
import socket
import sys
+from twisted.internet.threads import deferToThread
+
from nova import exception
from nova import flags
+
FLAGS = flags.FLAGS
TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
+
def import_class(import_str):
"""Returns a class from a string including module and class"""
mod_str, _sep, class_str = import_str.rpartition('.')
@@ -44,6 +48,15 @@ def import_class(import_str):
except (ImportError, ValueError, AttributeError):
raise exception.NotFound('Class %s cannot be found' % class_str)
+def import_object(import_str):
+ """Returns an object including a module or module and class"""
+ try:
+ __import__(import_str)
+ return sys.modules[import_str]
+ except ImportError:
+ cls = import_class(import_str)
+ return cls()
+
def fetchfile(url, target):
logging.debug("Fetching %s" % url)
# c = pycurl.Curl()
@@ -53,22 +66,25 @@ def fetchfile(url, target):
# c.perform()
# c.close()
# fp.close()
- execute("curl %s -o %s" % (url, target))
+ execute("curl --fail %s -o %s" % (url, target))
-def execute(cmd, input=None, addl_env=None):
+def execute(cmd, process_input=None, addl_env=None, check_exit_code=True):
env = os.environ.copy()
if addl_env:
env.update(addl_env)
obj = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
result = None
- if input != None:
- result = obj.communicate(input)
+ if process_input != None:
+ result = obj.communicate(process_input)
else:
result = obj.communicate()
obj.stdin.close()
if obj.returncode:
logging.debug("Result was %s" % (obj.returncode))
+ if check_exit_code and obj.returncode <> 0:
+ raise Exception( "Unexpected exit code: %s. result=%s"
+ % (obj.returncode, result))
return result
@@ -94,9 +110,13 @@ def debug(arg):
return arg
-def runthis(prompt, cmd):
+def runthis(prompt, cmd, check_exit_code = True):
logging.debug("Running %s" % (cmd))
- logging.debug(prompt % (subprocess.call(cmd.split(" "))))
+ exit_code = subprocess.call(cmd.split(" "))
+ logging.debug(prompt % (exit_code))
+ if check_exit_code and exit_code <> 0:
+ raise Exception( "Unexpected exit code: %s from cmd: %s"
+ % (exit_code, cmd))
def generate_uid(topic, size=8):
@@ -115,8 +135,7 @@ def last_octet(address):
def get_my_ip():
- ''' returns the actual ip of the local machine.
- '''
+ """Returns the actual ip of the local machine."""
if getattr(FLAGS, 'fake_tests', None):
return '127.0.0.1'
try:
@@ -129,10 +148,48 @@ def get_my_ip():
logging.warn("Couldn't get IP, using 127.0.0.1 %s", ex)
return "127.0.0.1"
+
def isotime(at=None):
if not at:
- at = datetime.utcnow()
+ at = datetime.datetime.utcnow()
return at.strftime(TIME_FORMAT)
+
def parse_isotime(timestr):
- return datetime.strptime(timestr, TIME_FORMAT)
+ return datetime.datetime.strptime(timestr, TIME_FORMAT)
+
+
+class LazyPluggable(object):
+ """A pluggable backend loaded lazily based on some value."""
+
+ def __init__(self, pivot, **backends):
+ self.__backends = backends
+ self.__pivot = pivot
+ self.__backend = None
+
+ def __get_backend(self):
+ if not self.__backend:
+ backend_name = self.__pivot.value
+ if backend_name not in self.__backends:
+ raise exception.Error('Invalid backend: %s' % backend_name)
+
+ backend = self.__backends[backend_name]
+ if type(backend) == type(tuple()):
+ name = backend[0]
+ fromlist = backend[1]
+ else:
+ name = backend
+ fromlist = backend
+
+ self.__backend = __import__(name, None, None, fromlist)
+ logging.error('backend %s', self.__backend)
+ return self.__backend
+
+ def __getattr__(self, key):
+ backend = self.__get_backend()
+ return getattr(backend, key)
+
+def deferredToThread(f):
+ def g(*args, **kwargs):
+ return deferToThread(f, *args, **kwargs)
+ return g
diff --git a/nova/validate.py b/nova/validate.py
index a69306fad..21f4ed286 100644
--- a/nova/validate.py
+++ b/nova/validate.py
@@ -57,6 +57,7 @@ def rangetest(**argchecks): # validate ranges for both+defaults
return onCall
return onDecorator
+
def typetest(**argchecks):
def onDecorator(func):
import sys
diff --git a/nova/virt/connection.py b/nova/virt/connection.py
index 90bc7fa0a..34e37adf7 100644
--- a/nova/virt/connection.py
+++ b/nova/virt/connection.py
@@ -17,6 +17,11 @@
# License for the specific language governing permissions and limitations
# under the License.
+"""Abstraction of the underlying virtualization API"""
+
+import logging
+import sys
+
from nova import flags
from nova.virt import fake
from nova.virt import libvirt_conn
@@ -35,7 +40,6 @@ def get_connection(read_only=False):
Any object returned here must conform to the interface documented by
FakeConnection.
"""
-
# TODO(termie): maybe lazy load after initial check for permissions
# TODO(termie): check whether we can be disconnected
t = FLAGS.connection_type
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 105837181..060b53729 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -24,6 +24,8 @@ This module also documents the semantics of real hypervisor connections.
import logging
+from twisted.internet import defer
+
from nova.compute import power_state
@@ -37,12 +39,12 @@ class FakeConnection(object):
The interface to this class talks in terms of 'instances' (Amazon EC2 and
internal Nova terminology), by which we mean 'running virtual machine'
(XenAPI terminology) or domain (Xen or libvirt terminology).
-
+
An instance has an ID, which is the identifier chosen by Nova to represent
the instance further up the stack. This is unfortunately also called a
'name' elsewhere. As far as this layer is concerned, 'instance ID' and
'instance name' are synonyms.
-
+
Note that the instance ID or name is not human-readable or
customer-controlled -- it's an internal ID chosen by Nova. At the
nova.virt layer, instances do not have human-readable names at all -- such
@@ -89,17 +91,21 @@ class FakeConnection(object):
This function should use the data there to guide the creation of
the new instance.
- Once this function successfully completes, the instance should be
+ The work will be done asynchronously. This function returns a
+ Deferred that allows the caller to detect when it is complete.
+
+ Once this successfully completes, the instance should be
running (power_state.RUNNING).
- If this function fails, any partial instance should be completely
+ If this fails, any partial instance should be completely
cleaned up, and the virtualization platform should be in the state
that it was before this call began.
"""
-
+
fake_instance = FakeInstance()
self.instances[instance.name] = fake_instance
fake_instance._state = power_state.RUNNING
+ return defer.succeed(None)
def reboot(self, instance):
"""
@@ -107,8 +113,11 @@ class FakeConnection(object):
The given parameter is an instance of nova.compute.service.Instance,
and so the instance is being specified as instance.name.
+
+ The work will be done asynchronously. This function returns a
+ Deferred that allows the caller to detect when it is complete.
"""
- pass
+ return defer.succeed(None)
def destroy(self, instance):
"""
@@ -116,10 +125,14 @@ class FakeConnection(object):
The given parameter is an instance of nova.compute.service.Instance,
and so the instance is being specified as instance.name.
+
+ The work will be done asynchronously. This function returns a
+ Deferred that allows the caller to detect when it is complete.
"""
del self.instances[instance.name]
+ return defer.succeed(None)
- def get_info(self, instance_id):
+ def get_info(self, instance_name):
"""
Get a block of information about the given instance. This is returned
as a dictionary containing 'state': The power_state of the instance,
@@ -128,42 +141,42 @@ class FakeConnection(object):
of virtual CPUs the instance has, 'cpu_time': The total CPU time used
by the instance, in nanoseconds.
"""
- i = self.instances[instance_id]
+ i = self.instances[instance_name]
return {'state': i._state,
'max_mem': 0,
'mem': 0,
'num_cpu': 2,
'cpu_time': 0}
- def list_disks(self, instance_id):
+ def list_disks(self, instance_name):
"""
Return the IDs of all the virtual disks attached to the specified
instance, as a list. These IDs are opaque to the caller (they are
only useful for giving back to this layer as a parameter to
disk_stats). These IDs only need to be unique for a given instance.
-
+
Note that this function takes an instance ID, not a
compute.service.Instance, so that it can be called by compute.monitor.
"""
return ['A_DISK']
- def list_interfaces(self, instance_id):
+ def list_interfaces(self, instance_name):
"""
Return the IDs of all the virtual network interfaces attached to the
specified instance, as a list. These IDs are opaque to the caller
(they are only useful for giving back to this layer as a parameter to
interface_stats). These IDs only need to be unique for a given
instance.
-
+
Note that this function takes an instance ID, not a
compute.service.Instance, so that it can be called by compute.monitor.
"""
return ['A_VIF']
- def block_stats(self, instance_id, disk_id):
+ def block_stats(self, instance_name, disk_id):
"""
Return performance counters associated with the given disk_id on the
- given instance_id. These are returned as [rd_req, rd_bytes, wr_req,
+ given instance_name. These are returned as [rd_req, rd_bytes, wr_req,
wr_bytes, errs], where rd indicates read, wr indicates write, req is
the total number of I/O requests made, bytes is the total number of
bytes transferred, and errs is the number of requests held up due to a
@@ -175,13 +188,13 @@ class FakeConnection(object):
statistics can be retrieved directly in aggregate form, without Nova
having to do the aggregation. On those platforms, this method is
unused.
-
+
Note that this function takes an instance ID, not a
compute.service.Instance, so that it can be called by compute.monitor.
"""
return [0L, 0L, 0L, 0L, null]
- def interface_stats(self, instance_id, iface_id):
+ def interface_stats(self, instance_name, iface_id):
"""
Return performance counters associated with the given iface_id on the
given instance_id. These are returned as [rx_bytes, rx_packets,
@@ -196,7 +209,7 @@ class FakeConnection(object):
statistics can be retrieved directly in aggregate form, without Nova
having to do the aggregation. On those platforms, this method is
unused.
-
+
Note that this function takes an instance ID, not a
compute.service.Instance, so that it can be called by compute.monitor.
"""
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 1e23c48b9..a60bcc4c1 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -27,11 +27,11 @@ import urlparse
from nova import flags
from nova import process
-from nova.auth import signer
from nova.auth import manager
+from nova.auth import signer
-FLAGS = flags.FLAGS
+FLAGS = flags.FLAGS
flags.DEFINE_bool('use_s3', True,
'whether to get images from s3 or use local copy')
@@ -43,6 +43,7 @@ def fetch(image, path, user, project):
f = _fetch_local_image
return f(image, path, user, project)
+
def _fetch_s3_image(image, path, user, project):
url = image_url(image)
@@ -59,20 +60,23 @@ def _fetch_s3_image(image, path, user, project):
url_path)
headers['Authorization'] = 'AWS %s:%s' % (access, signature)
- cmd = ['/usr/bin/curl', '--silent', url]
+ cmd = ['/usr/bin/curl', '--fail', '--silent', url]
for (k,v) in headers.iteritems():
cmd += ['-H', '%s: %s' % (k,v)]
cmd += ['-o', path]
return process.SharedPool().execute(executable=cmd[0], args=cmd[1:])
+
def _fetch_local_image(image, path, user, project):
source = _image_path('%s/image' % image)
return process.simple_execute('cp %s %s' % (source, path))
+
def _image_path(path):
return os.path.join(FLAGS.images_path, path)
+
def image_url(image):
return "http://%s:%s/_images/%s/image" % (FLAGS.s3_host, FLAGS.s3_port,
image)
diff --git a/nova/virt/libvirt.qemu.xml.template b/nova/virt/libvirt.qemu.xml.template
index 307f9d03a..17bd79b7c 100644
--- a/nova/virt/libvirt.qemu.xml.template
+++ b/nova/virt/libvirt.qemu.xml.template
@@ -1,7 +1,7 @@
<domain type='%(type)s'>
<name>%(name)s</name>
<os>
- <type>hvm</type>
+ <type>hvm</type>
<kernel>%(basepath)s/kernel</kernel>
<initrd>%(basepath)s/ramdisk</initrd>
<cmdline>root=/dev/vda1 console=ttyS0</cmdline>
@@ -26,5 +26,4 @@
<target port='1'/>
</serial>
</devices>
- <nova>%(nova)s</nova>
</domain>
diff --git a/nova/virt/libvirt.uml.xml.template b/nova/virt/libvirt.uml.xml.template
index 6f4290f98..c48f7bd55 100644
--- a/nova/virt/libvirt.uml.xml.template
+++ b/nova/virt/libvirt.uml.xml.template
@@ -21,5 +21,4 @@
<target port='1'/>
</serial>
</devices>
- <nova>%(nova)s</nova>
</domain>
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 7449d3954..931355cbd 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -29,6 +29,7 @@ import shutil
from twisted.internet import defer
from twisted.internet import task
+from nova import db
from nova import exception
from nova import flags
from nova import process
@@ -42,6 +43,7 @@ from nova.virt import images
libvirt = None
libxml2 = None
+
FLAGS = flags.FLAGS
flags.DEFINE_string('libvirt_xml_template',
utils.abspath('virt/libvirt.qemu.xml.template'),
@@ -57,7 +59,9 @@ flags.DEFINE_string('libvirt_type',
'Libvirt domain type (valid options are: kvm, qemu, uml)')
flags.DEFINE_string('libvirt_uri',
'',
- 'Override the default libvirt URI (which is dependent on libvirt_type)')
+ 'Override the default libvirt URI (which is dependent'
+ ' on libvirt_type)')
+
def get_connection(read_only):
# These are loaded late so that there's no need to install these
@@ -70,6 +74,7 @@ def get_connection(read_only):
libxml2 = __import__('libxml2')
return LibvirtConnection(read_only)
+
class LibvirtConnection(object):
def __init__(self, read_only):
self.libvirt_uri, template_file = self.get_uri_and_template()
@@ -78,13 +83,23 @@ class LibvirtConnection(object):
self._wrapped_conn = None
self.read_only = read_only
-
@property
def _conn(self):
- if not self._wrapped_conn:
+ if not self._wrapped_conn or not self._test_connection():
+ logging.debug('Connecting to libvirt: %s' % self.libvirt_uri)
self._wrapped_conn = self._connect(self.libvirt_uri, self.read_only)
return self._wrapped_conn
+ def _test_connection(self):
+ try:
+ self._wrapped_conn.getInfo()
+ return True
+ except libvirt.libvirtError as e:
+ if e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and \
+ e.get_error_domain() == libvirt.VIR_FROM_REMOTE:
+ logging.debug('Connection to libvirt broke')
+ return False
+ raise
def get_uri_and_template(self):
if FLAGS.libvirt_type == 'uml':
@@ -95,7 +110,6 @@ class LibvirtConnection(object):
template_file = FLAGS.libvirt_xml_template
return uri, template_file
-
def _connect(self, uri, read_only):
auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT],
'root',
@@ -106,18 +120,15 @@ class LibvirtConnection(object):
else:
return libvirt.openAuth(uri, auth, 0)
-
-
def list_instances(self):
return [self._conn.lookupByID(x).name()
for x in self._conn.listDomainsID()]
-
def destroy(self, instance):
try:
- virt_dom = self._conn.lookupByName(instance.name)
+ virt_dom = self._conn.lookupByName(instance['name'])
virt_dom.destroy()
- except Exception, _err:
+ except Exception as _err:
pass
# If the instance is already terminated, we're still happy
d = defer.Deferred()
@@ -129,7 +140,7 @@ class LibvirtConnection(object):
timer = task.LoopingCall(f=None)
def _wait_for_shutdown():
try:
- instance.update_state()
+ instance.set_state(self.get_info(instance['name'])['state'])
if instance.state == power_state.SHUTDOWN:
timer.stop()
d.callback(None)
@@ -141,32 +152,30 @@ class LibvirtConnection(object):
timer.start(interval=0.5, now=True)
return d
-
def _cleanup(self, instance):
- target = os.path.abspath(instance.datamodel['basepath'])
+ target = os.path.join(FLAGS.instances_path, instance['name'])
logging.info("Deleting instance files at %s", target)
if os.path.exists(target):
shutil.rmtree(target)
-
@defer.inlineCallbacks
@exception.wrap_exception
def reboot(self, instance):
- xml = self.toXml(instance)
- yield self._conn.lookupByName(instance.name).destroy()
+ xml = self.to_xml(instance)
+ yield self._conn.lookupByName(instance['name']).destroy()
yield self._conn.createXML(xml, 0)
d = defer.Deferred()
timer = task.LoopingCall(f=None)
def _wait_for_reboot():
try:
- instance.update_state()
- if instance.is_running():
- logging.debug('rebooted instance %s' % instance.name)
+ instance.set_state(self.get_info(instance['name'])['state'])
+ if instance.state == power_state.RUNNING:
+ logging.debug('rebooted instance %s' % instance['name'])
timer.stop()
d.callback(None)
except Exception, exn:
- logging.error('_wait_for_reboot failed: %s' % exn)
+ logging.error('_wait_for_reboot failed: %s', exn)
instance.set_state(power_state.SHUTDOWN)
timer.stop()
d.callback(None)
@@ -174,11 +183,10 @@ class LibvirtConnection(object):
timer.start(interval=0.5, now=True)
yield d
-
@defer.inlineCallbacks
@exception.wrap_exception
def spawn(self, instance):
- xml = self.toXml(instance)
+ xml = self.to_xml(instance)
instance.set_state(power_state.NOSTATE, 'launching')
yield self._create_image(instance, xml)
yield self._conn.createXML(xml, 0)
@@ -190,27 +198,27 @@ class LibvirtConnection(object):
timer = task.LoopingCall(f=None)
def _wait_for_boot():
try:
- instance.update_state()
- if instance.is_running():
- logging.debug('booted instance %s' % instance.name)
+ instance.set_state(self.get_info(instance['name'])['state'])
+ if instance.state == power_state.RUNNING:
+ logging.debug('booted instance %s', instance['name'])
timer.stop()
local_d.callback(None)
- except Exception, exn:
- logging.error("_wait_for_boot exception %s" % exn)
- self.set_state(power_state.SHUTDOWN)
- logging.error('Failed to boot instance %s' % instance.name)
+ except:
+ logging.exception('Failed to boot instance %s',
+ instance['name'])
+ instance.set_state(power_state.SHUTDOWN)
timer.stop()
local_d.callback(None)
timer.f = _wait_for_boot
timer.start(interval=0.5, now=True)
yield local_d
-
@defer.inlineCallbacks
- def _create_image(self, instance, libvirt_xml):
+ def _create_image(self, inst, libvirt_xml):
# syntactic nicety
- data = instance.datamodel
- basepath = lambda x='': self.basepath(instance, x)
+ basepath = lambda fname='': os.path.join(FLAGS.instances_path,
+ inst['name'],
+ fname)
# ensure directories exist and are writable
yield process.simple_execute('mkdir -p %s' % basepath())
@@ -219,69 +227,70 @@ class LibvirtConnection(object):
# TODO(termie): these are blocking calls, it would be great
# if they weren't.
- logging.info('Creating image for: %s', data['instance_id'])
+ logging.info('Creating image for: %s', inst['name'])
f = open(basepath('libvirt.xml'), 'w')
f.write(libvirt_xml)
f.close()
- user = manager.AuthManager().get_user(data['user_id'])
- project = manager.AuthManager().get_project(data['project_id'])
+ user = manager.AuthManager().get_user(inst.user_id)
+ project = manager.AuthManager().get_project(inst.project_id)
if not os.path.exists(basepath('disk')):
- yield images.fetch(data['image_id'], basepath('disk-raw'), user, project)
+ yield images.fetch(inst.image_id, basepath('disk-raw'), user, project)
if not os.path.exists(basepath('kernel')):
- yield images.fetch(data['kernel_id'], basepath('kernel'), user, project)
+ yield images.fetch(inst.kernel_id, basepath('kernel'), user, project)
if not os.path.exists(basepath('ramdisk')):
- yield images.fetch(data['ramdisk_id'], basepath('ramdisk'), user, project)
+ yield images.fetch(inst.ramdisk_id, basepath('ramdisk'), user, project)
- execute = lambda cmd, input=None: \
+ execute = lambda cmd, process_input=None: \
process.simple_execute(cmd=cmd,
- input=input,
- error_ok=1)
+ process_input=process_input,
+ check_exit_code=True)
- key = data['key_data']
+ key = inst.key_data
net = None
- if data.get('inject_network', False):
+ network_ref = db.project_get_network(None, project.id)
+ if network_ref['injected']:
+ address = db.instance_get_fixed_address(None, inst['id'])
with open(FLAGS.injected_network_template) as f:
- net = f.read() % {'address': data['private_dns_name'],
- 'network': data['network_network'],
- 'netmask': data['network_netmask'],
- 'gateway': data['network_gateway'],
- 'broadcast': data['network_broadcast'],
- 'dns': data['network_dns']}
+ net = f.read() % {'address': address,
+ 'network': network_ref['network'],
+ 'netmask': network_ref['netmask'],
+ 'gateway': network_ref['gateway'],
+ 'broadcast': network_ref['broadcast'],
+ 'dns': network_ref['dns']}
if key or net:
- logging.info('Injecting data into image %s', data['image_id'])
+ logging.info('Injecting data into image %s', inst.image_id)
yield disk.inject_data(basepath('disk-raw'), key, net, execute=execute)
if os.path.exists(basepath('disk')):
yield process.simple_execute('rm -f %s' % basepath('disk'))
- bytes = (instance_types.INSTANCE_TYPES[data['instance_type']]['local_gb']
+ bytes = (instance_types.INSTANCE_TYPES[inst.instance_type]['local_gb']
* 1024 * 1024 * 1024)
yield disk.partition(
basepath('disk-raw'), basepath('disk'), bytes, execute=execute)
-
- def basepath(self, instance, path=''):
- return os.path.abspath(os.path.join(instance.datamodel['basepath'], path))
-
-
- def toXml(self, instance):
+ def to_xml(self, instance):
# TODO(termie): cache?
logging.debug("Starting the toXML method")
- xml_info = instance.datamodel.copy()
- # TODO(joshua): Make this xml express the attached disks as well
-
- # TODO(termie): lazy lazy hack because xml is annoying
- xml_info['nova'] = json.dumps(instance.datamodel.copy())
- xml_info['type'] = FLAGS.libvirt_type
+ network = db.project_get_network(None, instance['project_id'])
+ # FIXME(vish): stick this in db
+ instance_type = instance_types.INSTANCE_TYPES[instance['instance_type']]
+ xml_info = {'type': FLAGS.libvirt_type,
+ 'name': instance['name'],
+ 'basepath': os.path.join(FLAGS.instances_path,
+ instance['name']),
+ 'memory_kb': instance_type['memory_mb'] * 1024,
+ 'vcpus': instance_type['vcpus'],
+ 'bridge_name': network['bridge'],
+ 'mac_address': instance['mac_address']}
libvirt_xml = self.libvirt_xml % xml_info
logging.debug("Finished the toXML method")
return libvirt_xml
-
- def get_info(self, instance_id):
- virt_dom = self._conn.lookupByName(instance_id)
+ def get_info(self, instance_name):
+ virt_dom = self._conn.lookupByName(instance_name)
(state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info()
return {'state': state,
'max_mem': max_mem,
@@ -289,9 +298,14 @@ class LibvirtConnection(object):
'num_cpu': num_cpu,
'cpu_time': cpu_time}
+ def get_disks(self, instance_name):
+ """
+ Note that this function takes an instance name, not an Instance, so
+ that it can be called by monitor.
- def get_disks(self, instance_id):
- domain = self._conn.lookupByName(instance_id)
+ Returns a list of all block devices for this domain.
+ """
+ domain = self._conn.lookupByName(instance_name)
# TODO(devcamcar): Replace libxml2 with etree.
xml = domain.XMLDesc(0)
doc = None
@@ -326,9 +340,14 @@ class LibvirtConnection(object):
return disks
+ def get_interfaces(self, instance_name):
+ """
+ Note that this function takes an instance name, not an Instance, so
+ that it can be called by monitor.
- def get_interfaces(self, instance_id):
- domain = self._conn.lookupByName(instance_id)
+ Returns a list of all network interfaces for this instance.
+ """
+ domain = self._conn.lookupByName(instance_name)
# TODO(devcamcar): Replace libxml2 with etree.
xml = domain.XMLDesc(0)
doc = None
@@ -363,12 +382,18 @@ class LibvirtConnection(object):
return interfaces
-
- def block_stats(self, instance_id, disk):
- domain = self._conn.lookupByName(instance_id)
+ def block_stats(self, instance_name, disk):
+ """
+ Note that this function takes an instance name, not an Instance, so
+ that it can be called by monitor.
+ """
+ domain = self._conn.lookupByName(instance_name)
return domain.blockStats(disk)
-
- def interface_stats(self, instance_id, interface):
- domain = self._conn.lookupByName(instance_id)
+ def interface_stats(self, instance_name, interface):
+ """
+ Note that this function takes an instance name, not an Instance, so
+ that it can be called by monitor.
+ """
+ domain = self._conn.lookupByName(instance_name)
return domain.interfaceStats(interface)
diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py
index 9fe15644f..b44ac383a 100644
--- a/nova/virt/xenapi.py
+++ b/nova/virt/xenapi.py
@@ -16,33 +16,69 @@
"""
A connection to XenServer or Xen Cloud Platform.
+
+The concurrency model for this class is as follows:
+
+All XenAPI calls are on a thread (using t.i.t.deferToThread, via the decorator
+deferredToThread). They are remote calls, and so may hang for the usual
+reasons. They should not be allowed to block the reactor thread.
+
+All long-running XenAPI calls (VM.start, VM.reboot, etc) are called async
+(using XenAPI.VM.async_start etc). These return a task, which can then be
+polled for completion. Polling is handled using reactor.callLater.
+
+This combination of techniques means that we don't block the reactor thread at
+all, and at the same time we don't hold lots of threads waiting for
+long-running operations.
+
+FIXME: get_info currently doesn't conform to these rules, and will block the
+reactor thread if the VM.get_by_name_label or VM.get_record calls block.
"""
import logging
import xmlrpclib
from twisted.internet import defer
+from twisted.internet import reactor
from twisted.internet import task
-from nova import exception
from nova import flags
from nova import process
+from nova import utils
from nova.auth.manager import AuthManager
from nova.compute import power_state
from nova.virt import images
XenAPI = None
+
FLAGS = flags.FLAGS
flags.DEFINE_string('xenapi_connection_url',
None,
- 'URL for connection to XenServer/Xen Cloud Platform. Required if connection_type=xenapi.')
+ 'URL for connection to XenServer/Xen Cloud Platform.'
+ ' Required if connection_type=xenapi.')
flags.DEFINE_string('xenapi_connection_username',
'root',
- 'Username for connection to XenServer/Xen Cloud Platform. Used only if connection_type=xenapi.')
+ 'Username for connection to XenServer/Xen Cloud Platform.'
+ ' Used only if connection_type=xenapi.')
flags.DEFINE_string('xenapi_connection_password',
None,
- 'Password for connection to XenServer/Xen Cloud Platform. Used only if connection_type=xenapi.')
+ 'Password for connection to XenServer/Xen Cloud Platform.'
+ ' Used only if connection_type=xenapi.')
+flags.DEFINE_float('xenapi_task_poll_interval',
+ 0.5,
+ 'The interval used for polling of remote tasks '
+ '(Async.VM.start, etc). Used only if '
+ 'connection_type=xenapi.')
+
+
+XENAPI_POWER_STATE = {
+ 'Halted' : power_state.SHUTDOWN,
+ 'Running' : power_state.RUNNING,
+ 'Paused' : power_state.PAUSED,
+ 'Suspended': power_state.SHUTDOWN, # FIXME
+ 'Crashed' : power_state.CRASHED
+}
def get_connection(_):
@@ -62,7 +98,6 @@ def get_connection(_):
class XenAPIConnection(object):
-
def __init__(self, url, user, pw):
self._conn = XenAPI.Session(url)
self._conn.login_with_password(user, pw)
@@ -72,9 +107,8 @@ class XenAPIConnection(object):
for vm in self._conn.xenapi.VM.get_all()]
@defer.inlineCallbacks
- @exception.wrap_exception
def spawn(self, instance):
- vm = yield self.lookup(instance.name)
+ vm = yield self._lookup(instance.name)
if vm is not None:
raise Exception('Attempted to create non-unique name %s' %
instance.name)
@@ -93,22 +127,27 @@ class XenAPIConnection(object):
user = AuthManager().get_user(instance.datamodel['user_id'])
project = AuthManager().get_project(instance.datamodel['project_id'])
- vdi_uuid = yield self.fetch_image(
+ vdi_uuid = yield self._fetch_image(
instance.datamodel['image_id'], user, project, True)
- kernel = yield self.fetch_image(
+ kernel = yield self._fetch_image(
instance.datamodel['kernel_id'], user, project, False)
- ramdisk = yield self.fetch_image(
+ ramdisk = yield self._fetch_image(
instance.datamodel['ramdisk_id'], user, project, False)
- vdi_ref = yield self._conn.xenapi.VDI.get_by_uuid(vdi_uuid)
+ vdi_ref = yield self._call_xenapi('VDI.get_by_uuid', vdi_uuid)
- vm_ref = yield self.create_vm(instance, kernel, ramdisk)
- yield self.create_vbd(vm_ref, vdi_ref, 0, True)
+ vm_ref = yield self._create_vm(instance, kernel, ramdisk)
+ yield self._create_vbd(vm_ref, vdi_ref, 0, True)
if network_ref:
yield self._create_vif(vm_ref, network_ref, mac_address)
- yield self._conn.xenapi.VM.start(vm_ref, False, False)
-
+ logging.debug('Starting VM %s...', vm_ref)
+ yield self._call_xenapi('VM.start', vm_ref, False, False)
+ logging.info('Spawning VM %s created %s.', instance.name, vm_ref)
- def create_vm(self, instance, kernel, ramdisk):
+ @defer.inlineCallbacks
+ def _create_vm(self, instance, kernel, ramdisk):
+ """Create a VM record. Returns a Deferred that gives the new
+ VM reference."""
+
mem = str(long(instance.datamodel['memory_kb']) * 1024)
vcpus = str(instance.datamodel['vcpus'])
rec = {
@@ -141,12 +180,15 @@ class XenAPIConnection(object):
'other_config': {},
}
logging.debug('Created VM %s...', instance.name)
- vm_ref = self._conn.xenapi.VM.create(rec)
+ vm_ref = yield self._call_xenapi('VM.create', rec)
logging.debug('Created VM %s as %s.', instance.name, vm_ref)
- return vm_ref
-
+ defer.returnValue(vm_ref)
- def create_vbd(self, vm_ref, vdi_ref, userdevice, bootable):
+ @defer.inlineCallbacks
+ def _create_vbd(self, vm_ref, vdi_ref, userdevice, bootable):
+ """Create a VBD record. Returns a Deferred that gives the new
+ VBD reference."""
+
vbd_rec = {}
vbd_rec['VM'] = vm_ref
vbd_rec['VDI'] = vdi_ref
@@ -161,13 +203,16 @@ class XenAPIConnection(object):
vbd_rec['qos_algorithm_params'] = {}
vbd_rec['qos_supported_algorithms'] = []
logging.debug('Creating VBD for VM %s, VDI %s ... ', vm_ref, vdi_ref)
- vbd_ref = self._conn.xenapi.VBD.create(vbd_rec)
+ vbd_ref = yield self._call_xenapi('VBD.create', vbd_rec)
logging.debug('Created VBD %s for VM %s, VDI %s.', vbd_ref, vm_ref,
vdi_ref)
- return vbd_ref
-
+ defer.returnValue(vbd_ref)
+ @defer.inlineCallbacks
def _create_vif(self, vm_ref, network_ref, mac_address):
+ """Create a VIF record. Returns a Deferred that gives the new
+ VIF reference."""
+
vif_rec = {}
vif_rec['device'] = '0'
vif_rec['network']= network_ref
@@ -179,27 +224,29 @@ class XenAPIConnection(object):
vif_rec['qos_algorithm_params'] = {}
logging.debug('Creating VIF for VM %s, network %s ... ', vm_ref,
network_ref)
- vif_ref = self._conn.xenapi.VIF.create(vif_rec)
+ vif_ref = yield self._call_xenapi('VIF.create', vif_rec)
logging.debug('Created VIF %s for VM %s, network %s.', vif_ref,
vm_ref, network_ref)
- return vif_ref
-
+ defer.returnValue(vif_ref)
+ @defer.inlineCallbacks
def _find_network_with_bridge(self, bridge):
expr = 'field "bridge" = "%s"' % bridge
- networks = self._conn.xenapi.network.get_all_records_where(expr)
+ networks = yield self._call_xenapi('network.get_all_records_where',
+ expr)
if len(networks) == 1:
- return networks.keys()[0]
+ defer.returnValue(networks.keys()[0])
elif len(networks) > 1:
raise Exception('Found non-unique network for bridge %s' % bridge)
else:
raise Exception('Found no network for bridge %s' % bridge)
-
- def fetch_image(self, image, user, project, use_sr):
+ @defer.inlineCallbacks
+ def _fetch_image(self, image, user, project, use_sr):
"""use_sr: True to put the image as a VDI in an SR, False to place
it on dom0's filesystem. The former is for VM disks, the latter for
- its kernel and ramdisk (if external kernels are being used)."""
+ its kernel and ramdisk (if external kernels are being used).
+ Returns a Deferred that gives the new VDI UUID."""
url = images.image_url(image)
access = AuthManager().get_access_key(user, project)
@@ -211,33 +258,42 @@ class XenAPIConnection(object):
args['password'] = user.secret
if use_sr:
args['add_partition'] = 'true'
- return self._call_plugin('objectstore', fn, args)
-
+ task = yield self._async_call_plugin('objectstore', fn, args)
+ uuid = yield self._wait_for_task(task)
+ defer.returnValue(uuid)
+ @defer.inlineCallbacks
def reboot(self, instance):
- vm = self.lookup(instance.name)
+ vm = yield self._lookup(instance.name)
if vm is None:
raise Exception('instance not present %s' % instance.name)
- yield self._conn.xenapi.VM.clean_reboot(vm)
+ task = yield self._call_xenapi('Async.VM.clean_reboot', vm)
+ yield self._wait_for_task(task)
+ @defer.inlineCallbacks
def destroy(self, instance):
- vm = self.lookup(instance.name)
+ vm = yield self._lookup(instance.name)
if vm is None:
raise Exception('instance not present %s' % instance.name)
- yield self._conn.xenapi.VM.destroy(vm)
+ task = yield self._call_xenapi('Async.VM.destroy', vm)
+ yield self._wait_for_task(task)
def get_info(self, instance_id):
- vm = self.lookup(instance_id)
+ vm = self._lookup_blocking(instance_id)
if vm is None:
raise Exception('instance not present %s' % instance_id)
rec = self._conn.xenapi.VM.get_record(vm)
- return {'state': power_state_from_xenapi[rec['power_state']],
+ return {'state': XENAPI_POWER_STATE[rec['power_state']],
'max_mem': long(rec['memory_static_max']) >> 10,
'mem': long(rec['memory_dynamic_max']) >> 10,
'num_cpu': rec['VCPUs_max'],
'cpu_time': 0}
- def lookup(self, i):
+ @utils.deferredToThread
+ def _lookup(self, i):
+ return self._lookup_blocking(i)
+
+ def _lookup_blocking(self, i):
vms = self._conn.xenapi.VM.get_by_name_label(i)
n = len(vms)
if n == 0:
@@ -247,26 +303,58 @@ class XenAPIConnection(object):
else:
return vms[0]
-
- def _call_plugin(self, plugin, fn, args):
+ def _wait_for_task(self, task):
+ """Return a Deferred that will give the result of the given task.
+ The task is polled until it completes."""
+ d = defer.Deferred()
+ reactor.callLater(0, self._poll_task, task, d)
+ return d
+
+ @utils.deferredToThread
+ def _poll_task(self, task, deferred):
+ """Poll the given XenAPI task, and fire the given Deferred if we
+ get a result."""
+ try:
+ #logging.debug('Polling task %s...', task)
+ status = self._conn.xenapi.task.get_status(task)
+ if status == 'pending':
+ reactor.callLater(FLAGS.xenapi_task_poll_interval,
+ self._poll_task, task, deferred)
+ elif status == 'success':
+ result = self._conn.xenapi.task.get_result(task)
+ logging.info('Task %s status: success. %s', task, result)
+ deferred.callback(_parse_xmlrpc_value(result))
+ else:
+ error_info = self._conn.xenapi.task.get_error_info(task)
+ logging.warn('Task %s status: %s. %s', task, status,
+ error_info)
+ deferred.errback(XenAPI.Failure(error_info))
+ #logging.debug('Polling task %s done.', task)
+ except Exception, exn:
+ logging.warn(exn)
+ deferred.errback(exn)
+
+ @utils.deferredToThread
+ def _call_xenapi(self, method, *args):
+ """Call the specified XenAPI method on a background thread. Returns
+ a Deferred for the result."""
+ f = self._conn.xenapi
+ for m in method.split('.'):
+ f = f.__getattr__(m)
+ return f(*args)
+
+ @utils.deferredToThread
+ def _async_call_plugin(self, plugin, fn, args):
+ """Call Async.host.call_plugin on a background thread. Returns a
+ Deferred with the task reference."""
return _unwrap_plugin_exceptions(
- self._conn.xenapi.host.call_plugin,
+ self._conn.xenapi.Async.host.call_plugin,
self._get_xenapi_host(), plugin, fn, args)
-
def _get_xenapi_host(self):
return self._conn.xenapi.session.get_this_host(self._conn.handle)
-power_state_from_xenapi = {
- 'Halted' : power_state.SHUTDOWN,
- 'Running' : power_state.RUNNING,
- 'Paused' : power_state.PAUSED,
- 'Suspended': power_state.SHUTDOWN, # FIXME
- 'Crashed' : power_state.CRASHED
-}
-
-
def _unwrap_plugin_exceptions(func, *args, **kwargs):
try:
return func(*args, **kwargs)
@@ -286,3 +374,15 @@ def _unwrap_plugin_exceptions(func, *args, **kwargs):
except xmlrpclib.ProtocolError, exn:
logging.debug("Got exception: %s", exn)
raise
+
+
+def _parse_xmlrpc_value(val):
+ """Parse the given value as if it were an XML-RPC value. This is
+ sometimes used as the format for the task.result field."""
+ if not val:
+ return val
+ x = xmlrpclib.loads(
+ '<?xml version="1.0"?><methodResponse><params><param>' +
+ val +
+ '</param></params></methodResponse>')
+ return x[0][0]
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
new file mode 100644
index 000000000..f5c1330a3
--- /dev/null
+++ b/nova/volume/driver.py
@@ -0,0 +1,105 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Drivers for volumes
+"""
+
+import logging
+
+from twisted.internet import defer
+
+from nova import flags
+from nova import process
+
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('volume_group', 'nova-volumes',
+ 'Name for the VG that will contain exported volumes')
+flags.DEFINE_string('aoe_eth_dev', 'eth0',
+ 'Which device to export the volumes on')
+
+
+
+class AOEDriver(object):
+ """Executes commands relating to AOE volumes"""
+ def __init__(self, execute=process.simple_execute, *args, **kwargs):
+ self._execute = execute
+
+ @defer.inlineCallbacks
+ def create_volume(self, volume_id, size):
+ """Creates a logical volume"""
+ # NOTE(vish): makes sure that the volume group exists
+ yield self._execute("vgs | grep %s" % FLAGS.volume_group)
+ if int(size) == 0:
+ sizestr = '100M'
+ else:
+ sizestr = '%sG' % size
+ yield self._execute(
+ "sudo lvcreate -L %s -n %s %s" % (sizestr,
+ volume_id,
+ FLAGS.volume_group))
+
+ @defer.inlineCallbacks
+ def delete_volume(self, volume_id):
+ """Deletes a logical volume"""
+ yield self._execute(
+ "sudo lvremove -f %s/%s" % (FLAGS.volume_group,
+ volume_id))
+
+ @defer.inlineCallbacks
+ def create_export(self, volume_id, shelf_id, blade_id):
+ """Creates an export for a logical volume"""
+ yield self._execute(
+ "sudo vblade-persist setup %s %s %s /dev/%s/%s" %
+ (shelf_id,
+ blade_id,
+ FLAGS.aoe_eth_dev,
+ FLAGS.volume_group,
+ volume_id))
+
+ @defer.inlineCallbacks
+ def remove_export(self, _volume_id, shelf_id, blade_id):
+ """Removes an export for a logical volume"""
+ yield self._execute(
+ "sudo vblade-persist stop %s %s" % (shelf_id, blade_id))
+ yield self._execute(
+ "sudo vblade-persist destroy %s %s" % (shelf_id, blade_id))
+
+ @defer.inlineCallbacks
+ def ensure_exports(self):
+ """Runs all existing exports"""
+ # NOTE(ja): wait for blades to appear
+ yield self._execute("sleep 5")
+ yield self._execute("sudo vblade-persist auto all",
+ check_exit_code=False)
+ yield self._execute("sudo vblade-persist start all",
+ check_exit_code=False)
+
+
+
+class FakeAOEDriver(AOEDriver):
+ """Logs calls instead of executing"""
+ def __init__(self, *args, **kwargs):
+ super(FakeAOEDriver, self).__init__(self.fake_execute)
+
+ @staticmethod
+ def fake_execute(cmd, *_args, **_kwargs):
+ """Execute that simply logs the command"""
+ logging.debug("FAKE AOE: %s", cmd)
+
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
new file mode 100644
index 000000000..e5f4805a1
--- /dev/null
+++ b/nova/volume/manager.py
@@ -0,0 +1,121 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Volume manager manages creating, attaching, detaching, and
+destroying persistent storage volumes, ala EBS.
+"""
+
+import logging
+
+from twisted.internet import defer
+
+from nova import exception
+from nova import flags
+from nova import manager
+from nova import utils
+
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('storage_availability_zone',
+ 'nova',
+ 'availability zone of this service')
+flags.DEFINE_boolean('fake_storage', False,
+ 'Should we make real storage volumes to attach?')
+flags.DEFINE_string('volume_driver', 'nova.volume.driver.AOEDriver',
+ 'Driver to use for volume creation')
+flags.DEFINE_integer('num_shelves',
+ 100,
+ 'Number of vblade shelves')
+flags.DEFINE_integer('blades_per_shelf',
+ 16,
+ 'Number of vblade blades per shelf')
+
+
+class AOEManager(manager.Manager):
+    """Manages Ata-over-Ethernet volumes"""
+ def __init__(self, volume_driver=None, *args, **kwargs):
+ if not volume_driver:
+ # NOTE(vish): support the legacy fake storage flag
+ if FLAGS.fake_storage:
+ volume_driver = 'nova.volume.driver.FakeAOEDriver'
+ else:
+ volume_driver = FLAGS.volume_driver
+ self.driver = utils.import_object(volume_driver)
+ super(AOEManager, self).__init__(*args, **kwargs)
+
+ def _ensure_blades(self, context):
+ """Ensure that blades have been created in datastore"""
+ total_blades = FLAGS.num_shelves * FLAGS.blades_per_shelf
+ if self.db.export_device_count(context) >= total_blades:
+ return
+ for shelf_id in xrange(FLAGS.num_shelves):
+ for blade_id in xrange(FLAGS.blades_per_shelf):
+ dev = {'shelf_id': shelf_id, 'blade_id': blade_id}
+ self.db.export_device_create(context, dev)
+
+ @defer.inlineCallbacks
+ def create_volume(self, context, volume_id):
+ """Creates and exports the volume"""
+ logging.info("volume %s: creating", volume_id)
+
+ volume_ref = self.db.volume_get(context, volume_id)
+
+ self.db.volume_update(context,
+ volume_id,
+ {'host': FLAGS.host})
+
+ size = volume_ref['size']
+ logging.debug("volume %s: creating lv of size %sG", volume_id, size)
+ yield self.driver.create_volume(volume_id, size)
+
+ logging.debug("volume %s: allocating shelf & blade", volume_id)
+ self._ensure_blades(context)
+ rval = self.db.volume_allocate_shelf_and_blade(context, volume_id)
+ (shelf_id, blade_id) = rval
+
+        logging.debug("volume %s: exporting shelf %s & blade %s",
+                      volume_id, shelf_id, blade_id)
+
+ yield self.driver.create_export(volume_id, shelf_id, blade_id)
+ # TODO(joshua): We need to trigger a fanout message
+ # for aoe-discover on all the nodes
+
+ self.db.volume_update(context, volume_id, {'status': 'available'})
+
+ logging.debug("volume %s: re-exporting all values", volume_id)
+ yield self.driver.ensure_exports()
+
+ logging.debug("volume %s: created successfully", volume_id)
+ defer.returnValue(volume_id)
+
+ @defer.inlineCallbacks
+ def delete_volume(self, context, volume_id):
+ """Deletes and unexports volume"""
+ logging.debug("Deleting volume with id of: %s", volume_id)
+ volume_ref = self.db.volume_get(context, volume_id)
+ if volume_ref['attach_status'] == "attached":
+ raise exception.Error("Volume is still attached")
+ if volume_ref['host'] != FLAGS.host:
+ raise exception.Error("Volume is not local to this node")
+ shelf_id, blade_id = self.db.volume_get_shelf_and_blade(context,
+ volume_id)
+ yield self.driver.remove_export(volume_id, shelf_id, blade_id)
+ yield self.driver.delete_volume(volume_id)
+ self.db.volume_destroy(context, volume_id)
+ defer.returnValue(True)
diff --git a/nova/volume/service.py b/nova/volume/service.py
index 66163a812..f1b1d8695 100644
--- a/nova/volume/service.py
+++ b/nova/volume/service.py
@@ -17,298 +17,15 @@
# under the License.
"""
-Nova Storage manages creating, attaching, detaching, and
-destroying persistent storage volumes, ala EBS.
-Currently uses Ata-over-Ethernet.
+Volume service allows rpc calls to the volume manager and reports state
+to the database.
"""
-import logging
-import os
-
-from twisted.internet import defer
-
-from nova import datastore
-from nova import exception
-from nova import flags
-from nova import process
from nova import service
-from nova import utils
-from nova import validate
-
-
-FLAGS = flags.FLAGS
-flags.DEFINE_string('storage_dev', '/dev/sdb',
- 'Physical device to use for volumes')
-flags.DEFINE_string('volume_group', 'nova-volumes',
- 'Name for the VG that will contain exported volumes')
-flags.DEFINE_string('aoe_eth_dev', 'eth0',
- 'Which device to export the volumes on')
-flags.DEFINE_integer('first_shelf_id',
- utils.last_octet(utils.get_my_ip()) * 10,
- 'AoE starting shelf_id for this service')
-flags.DEFINE_integer('last_shelf_id',
- utils.last_octet(utils.get_my_ip()) * 10 + 9,
- 'AoE starting shelf_id for this service')
-flags.DEFINE_string('aoe_export_dir',
- '/var/lib/vblade-persist/vblades',
- 'AoE directory where exports are created')
-flags.DEFINE_integer('blades_per_shelf',
- 16,
- 'Number of AoE blades per shelf')
-flags.DEFINE_string('storage_availability_zone',
- 'nova',
- 'availability zone of this service')
-flags.DEFINE_boolean('fake_storage', False,
- 'Should we make real storage volumes to attach?')
-
-class NoMoreBlades(exception.Error):
- pass
-
-def get_volume(volume_id):
- """ Returns a redis-backed volume object """
- volume_class = Volume
- if FLAGS.fake_storage:
- volume_class = FakeVolume
- vol = volume_class.lookup(volume_id)
- if vol:
- return vol
- raise exception.Error("Volume does not exist")
class VolumeService(service.Service):
"""
- There is one VolumeNode running on each host.
- However, each VolumeNode can report on the state of
- *all* volumes in the cluster.
+ Volume Service automatically passes commands on to the Volume Manager
"""
- def __init__(self):
- super(VolumeService, self).__init__()
- self.volume_class = Volume
- if FLAGS.fake_storage:
- self.volume_class = FakeVolume
- self._init_volume_group()
-
- @defer.inlineCallbacks
- @validate.rangetest(size=(0, 1000))
- def create_volume(self, size, user_id, project_id):
- """
- Creates an exported volume (fake or real),
- restarts exports to make it available.
- Volume at this point has size, owner, and zone.
- """
- logging.debug("Creating volume of size: %s" % (size))
- vol = yield self.volume_class.create(size, user_id, project_id)
- logging.debug("restarting exports")
- yield self._restart_exports()
- defer.returnValue(vol['volume_id'])
-
- def by_node(self, node_id):
- """ returns a list of volumes for a node """
- for volume_id in datastore.Redis.instance().smembers('volumes:%s' % (node_id)):
- yield self.volume_class(volume_id=volume_id)
-
- @property
- def all(self):
- """ returns a list of all volumes """
- for volume_id in datastore.Redis.instance().smembers('volumes'):
- yield self.volume_class(volume_id=volume_id)
-
- @defer.inlineCallbacks
- def delete_volume(self, volume_id):
- logging.debug("Deleting volume with id of: %s" % (volume_id))
- vol = get_volume(volume_id)
- if vol['attach_status'] == "attached":
- raise exception.Error("Volume is still attached")
- if vol['node_name'] != FLAGS.node_name:
- raise exception.Error("Volume is not local to this node")
- yield vol.destroy()
- defer.returnValue(True)
-
- @defer.inlineCallbacks
- def _restart_exports(self):
- if FLAGS.fake_storage:
- return
- # NOTE(vish): these commands sometimes sends output to stderr for warnings
- yield process.simple_execute("sudo vblade-persist auto all", error_ok=1)
- yield process.simple_execute("sudo vblade-persist start all", error_ok=1)
-
- @defer.inlineCallbacks
- def _init_volume_group(self):
- if FLAGS.fake_storage:
- return
- yield process.simple_execute(
- "sudo pvcreate %s" % (FLAGS.storage_dev))
- yield process.simple_execute(
- "sudo vgcreate %s %s" % (FLAGS.volume_group,
- FLAGS.storage_dev))
-
-class Volume(datastore.BasicModel):
-
- def __init__(self, volume_id=None):
- self.volume_id = volume_id
- super(Volume, self).__init__()
-
- @property
- def identifier(self):
- return self.volume_id
-
- def default_state(self):
- return {"volume_id": self.volume_id,
- "node_name": "unassigned"}
-
- @classmethod
- @defer.inlineCallbacks
- def create(cls, size, user_id, project_id):
- volume_id = utils.generate_uid('vol')
- vol = cls(volume_id)
- vol['node_name'] = FLAGS.node_name
- vol['size'] = size
- vol['user_id'] = user_id
- vol['project_id'] = project_id
- vol['availability_zone'] = FLAGS.storage_availability_zone
- vol["instance_id"] = 'none'
- vol["mountpoint"] = 'none'
- vol['attach_time'] = 'none'
- vol['status'] = "creating" # creating | available | in-use
- vol['attach_status'] = "detached" # attaching | attached | detaching | detached
- vol['delete_on_termination'] = 'False'
- vol.save()
- yield vol._create_lv()
- yield vol._setup_export()
- # TODO(joshua) - We need to trigger a fanout message for aoe-discover on all the nodes
- vol['status'] = "available"
- vol.save()
- defer.returnValue(vol)
-
- def start_attach(self, instance_id, mountpoint):
- """ """
- self['instance_id'] = instance_id
- self['mountpoint'] = mountpoint
- self['status'] = "in-use"
- self['attach_status'] = "attaching"
- self['attach_time'] = utils.isotime()
- self['delete_on_termination'] = 'False'
- self.save()
-
- def finish_attach(self):
- """ """
- self['attach_status'] = "attached"
- self.save()
-
- def start_detach(self):
- """ """
- self['attach_status'] = "detaching"
- self.save()
-
- def finish_detach(self):
- self['instance_id'] = None
- self['mountpoint'] = None
- self['status'] = "available"
- self['attach_status'] = "detached"
- self.save()
-
- def save(self):
- is_new = self.is_new_record()
- super(Volume, self).save()
- if is_new:
- redis = datastore.Redis.instance()
- key = self.__devices_key
- # TODO(vish): these should be added by admin commands
- more = redis.scard(self._redis_association_name("node",
- self['node_name']))
- if (not redis.exists(key) and not more):
- for shelf_id in range(FLAGS.first_shelf_id,
- FLAGS.last_shelf_id + 1):
- for blade_id in range(FLAGS.blades_per_shelf):
- redis.sadd(key, "%s.%s" % (shelf_id, blade_id))
- self.associate_with("node", self['node_name'])
-
- @defer.inlineCallbacks
- def destroy(self):
- yield self._remove_export()
- yield self._delete_lv()
- self.unassociate_with("node", self['node_name'])
- if self.get('shelf_id', None) and self.get('blade_id', None):
- redis = datastore.Redis.instance()
- key = self.__devices_key
- redis.sadd(key, "%s.%s" % (self['shelf_id'], self['blade_id']))
- super(Volume, self).destroy()
-
- @defer.inlineCallbacks
- def _create_lv(self):
- if str(self['size']) == '0':
- sizestr = '100M'
- else:
- sizestr = '%sG' % self['size']
- yield process.simple_execute(
- "sudo lvcreate -L %s -n %s %s" % (sizestr,
- self['volume_id'],
- FLAGS.volume_group),
- error_ok=1)
-
- @defer.inlineCallbacks
- def _delete_lv(self):
- yield process.simple_execute(
- "sudo lvremove -f %s/%s" % (FLAGS.volume_group,
- self['volume_id']), error_ok=1)
-
- @property
- def __devices_key(self):
- return 'volume_devices:%s' % FLAGS.node_name
-
- @defer.inlineCallbacks
- def _setup_export(self):
- redis = datastore.Redis.instance()
- key = self.__devices_key
- device = redis.spop(key)
- if not device:
- raise NoMoreBlades()
- (shelf_id, blade_id) = device.split('.')
- self['aoe_device'] = "e%s.%s" % (shelf_id, blade_id)
- self['shelf_id'] = shelf_id
- self['blade_id'] = blade_id
- self.save()
- yield self._exec_setup_export()
-
- @defer.inlineCallbacks
- def _exec_setup_export(self):
- yield process.simple_execute(
- "sudo vblade-persist setup %s %s %s /dev/%s/%s" %
- (self['shelf_id'],
- self['blade_id'],
- FLAGS.aoe_eth_dev,
- FLAGS.volume_group,
- self['volume_id']), error_ok=1)
-
- @defer.inlineCallbacks
- def _remove_export(self):
- if not self.get('shelf_id', None) or not self.get('blade_id', None):
- defer.returnValue(False)
- yield self._exec_remove_export()
- defer.returnValue(True)
-
- @defer.inlineCallbacks
- def _exec_remove_export(self):
- yield process.simple_execute(
- "sudo vblade-persist stop %s %s" % (self['shelf_id'],
- self['blade_id']), error_ok=1)
- yield process.simple_execute(
- "sudo vblade-persist destroy %s %s" % (self['shelf_id'],
- self['blade_id']), error_ok=1)
-
-
-
-class FakeVolume(Volume):
- def _create_lv(self):
- pass
-
- def _exec_setup_export(self):
- fname = os.path.join(FLAGS.aoe_export_dir, self['aoe_device'])
- f = file(fname, "w")
- f.close()
-
- def _exec_remove_export(self):
- os.unlink(os.path.join(FLAGS.aoe_export_dir, self['aoe_device']))
-
- def _delete_lv(self):
- pass
+ pass
diff --git a/nova/wsgi.py b/nova/wsgi.py
index 4fd6e59e3..bec0a7b1c 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -29,6 +29,8 @@ import eventlet.wsgi
eventlet.patcher.monkey_patch(all=False, socket=True)
import routes
import routes.middleware
+import webob.dec
+import webob.exc
logging.getLogger("routes.middleware").addHandler(logging.StreamHandler())
@@ -41,6 +43,8 @@ def run_server(application, port):
class Application(object):
+# TODO(gundlach): I think we should toss this class, now that it has no
+# purpose.
"""Base WSGI application wrapper. Subclasses need to implement __call__."""
def __call__(self, environ, start_response):
@@ -79,95 +83,210 @@ class Application(object):
raise NotImplementedError("You must implement __call__")
-class Middleware(Application): # pylint: disable-msg=W0223
- """Base WSGI middleware wrapper. These classes require an
- application to be initialized that will be called next."""
+class Middleware(Application):
+ """
+ Base WSGI middleware wrapper. These classes require an application to be
+ initialized that will be called next. By default the middleware will
+ simply call its wrapped app, or you can override __call__ to customize its
+ behavior.
+ """
def __init__(self, application): # pylint: disable-msg=W0231
self.application = application
+ @webob.dec.wsgify
+ def __call__(self, req): # pylint: disable-msg=W0221
+ """Override to implement middleware behavior."""
+ return self.application
+
class Debug(Middleware):
- """Helper class that can be insertd into any WSGI application chain
+ """Helper class that can be inserted into any WSGI application chain
to get information about the request and response."""
- def __call__(self, environ, start_response):
- for key, value in environ.items():
+ @webob.dec.wsgify
+ def __call__(self, req):
+ print ("*" * 40) + " REQUEST ENVIRON"
+ for key, value in req.environ.items():
print key, "=", value
print
- wrapper = debug_start_response(start_response)
- return debug_print_body(self.application(environ, wrapper))
-
-
-def debug_start_response(start_response):
- """Wrap the start_response to capture when called."""
+ resp = req.get_response(self.application)
- def wrapper(status, headers, exc_info=None):
- """Print out all headers when start_response is called."""
- print status
- for (key, value) in headers:
+ print ("*" * 40) + " RESPONSE HEADERS"
+ for (key, value) in resp.headers.iteritems():
print key, "=", value
print
- start_response(status, headers, exc_info)
- return wrapper
+ resp.app_iter = self.print_generator(resp.app_iter)
+ return resp
-def debug_print_body(body):
- """Print the body of the response as it is sent back."""
-
- class Wrapper(object):
- """Iterate through all the body parts and print before returning."""
-
- def __iter__(self):
- for part in body:
- sys.stdout.write(part)
- sys.stdout.flush()
- yield part
- print
+ @staticmethod
+ def print_generator(app_iter):
+ """
+ Iterator that prints the contents of a wrapper string iterator
+ when iterated.
+ """
+ print ("*" * 40) + " BODY"
+ for part in app_iter:
+ sys.stdout.write(part)
+ sys.stdout.flush()
+ yield part
+ print
- return Wrapper()
+class Router(object):
+ """
+ WSGI middleware that maps incoming requests to WSGI apps.
+ """
-class ParsedRoutes(Middleware):
- """Processed parsed routes from routes.middleware.RoutesMiddleware
- and call either the controller if found or the default application
- otherwise."""
+ def __init__(self, mapper):
+ """
+ Create a router for the given routes.Mapper.
- def __call__(self, environ, start_response):
- if environ['routes.route'] is None:
- return self.application(environ, start_response)
- app = environ['wsgiorg.routing_args'][1]['controller']
- return app(environ, start_response)
+ Each route in `mapper` must specify a 'controller', which is a
+ WSGI app to call. You'll probably want to specify an 'action' as
+ well and have your controller be a wsgi.Controller, who will route
+ the request to the action method.
+ Examples:
+ mapper = routes.Mapper()
+ sc = ServerController()
-class Router(Middleware): # pylint: disable-msg=R0921
- """Wrapper to help setup routes.middleware.RoutesMiddleware."""
+ # Explicit mapping of one route to a controller+action
+ mapper.connect(None, "/svrlist", controller=sc, action="list")
- def __init__(self, application):
- self.map = routes.Mapper()
- self._build_map()
- application = ParsedRoutes(application)
- application = routes.middleware.RoutesMiddleware(application, self.map)
- super(Router, self).__init__(application)
+ # Actions are all implicitly defined
+ mapper.resource("server", "servers", controller=sc)
- def __call__(self, environ, start_response):
- return self.application(environ, start_response)
+ # Pointing to an arbitrary WSGI app. You can specify the
+ # {path_info:.*} parameter so the target app can be handed just that
+ # section of the URL.
+ mapper.connect(None, "/v1.0/{path_info:.*}", controller=BlogApp())
+ """
+ self.map = mapper
+ self._router = routes.middleware.RoutesMiddleware(self._dispatch,
+ self.map)
- def _build_map(self):
- """Method to create new connections for the routing map."""
- raise NotImplementedError("You must implement _build_map")
+ @webob.dec.wsgify
+ def __call__(self, req):
+ """
+ Route the incoming request to a controller based on self.map.
+ If no match, return a 404.
+ """
+ return self._router
- def _connect(self, *args, **kwargs):
- """Wrapper for the map.connect method."""
- self.map.connect(*args, **kwargs)
+ @staticmethod
+ @webob.dec.wsgify
+ def _dispatch(req):
+ """
+ Called by self._router after matching the incoming request to a route
+ and putting the information into req.environ. Either returns 404
+ or the routed WSGI app's response.
+ """
+ match = req.environ['wsgiorg.routing_args'][1]
+ if not match:
+ return webob.exc.HTTPNotFound()
+ app = match['controller']
+ return app
+
+
+class Controller(object):
+ """
+ WSGI app that reads routing information supplied by RoutesMiddleware
+ and calls the requested action method upon itself. All action methods
+ must, in addition to their normal parameters, accept a 'req' argument
+ which is the incoming webob.Request. They raise a webob.exc exception,
+ or return a dict which will be serialized by requested content type.
+ """
+
+ @webob.dec.wsgify
+ def __call__(self, req):
+ """
+ Call the method specified in req.environ by RoutesMiddleware.
+ """
+ arg_dict = req.environ['wsgiorg.routing_args'][1]
+ action = arg_dict['action']
+ method = getattr(self, action)
+ del arg_dict['controller']
+ del arg_dict['action']
+ arg_dict['req'] = req
+ result = method(**arg_dict)
+ if type(result) is dict:
+ return self._serialize(result, req)
+ else:
+ return result
+
+ def _serialize(self, data, request):
+ """
+ Serialize the given dict to the response type requested in request.
+ Uses self._serialization_metadata if it exists, which is a dict mapping
+ MIME types to information needed to serialize to that type.
+ """
+ _metadata = getattr(type(self), "_serialization_metadata", {})
+ serializer = Serializer(request.environ, _metadata)
+ return serializer.to_content_type(data)
-def route_args(application):
- """Decorator to make grabbing routing args more convenient."""
+class Serializer(object):
+ """
+ Serializes a dictionary to a Content Type specified by a WSGI environment.
+ """
- def wrapper(self, req):
- """Call application with req and parsed routing args from."""
- return application(self, req, req.environ['wsgiorg.routing_args'][1])
+ def __init__(self, environ, metadata=None):
+ """
+ Create a serializer based on the given WSGI environment.
+ 'metadata' is an optional dict mapping MIME types to information
+ needed to serialize a dictionary to that type.
+ """
+ self.environ = environ
+ self.metadata = metadata or {}
- return wrapper
+ def to_content_type(self, data):
+ """
+ Serialize a dictionary into a string. The format of the string
+ will be decided based on the Content Type requested in self.environ:
+ by Accept: header, or by URL suffix.
+ """
+ mimetype = 'application/xml'
+ # TODO(gundlach): determine mimetype from request
+
+ if mimetype == 'application/json':
+ import json
+ return json.dumps(data)
+ elif mimetype == 'application/xml':
+ metadata = self.metadata.get('application/xml', {})
+ # We expect data to contain a single key which is the XML root.
+ root_key = data.keys()[0]
+ from xml.dom import minidom
+ doc = minidom.Document()
+ node = self._to_xml_node(doc, metadata, root_key, data[root_key])
+ return node.toprettyxml(indent=' ')
+ else:
+ return repr(data)
+
+ def _to_xml_node(self, doc, metadata, nodename, data):
+ """Recursive method to convert data members to XML nodes."""
+ result = doc.createElement(nodename)
+ if type(data) is list:
+ singular = metadata.get('plurals', {}).get(nodename, None)
+ if singular is None:
+ if nodename.endswith('s'):
+ singular = nodename[:-1]
+ else:
+ singular = 'item'
+ for item in data:
+ node = self._to_xml_node(doc, metadata, singular, item)
+ result.appendChild(node)
+ elif type(data) is dict:
+ attrs = metadata.get('attributes', {}).get(nodename, {})
+ for k, v in data.items():
+ if k in attrs:
+ result.setAttribute(k, str(v))
+ else:
+ node = self._to_xml_node(doc, metadata, k, v)
+ result.appendChild(node)
+ else: # atom
+ node = doc.createTextNode(str(data))
+ result.appendChild(node)
+ return result
diff --git a/nova/wsgi_test.py b/nova/wsgi_test.py
new file mode 100644
index 000000000..786dc1bce
--- /dev/null
+++ b/nova/wsgi_test.py
@@ -0,0 +1,96 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test WSGI basics and provide some helper functions for other WSGI tests.
+"""
+
+import unittest
+
+import routes
+import webob
+
+from nova import wsgi
+
+
+class Test(unittest.TestCase):
+
+ def test_debug(self):
+
+ class Application(wsgi.Application):
+ """Dummy application to test debug."""
+
+ def __call__(self, environ, start_response):
+ start_response("200", [("X-Test", "checking")])
+ return ['Test result']
+
+ application = wsgi.Debug(Application())
+ result = webob.Request.blank('/').get_response(application)
+ self.assertEqual(result.body, "Test result")
+
+ def test_router(self):
+
+ class Application(wsgi.Application):
+ """Test application to call from router."""
+
+ def __call__(self, environ, start_response):
+ start_response("200", [])
+ return ['Router result']
+
+ class Router(wsgi.Router):
+ """Test router."""
+
+ def __init__(self):
+ mapper = routes.Mapper()
+ mapper.connect("/test", controller=Application())
+ super(Router, self).__init__(mapper)
+
+ result = webob.Request.blank('/test').get_response(Router())
+ self.assertEqual(result.body, "Router result")
+ result = webob.Request.blank('/bad').get_response(Router())
+ self.assertNotEqual(result.body, "Router result")
+
+ def test_controller(self):
+
+ class Controller(wsgi.Controller):
+ """Test controller to call from router."""
+ test = self
+
+ def show(self, req, id): # pylint: disable-msg=W0622,C0103
+ """Default action called for requests with an ID."""
+ self.test.assertEqual(req.path_info, '/tests/123')
+ self.test.assertEqual(id, '123')
+ return id
+
+ class Router(wsgi.Router):
+ """Test router."""
+
+ def __init__(self):
+ mapper = routes.Mapper()
+ mapper.resource("test", "tests", controller=Controller())
+ super(Router, self).__init__(mapper)
+
+ result = webob.Request.blank('/tests/123').get_response(Router())
+ self.assertEqual(result.body, "123")
+ result = webob.Request.blank('/test/123').get_response(Router())
+ self.assertNotEqual(result.body, "123")
+
+ def test_serializer(self):
+ # TODO(eday): Placeholder for serializer testing.
+ pass
diff --git a/pylintrc b/pylintrc
index 53d02d6b2..6702ca895 100644
--- a/pylintrc
+++ b/pylintrc
@@ -1,19 +1,26 @@
[Messages Control]
-disable-msg=C0103
+# W0511: TODOs in code comments are fine.
+# W0142: *args and **kwargs are fine.
+disable-msg=W0511,W0142
[Basic]
-# Variables can be 1 to 31 characters long, with
-# lowercase and underscores
+# Variable names can be 1 to 31 characters long, with lowercase and underscores
variable-rgx=[a-z_][a-z0-9_]{0,30}$
+# Argument names can be 2 to 31 characters long, with lowercase and underscores
+argument-rgx=[a-z_][a-z0-9_]{1,30}$
+
# Method names should be at least 3 characters long
# and be lowecased with underscores
method-rgx=[a-z_][a-z0-9_]{2,50}$
-[MESSAGES CONTROL]
-# TODOs in code comments are fine...
-disable-msg=W0511
+# Module names matching nova-* are ok (files in bin/)
+module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(nova-[a-z0-9_-]+))$
+
+# Don't require docstrings on tests.
+no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$
[Design]
max-public-methods=100
min-public-methods=0
+max-args=6
diff --git a/run_tests.py b/run_tests.py
index d90ac8175..c47cbe2ec 100644
--- a/run_tests.py
+++ b/run_tests.py
@@ -38,11 +38,11 @@ Due to our use of multiprocessing it we frequently get some ignorable
'Interrupted system call' exceptions after test completion.
"""
+
import __main__
import os
import sys
-
from twisted.scripts import trial as trial_script
from nova import datastore
@@ -55,23 +55,23 @@ from nova.tests.api_unittest import *
from nova.tests.cloud_unittest import *
from nova.tests.compute_unittest import *
from nova.tests.flags_unittest import *
-from nova.tests.model_unittest import *
+#from nova.tests.model_unittest import *
from nova.tests.network_unittest import *
from nova.tests.objectstore_unittest import *
from nova.tests.process_unittest import *
from nova.tests.rpc_unittest import *
+from nova.tests.service_unittest import *
from nova.tests.validator_unittest import *
from nova.tests.volume_unittest import *
FLAGS = flags.FLAGS
-
flags.DEFINE_bool('flush_db', True,
'Flush the database before running fake tests')
-
flags.DEFINE_string('tests_stderr', 'run_tests.err.log',
- 'Path to where to pipe STDERR during test runs. '
- 'Default = "run_tests.err.log"')
+ 'Path to where to pipe STDERR during test runs.'
+ ' Default = "run_tests.err.log"')
+
if __name__ == '__main__':
OptionsClass = twistd.WrapTwistedOptions(trial_script.Options)
diff --git a/run_tests.sh b/run_tests.sh
index 85d7c8834..6ea40d95e 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -1,12 +1,66 @@
-#!/bin/bash
+#!/bin/bash
+
+function usage {
+ echo "Usage: $0 [OPTION]..."
+ echo "Run Nova's test suite(s)"
+ echo ""
+ echo " -V, --virtual-env Always use virtualenv. Install automatically if not present"
+ echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment"
+ echo " -h, --help Print this usage message"
+ echo ""
+  echo "Note: with no options specified, the script will try to run the tests in a virtual environment."
+ echo " If no virtualenv is found, the script will ask if you would like to create one. If you "
+ echo " prefer to run tests NOT in a virtual environment, simply pass the -N option."
+ exit
+}
+
+function process_options {
+ array=$1
+ elements=${#array[@]}
+ for (( x=0;x<$elements;x++)); do
+ process_option ${array[${x}]}
+ done
+}
+
+function process_option {
+ option=$1
+ case $option in
+ -h|--help) usage;;
+ -V|--virtual-env) let always_venv=1; let never_venv=0;;
+ -N|--no-virtual-env) let always_venv=0; let never_venv=1;;
+ esac
+}
venv=.nova-venv
with_venv=tools/with_venv.sh
+always_venv=0
+never_venv=0
+options=("$@")
+
+process_options $options
+
+if [ $never_venv -eq 1 ]; then
+ # Just run the test suites in current environment
+ python run_tests.py
+ exit
+fi
if [ -e ${venv} ]; then
${with_venv} python run_tests.py $@
else
- echo "No virtual environment found...creating one"
- python tools/install_venv.py
+ if [ $always_venv -eq 1 ]; then
+ # Automatically install the virtualenv
+ python tools/install_venv.py
+ else
+ echo -e "No virtual environment found...create one? (Y/n) \c"
+ read use_ve
+ if [ "x$use_ve" = "xY" ]; then
+ # Install the virtualenv and run the test suite in it
+ python tools/install_venv.py
+ else
+ python run_tests.py
+ exit
+ fi
+ fi
${with_venv} python run_tests.py $@
fi
diff --git a/setup.py b/setup.py
index 0fd286f7d..25252e8f4 100644
--- a/setup.py
+++ b/setup.py
@@ -52,5 +52,5 @@ setup(name='nova',
'bin/nova-manage',
'bin/nova-network',
'bin/nova-objectstore',
- 'bin/nova-rsapi',
+ 'bin/nova-api-new',
'bin/nova-volume'])
diff --git a/tools/install_venv.py b/tools/install_venv.py
index e1a270638..5d2369a96 100644
--- a/tools/install_venv.py
+++ b/tools/install_venv.py
@@ -37,30 +37,30 @@ def die(message, *args):
sys.exit(1)
-def run_command(cmd, redirect_output=True, error_ok=False):
- """Runs a command in an out-of-process shell, returning the
- output of that command
+def run_command(cmd, redirect_output=True, check_exit_code=True):
+ """
+ Runs a command in an out-of-process shell, returning the
+ output of that command. Working directory is ROOT.
"""
if redirect_output:
stdout = subprocess.PIPE
else:
stdout = None
- proc = subprocess.Popen(cmd, stdout=stdout)
+ proc = subprocess.Popen(cmd, cwd=ROOT, stdout=stdout)
output = proc.communicate()[0]
- if not error_ok and proc.returncode != 0:
+ if check_exit_code and proc.returncode != 0:
die('Command "%s" failed.\n%s', ' '.join(cmd), output)
return output
-HAS_EASY_INSTALL = bool(run_command(['which', 'easy_install']).strip())
-HAS_VIRTUALENV = bool(run_command(['which', 'virtualenv']).strip())
+HAS_EASY_INSTALL = bool(run_command(['which', 'easy_install'], check_exit_code=False).strip())
+HAS_VIRTUALENV = bool(run_command(['which', 'virtualenv'], check_exit_code=False).strip())
def check_dependencies():
"""Make sure virtualenv is in the path."""
- print 'Checking for virtualenv...',
if not HAS_VIRTUALENV:
print 'not found.'
# Try installing it via easy_install...
@@ -94,6 +94,12 @@ def install_dependencies(venv=VENV):
redirect_output=False)
+ # Tell the virtual env how to "import nova"
+ pthfile = os.path.join(venv, "lib", "python2.6", "site-packages", "nova.pth")
+ f = open(pthfile, 'w')
+ f.write("%s\n" % ROOT)
+
+
def print_help():
help = """
Nova development environment setup is complete.
diff --git a/tools/pip-requires b/tools/pip-requires
index c173d6221..dd69708ce 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -1,5 +1,6 @@
+SQLAlchemy==0.6.3
pep8==0.5.0
-pylint==0.21.1
+pylint==0.19
IPy==0.70
M2Crypto==0.20.2
amqplib==0.6.1
@@ -11,7 +12,9 @@ lockfile==0.8
python-daemon==1.5.5
python-gflags==1.3
redis==2.0.0
+routes==1.12.3
tornado==1.0
+webob==0.9.8
wsgiref==0.1.2
zope.interface==3.6.1
mox==0.5.0