From b3feee7425334f4f2369edc100ed4422e60e2288 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 16 Jul 2010 19:58:12 +0000 Subject: remove calls to runthis from node --- nova/compute/node.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/nova/compute/node.py b/nova/compute/node.py index 3abd20120..3e39e65fc 100644 --- a/nova/compute/node.py +++ b/nova/compute/node.py @@ -223,16 +223,20 @@ class Node(object, service.Service): volume_id = None, mountpoint = None): volume = storage.get_volume(volume_id) yield self._init_aoe() - yield utils.runthis("Attached Volume: %s", - "sudo virsh attach-disk %s /dev/etherd/%s %s" - % (instance_id, volume['aoe_device'], mountpoint.split("/")[-1])) + yield process.SharedPool().simple_execute( + "sudo virsh attach-disk %s /dev/etherd/%s %s" % + (instance_id, + volume['aoe_device'], + mountpoint.rpartition('/dev/')[2])) volume.finish_attach() defer.returnValue(True) + @defer.inlineCallbacks def _init_aoe(self): - utils.runthis("Doin an AoE discover, returns %s", "sudo aoe-discover") - utils.runthis("Doin an AoE stat, returns %s", "sudo aoe-stat") + yield process.SharedPool().simple_execute("sudo aoe-discover") + yield process.SharedPool().simple_execute("sudo aoe-stat") + @defer.inlineCallbacks @exception.wrap_exception def detach_volume(self, instance_id, volume_id): """ detach a volume from an instance """ @@ -240,10 +244,10 @@ class Node(object, service.Service): # name without the leading /dev/ volume = storage.get_volume(volume_id) target = volume['mountpoint'].rpartition('/dev/')[2] - utils.runthis("Detached Volume: %s", "sudo virsh detach-disk %s %s " - % (instance_id, target)) + yield process.SharedPool().simple_execute( + "sudo virsh detach-disk %s %s " % (instance_id, target)) volume.finish_detach() - return defer.succeed(True) + defer.returnValue(True) class Group(object): -- cgit From 049f27d00900f4b6e810d35f8e0e1ec3520d053b Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 16 Jul 2010 19:58:50 +0000 Subject: change volume code to use twisted --- nova/volume/storage.py | 39 ++++++++++++++++++++++++++++++--------- 1 file changed, 30 insertions(+), 9 deletions(-) diff --git a/nova/volume/storage.py b/nova/volume/storage.py index de20f30b5..305ef527a 100644 --- a/nova/volume/storage.py +++ b/nova/volume/storage.py @@ -35,6 +35,7 @@ from twisted.internet import defer from nova import datastore from nova import exception from nova import flags +from nova import process from nova import utils from nova import validate @@ -143,17 +144,24 @@ class BlockStore(object): datastore.Redis.instance().srem('volumes:%s' % (FLAGS.storage_name), vol['volume_id']) return True + @defer.inlineCallbacks def _restart_exports(self): if FLAGS.fake_storage: return - utils.runthis("Setting exports to auto: %s", "sudo vblade-persist auto all") - utils.runthis("Starting all exports: %s", "sudo vblade-persist start all") + yield process.SharedPool().simple_execute( + "sudo vblade-persist auto all") + yield process.SharedPool().simple_execute( + "sudo vblade-persist start all") + @defer.inlineCallbacks def _init_volume_group(self): if FLAGS.fake_storage: return - utils.runthis("PVCreate returned: %s", "sudo pvcreate %s" % (FLAGS.storage_dev)) - utils.runthis("VGCreate returned: %s", "sudo vgcreate %s %s" % (FLAGS.volume_group, FLAGS.storage_dev)) + yield process.SharedPool().simple_execute( + "sudo pvcreate %s" % (FLAGS.storage_dev)) + yield process.SharedPool().simple_execute( + "sudo vgcreate %s %s" % 
(FLAGS.volume_group, + FLAGS.storage_dev)) class Volume(datastore.BasicModel): @@ -227,15 +235,22 @@ class Volume(datastore.BasicModel): self._delete_lv() super(Volume, self).destroy() + @defer.inlineCallbacks def create_lv(self): if str(self['size']) == '0': sizestr = '100M' else: sizestr = '%sG' % self['size'] - utils.runthis("Creating LV: %s", "sudo lvcreate -L %s -n %s %s" % (sizestr, self['volume_id'], FLAGS.volume_group)) + yield process.SharedPool().simple_execute( + "sudo lvcreate -L %s -n %s %s" % (sizestr, + self['volume_id'], + FLAGS.volume_group)) + @defer.inlineCallbacks def _delete_lv(self): - utils.runthis("Removing LV: %s", "sudo lvremove -f %s/%s" % (FLAGS.volume_group, self['volume_id'])) + yield process.SharedPool().simple_execute( + "sudo lvremove -f %s/%s" % (FLAGS.volume_group, + self['volume_id'])) def _setup_export(self): (shelf_id, blade_id) = get_next_aoe_numbers() @@ -245,8 +260,9 @@ class Volume(datastore.BasicModel): self.save() self._exec_export() + @defer.inlineCallbacks def _exec_export(self): - utils.runthis("Creating AOE export: %s", + yield process.SharedPool().simple_execute( "sudo vblade-persist setup %s %s %s /dev/%s/%s" % (self['shelf_id'], self['blade_id'], @@ -254,9 +270,14 @@ class Volume(datastore.BasicModel): FLAGS.volume_group, self['volume_id'])) + @defer.inlineCallbacks def _remove_export(self): - utils.runthis("Stopped AOE export: %s", "sudo vblade-persist stop %s %s" % (self['shelf_id'], self['blade_id'])) - utils.runthis("Destroyed AOE export: %s", "sudo vblade-persist destroy %s %s" % (self['shelf_id'], self['blade_id'])) + yield process.SharedPool().simple_execute( + "sudo vblade-persist stop %s %s" % (self['shelf_id'], + self['blade_id'])) + yield process.SharedPool().simple_execute( + "sudo vblade-persist destroy %s %s" % (self['shelf_id'], + self['blade_id'])) class FakeVolume(Volume): -- cgit From 382381f74ca3423958add26b2578c4e77282a9a0 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 16 Jul 2010 20:50:08 +0000 Subject: simplify call to simple_execute --- nova/compute/node.py | 8 ++++---- nova/volume/storage.py | 18 +++++++++--------- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/nova/compute/node.py b/nova/compute/node.py index 8874ef17e..7cae86d02 100644 --- a/nova/compute/node.py +++ b/nova/compute/node.py @@ -223,7 +223,7 @@ class Node(object, service.Service): volume_id = None, mountpoint = None): volume = storage.get_volume(volume_id) yield self._init_aoe() - yield process.SharedPool().simple_execute( + yield process.simple_execute( "sudo virsh attach-disk %s /dev/etherd/%s %s" % (instance_id, volume['aoe_device'], @@ -233,8 +233,8 @@ class Node(object, service.Service): @defer.inlineCallbacks def _init_aoe(self): - yield process.SharedPool().simple_execute("sudo aoe-discover") - yield process.SharedPool().simple_execute("sudo aoe-stat") + yield process.simple_execute("sudo aoe-discover") + yield process.simple_execute("sudo aoe-stat") @defer.inlineCallbacks @exception.wrap_exception @@ -244,7 +244,7 @@ class Node(object, service.Service): # name without the leading /dev/ volume = storage.get_volume(volume_id) target = volume['mountpoint'].rpartition('/dev/')[2] - yield process.SharedPool().simple_execute( + yield process.simple_execute( "sudo virsh detach-disk %s %s " % (instance_id, target)) volume.finish_detach() defer.returnValue(True) diff --git a/nova/volume/storage.py b/nova/volume/storage.py index 305ef527a..5424b092f 100644 --- a/nova/volume/storage.py +++ b/nova/volume/storage.py @@ 
-148,18 +148,18 @@ class BlockStore(object): def _restart_exports(self): if FLAGS.fake_storage: return - yield process.SharedPool().simple_execute( + yield process.simple_execute( "sudo vblade-persist auto all") - yield process.SharedPool().simple_execute( + yield process.simple_execute( "sudo vblade-persist start all") @defer.inlineCallbacks def _init_volume_group(self): if FLAGS.fake_storage: return - yield process.SharedPool().simple_execute( + yield process.simple_execute( "sudo pvcreate %s" % (FLAGS.storage_dev)) - yield process.SharedPool().simple_execute( + yield process.simple_execute( "sudo vgcreate %s %s" % (FLAGS.volume_group, FLAGS.storage_dev)) @@ -241,14 +241,14 @@ class Volume(datastore.BasicModel): sizestr = '100M' else: sizestr = '%sG' % self['size'] - yield process.SharedPool().simple_execute( + yield process.simple_execute( "sudo lvcreate -L %s -n %s %s" % (sizestr, self['volume_id'], FLAGS.volume_group)) @defer.inlineCallbacks def _delete_lv(self): - yield process.SharedPool().simple_execute( + yield process.simple_execute( "sudo lvremove -f %s/%s" % (FLAGS.volume_group, self['volume_id'])) @@ -262,7 +262,7 @@ class Volume(datastore.BasicModel): @defer.inlineCallbacks def _exec_export(self): - yield process.SharedPool().simple_execute( + yield process.simple_execute( "sudo vblade-persist setup %s %s %s /dev/%s/%s" % (self['shelf_id'], self['blade_id'], @@ -272,10 +272,10 @@ class Volume(datastore.BasicModel): @defer.inlineCallbacks def _remove_export(self): - yield process.SharedPool().simple_execute( + yield process.simple_execute( "sudo vblade-persist stop %s %s" % (self['shelf_id'], self['blade_id'])) - yield process.SharedPool().simple_execute( + yield process.simple_execute( "sudo vblade-persist destroy %s %s" % (self['shelf_id'], self['blade_id'])) -- cgit From 73af1a84eb682423bf40323387d739778765e138 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 16 Jul 2010 21:52:10 +0000 Subject: make nova-volume start with twisteds daemonize stuff --- bin/nova-compute | 9 +++----- bin/nova-volume | 61 +++++++++++++++++++++++++++++++++++--------------- nova/volume/storage.py | 25 ++++++++++++++++----- 3 files changed, 65 insertions(+), 30 deletions(-) diff --git a/bin/nova-compute b/bin/nova-compute index 5635efbaf..4b559beb4 100755 --- a/bin/nova-compute +++ b/bin/nova-compute @@ -33,9 +33,6 @@ NOVA_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'nova') if os.path.exists(NOVA_PATH): sys.path.insert(0, os.path.dirname(NOVA_PATH)) - -from carrot import connection -from carrot import messaging from twisted.internet import task from twisted.application import service @@ -50,8 +47,8 @@ FLAGS = flags.FLAGS # context when the twistd.serve() call is made below so any # flags we define here will have to be conditionally defined, # flags defined by imported modules are safe. 
-if 'node_report_state_interval' not in FLAGS: - flags.DEFINE_integer('node_report_state_interval', 10, +if 'compute_report_state_interval' not in FLAGS: + flags.DEFINE_integer('compute_report_state_interval', 10, 'seconds between nodes reporting state to cloud', lower_bound=1) logging.getLogger().setLevel(logging.DEBUG) @@ -75,7 +72,7 @@ def main(): bin_name = os.path.basename(__file__) pulse = task.LoopingCall(n.report_state, FLAGS.node_name, bin_name) - pulse.start(interval=FLAGS.node_report_state_interval, now=False) + pulse.start(interval=FLAGS.compute_report_state_interval, now=False) injected = consumer_all.attach_to_twisted() injected = consumer_node.attach_to_twisted() diff --git a/bin/nova-volume b/bin/nova-volume index df9fb5c7a..64b726627 100755 --- a/bin/nova-volume +++ b/bin/nova-volume @@ -22,22 +22,37 @@ """ import logging -from tornado import ioloop +import os +import sys + +# NOTE(termie): kludge so that we can run this from the bin directory in the +# checkout without having to screw with paths +NOVA_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'nova') +if os.path.exists(NOVA_PATH): + sys.path.insert(0, os.path.dirname(NOVA_PATH)) + +from twisted.internet import task +from twisted.application import service from nova import flags from nova import rpc -from nova import server -from nova import utils +from nova import twistd from nova.volume import storage FLAGS = flags.FLAGS -flags.DEFINE_integer('storage_report_state_interval', 10, - 'seconds between broadcasting state to cloud', - lower_bound=1) +# NOTE(termie): This file will necessarily be re-imported under different +# context when the twistd.serve() call is made below so any +# flags we define here will have to be conditionally defined, +# flags defined by imported modules are safe. +if 'volume_report_state_interval' not in FLAGS: + flags.DEFINE_integer('volume_report_state_interval', 10, + 'seconds between nodes reporting state to cloud', + lower_bound=1) -def main(argv): +def main(): + logging.warn('Starting volume node') bs = storage.BlockStore() conn = rpc.Connection.instance() @@ -51,19 +66,29 @@ def main(argv): topic='%s.%s' % (FLAGS.storage_topic, FLAGS.node_name), proxy=bs) - io_inst = ioloop.IOLoop.instance() - scheduler = ioloop.PeriodicCallback( - lambda: bs.report_state(), - FLAGS.storage_report_state_interval * 1000, - io_loop=io_inst) + bin_name = os.path.basename(__file__) + pulse = task.LoopingCall(bs.report_state, FLAGS.node_name, bin_name) + pulse.start(interval=FLAGS.volume_report_state_interval, now=False) + + injected = consumer_all.attach_to_twisted() + injected = consumer_node.attach_to_twisted() - injected = consumer_all.attachToTornado(io_inst) - injected = consumer_node.attachToTornado(io_inst) - scheduler.start() - io_inst.start() + # This is the parent service that twistd will be looking for when it + # parses this file, return it so that we can get it into globals below + application = service.Application(bin_name) + bs.setServiceParent(application) + return application +# NOTE(termie): When this script is executed from the commandline what it will +# actually do is tell the twistd application runner that it +# should run this file as a twistd application (see below). 
if __name__ == '__main__': - utils.default_flagfile() - server.serve('nova-volume', main) + twistd.serve(__file__) +# NOTE(termie): When this script is loaded by the twistd application runner +# this code path will be executed and twistd will expect a +# variable named 'application' to be available, it will then +# handle starting it and stopping it. +if __name__ == '__builtin__': + application = main() diff --git a/nova/volume/storage.py b/nova/volume/storage.py index 5424b092f..121bc01e6 100644 --- a/nova/volume/storage.py +++ b/nova/volume/storage.py @@ -28,8 +28,8 @@ import os import shutil import socket import tempfile -import time -from tornado import ioloop + +from twisted.application import service from twisted.internet import defer from nova import datastore @@ -38,6 +38,7 @@ from nova import flags from nova import process from nova import utils from nova import validate +from nova.compute import model FLAGS = flags.FLAGS @@ -81,7 +82,7 @@ def get_volume(volume_id): return volume_class(volume_id=volume_id) raise exception.Error("Volume does not exist") -class BlockStore(object): +class BlockStore(object, service.Service): """ There is one BlockStore running on each volume node. However, each BlockStore can report on the state of @@ -103,9 +104,21 @@ class BlockStore(object): except Exception, err: pass - def report_state(self): - #TODO: aggregate the state of the system - pass + @defer.inlineCallbacks + def report_state(self, nodename, daemon): + # TODO(termie): make this pattern be more elegant. -todd + try: + record = model.Daemon(nodename, daemon) + record.heartbeat() + if getattr(self, "model_disconnected", False): + self.model_disconnected = False + logging.error("Recovered model server connection!") + + except model.ConnectionError, ex: + if not getattr(self, "model_disconnected", False): + self.model_disconnected = True + logging.exception("model server went away") + yield @validate.rangetest(size=(0, 1000)) def create_volume(self, size, user_id, project_id): -- cgit From 0cf417c9e9c48352ea11c2333794545467585542 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 16 Jul 2010 21:53:17 +0000 Subject: fix conf file to no longer have daemonize=1 because twistd daemonizes by default --- debian/nova-volume.conf | 1 - 1 file changed, 1 deletion(-) diff --git a/debian/nova-volume.conf b/debian/nova-volume.conf index af3271d3b..820c92918 100644 --- a/debian/nova-volume.conf +++ b/debian/nova-volume.conf @@ -1,4 +1,3 @@ ---daemonize=1 --ca_path=/var/lib/nova/CA --keys_path=/var/lib/nova/keys --datastore_path=/var/lib/nova/keeper -- cgit From f39d6549d4e57941b14f328fa5a52a3a5f925d42 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Sun, 18 Jul 2010 18:15:12 +0100 Subject: In preparation for XenAPI support, refactor the interface between nova.compute and the hypervisor (i.e. libvirt). compute.node is no longer coupled tightly with libvirt. Instead, hypervisor connections are handled through a simple abstract interface. This has the additional advantage that there is no need to riddle the code with FLAGS.fake_libvirt checks, as we now have an interface behind which we can mock. The libvirt-specific code, and the fakevirt code used for unit tests, have moved into nova.virt. The fake_libvirt flag has been replaced with a connection_type flag, that will allow us to specify different connection types. The disk image handling (S3 or local disk image fetch) has moved into nova.virt.images, where it will be easier to share between connection types. 
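As a rough sketch of the resulting driver interface (the StubInstance class and the 'i-abc123' id below are hypothetical; the flag assignment mirrors what the test suite's fake_flags.py does), compute code can now pick and drive a hypervisor driver without touching libvirt directly:

    from nova import flags
    from nova.virt import connection as virt_connection
    from nova.compute import power_state

    FLAGS = flags.FLAGS
    FLAGS.connection_type = 'fake'      # or 'libvirt' on a real compute node

    class StubInstance(object):         # hypothetical stand-in for an Instance
        name = 'i-abc123'               # hypothetical instance id

    conn = virt_connection.get_connection()
    conn.spawn(StubInstance())          # the fake driver records a RUNNING instance
    print conn.list_instances()         # ['i-abc123']
    print power_state.name(conn.get_info('i-abc123')['state'])   # 'running'
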
The power_state values (Instance.RUNNING etc) and the INSTANCE_TYPES dictionary have moved into their own files (nova.compute.instance_types and nova.compute.power_state) so that we can share them without mutual dependencies between nova.compute.node and nova.virt.libvirt_conn. --- bin/dhcpleasor.py | 2 +- docs/fakes.rst | 4 +- nova/compute/instance_types.py | 30 ++++ nova/compute/node.py | 276 +++-------------------------- nova/compute/power_state.py | 41 +++++ nova/endpoint/cloud.py | 3 +- nova/fakevirt.py | 112 ------------ nova/flags.py | 3 +- nova/tests/access_unittest.py | 2 +- nova/tests/cloud_unittest.py | 8 +- nova/tests/fake_flags.py | 2 +- nova/tests/future_unittest.py | 2 +- nova/tests/model_unittest.py | 2 +- nova/tests/network_unittest.py | 2 +- nova/tests/node_unittest.py | 2 +- nova/tests/objectstore_unittest.py | 2 + nova/tests/real_flags.py | 2 +- nova/tests/storage_unittest.py | 2 +- nova/tests/users_unittest.py | 2 +- nova/virt/__init__.py | 15 ++ nova/virt/connection.py | 42 +++++ nova/virt/fake.py | 81 +++++++++ nova/virt/images.py | 55 ++++++ nova/virt/libvirt_conn.py | 353 +++++++++++++++++++++++++++++++++++++ 24 files changed, 667 insertions(+), 378 deletions(-) create mode 100644 nova/compute/instance_types.py create mode 100644 nova/compute/power_state.py delete mode 100644 nova/fakevirt.py create mode 100644 nova/virt/__init__.py create mode 100644 nova/virt/connection.py create mode 100644 nova/virt/fake.py create mode 100644 nova/virt/images.py create mode 100644 nova/virt/libvirt_conn.py diff --git a/bin/dhcpleasor.py b/bin/dhcpleasor.py index 07ff325f1..4a3f374d5 100755 --- a/bin/dhcpleasor.py +++ b/bin/dhcpleasor.py @@ -71,7 +71,7 @@ def main(argv=None): FLAGS.fake_rabbit = True FLAGS.redis_db = 8 FLAGS.network_size = 32 - FLAGS.fake_libvirt=True + FLAGS.connection_type = 'fake' FLAGS.fake_network=True FLAGS.fake_users = True action = argv[1] diff --git a/docs/fakes.rst b/docs/fakes.rst index bea8bc4e9..a993fb4c8 100644 --- a/docs/fakes.rst +++ b/docs/fakes.rst @@ -18,10 +18,10 @@ Nova Fakes ========== -The :mod:`fakevirt` Module +The :mod:`virt.fake` Module -------------------------- -.. automodule:: nova.fakevirt +.. automodule:: nova.virt.fake :members: :undoc-members: :show-inheritance: diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py new file mode 100644 index 000000000..439be3c7d --- /dev/null +++ b/nova/compute/instance_types.py @@ -0,0 +1,30 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +The built-in instance properties. 
+""" + +INSTANCE_TYPES = {} +INSTANCE_TYPES['m1.tiny'] = {'memory_mb': 512, 'vcpus': 1, 'local_gb': 0} +INSTANCE_TYPES['m1.small'] = {'memory_mb': 1024, 'vcpus': 1, 'local_gb': 10} +INSTANCE_TYPES['m1.medium'] = {'memory_mb': 2048, 'vcpus': 2, 'local_gb': 10} +INSTANCE_TYPES['m1.large'] = {'memory_mb': 4096, 'vcpus': 4, 'local_gb': 10} +INSTANCE_TYPES['m1.xlarge'] = {'memory_mb': 8192, 'vcpus': 4, 'local_gb': 10} +INSTANCE_TYPES['c1.medium'] = {'memory_mb': 2048, 'vcpus': 4, 'local_gb': 10} diff --git a/nova/compute/node.py b/nova/compute/node.py index d681ec661..7146d1279 100644 --- a/nova/compute/node.py +++ b/nova/compute/node.py @@ -20,93 +20,48 @@ Compute Node: Runs on each compute node, managing the - hypervisor using libvirt. + hypervisor using the virt module. """ import base64 -import json import logging import os -import shutil import sys from twisted.internet import defer from twisted.internet import task from twisted.application import service - -try: - import libvirt -except Exception, err: - logging.warning('no libvirt found') - from nova import exception -from nova import fakevirt from nova import flags from nova import process from nova import utils -from nova.compute import disk from nova.compute import model from nova.compute import network -from nova.objectstore import image # for image_path flag +from nova.compute import power_state +from nova.compute.instance_types import INSTANCE_TYPES +from nova.virt import connection as virt_connection from nova.volume import storage FLAGS = flags.FLAGS -flags.DEFINE_string('libvirt_xml_template', - utils.abspath('compute/libvirt.xml.template'), - 'Libvirt XML Template') -flags.DEFINE_bool('use_s3', True, - 'whether to get images from s3 or use local copy') flags.DEFINE_string('instances_path', utils.abspath('../instances'), 'where instances are stored on disk') -INSTANCE_TYPES = {} -INSTANCE_TYPES['m1.tiny'] = {'memory_mb': 512, 'vcpus': 1, 'local_gb': 0} -INSTANCE_TYPES['m1.small'] = {'memory_mb': 1024, 'vcpus': 1, 'local_gb': 10} -INSTANCE_TYPES['m1.medium'] = {'memory_mb': 2048, 'vcpus': 2, 'local_gb': 10} -INSTANCE_TYPES['m1.large'] = {'memory_mb': 4096, 'vcpus': 4, 'local_gb': 10} -INSTANCE_TYPES['m1.xlarge'] = {'memory_mb': 8192, 'vcpus': 4, 'local_gb': 10} -INSTANCE_TYPES['c1.medium'] = {'memory_mb': 2048, 'vcpus': 4, 'local_gb': 10} - - -def _image_path(path=''): - return os.path.join(FLAGS.images_path, path) - - -def _image_url(path): - return "%s:%s/_images/%s" % (FLAGS.s3_host, FLAGS.s3_port, path) - class Node(object, service.Service): """ Manages the running instances. 
""" def __init__(self): - """ load configuration options for this node and connect to libvirt """ + """ load configuration options for this node and connect to the hypervisor""" super(Node, self).__init__() self._instances = {} - self._conn = self._get_connection() + self._conn = virt_connection.get_connection() self._pool = process.ProcessPool() self.instdir = model.InstanceDirectory() # TODO(joshua): This needs to ensure system state, specifically: modprobe aoe - def _get_connection(self): - """ returns a libvirt connection object """ - # TODO(termie): maybe lazy load after initial check for permissions - # TODO(termie): check whether we can be disconnected - if FLAGS.fake_libvirt: - conn = fakevirt.FakeVirtConnection.instance() - else: - auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], - 'root', - None] - conn = libvirt.openAuth('qemu:///system', auth, 0) - if conn == None: - logging.error('Failed to open connection to the hypervisor') - sys.exit(1) - return conn - def noop(self): """ simple test of an AMQP message call """ return defer.succeed('PONG') @@ -122,8 +77,7 @@ class Node(object, service.Service): def adopt_instances(self): """ if there are instances already running, adopt them """ return defer.succeed(0) - instance_names = [self._conn.lookupByID(x).name() - for x in self._conn.listDomainsID()] + instance_names = self._conn.list_instances() for name in instance_names: try: new_inst = Instance.fromName(self._conn, self._pool, name) @@ -155,7 +109,7 @@ class Node(object, service.Service): logging.exception("model server went away") yield - # @exception.wrap_exception + @exception.wrap_exception def run_instance(self, instance_id, **_kwargs): """ launch a new instance with specified options """ logging.debug("Starting instance %s..." % (instance_id)) @@ -174,8 +128,7 @@ class Node(object, service.Service): logging.info("Instances current state is %s", new_inst.state) if new_inst.is_running(): raise exception.Error("Instance is already running") - d = new_inst.spawn() - return d + new_inst.spawn() @exception.wrap_exception def terminate_instance(self, instance_id): @@ -309,20 +262,6 @@ class Instance(object): self.datamodel.save() logging.debug("Finished init of Instance with id of %s" % name) - def toXml(self): - # TODO(termie): cache? 
- logging.debug("Starting the toXML method") - libvirt_xml = open(FLAGS.libvirt_xml_template).read() - xml_info = self.datamodel.copy() - # TODO(joshua): Make this xml express the attached disks as well - - # TODO(termie): lazy lazy hack because xml is annoying - xml_info['nova'] = json.dumps(self.datamodel.copy()) - libvirt_xml = libvirt_xml % xml_info - logging.debug("Finished the toXML method") - - return libvirt_xml - @classmethod def fromName(cls, conn, pool, name): """ use the saved data for reloading the instance """ @@ -333,7 +272,7 @@ class Instance(object): def set_state(self, state_code, state_description=None): self.datamodel['state'] = state_code if not state_description: - state_description = STATE_NAMES[state_code] + state_description = power_state.name(state_code) self.datamodel['state_description'] = state_description self.datamodel.save() @@ -347,37 +286,29 @@ class Instance(object): return self.datamodel['name'] def is_pending(self): - return (self.state == Instance.NOSTATE or self.state == 'pending') + return (self.state == power_state.NOSTATE or self.state == 'pending') def is_destroyed(self): - return self.state == Instance.SHUTOFF + return self.state == power_state.SHUTOFF def is_running(self): logging.debug("Instance state is: %s" % self.state) - return (self.state == Instance.RUNNING or self.state == 'running') + return (self.state == power_state.RUNNING or self.state == 'running') def describe(self): return self.datamodel def info(self): - logging.debug("Getting info for dom %s" % self.name) - virt_dom = self._conn.lookupByName(self.name) - (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info() - return {'state': state, - 'max_mem': max_mem, - 'mem': mem, - 'num_cpu': num_cpu, - 'cpu_time': cpu_time, - 'node_name': FLAGS.node_name} - - def basepath(self, path=''): - return os.path.abspath(os.path.join(self.datamodel['basepath'], path)) + result = self._conn.get_info(self.name) + result['node_name'] = FLAGS.node_name + return result def update_state(self): self.datamodel.update(self.info()) self.set_state(self.state) self.datamodel.save() # Extra, but harmless + @defer.inlineCallbacks @exception.wrap_exception def destroy(self): if self.is_destroyed(): @@ -385,38 +316,9 @@ class Instance(object): raise exception.Error('trying to destroy already destroyed' ' instance: %s' % self.name) - self.set_state(Instance.NOSTATE, 'shutting_down') - try: - virt_dom = self._conn.lookupByName(self.name) - virt_dom.destroy() - except Exception, _err: - pass - # If the instance is already terminated, we're still happy - d = defer.Deferred() - d.addCallback(lambda x: self._cleanup()) - d.addCallback(lambda x: self.datamodel.destroy()) - # TODO(termie): short-circuit me for tests - # WE'LL save this for when we do shutdown, - # instead of destroy - but destroy returns immediately - timer = task.LoopingCall(f=None) - def _wait_for_shutdown(): - try: - self.update_state() - if self.state == Instance.SHUTDOWN: - timer.stop() - d.callback(None) - except Exception: - self.set_state(Instance.SHUTDOWN) - timer.stop() - d.callback(None) - timer.f = _wait_for_shutdown - timer.start(interval=0.5, now=True) - return d - - def _cleanup(self): - target = os.path.abspath(self.datamodel['basepath']) - logging.info("Deleting instance files at %s", target) - shutil.rmtree(target) + self.set_state(power_state.NOSTATE, 'shutting_down') + yield self._conn.destroy(self) + self.datamodel.destroy() @defer.inlineCallbacks @exception.wrap_exception @@ -427,136 +329,26 @@ class Instance(object): 
'instance: %s (state: %s)' % (self.name, self.state)) logging.debug('rebooting instance %s' % self.name) - self.set_state(Instance.NOSTATE, 'rebooting') - yield self._conn.lookupByName(self.name).destroy() - self._conn.createXML(self.toXml(), 0) - - d = defer.Deferred() - timer = task.LoopingCall(f=None) - def _wait_for_reboot(): - try: - self.update_state() - if self.is_running(): - logging.debug('rebooted instance %s' % self.name) - timer.stop() - d.callback(None) - except Exception: - self.set_state(Instance.SHUTDOWN) - timer.stop() - d.callback(None) - timer.f = _wait_for_reboot - timer.start(interval=0.5, now=True) - yield d - - def _fetch_s3_image(self, image, path): - url = _image_url('%s/image' % image) - d = self._pool.simpleExecute('curl --silent %s -o %s' % (url, path)) - return d - - def _fetch_local_image(self, image, path): - source = _image_path('%s/image' % image) - d = self._pool.simpleExecute('cp %s %s' % (source, path)) - return d - - @defer.inlineCallbacks - def _create_image(self, libvirt_xml): - # syntactic nicety - data = self.datamodel - basepath = self.basepath - - # ensure directories exist and are writable - yield self._pool.simpleExecute('mkdir -p %s' % basepath()) - yield self._pool.simpleExecute('chmod 0777 %s' % basepath()) - - - # TODO(termie): these are blocking calls, it would be great - # if they weren't. - logging.info('Creating image for: %s', data['instance_id']) - f = open(basepath('libvirt.xml'), 'w') - f.write(libvirt_xml) - f.close() - - if FLAGS.fake_libvirt: - logging.info('fake_libvirt, nothing to do for create_image') - raise defer.returnValue(None); - - if FLAGS.use_s3: - _fetch_file = self._fetch_s3_image - else: - _fetch_file = self._fetch_local_image - - if not os.path.exists(basepath('disk')): - yield _fetch_file(data['image_id'], basepath('disk-raw')) - if not os.path.exists(basepath('kernel')): - yield _fetch_file(data['kernel_id'], basepath('kernel')) - if not os.path.exists(basepath('ramdisk')): - yield _fetch_file(data['ramdisk_id'], basepath('ramdisk')) - - execute = lambda cmd, input=None: self._pool.simpleExecute(cmd=cmd, - input=input, - error_ok=1) - - key = data['key_data'] - net = None - if FLAGS.simple_network: - with open(FLAGS.simple_network_template) as f: - net = f.read() % {'address': data['private_dns_name'], - 'network': FLAGS.simple_network_network, - 'netmask': FLAGS.simple_network_netmask, - 'gateway': FLAGS.simple_network_gateway, - 'broadcast': FLAGS.simple_network_broadcast, - 'dns': FLAGS.simple_network_dns} - if key or net: - logging.info('Injecting data into image %s', data['image_id']) - yield disk.inject_data(basepath('disk-raw'), key, net, execute=execute) - - if os.path.exists(basepath('disk')): - yield self._pool.simpleExecute('rm -f %s' % basepath('disk')) - - bytes = (INSTANCE_TYPES[data['instance_type']]['local_gb'] - * 1024 * 1024 * 1024) - yield disk.partition( - basepath('disk-raw'), basepath('disk'), bytes, execute=execute) + self.set_state(power_state.NOSTATE, 'rebooting') + yield self._conn.reboot(self) + self.update_state() @defer.inlineCallbacks @exception.wrap_exception def spawn(self): - self.set_state(Instance.NOSTATE, 'spawning') + self.set_state(power_state.NOSTATE, 'spawning') logging.debug("Starting spawn in Instance") - - xml = self.toXml() - self.set_state(Instance.NOSTATE, 'launching') - logging.info('self %s', self) try: - yield self._create_image(xml) - self._conn.createXML(xml, 0) - # TODO(termie): this should actually register - # a callback to check for successful boot - 
logging.debug("Instance is running") - - local_d = defer.Deferred() - timer = task.LoopingCall(f=None) - def _wait_for_boot(): - try: - self.update_state() - if self.is_running(): - logging.debug('booted instance %s' % self.name) - timer.stop() - local_d.callback(None) - except Exception: - self.set_state(Instance.SHUTDOWN) - logging.error('Failed to boot instance %s' % self.name) - timer.stop() - local_d.callback(None) - timer.f = _wait_for_boot - timer.start(interval=0.5, now=True) + yield self._conn.spawn(self) except Exception, ex: logging.debug(ex) - self.set_state(Instance.SHUTDOWN) + self.set_state(power_state.SHUTDOWN) + self.update_state() @exception.wrap_exception def console_output(self): - if not FLAGS.fake_libvirt: + # FIXME: Abstract this for Xen + if FLAGS.connection_type == 'libvirt': fname = os.path.abspath( os.path.join(self.datamodel['basepath'], 'console.log')) with open(fname, 'r') as f: @@ -564,13 +356,3 @@ class Instance(object): else: console = 'FAKE CONSOLE OUTPUT' return defer.succeed(console) - -STATE_NAMES = { - Instance.NOSTATE : 'pending', - Instance.RUNNING : 'running', - Instance.BLOCKED : 'blocked', - Instance.PAUSED : 'paused', - Instance.SHUTDOWN : 'shutdown', - Instance.SHUTOFF : 'shutdown', - Instance.CRASHED : 'crashed', -} diff --git a/nova/compute/power_state.py b/nova/compute/power_state.py new file mode 100644 index 000000000..b27aa4677 --- /dev/null +++ b/nova/compute/power_state.py @@ -0,0 +1,41 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""The various power states that a VM can be in.""" + +NOSTATE = 0x00 +RUNNING = 0x01 +BLOCKED = 0x02 +PAUSED = 0x03 +SHUTDOWN = 0x04 +SHUTOFF = 0x05 +CRASHED = 0x06 + + +def name(code): + d = { + NOSTATE : 'pending', + RUNNING : 'running', + BLOCKED : 'blocked', + PAUSED : 'paused', + SHUTDOWN: 'shutdown', + SHUTOFF : 'shutdown', + CRASHED : 'crashed', + } + return d[code] diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 3b7b4804b..51f5c859b 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -39,6 +39,7 @@ from nova.auth import users from nova.compute import model from nova.compute import network from nova.compute import node +from nova.compute.instance_types import INSTANCE_TYPES from nova.endpoint import images from nova.volume import storage @@ -103,7 +104,7 @@ class CloudController(object): result = {} for instance in self.instdir.all: if instance['project_id'] == project_id: - line = '%s slots=%d' % (instance['private_dns_name'], node.INSTANCE_TYPES[instance['instance_type']]['vcpus']) + line = '%s slots=%d' % (instance['private_dns_name'], INSTANCE_TYPES[instance['instance_type']]['vcpus']) if instance['key_name'] in result: result[instance['key_name']].append(line) else: diff --git a/nova/fakevirt.py b/nova/fakevirt.py deleted file mode 100644 index bcbeae548..000000000 --- a/nova/fakevirt.py +++ /dev/null @@ -1,112 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -A fake (in-memory) hypervisor+api. Allows nova testing w/o KVM and libvirt. 
-""" - -import StringIO -from xml.etree import ElementTree - - -class FakeVirtConnection(object): - # FIXME: networkCreateXML, listNetworks don't do anything since - # they aren't exercised in tests yet - - def __init__(self): - self.next_index = 0 - self.instances = {} - - @classmethod - def instance(cls): - if not hasattr(cls, '_instance'): - cls._instance = cls() - return cls._instance - - def lookupByID(self, i): - return self.instances[str(i)] - - def listDomainsID(self): - return self.instances.keys() - - def listNetworks(self): - return [] - - def lookupByName(self, instance_id): - for x in self.instances.values(): - if x.name() == instance_id: - return x - raise Exception('no instance found for instance_id: %s' % instance_id) - - def networkCreateXML(self, xml): - pass - - def createXML(self, xml, flags): - # parse the xml :( - xml_stringio = StringIO.StringIO(xml) - - my_xml = ElementTree.parse(xml_stringio) - name = my_xml.find('name').text - - fake_instance = FakeVirtInstance(conn=self, - index=str(self.next_index), - name=name, - xml=my_xml) - self.instances[str(self.next_index)] = fake_instance - self.next_index += 1 - - def _removeInstance(self, i): - self.instances.pop(str(i)) - - -class FakeVirtInstance(object): - NOSTATE = 0x00 - RUNNING = 0x01 - BLOCKED = 0x02 - PAUSED = 0x03 - SHUTDOWN = 0x04 - SHUTOFF = 0x05 - CRASHED = 0x06 - - def __init__(self, conn, index, name, xml): - self._conn = conn - self._destroyed = False - self._name = name - self._index = index - self._state = self.RUNNING - - def name(self): - return self._name - - def destroy(self): - if self._state == self.SHUTOFF: - raise Exception('instance already destroyed: %s' % self.name()) - self._state = self.SHUTDOWN - self._conn._removeInstance(self._index) - - def info(self): - return [self._state, 0, 2, 0, 0] - - def XMLDesc(self, flags): - return open('fakevirtinstance.xml', 'r').read() - - def blockStats(self, disk): - return [0L, 0L, 0L, 0L, null] - - def interfaceStats(self, iface): - return [0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L] diff --git a/nova/flags.py b/nova/flags.py index 60245a349..f9ebb28f7 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -36,14 +36,13 @@ DEFINE_bool = DEFINE_bool # Define any app-specific flags in their own files, docs at: # http://code.google.com/p/python-gflags/source/browse/trunk/gflags.py#39 +DEFINE_string('connection_type', 'libvirt', 'libvirt or fake') DEFINE_integer('s3_port', 3333, 's3 port') DEFINE_integer('s3_internal_port', 3334, 's3 port') DEFINE_string('s3_host', '127.0.0.1', 's3 host') #DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on') DEFINE_string('storage_topic', 'storage', 'the topic storage nodes listen on') -DEFINE_bool('fake_libvirt', False, - 'whether to use a fake libvirt or not') DEFINE_bool('verbose', False, 'show debug output') DEFINE_boolean('fake_rabbit', False, 'use a fake rabbit') DEFINE_bool('fake_network', False, 'should we use fake network devices and addresses') diff --git a/nova/tests/access_unittest.py b/nova/tests/access_unittest.py index 8500dd0cb..6cf7e893d 100644 --- a/nova/tests/access_unittest.py +++ b/nova/tests/access_unittest.py @@ -33,7 +33,7 @@ class Context(object): class AccessTestCase(test.BaseTestCase): def setUp(self): super(AccessTestCase, self).setUp() - FLAGS.fake_libvirt = True + FLAGS.connection_type = 'fake' FLAGS.fake_storage = True um = UserManager.instance() # Make test users diff --git a/nova/tests/cloud_unittest.py 
b/nova/tests/cloud_unittest.py index b8614fdc8..8040f6331 100644 --- a/nova/tests/cloud_unittest.py +++ b/nova/tests/cloud_unittest.py @@ -39,7 +39,7 @@ FLAGS = flags.FLAGS class CloudTestCase(test.BaseTestCase): def setUp(self): super(CloudTestCase, self).setUp() - self.flags(fake_libvirt=True, + self.flags(connection_type='fake', fake_storage=True, fake_users=True) @@ -72,7 +72,7 @@ class CloudTestCase(test.BaseTestCase): users.UserManager.instance().delete_user('admin') def test_console_output(self): - if FLAGS.fake_libvirt: + if FLAGS.connection_type == 'fake': logging.debug("Can't test instances without a real virtual env.") return instance_id = 'foo' @@ -83,7 +83,7 @@ class CloudTestCase(test.BaseTestCase): rv = yield self.node.terminate_instance(instance_id) def test_run_instances(self): - if FLAGS.fake_libvirt: + if FLAGS.connection_type == 'fake': logging.debug("Can't test instances without a real virtual env.") return image_id = FLAGS.default_image @@ -104,7 +104,7 @@ class CloudTestCase(test.BaseTestCase): break self.assert_(rv) - if not FLAGS.fake_libvirt: + if connection_type != 'fake': time.sleep(45) # Should use boto for polling here for reservations in rv['reservationSet']: # for res_id in reservations.keys(): diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py index d32f40d8f..5fcd2bcac 100644 --- a/nova/tests/fake_flags.py +++ b/nova/tests/fake_flags.py @@ -20,7 +20,7 @@ from nova import flags FLAGS = flags.FLAGS -FLAGS.fake_libvirt = True +FLAGS.connection_type = 'fake' FLAGS.fake_storage = True FLAGS.fake_rabbit = True FLAGS.fake_network = True diff --git a/nova/tests/future_unittest.py b/nova/tests/future_unittest.py index da5470ffe..31ec83065 100644 --- a/nova/tests/future_unittest.py +++ b/nova/tests/future_unittest.py @@ -39,7 +39,7 @@ FLAGS = flags.FLAGS class AdminTestCase(test.BaseTestCase): def setUp(self): super(AdminTestCase, self).setUp() - self.flags(fake_libvirt=True, + self.flags(connection_type='fake', fake_rabbit=True) self.conn = rpc.Connection.instance() diff --git a/nova/tests/model_unittest.py b/nova/tests/model_unittest.py index 1bd7e527f..b9eb2ac96 100644 --- a/nova/tests/model_unittest.py +++ b/nova/tests/model_unittest.py @@ -34,7 +34,7 @@ FLAGS = flags.FLAGS class ModelTestCase(test.TrialTestCase): def setUp(self): super(ModelTestCase, self).setUp() - self.flags(fake_libvirt=True, + self.flags(connection_type='fake', fake_storage=True, fake_users=True) diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index a822cc1d9..45ee6dbc7 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -33,7 +33,7 @@ from nova import utils class NetworkTestCase(test.TrialTestCase): def setUp(self): super(NetworkTestCase, self).setUp() - self.flags(fake_libvirt=True, + self.flags(connection_type='fake', fake_storage=True, fake_network=True, network_size=32) diff --git a/nova/tests/node_unittest.py b/nova/tests/node_unittest.py index 93942d79e..86d9775fd 100644 --- a/nova/tests/node_unittest.py +++ b/nova/tests/node_unittest.py @@ -57,7 +57,7 @@ class NodeConnectionTestCase(test.TrialTestCase): def setUp(self): logging.getLogger().setLevel(logging.DEBUG) super(NodeConnectionTestCase, self).setUp() - self.flags(fake_libvirt=True, + self.flags(connection_type='fake', fake_storage=True, fake_users=True) self.node = node.Node() diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py index f47ca7f00..f22256aaf 100644 --- a/nova/tests/objectstore_unittest.py +++ 
b/nova/tests/objectstore_unittest.py @@ -25,6 +25,8 @@ import tempfile from nova import flags from nova import objectstore +from nova.objectstore import bucket # for buckets_path flag +from nova.objectstore import image # for images_path flag from nova import test from nova.auth import users diff --git a/nova/tests/real_flags.py b/nova/tests/real_flags.py index 9e106f227..690fb640a 100644 --- a/nova/tests/real_flags.py +++ b/nova/tests/real_flags.py @@ -20,7 +20,7 @@ from nova import flags FLAGS = flags.FLAGS -FLAGS.fake_libvirt = False +FLAGS.connection_type = 'libvirt' FLAGS.fake_storage = False FLAGS.fake_rabbit = False FLAGS.fake_network = False diff --git a/nova/tests/storage_unittest.py b/nova/tests/storage_unittest.py index 60576d74f..f400cd2fd 100644 --- a/nova/tests/storage_unittest.py +++ b/nova/tests/storage_unittest.py @@ -34,7 +34,7 @@ class StorageTestCase(test.TrialTestCase): super(StorageTestCase, self).setUp() self.mynode = node.Node() self.mystorage = None - self.flags(fake_libvirt=True, + self.flags(connection_type='fake', fake_storage=True) self.mystorage = storage.BlockStore() diff --git a/nova/tests/users_unittest.py b/nova/tests/users_unittest.py index 301721075..824d5cff6 100644 --- a/nova/tests/users_unittest.py +++ b/nova/tests/users_unittest.py @@ -35,7 +35,7 @@ class UserTestCase(test.BaseTestCase): flush_db = False def setUp(self): super(UserTestCase, self).setUp() - self.flags(fake_libvirt=True, + self.flags(connection_type='fake', fake_storage=True) self.users = users.UserManager.instance() diff --git a/nova/virt/__init__.py b/nova/virt/__init__.py new file mode 100644 index 000000000..3d598c463 --- /dev/null +++ b/nova/virt/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/nova/virt/connection.py b/nova/virt/connection.py new file mode 100644 index 000000000..25c817415 --- /dev/null +++ b/nova/virt/connection.py @@ -0,0 +1,42 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from nova import flags +from nova.virt import fake +from nova.virt import libvirt_conn + + +FLAGS = flags.FLAGS + + +def get_connection(read_only=False): + # TODO(termie): maybe lazy load after initial check for permissions + # TODO(termie): check whether we can be disconnected + t = FLAGS.connection_type + if t == 'fake': + conn = fake.get_connection(read_only) + elif t == 'libvirt': + conn = libvirt_conn.get_connection(read_only) + else: + raise Exception('Unknown connection type "%s"' % t) + + if conn is None: + logging.error('Failed to open connection to the hypervisor') + sys.exit(1) + return conn diff --git a/nova/virt/fake.py b/nova/virt/fake.py new file mode 100644 index 000000000..d9ae5ac96 --- /dev/null +++ b/nova/virt/fake.py @@ -0,0 +1,81 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +A fake (in-memory) hypervisor+api. Allows nova testing w/o a hypervisor. +""" + +import logging + +from nova.compute import power_state + + +def get_connection(_): + # The read_only parameter is ignored. + return FakeConnection.instance() + + +class FakeConnection(object): + def __init__(self): + self.instances = {} + + @classmethod + def instance(cls): + if not hasattr(cls, '_instance'): + cls._instance = cls() + return cls._instance + + def list_instances(self): + return self.instances.keys() + + def spawn(self, instance): + fake_instance = FakeInstance() + self.instances[instance.name] = fake_instance + fake_instance._state = power_state.RUNNING + + def reboot(self, instance): + pass + + def destroy(self, instance): + del self.instances[instance.name] + + def get_info(self, instance_id): + i = self.instances[instance_id] + return {'state': i._state, + 'max_mem': 0, + 'mem': 0, + 'num_cpu': 2, + 'cpu_time': 0} + + def list_disks(self, instance_id): + return ['A_DISK'] + + def list_interfaces(self, instance_id): + return ['A_VIF'] + + def block_stats(self, instance_id, disk_id): + return [0L, 0L, 0L, 0L, null] + + def interface_stats(self, instance_id, iface_id): + return [0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L] + + +class FakeInstance(object): + def __init__(self): + self._state = power_state.NOSTATE diff --git a/nova/virt/images.py b/nova/virt/images.py new file mode 100644 index 000000000..0b11c134e --- /dev/null +++ b/nova/virt/images.py @@ -0,0 +1,55 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Handling of VM disk images. +""" + +import os.path + +from nova import flags + +FLAGS = flags.FLAGS + +flags.DEFINE_bool('use_s3', True, + 'whether to get images from s3 or use local copy') + + +def fetch(pool, image, path): + if FLAGS.use_s3: + f = _fetch_s3_image + else: + f = _fetch_local_image + return f(pool, image, path) + +def _fetch_s3_image(pool, image, path): + url = _image_url('%s/image' % image) + d = pool.simpleExecute('curl --silent %s -o %s' % (url, path)) + return d + +def _fetch_local_image(pool, image, path): + source = _image_path('%s/image' % image) + d = pool.simpleExecute('cp %s %s' % (source, path)) + return d + +def _image_path(path): + return os.path.join(FLAGS.images_path, path) + +def _image_url(path): + return "%s:%s/_images/%s" % (FLAGS.s3_host, FLAGS.s3_port, path) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py new file mode 100644 index 000000000..74fec650e --- /dev/null +++ b/nova/virt/libvirt_conn.py @@ -0,0 +1,353 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +A connection to a hypervisor (e.g. KVM) through libvirt. +""" + +import json +import logging +import os.path +import shutil +import sys + +from twisted.internet import defer +from twisted.internet import task + +from nova import exception +from nova import flags +from nova import process +from nova import utils +from nova.compute import disk +from nova.compute import instance_types +from nova.compute import power_state +from nova.virt import images + +libvirt = None +libxml2 = None + +FLAGS = flags.FLAGS +flags.DEFINE_string('libvirt_xml_template', + utils.abspath('compute/libvirt.xml.template'), + 'Libvirt XML Template') + +def get_connection(read_only): + # These are loaded late so that there's no need to install these + # libraries when not using libvirt. 
+ global libvirt + global libxml2 + if libvirt is None: + libvirt = __import__('libvirt') + if libxml2 is None: + libxml2 = __import__('libxml2') + return LibvirtConnection(read_only) + + +class LibvirtConnection(object): + def __init__(self, read_only): + auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], + 'root', + None] + if read_only: + self._conn = libvirt.openReadOnly('qemu:///system') + else: + self._conn = libvirt.openAuth('qemu:///system', auth, 0) + self._pool = process.ProcessPool() + + + def list_instances(self): + return [self._conn.lookupByID(x).name() + for x in self._conn.listDomainsID()] + + + def destroy(self, instance): + try: + virt_dom = self._conn.lookupByName(instance.name) + virt_dom.destroy() + except Exception, _err: + pass + # If the instance is already terminated, we're still happy + d = defer.Deferred() + d.addCallback(lambda x: self._cleanup()) + # FIXME: What does this comment mean? + # TODO(termie): short-circuit me for tests + # WE'LL save this for when we do shutdown, + # instead of destroy - but destroy returns immediately + timer = task.LoopingCall(f=None) + def _wait_for_shutdown(): + try: + instance.update_state() + if instance.state == power_state.SHUTDOWN: + timer.stop() + d.callback(None) + except Exception: + instance.set_state(power_state.SHUTDOWN) + timer.stop() + d.callback(None) + timer.f = _wait_for_shutdown + timer.start(interval=0.5, now=True) + return d + + + def _cleanup(self, instance): + target = os.path.abspath(instance.datamodel['basepath']) + logging.info("Deleting instance files at %s", target) + shutil.rmtree(target) + + + @defer.inlineCallbacks + @exception.wrap_exception + def reboot(self, instance): + xml = self.toXml(instance) + yield self._conn.lookupByName(instance.name).destroy() + yield self._conn.createXML(xml, 0) + + d = defer.Deferred() + timer = task.LoopingCall(f=None) + def _wait_for_reboot(): + try: + instance.update_state() + if instance.is_running(): + logging.debug('rebooted instance %s' % instance.name) + timer.stop() + d.callback(None) + except Exception, exn: + logging.error('_wait_for_reboot failed: %s' % exn) + instance.set_state(power_state.SHUTDOWN) + timer.stop() + d.callback(None) + timer.f = _wait_for_reboot + timer.start(interval=0.5, now=True) + yield d + + + @defer.inlineCallbacks + @exception.wrap_exception + def spawn(self, instance): + xml = self.toXml(instance) + instance.set_state(power_state.NOSTATE, 'launching') + yield self._create_image(instance, xml) + yield self._conn.createXML(xml, 0) + # TODO(termie): this should actually register + # a callback to check for successful boot + logging.debug("Instance is running") + + local_d = defer.Deferred() + timer = task.LoopingCall(f=None) + def _wait_for_boot(): + try: + instance.update_state() + if instance.is_running(): + logging.debug('booted instance %s' % instance.name) + timer.stop() + local_d.callback(None) + except Exception, exn: + logging.error("_wait_for_boot exception %s" % exn) + self.set_state(power_state.SHUTDOWN) + logging.error('Failed to boot instance %s' % instance.name) + timer.stop() + local_d.callback(None) + timer.f = _wait_for_boot + timer.start(interval=0.5, now=True) + yield local_d + + + @defer.inlineCallbacks + def _create_image(self, instance, libvirt_xml): + # syntactic nicety + data = instance.datamodel + basepath = lambda x='': self.basepath(instance, x) + + # ensure directories exist and are writable + yield self._pool.simpleExecute('mkdir -p %s' % basepath()) + yield self._pool.simpleExecute('chmod 
0777 %s' % basepath()) + + + # TODO(termie): these are blocking calls, it would be great + # if they weren't. + logging.info('Creating image for: %s', data['instance_id']) + f = open(basepath('libvirt.xml'), 'w') + f.write(libvirt_xml) + f.close() + + if not os.path.exists(basepath('disk')): + yield images.fetch(self._pool, data['image_id'], basepath('disk-raw')) + if not os.path.exists(basepath('kernel')): + yield images.fetch(self._pool, data['kernel_id'], basepath('kernel')) + if not os.path.exists(basepath('ramdisk')): + yield images.fetch(self._pool, data['ramdisk_id'], basepath('ramdisk')) + + execute = lambda cmd, input=None: self._pool.simpleExecute(cmd=cmd, + input=input, + error_ok=1) + + key = data['key_data'] + net = None + if FLAGS.simple_network: + with open(FLAGS.simple_network_template) as f: + net = f.read() % {'address': data['private_dns_name'], + 'network': FLAGS.simple_network_network, + 'netmask': FLAGS.simple_network_netmask, + 'gateway': FLAGS.simple_network_gateway, + 'broadcast': FLAGS.simple_network_broadcast, + 'dns': FLAGS.simple_network_dns} + if key or net: + logging.info('Injecting data into image %s', data['image_id']) + yield disk.inject_data(basepath('disk-raw'), key, net, execute=execute) + + if os.path.exists(basepath('disk')): + yield self._pool.simpleExecute('rm -f %s' % basepath('disk')) + + bytes = (instance_types.INSTANCE_TYPES[data['instance_type']]['local_gb'] + * 1024 * 1024 * 1024) + yield disk.partition( + basepath('disk-raw'), basepath('disk'), bytes, execute=execute) + + + def basepath(self, instance, path=''): + return os.path.abspath(os.path.join(instance.datamodel['basepath'], path)) + + + def toXml(self, instance): + # TODO(termie): cache? + logging.debug("Starting the toXML method") + libvirt_xml = open(FLAGS.libvirt_xml_template).read() + xml_info = instance.datamodel.copy() + # TODO(joshua): Make this xml express the attached disks as well + + # TODO(termie): lazy lazy hack because xml is annoying + xml_info['nova'] = json.dumps(instance.datamodel.copy()) + libvirt_xml = libvirt_xml % xml_info + logging.debug("Finished the toXML method") + + return libvirt_xml + + + def get_info(self, instance_id): + virt_dom = self._conn.lookupByName(instance_id) + (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info() + return {'state': state, + 'max_mem': max_mem, + 'mem': mem, + 'num_cpu': num_cpu, + 'cpu_time': cpu_time} + + + def get_disks(self, instance_id): + """ + Note that this function takes an instance ID, not an Instance, so + that it can be called by monitor. + + Returns a list of all block devices for this domain. + """ + domain = self._conn.lookupByName(instance_id) + # TODO(devcamcar): Replace libxml2 with etree. + xml = domain.XMLDesc(0) + doc = None + + try: + doc = libxml2.parseDoc(xml) + except: + return [] + + ctx = doc.xpathNewContext() + disks = [] + + try: + ret = ctx.xpathEval('/domain/devices/disk') + + for node in ret: + devdst = None + + for child in node.children: + if child.name == 'target': + devdst = child.prop('dev') + + if devdst == None: + continue + + disks.append(devdst) + finally: + if ctx != None: + ctx.xpathFreeContext() + if doc != None: + doc.freeDoc() + + return disks + + + def get_interfaces(self, instance_id): + """ + Note that this function takes an instance ID, not an Instance, so + that it can be called by monitor. + + Returns a list of all network interfaces for this instance. + """ + domain = self._conn.lookupByName(instance_id) + # TODO(devcamcar): Replace libxml2 with etree. 
+ xml = domain.XMLDesc(0) + doc = None + + try: + doc = libxml2.parseDoc(xml) + except: + return [] + + ctx = doc.xpathNewContext() + interfaces = [] + + try: + ret = ctx.xpathEval('/domain/devices/interface') + + for node in ret: + devdst = None + + for child in node.children: + if child.name == 'target': + devdst = child.prop('dev') + + if devdst == None: + continue + + interfaces.append(devdst) + finally: + if ctx != None: + ctx.xpathFreeContext() + if doc != None: + doc.freeDoc() + + return interfaces + + + def block_stats(self, instance_id, disk): + """ + Note that this function takes an instance ID, not an Instance, so + that it can be called by monitor. + """ + domain = self._conn.lookupByName(instance_id) + return domain.blockStats(disk) + + + def interface_stats(self, instance_id, interface): + """ + Note that this function takes an instance ID, not an Instance, so + that it can be called by monitor. + """ + domain = self._conn.lookupByName(instance_id) + return domain.interfaceStats(interface) -- cgit From 2dd9438e192b5d760db0c5cee5bb5ded1ec5a9cc Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Sun, 18 Jul 2010 18:24:17 +0100 Subject: Remove the tight coupling between nova.compute.monitor and libvirt. The libvirt-specific code was placed in nova.virt.libvirt_conn by the last changeset. This greatly simplifies the monitor code, and puts the libvirt-specific XML record parsing in a libvirt-specific place. --- nova/compute/monitor.py | 127 +++++++++--------------------------------------- 1 file changed, 23 insertions(+), 104 deletions(-) diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py index fdc86b031..19e1a483d 100644 --- a/nova/compute/monitor.py +++ b/nova/compute/monitor.py @@ -27,7 +27,6 @@ Instance Monitoring: import boto import boto.s3 import datetime -import libxml2 import logging import os import rrdtool @@ -37,12 +36,8 @@ from twisted.internet import defer from twisted.internet import task from twisted.application import service -try: - import libvirt -except Exception, err: - logging.warning('no libvirt found') - from nova import flags +from nova.virt import connection as virt_connection FLAGS = flags.FLAGS @@ -130,83 +125,6 @@ def init_rrd(instance, name): *RRD_VALUES[name] ) -def get_disks(domain): - """ - Returns a list of all block devices for this domain. - """ - # TODO(devcamcar): Replace libxml2 with etree. - xml = domain.XMLDesc(0) - doc = None - - try: - doc = libxml2.parseDoc(xml) - except: - return [] - - ctx = doc.xpathNewContext() - disks = [] - - try: - ret = ctx.xpathEval('/domain/devices/disk') - - for node in ret: - devdst = None - - for child in node.children: - if child.name == 'target': - devdst = child.prop('dev') - - if devdst == None: - continue - - disks.append(devdst) - finally: - if ctx != None: - ctx.xpathFreeContext() - if doc != None: - doc.freeDoc() - - return disks - -def get_interfaces(domain): - """ - Returns a list of all network interfaces for this instance. - """ - # TODO(devcamcar): Replace libxml2 with etree. 
- xml = domain.XMLDesc(0) - doc = None - - try: - doc = libxml2.parseDoc(xml) - except: - return [] - - ctx = doc.xpathNewContext() - interfaces = [] - - try: - ret = ctx.xpathEval('/domain/devices/interface') - - for node in ret: - devdst = None - - for child in node.children: - if child.name == 'target': - devdst = child.prop('dev') - - if devdst == None: - continue - - interfaces.append(devdst) - finally: - if ctx != None: - ctx.xpathFreeContext() - if doc != None: - doc.freeDoc() - - return interfaces - - def graph_cpu(instance, duration): """ Creates a graph of cpu usage for the specified instance and duration. @@ -317,10 +235,9 @@ def store_graph(instance_id, filename): class Instance(object): - def __init__(self, conn, domain): + def __init__(self, conn, instance_id): self.conn = conn - self.domain = domain - self.instance_id = domain.name() + self.instance_id = instance_id self.last_updated = datetime.datetime.min self.cputime = 0 self.cputime_last_updated = None @@ -385,14 +302,14 @@ class Instance(object): """ Returns cpu usage statistics for this instance. """ - info = self.domain.info() + info = self.conn.get_info(self.instance_id) # Get the previous values. cputime_last = self.cputime cputime_last_updated = self.cputime_last_updated # Get the raw CPU time used in nanoseconds. - self.cputime = float(info[4]) + self.cputime = float(info['cpu_time']) self.cputime_last_updated = utcnow() logging.debug('CPU: %d', self.cputime) @@ -413,8 +330,8 @@ class Instance(object): logging.debug('cputime_delta = %s', cputime_delta) # Get the number of virtual cpus in this domain. - vcpus = int(info[3]) - + vcpus = int(info['num_cpu']) + logging.debug('vcpus = %d', vcpus) # Calculate CPU % used and cap at 100. @@ -427,14 +344,13 @@ class Instance(object): rd = 0 wr = 0 - # Get a list of block devices for this instance. - disks = get_disks(self.domain) + disks = self.conn.get_disks(self.instance_id) # Aggregate the read and write totals. for disk in disks: try: rd_req, rd_bytes, wr_req, wr_bytes, errs = \ - self.domain.blockStats(disk) + self.conn.block_stats(self.instance_id, disk) rd += rd_bytes wr += wr_bytes except TypeError: @@ -451,13 +367,12 @@ class Instance(object): rx = 0 tx = 0 - # Get a list of all network interfaces for this instance. - interfaces = get_interfaces(self.domain) + interfaces = self.conn.get_interfaces(self.instance_id) # Aggregate the in and out totals. for interface in interfaces: try: - stats = self.domain.interfaceStats(interface) + stats = self.conn.interface_stats(self.instance_id, interface) rx += stats[0] tx += stats[4] except TypeError: @@ -493,20 +408,24 @@ class InstanceMonitor(object, service.Service): Update resource usage for all running instances. 
""" try: - conn = libvirt.openReadOnly(None) - except libvirt.libvirtError: - logging.exception('unexpected libvirt error') + conn = virt_connection.get_connection(read_only=True) + except Exception, exn: + logging.exception('unexpected exception getting connection') time.sleep(FLAGS.monitoring_instances_delay) return - domain_ids = conn.listDomainsID() - + domain_ids = conn.list_instances() + try: + self.updateInstances_(conn, domain_ids) + except Exception, exn: + logging.exception('updateInstances_') + + def updateInstances_(self, conn, domain_ids): for domain_id in domain_ids: if not domain_id in self._instances: - domain = conn.lookupByID(domain_id) - instance = Instance(conn, domain) + instance = Instance(conn, domain_id) self._instances[domain_id] = instance - logging.debug('Found instance: %s', instance.instance_id) + logging.debug('Found instance: %s', domain_id) for key in self._instances.keys(): instance = self._instances[key] -- cgit From 1046fd21fad35fdb9922f667017937ec94774498 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Sun, 18 Jul 2010 18:28:21 +0100 Subject: First commit of XenAPI-specific code (i.e. connections to the open-source community project Xen Cloud Platform, or the open-source commercial product Citrix XenServer). A new connection type has been added (xenapi) which means that libvirt calls will be replaced with calls to XenAPI. This support depends upon the XenAPI library (available from xen.org). However, the library is loaded on-demand, so there is no need for the library to be present unless you actually want to use XenAPI. The same is true of libvirt, so there is no need to have libvirt present if you are only using XenAPI. This work is incomplete. The VMs don't actually start yet, and won't until we settle on the proposed refactoring for bootable volumes. Also, VM console support is not yet refactored. Finally, xenapi.py does not support the metrics monitoring calls used by monitor.py (block_stats, interface_stats). XenAPI already includes HTTP access to RRDs for retrieving aggregated stats, so there is no need for monitor.py at all (xapi does it for you). The plan is to arrange for those RRDs to be passed straight to the reporting layer without the need for the aggregation code in nova-monitorinstance. 
--- nova/flags.py | 2 +- nova/virt/connection.py | 3 ++ nova/virt/xenapi.py | 138 ++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 142 insertions(+), 1 deletion(-) create mode 100644 nova/virt/xenapi.py diff --git a/nova/flags.py b/nova/flags.py index f9ebb28f7..caf2d2e93 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -36,7 +36,7 @@ DEFINE_bool = DEFINE_bool # Define any app-specific flags in their own files, docs at: # http://code.google.com/p/python-gflags/source/browse/trunk/gflags.py#39 -DEFINE_string('connection_type', 'libvirt', 'libvirt or fake') +DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake') DEFINE_integer('s3_port', 3333, 's3 port') DEFINE_integer('s3_internal_port', 3334, 's3 port') DEFINE_string('s3_host', '127.0.0.1', 's3 host') diff --git a/nova/virt/connection.py b/nova/virt/connection.py index 25c817415..004adb19d 100644 --- a/nova/virt/connection.py +++ b/nova/virt/connection.py @@ -20,6 +20,7 @@ from nova import flags from nova.virt import fake from nova.virt import libvirt_conn +from nova.virt import xenapi FLAGS = flags.FLAGS @@ -33,6 +34,8 @@ def get_connection(read_only=False): conn = fake.get_connection(read_only) elif t == 'libvirt': conn = libvirt_conn.get_connection(read_only) + elif t == 'xenapi': + conn = xenapi.get_connection(read_only) else: raise Exception('Unknown connection type "%s"' % t) diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py new file mode 100644 index 000000000..46ff9c5e4 --- /dev/null +++ b/nova/virt/xenapi.py @@ -0,0 +1,138 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +A connection to XenServer or Xen Cloud Platform. +""" + +import logging + +from twisted.internet import defer +from twisted.internet import task + +from nova import exception +from nova import flags +from nova import process +from nova.compute import power_state + +XenAPI = None + + +def get_connection(_): + """Note that XenAPI doesn't have a read-only connection mode, so + the read_only parameter is ignored.""" + # This is loaded late so that there's no need to install this + # library when not using XenAPI. 
+ global XenAPI + if XenAPI is not None: + XenAPI = __import__('XenAPI') + return XenAPIConnection('http://eli.testdev.hq.xensource.com', + 'root', 'xensource') + + +class XenAPIConnection(object): + + def __init__(self, url, user, pw): + self._conn = XenAPI.Session(url) + self._conn.login_with_password(user, pw) + self._pool = process.Pool() + + def list_instances(self): + result = [self._conn.xenapi.VM.get_name_label(vm) \ + for vm in self._conn.xenapi.VM.get_all()] + + @defer.inlineCallbacks + @exception.wrap_exception + def spawn(self, instance): + vm = self.lookup(instance.name) + if vm is not None: + raise Exception('Attempted to create non-unique name %s' % + instance.name) + mem = str(long(instance.datamodel['memory_kb']) * 1024) + vcpus = str(instance.datamodel['vcpus']) + rec = { + 'name_label': instance.name, + 'name_description': '', + 'is_a_template': False, + 'memory_static_min': '0', + 'memory_static_max': mem, + 'memory_dynamic_min': mem, + 'memory_dynamic_max': mem, + 'VCPUs_at_startup': vcpus, + 'VCPUs_max': vcpus, + 'VCPUs_params': {}, + 'actions_after_shutdown': 'destroy', + 'actions_after_reboot': 'restart', + 'actions_after_crash': 'destroy', + 'PV_bootloader': '', + 'PV_kernel': instance.datamodel['kernel_id'], + 'PV_ramdisk': instance.datamodel['ramdisk_id'], + 'PV_args': '', + 'PV_bootloader_args': '', + 'PV_legacy_args': '', + 'HVM_boot_policy': '', + 'HVM_boot_params': {}, + 'platform': {}, + 'PCI_bus': '', + 'recommendations': '', + 'affinity': '', + 'user_version': '0', + 'other_config': {}, + } + vm = yield self._conn.xenapi.VM.create(rec) + #yield self._conn.xenapi.VM.start(vm, False, False) + + + def reboot(self, instance): + vm = self.lookup(instance.name) + if vm is None: + raise Exception('instance not present %s' % instance.name) + yield self._conn.xenapi.VM.clean_reboot(vm) + + def destroy(self, instance): + vm = self.lookup(instance.name) + if vm is None: + raise Exception('instance not present %s' % instance.name) + yield self._conn.xenapi.VM.destroy(vm) + + def get_info(self, instance_id): + vm = self.lookup(instance_id) + if vm is None: + raise Exception('instance not present %s' % instance.name) + rec = self._conn.xenapi.VM.get_record(vm) + return {'state': power_state_from_xenapi[rec['power_state']], + 'max_mem': long(rec['memory_static_max']) >> 10, + 'mem': long(rec['memory_dynamic_max']) >> 10, + 'num_cpu': rec['VCPUs_max'], + 'cpu_time': 0} + + def lookup(self, i): + vms = self._conn.xenapi.VM.get_by_name_label(i) + n = len(vms) + if n == 0: + return None + elif n > 1: + raise Exception('duplicate name found: %s' % i) + else: + return vms[0] + + power_state_from_xenapi = { + 'Halted' : power_state.RUNNING, #FIXME + 'Running' : power_state.RUNNING, + 'Paused' : power_state.PAUSED, + 'Suspended': power_state.SHUTDOWN, # FIXME + 'Crashed' : power_state.CRASHED + } -- cgit From a3ca587654095ffd4b97103302fb0744e505e332 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 19 Jul 2010 13:19:26 -0500 Subject: Massive refactor of users.py Split users.py into manager.py and ldpadriver.py Added tons of docstrings Cleaned up public methods Simplified manager singleton handling --- bin/nova-api | 2 +- bin/nova-manage | 10 +- bin/nova-objectstore | 6 +- bin/nova-rsapi | 12 +- nova/auth/ldapdriver.py | 428 ++++++++++++++++ nova/auth/manager.py | 741 ++++++++++++++++++++++++++++ nova/auth/rbac.py | 2 +- nova/auth/users.py | 974 ------------------------------------- nova/cloudpipe/api.py | 2 +- nova/cloudpipe/pipelib.py | 4 +- nova/compute/network.py | 
14 +- nova/endpoint/admin.py | 14 +- nova/endpoint/api.py | 4 +- nova/endpoint/cloud.py | 6 +- nova/endpoint/rackspace.py | 6 +- nova/tests/access_unittest.py | 6 +- nova/tests/api_unittest.py | 4 +- nova/tests/auth_unittest.py | 207 ++++++++ nova/tests/cloud_unittest.py | 12 +- nova/tests/network_unittest.py | 4 +- nova/tests/objectstore_unittest.py | 6 +- nova/tests/users_unittest.py | 207 -------- run_tests.py | 2 +- 23 files changed, 1434 insertions(+), 1239 deletions(-) create mode 100644 nova/auth/ldapdriver.py create mode 100644 nova/auth/manager.py delete mode 100644 nova/auth/users.py create mode 100644 nova/tests/auth_unittest.py delete mode 100644 nova/tests/users_unittest.py diff --git a/bin/nova-api b/bin/nova-api index 26f5dbc87..1f2009c30 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -29,7 +29,7 @@ from nova import flags from nova import rpc from nova import server from nova import utils -from nova.auth import users +from nova.auth import manager from nova.compute import model from nova.endpoint import admin from nova.endpoint import api diff --git a/bin/nova-manage b/bin/nova-manage index 56f89ce30..b0f0029ed 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -27,7 +27,7 @@ import time from nova import flags from nova import utils -from nova.auth import users +from nova.auth import manager from nova.compute import model from nova.compute import network from nova.cloudpipe import pipelib @@ -42,7 +42,7 @@ class NetworkCommands(object): class VpnCommands(object): def __init__(self): - self.manager = users.UserManager.instance() + self.manager = manager.AuthManager() self.instdir = model.InstanceDirectory() self.pipe = pipelib.CloudPipe(cloud.CloudController()) @@ -90,7 +90,7 @@ class VpnCommands(object): class RoleCommands(object): def __init__(self): - self.manager = users.UserManager.instance() + self.manager = manager.AuthManager() def add(self, user, role, project=None): """adds role to user @@ -113,7 +113,7 @@ class RoleCommands(object): class UserCommands(object): def __init__(self): - self.manager = users.UserManager.instance() + self.manager = manager.AuthManager() def __print_export(self, user): print 'export EC2_ACCESS_KEY=%s' % user.access @@ -153,7 +153,7 @@ class UserCommands(object): class ProjectCommands(object): def __init__(self): - self.manager = users.UserManager.instance() + self.manager = manager.AuthManager() def add(self, project, user): """adds user to project diff --git a/bin/nova-objectstore b/bin/nova-objectstore index 521f3d5d1..837eb2e0c 100755 --- a/bin/nova-objectstore +++ b/bin/nova-objectstore @@ -18,7 +18,7 @@ # under the License. """ - Tornado daemon for nova objectstore. Supports S3 API. + Tornado daemon for nova objectstore. Supports S3 API. 
""" import logging @@ -28,7 +28,7 @@ from tornado import ioloop from nova import flags from nova import server from nova import utils -from nova.auth import users +from nova.auth import manager from nova.objectstore import handler @@ -39,7 +39,7 @@ def main(argv): # FIXME: if this log statement isn't here, no logging # appears from other files and app won't start daemonized logging.debug('Started HTTP server on %s' % (FLAGS.s3_internal_port)) - app = handler.Application(users.UserManager()) + app = handler.Application(manager.AuthManager()) server = httpserver.HTTPServer(app) server.listen(FLAGS.s3_internal_port) ioloop.IOLoop.instance().start() diff --git a/bin/nova-rsapi b/bin/nova-rsapi index 5cbe2d8c1..306a1fc60 100755 --- a/bin/nova-rsapi +++ b/bin/nova-rsapi @@ -4,20 +4,20 @@ # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ - WSGI daemon for the main API endpoint. + WSGI daemon for the main API endpoint. """ import logging @@ -28,14 +28,14 @@ from nova import flags from nova import rpc from nova import server from nova import utils -from nova.auth import users +from nova.auth import manager from nova.endpoint import rackspace FLAGS = flags.FLAGS flags.DEFINE_integer('cc_port', 8773, 'cloud controller port') def main(_argv): - user_manager = users.UserManager() + user_manager = manager.AuthManager() api_instance = rackspace.Api(user_manager) conn = rpc.Connection.instance() rpc_consumer = rpc.AdapterConsumer(connection=conn, diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py new file mode 100644 index 000000000..49443c99a --- /dev/null +++ b/nova/auth/ldapdriver.py @@ -0,0 +1,428 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Auth driver for ldap + +It should be easy to create a replacement for this driver supporting +other backends by creating another class that exposes the same +public methods. +""" + +import logging + +from nova import exception +from nova import flags +from nova.auth import manager + +try: + import ldap +except Exception, e: + from nova.auth import fakeldap as ldap +# NOTE(vish): this import is so we can use fakeldap even when real ldap +# is installed. 
+from nova.auth import fakeldap + +FLAGS = flags.FLAGS +flags.DEFINE_string('ldap_url', 'ldap://localhost', + 'Point this at your ldap server') +flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password') +flags.DEFINE_string('ldap_user_dn', 'cn=Manager,dc=example,dc=com', + 'DN of admin user') +flags.DEFINE_string('ldap_user_unit', 'Users', 'OID for Users') +flags.DEFINE_string('ldap_user_subtree', 'ou=Users,dc=example,dc=com', + 'OU for Users') +flags.DEFINE_string('ldap_project_subtree', 'ou=Groups,dc=example,dc=com', + 'OU for Projects') +flags.DEFINE_string('role_project_subtree', 'ou=Groups,dc=example,dc=com', + 'OU for Roles') + +# NOTE(vish): mapping with these flags is necessary because we're going +# to tie in to an existing ldap schema +flags.DEFINE_string('ldap_cloudadmin', + 'cn=cloudadmins,ou=Groups,dc=example,dc=com', 'cn for Cloud Admins') +flags.DEFINE_string('ldap_itsec', + 'cn=itsec,ou=Groups,dc=example,dc=com', 'cn for ItSec') +flags.DEFINE_string('ldap_sysadmin', + 'cn=sysadmins,ou=Groups,dc=example,dc=com', 'cn for Sysadmins') +flags.DEFINE_string('ldap_netadmin', + 'cn=netadmins,ou=Groups,dc=example,dc=com', 'cn for NetAdmins') +flags.DEFINE_string('ldap_developer', + 'cn=developers,ou=Groups,dc=example,dc=com', 'cn for Developers') + + +class LdapDriver(object): + def __enter__(self): + """Creates the connection to LDAP""" + if FLAGS.fake_users: + self.NO_SUCH_OBJECT = fakeldap.NO_SUCH_OBJECT + self.OBJECT_CLASS_VIOLATION = fakeldap.OBJECT_CLASS_VIOLATION + self.conn = fakeldap.initialize(FLAGS.ldap_url) + else: + self.NO_SUCH_OBJECT = ldap.NO_SUCH_OBJECT + self.OBJECT_CLASS_VIOLATION = ldap.OBJECT_CLASS_VIOLATION + self.conn = ldap.initialize(FLAGS.ldap_url) + self.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) + return self + + def __exit__(self, type, value, traceback): + """Destroys the connection to LDAP""" + self.conn.unbind_s() + return False + + def get_user(self, uid): + attr = self.__find_object(self.__uid_to_dn(uid), + '(objectclass=novaUser)') + return self.__to_user(attr) + + def get_user_from_access_key(self, access): + query = '(accessKey=%s)' % access + dn = FLAGS.ldap_user_subtree + return self.__to_user(self.__find_object(dn, query)) + + def get_key_pair(self, uid, key_name): + dn = 'cn=%s,%s' % (key_name, + self.__uid_to_dn(uid)) + attr = self.__find_object(dn, '(objectclass=novaKeyPair)') + return self.__to_key_pair(uid, attr) + + def get_project(self, name): + dn = 'cn=%s,%s' % (name, + FLAGS.ldap_project_subtree) + attr = self.__find_object(dn, '(objectclass=novaProject)') + return self.__to_project(attr) + + def get_users(self): + attrs = self.__find_objects(FLAGS.ldap_user_subtree, + '(objectclass=novaUser)') + return [self.__to_user(attr) for attr in attrs] + + def get_key_pairs(self, uid): + attrs = self.__find_objects(self.__uid_to_dn(uid), + '(objectclass=novaKeyPair)') + return [self.__to_key_pair(uid, attr) for attr in attrs] + + def get_projects(self): + attrs = self.__find_objects(FLAGS.ldap_project_subtree, + '(objectclass=novaProject)') + return [self.__to_project(attr) for attr in attrs] + + def create_user(self, name, access_key, secret_key, is_admin): + if self.__user_exists(name): + raise exception.Duplicate("LDAP user %s already exists" % name) + attr = [ + ('objectclass', ['person', + 'organizationalPerson', + 'inetOrgPerson', + 'novaUser']), + ('ou', [FLAGS.ldap_user_unit]), + ('uid', [name]), + ('sn', [name]), + ('cn', [name]), + ('secretKey', [secret_key]), + ('accessKey', [access_key]), + ('isAdmin', 
[str(is_admin).upper()]), + ] + self.conn.add_s(self.__uid_to_dn(name), attr) + return self.__to_user(dict(attr)) + + def create_key_pair(self, uid, key_name, public_key, fingerprint): + """create's a public key in the directory underneath the user""" + # TODO(vish): possibly refactor this to store keys in their own ou + # and put dn reference in the user object + attr = [ + ('objectclass', ['novaKeyPair']), + ('cn', [key_name]), + ('sshPublicKey', [public_key]), + ('keyFingerprint', [fingerprint]), + ] + self.conn.add_s('cn=%s,%s' % (key_name, + self.__uid_to_dn(uid)), + attr) + return self.__to_key_pair(uid, dict(attr)) + + def create_project(self, name, manager_uid, + description=None, member_uids=None): + if self.__project_exists(name): + raise exception.Duplicate("Project can't be created because " + "project %s already exists" % name) + if not self.__user_exists(manager_uid): + raise exception.NotFound("Project can't be created because " + "manager %s doesn't exist" % manager_uid) + manager_dn = self.__uid_to_dn(manager_uid) + # description is a required attribute + if description is None: + description = name + members = [] + if member_uids != None: + for member_uid in member_uids: + if not self.__user_exists(member_uid): + raise exception.NotFound("Project can't be created " + "because user %s doesn't exist" % member_uid) + members.append(self.__uid_to_dn(member_uid)) + # always add the manager as a member because members is required + if not manager_dn in members: + members.append(manager_dn) + attr = [ + ('objectclass', ['novaProject']), + ('cn', [name]), + ('description', [description]), + ('projectManager', [manager_dn]), + ('member', members) + ] + self.conn.add_s('cn=%s,%s' % (name, FLAGS.ldap_project_subtree), attr) + return self.__to_project(dict(attr)) + + def add_to_project(self, uid, project_id): + dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree) + return self.__add_to_group(uid, dn) + + def remove_from_project(self, uid, project_id): + dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree) + return self.__remove_from_group(uid, dn) + + def is_in_project(self, uid, project_id): + dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree) + return self.__is_in_group(uid, dn) + + def has_role(self, uid, role, project_id=None): + role_dn = self.__role_to_dn(role, project_id) + return self.__is_in_group(uid, role_dn) + + def add_role(self, uid, role, project_id=None): + role_dn = self.__role_to_dn(role, project_id) + if not self.__group_exists(role_dn): + # create the role if it doesn't exist + description = '%s role for %s' % (role, project_id) + self.__create_group(role_dn, role, uid, description) + else: + return self.__add_to_group(uid, role_dn) + + def remove_role(self, uid, role, project_id=None): + role_dn = self.__role_to_dn(role, project_id) + return self.__remove_from_group(uid, role_dn) + + def delete_user(self, uid): + if not self.__user_exists(uid): + raise exception.NotFound("User %s doesn't exist" % uid) + self.__delete_key_pairs(uid) + self.__remove_from_all(uid) + self.conn.delete_s('uid=%s,%s' % (uid, + FLAGS.ldap_user_subtree)) + + def delete_key_pair(self, uid, key_name): + if not self.__key_pair_exists(uid, key_name): + raise exception.NotFound("Key Pair %s doesn't exist for user %s" % + (key_name, uid)) + self.conn.delete_s('cn=%s,uid=%s,%s' % (key_name, uid, + FLAGS.ldap_user_subtree)) + + def delete_project(self, name): + project_dn = 'cn=%s,%s' % (name, FLAGS.ldap_project_subtree) + self.__delete_roles(project_dn) + 
self.__delete_group(project_dn) + + def __user_exists(self, name): + return self.get_user(name) != None + + def __key_pair_exists(self, uid, key_name): + return self.get_key_pair(uid, key_name) != None + + def __project_exists(self, name): + return self.get_project(name) != None + + def __find_object(self, dn, query = None): + objects = self.__find_objects(dn, query) + if len(objects) == 0: + return None + return objects[0] + + def __find_dns(self, dn, query=None): + try: + res = self.conn.search_s(dn, ldap.SCOPE_SUBTREE, query) + except self.NO_SUCH_OBJECT: + return [] + # just return the DNs + return [dn for dn, attributes in res] + + def __find_objects(self, dn, query = None): + try: + res = self.conn.search_s(dn, ldap.SCOPE_SUBTREE, query) + except self.NO_SUCH_OBJECT: + return [] + # just return the attributes + return [attributes for dn, attributes in res] + + def __find_role_dns(self, tree): + return self.__find_dns(tree, + '(&(objectclass=groupOfNames)(!(objectclass=novaProject)))') + + def __find_group_dns_with_member(self, tree, uid): + dns = self.__find_dns(tree, + '(&(objectclass=groupOfNames)(member=%s))' % + self.__uid_to_dn(uid)) + return dns + + def __group_exists(self, dn): + return self.__find_object(dn, '(objectclass=groupOfNames)') != None + + def __delete_key_pairs(self, uid): + keys = self.get_key_pairs(uid) + if keys != None: + for key in keys: + self.delete_key_pair(uid, key.name) + + def __role_to_dn(self, role, project_id=None): + if project_id == None: + return FLAGS.__getitem__("ldap_%s" % role).value + else: + return 'cn=%s,cn=%s,%s' % (role, + project_id, + FLAGS.ldap_project_subtree) + + def __create_group(self, group_dn, name, uid, + description, member_uids = None): + if self.__group_exists(group_dn): + raise exception.Duplicate("Group can't be created because " + "group %s already exists" % name) + members = [] + if member_uids != None: + for member_uid in member_uids: + if not self.__user_exists(member_uid): + raise exception.NotFound("Group can't be created " + "because user %s doesn't exist" % member_uid) + members.append(self.__uid_to_dn(member_uid)) + dn = self.__uid_to_dn(uid) + if not dn in members: + members.append(dn) + attr = [ + ('objectclass', ['groupOfNames']), + ('cn', [name]), + ('description', [description]), + ('member', members) + ] + self.conn.add_s(group_dn, attr) + + def __is_in_group(self, uid, group_dn): + if not self.__user_exists(uid): + raise exception.NotFound("User %s can't be searched in group " + "becuase the user doesn't exist" % (uid,)) + if not self.__group_exists(group_dn): + return False + res = self.__find_object(group_dn, + '(member=%s)' % self.__uid_to_dn(uid)) + return res != None + + def __add_to_group(self, uid, group_dn): + if not self.__user_exists(uid): + raise exception.NotFound("User %s can't be added to the group " + "becuase the user doesn't exist" % (uid,)) + if not self.__group_exists(group_dn): + raise exception.NotFound("The group at dn %s doesn't exist" % + (group_dn,)) + if self.__is_in_group(uid, group_dn): + raise exception.Duplicate("User %s is already a member of " + "the group %s" % (uid, group_dn)) + attr = [ + (ldap.MOD_ADD, 'member', self.__uid_to_dn(uid)) + ] + self.conn.modify_s(group_dn, attr) + + def __remove_from_group(self, uid, group_dn): + if not self.__group_exists(group_dn): + raise exception.NotFound("The group at dn %s doesn't exist" % + (group_dn,)) + if not self.__user_exists(uid): + raise exception.NotFound("User %s can't be removed from the " + "group because the user doesn't 
exist" % (uid,)) + if not self.__is_in_group(uid, group_dn): + raise exception.NotFound("User %s is not a member of the group" % + (uid,)) + self.__safe_remove_from_group(group_dn, uid) + + def __safe_remove_from_group(self, group_dn, uid): + # FIXME(vish): what if deleted user is a project manager? + attr = [(ldap.MOD_DELETE, 'member', self.__uid_to_dn(uid))] + try: + self.conn.modify_s(group_dn, attr) + except self.OBJECT_CLASS_VIOLATION: + logging.debug("Attempted to remove the last member of a group. " + "Deleting the group at %s instead." % group_dn ) + self.__delete_group(group_dn) + + def __remove_from_all(self, uid): + if not self.__user_exists(uid): + raise exception.NotFound("User %s can't be removed from all " + "because the user doesn't exist" % (uid,)) + dn = self.__uid_to_dn(uid) + role_dns = self.__find_group_dns_with_member( + FLAGS.role_project_subtree, uid) + for role_dn in role_dns: + self.__safe_remove_from_group(role_dn, uid) + project_dns = self.__find_group_dns_with_member( + FLAGS.ldap_project_subtree, uid) + for project_dn in project_dns: + self.__safe_remove_from_group(project_dn, uid) + + def __delete_group(self, group_dn): + if not self.__group_exists(group_dn): + raise exception.NotFound("Group at dn %s doesn't exist" % group_dn) + self.conn.delete_s(group_dn) + + def __delete_roles(self, project_dn): + for role_dn in self.__find_role_dns(project_dn): + self.__delete_group(role_dn) + + def __to_user(self, attr): + if attr == None: + return None + return manager.User( + id = attr['uid'][0], + name = attr['cn'][0], + access = attr['accessKey'][0], + secret = attr['secretKey'][0], + admin = (attr['isAdmin'][0] == 'TRUE') + ) + + def __to_key_pair(self, owner, attr): + if attr == None: + return None + return manager.KeyPair( + id = attr['cn'][0], + owner_id = owner, + public_key = attr['sshPublicKey'][0], + fingerprint = attr['keyFingerprint'][0], + ) + + def __to_project(self, attr): + if attr == None: + return None + member_dns = attr.get('member', []) + return manager.Project( + id = attr['cn'][0], + project_manager_id = self.__dn_to_uid(attr['projectManager'][0]), + description = attr.get('description', [None])[0], + member_ids = [self.__dn_to_uid(x) for x in member_dns] + ) + + def __dn_to_uid(self, dn): + return dn.split(',')[0].split('=')[1] + + def __uid_to_dn(self, dn): + return 'uid=%s,%s' % (dn, FLAGS.ldap_user_subtree) + diff --git a/nova/auth/manager.py b/nova/auth/manager.py new file mode 100644 index 000000000..0b5039684 --- /dev/null +++ b/nova/auth/manager.py @@ -0,0 +1,741 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Nova authentication management +""" + +import logging +import os +import shutil +import string +import tempfile +import uuid +import zipfile + +from nova import crypto +from nova import datastore +from nova import exception +from nova import flags +from nova import objectstore # for flags +from nova import signer +from nova import utils +from nova.auth import ldapdriver +FLAGS = flags.FLAGS + +# NOTE(vish): a user with one of these roles will be a superuser and +# have access to all api commands +flags.DEFINE_list('superuser_roles', ['cloudadmin'], + 'roles that ignore rbac checking completely') + +# NOTE(vish): a user with one of these roles will have it for every +# project, even if he or she is not a member of the project +flags.DEFINE_list('global_roles', ['cloudadmin', 'itsec'], + 'roles that apply to all projects') + +flags.DEFINE_string('credentials_template', + utils.abspath('auth/novarc.template'), + 'Template for creating users rc file') +flags.DEFINE_string('vpn_client_template', + utils.abspath('cloudpipe/client.ovpn.template'), + 'Template for creating users vpn file') +flags.DEFINE_string('credential_key_file', 'pk.pem', + 'Filename of private key in credentials zip') +flags.DEFINE_string('credential_cert_file', 'cert.pem', + 'Filename of certificate in credentials zip') +flags.DEFINE_string('credential_rc_file', 'novarc', + 'Filename of rc in credentials zip') + +flags.DEFINE_integer('vpn_start_port', 1000, + 'Start port for the cloudpipe VPN servers') +flags.DEFINE_integer('vpn_end_port', 2000, + 'End port for the cloudpipe VPN servers') + +flags.DEFINE_string('credential_cert_subject', + '/C=US/ST=California/L=MountainView/O=AnsoLabs/' + 'OU=NovaDev/CN=%s-%s', + 'Subject for certificate for users') + +flags.DEFINE_string('vpn_ip', '127.0.0.1', + 'Public IP for the cloudpipe VPN servers') + + +class AuthBase(object): + """Base class for objects relating to auth + + Objects derived from this class should be stupid data objects with + an id member. They may optionally contain methods that delegate to + AuthManager, but should not implement logic themselves. + """ + @classmethod + def safe_id(cls, obj): + """Safe get object id + + This method will return the id of the object if the object + is of this class, otherwise it will return the original object. + This allows methods to accept objects or ids as paramaters. 
+ + """ + if isinstance(obj, cls): + return obj.id + else: + return obj + + +class User(AuthBase): + """Object representing a user""" + def __init__(self, id, name, access, secret, admin): + self.id = id + self.name = name + self.access = access + self.secret = secret + self.admin = admin + + def is_superuser(self): + return AuthManager().is_superuser(self) + + def is_admin(self): + return AuthManager().is_admin(self) + + def has_role(self, role): + return AuthManager().has_role(self, role) + + def add_role(self, role): + return AuthManager().add_role(self, role) + + def remove_role(self, role): + return AuthManager().remove_role(self, role) + + def is_project_member(self, project): + return AuthManager().is_project_member(self, project) + + def is_project_manager(self, project): + return AuthManager().is_project_manager(self, project) + + def generate_key_pair(self, name): + return AuthManager().generate_key_pair(self.id, name) + + def create_key_pair(self, name, public_key, fingerprint): + return AuthManager().create_key_pair(self.id, + name, + public_key, + fingerprint) + + def get_key_pair(self, name): + return AuthManager().get_key_pair(self.id, name) + + def delete_key_pair(self, name): + return AuthManager().delete_key_pair(self.id, name) + + def get_key_pairs(self): + return AuthManager().get_key_pairs(self.id) + + def __repr__(self): + return "User('%s', '%s', '%s', '%s', %s)" % (self.id, + self.name, + self.access, + self.secret, + self.admin) + + +class KeyPair(AuthBase): + """Represents an ssh key returned from the datastore + + Even though this object is named KeyPair, only the public key and + fingerprint is stored. The user's private key is not saved. + """ + def __init__(self, id, owner_id, public_key, fingerprint): + self.id = id + self.name = id + self.owner_id = owner_id + self.public_key = public_key + self.fingerprint = fingerprint + + def __repr__(self): + return "KeyPair('%s', '%s', '%s', '%s')" % (self.id, + self.owner_id, + self.public_key, + self.fingerprint) + + +class Project(AuthBase): + """Represents a Project returned from the datastore""" + def __init__(self, id, project_manager_id, description, member_ids): + self.project_manager_id = project_manager_id + self.id = id + self.name = id + self.description = description + self.member_ids = member_ids + + @property + def project_manager(self): + return AuthManager().get_user(self.project_manager_id) + + def has_manager(self, user): + return AuthManager().is_project_manager(user, self) + + def has_member(self, user): + return AuthManager().is_project_member(user, self) + + def add_role(self, user, role): + return AuthManager().add_role(user, role, self) + + def remove_role(self, user, role): + return AuthManager().remove_role(user, role, self) + + def has_role(self, user, role): + return AuthManager().has_role(user, role, self) + + def get_credentials(self, user): + return AuthManager().get_credentials(user, self) + + def __repr__(self): + return "Project('%s', '%s', '%s', %s)" % (self.id, + self.project_manager_id, + self.description, + self.member_ids) + + +class NoMorePorts(exception.Error): + pass + + +class Vpn(datastore.BasicModel): + """Manages vpn ips and ports for projects""" + def __init__(self, project_id): + self.project_id = project_id + super(Vpn, self).__init__() + + @property + def identifier(self): + return self.project_id + + @classmethod + def create(cls, project_id): + # TODO(vish): get list of vpn ips from redis + port = cls.find_free_port_for_ip(FLAGS.vpn_ip) + vpn = cls(project_id) + # 
save ip for project + vpn['project'] = project_id + vpn['ip'] = FLAGS.vpn_ip + vpn['port'] = port + vpn.save() + return vpn + + @classmethod + def find_free_port_for_ip(cls, ip): + # TODO(vish): these redis commands should be generalized and + # placed into a base class. Conceptually, it is + # similar to an association, but we are just + # storing a set of values instead of keys that + # should be turned into objects. + redis = datastore.Redis.instance() + key = 'ip:%s:ports' % ip + # TODO(vish): these ports should be allocated through an admin + # command instead of a flag + if (not redis.exists(key) and + not redis.exists(cls._redis_association_name('ip', ip))): + for i in range(FLAGS.vpn_start_port, FLAGS.vpn_end_port + 1): + redis.sadd(key, i) + + port = redis.spop(key) + if not port: + raise NoMorePorts() + return port + + @classmethod + def num_ports_for_ip(cls, ip): + return datastore.Redis.instance().scard('ip:%s:ports' % ip) + + @property + def ip(self): + return self['ip'] + + @property + def port(self): + return int(self['port']) + + def save(self): + self.associate_with('ip', self.ip) + super(Vpn, self).save() + + def destroy(self): + self.unassociate_with('ip', self.ip) + datastore.Redis.instance().sadd('ip:%s:ports' % self.ip, self.port) + super(Vpn, self).destroy() + + +class AuthManager(object): + """Manager Singleton for dealing with Users, Projects, and Keypairs + + Methods accept objects or ids. + + AuthManager uses a driver object to make requests to the data backend. + See ldapdriver.LdapDriver for reference. + + AuthManager also manages associated data related to Auth objects that + need to be more accessible, such as vpn ips and ports. + """ + _instance=None + def __new__(cls, *args, **kwargs): + if not cls._instance: + cls._instance = super(AuthManager, cls).__new__( + cls, *args, **kwargs) + return cls._instance + + def __init__(self, *args, **kwargs): + self.driver_class = kwargs.get('driver_class', ldapdriver.LdapDriver) + if FLAGS.fake_tests: + try: + self.create_user('fake', 'fake', 'fake') + except: pass + try: + self.create_user('user', 'user', 'user') + except: pass + try: + self.create_user('admin', 'admin', 'admin', True) + except: pass + + def authenticate(self, access, signature, params, verb='GET', + server_string='127.0.0.1:8773', path='/', + verify_signature=True): + """Authenticates AWS request using access key and signature + + If the project is not specified, attempts to authenticate to + a project with the same name as the user. This way, older tools + that have no project knowledge will still work. + + @type access: str + @param access: Access key for user in the form "access:project". + + @type signature: str + @param signature: Signature of the request. + + @type params: list of str + @param params: Web paramaters used for the signature. + + @type verb: str + @param verb: Web request verb ('GET' or 'POST'). + + @type server_string: str + @param server_string: Web request server string. + + @type path: str + @param path: Web request path. + + @type verify_signature: bool + @param verify_signature: Whether to verify the signature. + + @rtype: tuple (User, Project) + @return: User and project that the request represents. 
+ """ + # TODO(vish): check for valid timestamp + (access_key, sep, project_name) = access.partition(':') + + user = self.get_user_from_access_key(access_key) + if user == None: + raise exception.NotFound('No user found for access key %s' % + access_key) + if project_name is '': + project_name = user.name + + project = self.get_project(project_name) + if project == None: + raise exception.NotFound('No project called %s could be found' % + project_name) + if not self.is_admin(user) and not self.is_project_member(user, + project): + raise exception.NotFound('User %s is not a member of project %s' % + (user.id, project.id)) + if verify_signature: + # NOTE(vish): hmac can't handle unicode, so encode ensures that + # secret isn't unicode + expected_signature = signer.Signer(user.secret.encode()).generate( + params, verb, server_string, path) + logging.debug('user.secret: %s', user.secret) + logging.debug('expected_signature: %s', expected_signature) + logging.debug('signature: %s', signature) + if signature != expected_signature: + raise exception.NotAuthorized('Signature does not match') + return (user, project) + + def is_superuser(self, user): + """Checks for superuser status, allowing user to bypass rbac + + @type user: User or uid + @param user: User to check. + + @rtype: bool + @return: True for superuser. + """ + if not isinstance(user, User): + user = self.get_user(user) + # NOTE(vish): admin flag on user represents superuser + if user.admin: + return True + for role in FLAGS.superuser_roles: + if self.has_role(user, role): + return True + + def is_admin(self, user): + """Checks for admin status, allowing user to access all projects + + @type user: User or uid + @param user: User to check. + + @rtype: bool + @return: True for admin. + """ + if not isinstance(user, User): + user = self.get_user(user) + if self.is_superuser(user): + return True + for role in FLAGS.global_roles: + if self.has_role(user, role): + return True + + def has_role(self, user, role, project=None): + """Checks existence of role for user + + If project is not specified, checks for a global role. If project + is specified, checks for the union of the global role and the + project role. + + Role 'projectmanager' only works for projects and simply checks to + see if the user is the project_manager of the specified project. It + is the same as calling is_project_manager(user, project). + + @type user: User or uid + @param user: User to check. + + @type role: str + @param role: Role to check. + + @type project: Project or project_id + @param project: Project in which to look for local role. + + @rtype: bool + @return: True if the user has the role. + """ + with self.driver_class() as drv: + if role == 'projectmanager': + if not project: + raise exception.Error("Must specify project") + return self.is_project_manager(user, project) + + global_role = drv.has_role(User.safe_id(user), + role, + None) + if not global_role: + return global_role + + if not project or role in FLAGS.global_roles: + return global_role + + return drv.has_role(User.safe_id(user), + role, + Project.safe_id(project)) + + def add_role(self, user, role, project=None): + """Adds role for user + + If project is not specified, adds a global role. If project + is specified, adds a local role. + + The 'projectmanager' role is special and can't be added or removed. + + @type user: User or uid + @param user: User to which to add role. + + @type role: str + @param role: Role to add. 
+ + @type project: Project or project_id + @param project: Project in which to add local role. + """ + with self.driver_class() as drv: + drv.add_role(User.safe_id(user), role, Project.safe_id(project)) + + def remove_role(self, user, role, project=None): + """Removes role for user + + If project is not specified, removes a global role. If project + is specified, removes a local role. + + The 'projectmanager' role is special and can't be added or removed. + + @type user: User or uid + @param user: User from which to remove role. + + @type role: str + @param role: Role to remove. + + @type project: Project or project_id + @param project: Project in which to remove local role. + """ + with self.driver_class() as drv: + drv.remove_role(User.safe_id(user), role, Project.safe_id(project)) + + def create_project(self, name, manager_user, + description=None, member_users=None): + """Create a project + + @type name: str + @param name: Name of the project to create. The name will also be + used as the project id. + + @type manager_user: User or uid + @param manager_user: This user will be the project manager. + + @type description: str + @param project: Description of the project. If no description is + specified, the name of the project will be used. + + @type member_users: list of User or uid + @param: Initial project members. The project manager will always be + added as a member, even if he isn't specified in this list. + + @rtype: Project + @return: The new project. + """ + if member_users: + member_users = [User.safe_id(u) for u in member_users] + # NOTE(vish): try to associate a vpn ip and port first because + # if it throws an exception, we save having to + # create and destroy a project + Vpn.create(name) + with self.driver_class() as drv: + return drv.create_project(name, + User.safe_id(manager_user), + description, + member_users) + + def get_projects(self): + """Retrieves list of all projects""" + with self.driver_class() as drv: + return drv.get_projects() + + + def get_project(self, project): + """Get project object by id""" + with self.driver_class() as drv: + return drv.get_project(Project.safe_id(project)) + + def add_to_project(self, user, project): + """Add user to project""" + with self.driver_class() as drv: + return drv.add_to_project(User.safe_id(user), + Project.safe_id(project)) + + def is_project_manager(self, user, project): + """Checks if user is project manager""" + if not isinstance(project, Project): + project = self.get_project(project) + return User.safe_id(user) == project.project_manager_id + + def is_project_member(self, user, project): + """Checks to see if user is a member of project""" + if not isinstance(project, Project): + project = self.get_project(project) + return User.safe_id(user) in project.member_ids + + def remove_from_project(self, user, project): + """Removes a user from a project""" + with self.driver_class() as drv: + return drv.remove_from_project(User.safe_id(user), + Project.safe_id(project)) + + def delete_project(self, project): + """Deletes a project""" + with self.driver_class() as drv: + return drv.delete_project(Project.safe_id(project)) + + def get_user(self, uid): + """Retrieves a user by id""" + with self.driver_class() as drv: + return drv.get_user(uid) + + def get_user_from_access_key(self, access_key): + """Retrieves a user by access key""" + with self.driver_class() as drv: + return drv.get_user_from_access_key(access_key) + + def get_users(self): + """Retrieves a list of all users""" + with self.driver_class() as drv: + return 
drv.get_users() + + def create_user(self, user, access=None, secret=None, + admin=False, create_project=True): + """Creates a user + + @type user: str + @param name: Name of the user to create. The name will also be + used as the user id. + + @type access: str + @param access: Access Key (defaults to a random uuid) + + @type secret: str + @param secret: Secret Key (defaults to a random uuid) + + @type admin: bool + @param admin: Whether to set the admin flag. The admin flag gives + superuser status regardless of roles specifed for the user. + + @type create_project: bool + @param: Whether to create a project for the user with the same name. + + @rtype: User + @return: The new user. + """ + if access == None: access = str(uuid.uuid4()) + if secret == None: secret = str(uuid.uuid4()) + with self.driver_class() as drv: + user = User.safe_id(user) + result = drv.create_user(user, access, secret, admin) + if create_project: + # NOTE(vish): if the project creation fails, we delete + # the user and return an exception + try: + drv.create_project(user, user, user) + except Exception: + with self.driver_class() as drv: + drv.delete_user(user) + raise + return result + + def delete_user(self, user, delete_project=True): + """Deletes a user""" + with self.driver_class() as drv: + user = User.safe_id(user) + if delete_project: + try: + drv.delete_project(user) + except exception.NotFound: + pass + drv.delete_user(user) + + def generate_key_pair(self, user, key_name): + """Generates a key pair for a user + + Generates a public and private key, stores the public key using the + key_name, and returns the private key and fingerprint. + + @type user: User or uid + @param user: User for which to create key pair. + + @type key_name: str + @param key_name: Name to use for the generated KeyPair. + + @rtype: tuple (private_key, fingerprint) + @return: A tuple containing the private_key and fingerprint. 
+ """ + # NOTE(vish): generating key pair is slow so check for legal + # creation before creating keypair + uid = User.safe_id(user) + with self.driver_class() as drv: + if not drv.get_user(uid): + raise exception.NotFound("User %s doesn't exist" % user) + if drv.get_key_pair(uid, key_name): + raise exception.Duplicate("The keypair %s already exists" + % key_name) + private_key, public_key, fingerprint = crypto.generate_key_pair() + self.create_key_pair(uid, key_name, public_key, fingerprint) + return private_key, fingerprint + + def create_key_pair(self, user, key_name, public_key, fingerprint): + """Creates a key pair for user""" + with self.driver_class() as drv: + return drv.create_key_pair(User.safe_id(user), key_name, + public_key, fingerprint) + + def get_key_pair(self, user, key_name): + """Retrieves a key pair for user""" + with self.driver_class() as drv: + return drv.get_key_pair(User.safe_id(user), key_name) + + def get_key_pairs(self, user): + """Retrieves all key pairs for user""" + with self.driver_class() as drv: + return drv.get_key_pairs(User.safe_id(user)) + + def delete_key_pair(self, user, key_name): + """Deletes a key pair for user""" + with self.driver_class() as drv: + drv.delete_key_pair(User.safe_id(user), key_name) + + def get_credentials(self, user, project=None): + """Get credential zip for user in project""" + if not isinstance(user, User): + user = self.get_user(user) + if project is None: + project = user.id + pid = Project.safe_id(project) + rc = self.__generate_rc(user.access, user.secret, pid) + private_key, signed_cert = self.__generate_x509_cert(user.id, pid) + + vpn = Vpn(pid) + configfile = open(FLAGS.vpn_client_template,"r") + s = string.Template(configfile.read()) + configfile.close() + config = s.substitute(keyfile=FLAGS.credential_key_file, + certfile=FLAGS.credential_cert_file, + ip=vpn.ip, + port=vpn.port) + + tmpdir = tempfile.mkdtemp() + zf = os.path.join(tmpdir, "temp.zip") + zippy = zipfile.ZipFile(zf, 'w') + zippy.writestr(FLAGS.credential_rc_file, rc) + zippy.writestr(FLAGS.credential_key_file, private_key) + zippy.writestr(FLAGS.credential_cert_file, signed_cert) + zippy.writestr("nebula-client.conf", config) + zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(user.id)) + zippy.close() + with open(zf, 'rb') as f: + buffer = f.read() + + shutil.rmtree(tmpdir) + return buffer + + def __generate_rc(self, access, secret, pid): + """Generate rc file for user""" + rc = open(FLAGS.credentials_template).read() + rc = rc % { 'access': access, + 'project': pid, + 'secret': secret, + 'ec2': FLAGS.ec2_url, + 's3': 'http://%s:%s' % (FLAGS.s3_host, FLAGS.s3_port), + 'nova': FLAGS.ca_file, + 'cert': FLAGS.credential_cert_file, + 'key': FLAGS.credential_key_file, + } + return rc + + def __generate_x509_cert(self, uid, pid): + """Generate x509 cert for user""" + (private_key, csr) = crypto.generate_x509_cert( + self.__cert_subject(uid)) + # TODO(joshua): This should be async call back to the cloud controller + signed_cert = crypto.sign_csr(csr, pid) + return (private_key, signed_cert) + + def __cert_subject(self, uid): + """Helper to generate cert subject""" + return FLAGS.credential_cert_subject % (uid, utils.isotime()) diff --git a/nova/auth/rbac.py b/nova/auth/rbac.py index 9e2bb830c..7fab9419f 100644 --- a/nova/auth/rbac.py +++ b/nova/auth/rbac.py @@ -17,7 +17,7 @@ # under the License. 
from nova import exception -from nova.auth import users +from nova.auth import manager def allow(*roles): diff --git a/nova/auth/users.py b/nova/auth/users.py deleted file mode 100644 index fc08dc34d..000000000 --- a/nova/auth/users.py +++ /dev/null @@ -1,974 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Nova users and user management, including RBAC hooks. -""" - -import datetime -import logging -import os -import shutil -import signer -import string -import tempfile -import uuid -import zipfile - -try: - import ldap -except Exception, e: - import fakeldap as ldap - -import fakeldap - -# TODO(termie): clean up these imports -from nova import datastore -from nova import exception -from nova import flags -from nova import crypto -from nova import utils -from nova import objectstore # for flags - -FLAGS = flags.FLAGS - -flags.DEFINE_string('ldap_url', 'ldap://localhost', - 'Point this at your ldap server') -flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password') -flags.DEFINE_string('user_dn', 'cn=Manager,dc=example,dc=com', - 'DN of admin user') -flags.DEFINE_string('user_unit', 'Users', 'OID for Users') -flags.DEFINE_string('user_ldap_subtree', 'ou=Users,dc=example,dc=com', - 'OU for Users') -flags.DEFINE_string('project_ldap_subtree', 'ou=Groups,dc=example,dc=com', - 'OU for Projects') -flags.DEFINE_string('role_ldap_subtree', 'ou=Groups,dc=example,dc=com', - 'OU for Roles') - -# NOTE(vish): mapping with these flags is necessary because we're going -# to tie in to an existing ldap schema -flags.DEFINE_string('ldap_cloudadmin', - 'cn=cloudadmins,ou=Groups,dc=example,dc=com', 'cn for Cloud Admins') -flags.DEFINE_string('ldap_itsec', - 'cn=itsec,ou=Groups,dc=example,dc=com', 'cn for ItSec') -flags.DEFINE_string('ldap_sysadmin', - 'cn=sysadmins,ou=Groups,dc=example,dc=com', 'cn for Sysadmins') -flags.DEFINE_string('ldap_netadmin', - 'cn=netadmins,ou=Groups,dc=example,dc=com', 'cn for NetAdmins') -flags.DEFINE_string('ldap_developer', - 'cn=developers,ou=Groups,dc=example,dc=com', 'cn for Developers') - -# NOTE(vish): a user with one of these roles will be a superuser and -# have access to all api commands -flags.DEFINE_list('superuser_roles', ['cloudadmin'], - 'roles that ignore rbac checking completely') - -# NOTE(vish): a user with one of these roles will have it for every -# project, even if he or she is not a member of the project -flags.DEFINE_list('global_roles', ['cloudadmin', 'itsec'], - 'roles that apply to all projects') - -flags.DEFINE_string('credentials_template', - utils.abspath('auth/novarc.template'), - 'Template for creating users rc file') -flags.DEFINE_string('vpn_client_template', - utils.abspath('cloudpipe/client.ovpn.template'), - 'Template for creating users vpn file') -flags.DEFINE_string('credential_key_file', 'pk.pem', - 'Filename of 
private key in credentials zip') -flags.DEFINE_string('credential_cert_file', 'cert.pem', - 'Filename of certificate in credentials zip') -flags.DEFINE_string('credential_rc_file', 'novarc', - 'Filename of rc in credentials zip') - -flags.DEFINE_integer('vpn_start_port', 1000, - 'Start port for the cloudpipe VPN servers') -flags.DEFINE_integer('vpn_end_port', 2000, - 'End port for the cloudpipe VPN servers') - -flags.DEFINE_string('credential_cert_subject', - '/C=US/ST=California/L=MountainView/O=AnsoLabs/' - 'OU=NovaDev/CN=%s-%s', - 'Subject for certificate for users') - -flags.DEFINE_string('vpn_ip', '127.0.0.1', - 'Public IP for the cloudpipe VPN servers') - - -class AuthBase(object): - @classmethod - def safe_id(cls, obj): - """Safe get object id. - - This method will return the id of the object if the object - is of this class, otherwise it will return the original object. - This allows methods to accept objects or ids as paramaters. - - """ - if isinstance(obj, cls): - return obj.id - else: - return obj - - -class User(AuthBase): - """id and name are currently the same""" - def __init__(self, id, name, access, secret, admin): - self.id = id - self.name = name - self.access = access - self.secret = secret - self.admin = admin - - def is_superuser(self): - """allows user to bypass rbac completely""" - if self.admin: - return True - for role in FLAGS.superuser_roles: - if self.has_role(role): - return True - - def is_admin(self): - """allows user to see objects from all projects""" - if self.is_superuser(): - return True - for role in FLAGS.global_roles: - if self.has_role(role): - return True - - def has_role(self, role): - return UserManager.instance().has_role(self, role) - - def add_role(self, role): - return UserManager.instance().add_role(self, role) - - def remove_role(self, role): - return UserManager.instance().remove_role(self, role) - - def is_project_member(self, project): - return UserManager.instance().is_project_member(self, project) - - def is_project_manager(self, project): - return UserManager.instance().is_project_manager(self, project) - - def generate_rc(self, project=None): - if project is None: - project = self.id - rc = open(FLAGS.credentials_template).read() - rc = rc % { 'access': self.access, - 'project': project, - 'secret': self.secret, - 'ec2': FLAGS.ec2_url, - 's3': 'http://%s:%s' % (FLAGS.s3_host, FLAGS.s3_port), - 'nova': FLAGS.ca_file, - 'cert': FLAGS.credential_cert_file, - 'key': FLAGS.credential_key_file, - } - return rc - - def generate_key_pair(self, name): - return UserManager.instance().generate_key_pair(self.id, name) - - def create_key_pair(self, name, public_key, fingerprint): - return UserManager.instance().create_key_pair(self.id, - name, - public_key, - fingerprint) - - def get_key_pair(self, name): - return UserManager.instance().get_key_pair(self.id, name) - - def delete_key_pair(self, name): - return UserManager.instance().delete_key_pair(self.id, name) - - def get_key_pairs(self): - return UserManager.instance().get_key_pairs(self.id) - - def __repr__(self): - return "User('%s', '%s', '%s', '%s', %s)" % ( - self.id, self.name, self.access, self.secret, self.admin) - - -class KeyPair(AuthBase): - def __init__(self, id, owner_id, public_key, fingerprint): - self.id = id - self.name = id - self.owner_id = owner_id - self.public_key = public_key - self.fingerprint = fingerprint - - def delete(self): - return UserManager.instance().delete_key_pair(self.owner, self.name) - - def __repr__(self): - return "KeyPair('%s', '%s', '%s', '%s')" % ( - 
self.id, self.owner_id, self.public_key, self.fingerprint) - - -class Group(AuthBase): - """id and name are currently the same""" - def __init__(self, id, description = None, member_ids = None): - self.id = id - self.name = id - self.description = description - self.member_ids = member_ids - - def has_member(self, user): - return User.safe_id(user) in self.member_ids - - def __repr__(self): - return "Group('%s', '%s', %s)" % ( - self.id, self.description, self.member_ids) - - -class Project(Group): - def __init__(self, id, project_manager_id, description, member_ids): - self.project_manager_id = project_manager_id - super(Project, self).__init__(id, description, member_ids) - - @property - def project_manager(self): - return UserManager.instance().get_user(self.project_manager_id) - - def has_manager(self, user): - return User.safe_id(user) == self.project_manager_id - - def add_role(self, user, role): - return UserManager.instance().add_role(user, role, self) - - def remove_role(self, user, role): - return UserManager.instance().remove_role(user, role, self) - - def has_role(self, user, role): - return UserManager.instance().has_role(user, role, self) - - @property - def vpn_ip(self): - return Vpn(self.id).ip - - @property - def vpn_port(self): - return Vpn(self.id).port - - def get_credentials(self, user): - if not isinstance(user, User): - user = UserManager.instance().get_user(user) - rc = user.generate_rc(self.id) - private_key, signed_cert = self.generate_x509_cert(user) - - configfile = open(FLAGS.vpn_client_template,"r") - s = string.Template(configfile.read()) - configfile.close() - config = s.substitute(keyfile=FLAGS.credential_key_file, - certfile=FLAGS.credential_cert_file, - ip=self.vpn_ip, - port=self.vpn_port) - - tmpdir = tempfile.mkdtemp() - zf = os.path.join(tmpdir, "temp.zip") - zippy = zipfile.ZipFile(zf, 'w') - zippy.writestr(FLAGS.credential_rc_file, rc) - zippy.writestr(FLAGS.credential_key_file, private_key) - zippy.writestr(FLAGS.credential_cert_file, signed_cert) - zippy.writestr("nebula-client.conf", config) - zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(self.id)) - zippy.close() - with open(zf, 'rb') as f: - buffer = f.read() - - shutil.rmtree(tmpdir) - return buffer - - def generate_x509_cert(self, user): - return UserManager.instance().generate_x509_cert(user, self) - - def __repr__(self): - return "Project('%s', '%s', '%s', %s)" % ( - self.id, self.project_manager_id, - self.description, self.member_ids) - - -class NoMorePorts(exception.Error): - pass - - -class Vpn(datastore.BasicModel): - def __init__(self, project_id): - self.project_id = project_id - super(Vpn, self).__init__() - - @property - def identifier(self): - return self.project_id - - @classmethod - def create(cls, project_id): - # TODO(vish): get list of vpn ips from redis - port = cls.find_free_port_for_ip(FLAGS.vpn_ip) - vpn = cls(project_id) - # save ip for project - vpn['project'] = project_id - vpn['ip'] = FLAGS.vpn_ip - vpn['port'] = port - vpn.save() - return vpn - - @classmethod - def find_free_port_for_ip(cls, ip): - # TODO(vish): these redis commands should be generalized and - # placed into a base class. Conceptually, it is - # similar to an association, but we are just - # storing a set of values instead of keys that - # should be turned into objects. 
- redis = datastore.Redis.instance() - key = 'ip:%s:ports' % ip - # TODO(vish): these ports should be allocated through an admin - # command instead of a flag - if (not redis.exists(key) and - not redis.exists(cls._redis_association_name('ip', ip))): - for i in range(FLAGS.vpn_start_port, FLAGS.vpn_end_port + 1): - redis.sadd(key, i) - - port = redis.spop(key) - if not port: - raise NoMorePorts() - return port - - @classmethod - def num_ports_for_ip(cls, ip): - return datastore.Redis.instance().scard('ip:%s:ports' % ip) - - @property - def ip(self): - return self['ip'] - - @property - def port(self): - return int(self['port']) - - def save(self): - self.associate_with('ip', self.ip) - super(Vpn, self).save() - - def destroy(self): - self.unassociate_with('ip', self.ip) - datastore.Redis.instance().sadd('ip:%s:ports' % self.ip, self.port) - super(Vpn, self).destroy() - - -class UserManager(object): - def __init__(self): - if hasattr(self.__class__, '_instance'): - raise Exception('Attempted to instantiate singleton') - - @classmethod - def instance(cls): - if not hasattr(cls, '_instance'): - inst = UserManager() - cls._instance = inst - if FLAGS.fake_users: - try: - inst.create_user('fake', 'fake', 'fake') - except: pass - try: - inst.create_user('user', 'user', 'user') - except: pass - try: - inst.create_user('admin', 'admin', 'admin', True) - except: pass - return cls._instance - - def authenticate(self, access, signature, params, verb='GET', - server_string='127.0.0.1:8773', path='/', - verify_signature=True): - # TODO: Check for valid timestamp - (access_key, sep, project_name) = access.partition(':') - - user = self.get_user_from_access_key(access_key) - if user == None: - raise exception.NotFound('No user found for access key %s' % - access_key) - if project_name is '': - project_name = user.name - - project = self.get_project(project_name) - if project == None: - raise exception.NotFound('No project called %s could be found' % - project_name) - if not user.is_admin() and not project.has_member(user): - raise exception.NotFound('User %s is not a member of project %s' % - (user.id, project.id)) - if verify_signature: - # NOTE(vish): hmac can't handle unicode, so encode ensures that - # secret isn't unicode - expected_signature = signer.Signer(user.secret.encode()).generate( - params, verb, server_string, path) - logging.debug('user.secret: %s', user.secret) - logging.debug('expected_signature: %s', expected_signature) - logging.debug('signature: %s', signature) - if signature != expected_signature: - raise exception.NotAuthorized('Signature does not match') - return (user, project) - - def has_role(self, user, role, project=None): - with LDAPWrapper() as conn: - if role == 'projectmanager': - if not project: - raise exception.Error("Must specify project") - return self.is_project_manager(user, project) - - global_role = conn.has_role(User.safe_id(user), - role, - None) - if not global_role: - return global_role - - if not project or role in FLAGS.global_roles: - return global_role - - return conn.has_role(User.safe_id(user), - role, - Project.safe_id(project)) - - def add_role(self, user, role, project=None): - with LDAPWrapper() as conn: - return conn.add_role(User.safe_id(user), role, - Project.safe_id(project)) - - def remove_role(self, user, role, project=None): - with LDAPWrapper() as conn: - return conn.remove_role(User.safe_id(user), role, - Project.safe_id(project)) - - def create_project(self, name, manager_user, - description=None, member_users=None): - if member_users: - 
member_users = [User.safe_id(u) for u in member_users] - # NOTE(vish): try to associate a vpn ip and port first because - # if it throws an exception, we save having to - # create and destroy a project - Vpn.create(name) - with LDAPWrapper() as conn: - return conn.create_project(name, - User.safe_id(manager_user), - description, - member_users) - - - def get_projects(self): - with LDAPWrapper() as conn: - return conn.find_projects() - - - def get_project(self, project): - with LDAPWrapper() as conn: - return conn.find_project(Project.safe_id(project)) - - def add_to_project(self, user, project): - with LDAPWrapper() as conn: - return conn.add_to_project(User.safe_id(user), - Project.safe_id(project)) - - def is_project_manager(self, user, project): - if not isinstance(project, Project): - project = self.get_project(project) - return project.has_manager(user) - - def is_project_member(self, user, project): - if isinstance(project, Project): - return project.has_member(user) - else: - with LDAPWrapper() as conn: - return conn.is_in_project(User.safe_id(user), project) - - def remove_from_project(self, user, project): - with LDAPWrapper() as conn: - return conn.remove_from_project(User.safe_id(user), - Project.safe_id(project)) - - def delete_project(self, project): - with LDAPWrapper() as conn: - return conn.delete_project(Project.safe_id(project)) - - def get_user(self, uid): - with LDAPWrapper() as conn: - return conn.find_user(uid) - - def get_user_from_access_key(self, access_key): - with LDAPWrapper() as conn: - return conn.find_user_by_access_key(access_key) - - def get_users(self): - with LDAPWrapper() as conn: - return conn.find_users() - - def create_user(self, user, access=None, secret=None, - admin=False, create_project=True): - if access == None: access = str(uuid.uuid4()) - if secret == None: secret = str(uuid.uuid4()) - with LDAPWrapper() as conn: - user = User.safe_id(user) - result = conn.create_user(user, access, secret, admin) - if create_project: - # NOTE(vish): if the project creation fails, we delete - # the user and return an exception - try: - conn.create_project(user, user, user) - except Exception: - with LDAPWrapper() as conn: - conn.delete_user(user) - raise - return result - - def delete_user(self, user, delete_project=True): - with LDAPWrapper() as conn: - user = User.safe_id(user) - if delete_project: - try: - conn.delete_project(user) - except exception.NotFound: - pass - conn.delete_user(user) - - def generate_key_pair(self, user, key_name): - # generating key pair is slow so delay generation - # until after check - user = User.safe_id(user) - with LDAPWrapper() as conn: - if not conn.user_exists(user): - raise exception.NotFound("User %s doesn't exist" % user) - if conn.key_pair_exists(user, key_name): - raise exception.Duplicate("The keypair %s already exists" - % key_name) - private_key, public_key, fingerprint = crypto.generate_key_pair() - self.create_key_pair(User.safe_id(user), key_name, - public_key, fingerprint) - return private_key, fingerprint - - def create_key_pair(self, user, key_name, public_key, fingerprint): - with LDAPWrapper() as conn: - return conn.create_key_pair(User.safe_id(user), key_name, - public_key, fingerprint) - - def get_key_pair(self, user, key_name): - with LDAPWrapper() as conn: - return conn.find_key_pair(User.safe_id(user), key_name) - - def get_key_pairs(self, user): - with LDAPWrapper() as conn: - return conn.find_key_pairs(User.safe_id(user)) - - def delete_key_pair(self, user, key_name): - with LDAPWrapper() as conn: - 
conn.delete_key_pair(User.safe_id(user), key_name) - - def generate_x509_cert(self, user, project): - (private_key, csr) = crypto.generate_x509_cert( - self.__cert_subject(User.safe_id(user))) - # TODO - This should be async call back to the cloud controller - signed_cert = crypto.sign_csr(csr, Project.safe_id(project)) - return (private_key, signed_cert) - - def __cert_subject(self, uid): - # FIXME(ja) - this should be pulled from a global configuration - return FLAGS.credential_cert_subject % (uid, utils.isotime()) - - -class LDAPWrapper(object): - def __init__(self): - self.user = FLAGS.user_dn - self.passwd = FLAGS.ldap_password - - def __enter__(self): - self.connect() - return self - - def __exit__(self, type, value, traceback): - self.conn.unbind_s() - return False - - def connect(self): - """ connect to ldap as admin user """ - if FLAGS.fake_users: - self.NO_SUCH_OBJECT = fakeldap.NO_SUCH_OBJECT - self.OBJECT_CLASS_VIOLATION = fakeldap.OBJECT_CLASS_VIOLATION - self.conn = fakeldap.initialize(FLAGS.ldap_url) - else: - self.NO_SUCH_OBJECT = ldap.NO_SUCH_OBJECT - self.OBJECT_CLASS_VIOLATION = ldap.OBJECT_CLASS_VIOLATION - self.conn = ldap.initialize(FLAGS.ldap_url) - self.conn.simple_bind_s(self.user, self.passwd) - - def find_object(self, dn, query = None): - objects = self.find_objects(dn, query) - if len(objects) == 0: - return None - return objects[0] - - def find_dns(self, dn, query=None): - try: - res = self.conn.search_s(dn, ldap.SCOPE_SUBTREE, query) - except self.NO_SUCH_OBJECT: - return [] - # just return the DNs - return [dn for dn, attributes in res] - - def find_objects(self, dn, query = None): - try: - res = self.conn.search_s(dn, ldap.SCOPE_SUBTREE, query) - except self.NO_SUCH_OBJECT: - return [] - # just return the attributes - return [attributes for dn, attributes in res] - - def find_users(self): - attrs = self.find_objects(FLAGS.user_ldap_subtree, - '(objectclass=novaUser)') - return [self.__to_user(attr) for attr in attrs] - - def find_key_pairs(self, uid): - attrs = self.find_objects(self.__uid_to_dn(uid), - '(objectclass=novaKeyPair)') - return [self.__to_key_pair(uid, attr) for attr in attrs] - - def find_projects(self): - attrs = self.find_objects(FLAGS.project_ldap_subtree, - '(objectclass=novaProject)') - return [self.__to_project(attr) for attr in attrs] - - def find_roles(self, tree): - attrs = self.find_objects(tree, - '(&(objectclass=groupOfNames)(!(objectclass=novaProject)))') - return [self.__to_group(attr) for attr in attrs] - - def find_group_dns_with_member(self, tree, uid): - dns = self.find_dns(tree, - '(&(objectclass=groupOfNames)(member=%s))' % - self.__uid_to_dn(uid)) - return dns - - def find_user(self, uid): - attr = self.find_object(self.__uid_to_dn(uid), - '(objectclass=novaUser)') - return self.__to_user(attr) - - def find_key_pair(self, uid, key_name): - dn = 'cn=%s,%s' % (key_name, - self.__uid_to_dn(uid)) - attr = self.find_object(dn, '(objectclass=novaKeyPair)') - return self.__to_key_pair(uid, attr) - - def find_group(self, dn): - """uses dn directly instead of custructing it from name""" - attr = self.find_object(dn, '(objectclass=groupOfNames)') - return self.__to_group(attr) - - def find_project(self, name): - dn = 'cn=%s,%s' % (name, - FLAGS.project_ldap_subtree) - attr = self.find_object(dn, '(objectclass=novaProject)') - return self.__to_project(attr) - - def user_exists(self, name): - return self.find_user(name) != None - - def key_pair_exists(self, uid, key_name): - return self.find_key_pair(uid, key_name) != None - - def 
project_exists(self, name): - return self.find_project(name) != None - - def group_exists(self, dn): - return self.find_group(dn) != None - - def delete_key_pairs(self, uid): - keys = self.find_key_pairs(uid) - if keys != None: - for key in keys: - self.delete_key_pair(uid, key.name) - - def create_user(self, name, access_key, secret_key, is_admin): - if self.user_exists(name): - raise exception.Duplicate("LDAP user %s already exists" % name) - attr = [ - ('objectclass', ['person', - 'organizationalPerson', - 'inetOrgPerson', - 'novaUser']), - ('ou', [FLAGS.user_unit]), - ('uid', [name]), - ('sn', [name]), - ('cn', [name]), - ('secretKey', [secret_key]), - ('accessKey', [access_key]), - ('isAdmin', [str(is_admin).upper()]), - ] - self.conn.add_s(self.__uid_to_dn(name), attr) - return self.__to_user(dict(attr)) - - def create_project(self, name, manager_uid, - description=None, member_uids=None): - if self.project_exists(name): - raise exception.Duplicate("Project can't be created because " - "project %s already exists" % name) - if not self.user_exists(manager_uid): - raise exception.NotFound("Project can't be created because " - "manager %s doesn't exist" % manager_uid) - manager_dn = self.__uid_to_dn(manager_uid) - # description is a required attribute - if description is None: - description = name - members = [] - if member_uids != None: - for member_uid in member_uids: - if not self.user_exists(member_uid): - raise exception.NotFound("Project can't be created " - "because user %s doesn't exist" % member_uid) - members.append(self.__uid_to_dn(member_uid)) - # always add the manager as a member because members is required - if not manager_dn in members: - members.append(manager_dn) - attr = [ - ('objectclass', ['novaProject']), - ('cn', [name]), - ('description', [description]), - ('projectManager', [manager_dn]), - ('member', members) - ] - self.conn.add_s('cn=%s,%s' % (name, FLAGS.project_ldap_subtree), attr) - return self.__to_project(dict(attr)) - - def add_to_project(self, uid, project_id): - dn = 'cn=%s,%s' % (project_id, FLAGS.project_ldap_subtree) - return self.add_to_group(uid, dn) - - def remove_from_project(self, uid, project_id): - dn = 'cn=%s,%s' % (project_id, FLAGS.project_ldap_subtree) - return self.remove_from_group(uid, dn) - - def is_in_project(self, uid, project_id): - dn = 'cn=%s,%s' % (project_id, FLAGS.project_ldap_subtree) - return self.is_in_group(uid, dn) - - def __role_to_dn(self, role, project_id=None): - if project_id == None: - return FLAGS.__getitem__("ldap_%s" % role).value - else: - return 'cn=%s,cn=%s,%s' % (role, - project_id, - FLAGS.project_ldap_subtree) - - def __create_group(self, group_dn, name, uid, - description, member_uids = None): - if self.group_exists(group_dn): - raise exception.Duplicate("Group can't be created because " - "group %s already exists" % name) - members = [] - if member_uids != None: - for member_uid in member_uids: - if not self.user_exists(member_uid): - raise exception.NotFound("Group can't be created " - "because user %s doesn't exist" % member_uid) - members.append(self.__uid_to_dn(member_uid)) - dn = self.__uid_to_dn(uid) - if not dn in members: - members.append(dn) - attr = [ - ('objectclass', ['groupOfNames']), - ('cn', [name]), - ('description', [description]), - ('member', members) - ] - self.conn.add_s(group_dn, attr) - return self.__to_group(dict(attr)) - - def has_role(self, uid, role, project_id=None): - role_dn = self.__role_to_dn(role, project_id) - return self.is_in_group(uid, role_dn) - - def add_role(self, 
uid, role, project_id=None): - role_dn = self.__role_to_dn(role, project_id) - if not self.group_exists(role_dn): - # create the role if it doesn't exist - description = '%s role for %s' % (role, project_id) - self.__create_group(role_dn, role, uid, description) - else: - return self.add_to_group(uid, role_dn) - - def remove_role(self, uid, role, project_id=None): - role_dn = self.__role_to_dn(role, project_id) - return self.remove_from_group(uid, role_dn) - - def is_in_group(self, uid, group_dn): - if not self.user_exists(uid): - raise exception.NotFound("User %s can't be searched in group " - "becuase the user doesn't exist" % (uid,)) - if not self.group_exists(group_dn): - return False - res = self.find_object(group_dn, - '(member=%s)' % self.__uid_to_dn(uid)) - return res != None - - def add_to_group(self, uid, group_dn): - if not self.user_exists(uid): - raise exception.NotFound("User %s can't be added to the group " - "becuase the user doesn't exist" % (uid,)) - if not self.group_exists(group_dn): - raise exception.NotFound("The group at dn %s doesn't exist" % - (group_dn,)) - if self.is_in_group(uid, group_dn): - raise exception.Duplicate("User %s is already a member of " - "the group %s" % (uid, group_dn)) - attr = [ - (ldap.MOD_ADD, 'member', self.__uid_to_dn(uid)) - ] - self.conn.modify_s(group_dn, attr) - - def remove_from_group(self, uid, group_dn): - if not self.group_exists(group_dn): - raise exception.NotFound("The group at dn %s doesn't exist" % - (group_dn,)) - if not self.user_exists(uid): - raise exception.NotFound("User %s can't be removed from the " - "group because the user doesn't exist" % (uid,)) - if not self.is_in_group(uid, group_dn): - raise exception.NotFound("User %s is not a member of the group" % - (uid,)) - self._safe_remove_from_group(group_dn, uid) - - def _safe_remove_from_group(self, group_dn, uid): - # FIXME(vish): what if deleted user is a project manager? - attr = [(ldap.MOD_DELETE, 'member', self.__uid_to_dn(uid))] - try: - self.conn.modify_s(group_dn, attr) - except self.OBJECT_CLASS_VIOLATION: - logging.debug("Attempted to remove the last member of a group. " - "Deleting the group at %s instead." 
% group_dn ) - self.delete_group(group_dn) - - def remove_from_all(self, uid): - if not self.user_exists(uid): - raise exception.NotFound("User %s can't be removed from all " - "because the user doesn't exist" % (uid,)) - dn = self.__uid_to_dn(uid) - role_dns = self.find_group_dns_with_member( - FLAGS.role_ldap_subtree, uid) - for role_dn in role_dns: - self._safe_remove_from_group(role_dn, uid) - project_dns = self.find_group_dns_with_member( - FLAGS.project_ldap_subtree, uid) - for project_dn in project_dns: - self._safe_remove_from_group(project_dn, uid) - - def create_key_pair(self, uid, key_name, public_key, fingerprint): - """create's a public key in the directory underneath the user""" - # TODO(vish): possibly refactor this to store keys in their own ou - # and put dn reference in the user object - attr = [ - ('objectclass', ['novaKeyPair']), - ('cn', [key_name]), - ('sshPublicKey', [public_key]), - ('keyFingerprint', [fingerprint]), - ] - self.conn.add_s('cn=%s,%s' % (key_name, - self.__uid_to_dn(uid)), - attr) - return self.__to_key_pair(uid, dict(attr)) - - def find_user_by_access_key(self, access): - query = '(accessKey=%s)' % access - dn = FLAGS.user_ldap_subtree - return self.__to_user(self.find_object(dn, query)) - - def delete_user(self, uid): - if not self.user_exists(uid): - raise exception.NotFound("User %s doesn't exist" % uid) - self.delete_key_pairs(uid) - self.remove_from_all(uid) - self.conn.delete_s('uid=%s,%s' % (uid, - FLAGS.user_ldap_subtree)) - - def delete_key_pair(self, uid, key_name): - if not self.key_pair_exists(uid, key_name): - raise exception.NotFound("Key Pair %s doesn't exist for user %s" % - (key_name, uid)) - self.conn.delete_s('cn=%s,uid=%s,%s' % (key_name, uid, - FLAGS.user_ldap_subtree)) - - def delete_group(self, group_dn): - if not self.group_exists(group_dn): - raise exception.NotFound("Group at dn %s doesn't exist" % group_dn) - self.conn.delete_s(group_dn) - - def delete_roles(self, project_dn): - roles = self.find_roles(project_dn) - for role in roles: - self.delete_group('cn=%s,%s' % (role.id, project_dn)) - - def delete_project(self, name): - project_dn = 'cn=%s,%s' % (name, FLAGS.project_ldap_subtree) - self.delete_roles(project_dn) - self.delete_group(project_dn) - - def __to_user(self, attr): - if attr == None: - return None - return User( - id = attr['uid'][0], - name = attr['cn'][0], - access = attr['accessKey'][0], - secret = attr['secretKey'][0], - admin = (attr['isAdmin'][0] == 'TRUE') - ) - - def __to_key_pair(self, owner, attr): - if attr == None: - return None - return KeyPair( - id = attr['cn'][0], - owner_id = owner, - public_key = attr['sshPublicKey'][0], - fingerprint = attr['keyFingerprint'][0], - ) - - def __to_group(self, attr): - if attr == None: - return None - member_dns = attr.get('member', []) - return Group( - id = attr['cn'][0], - description = attr.get('description', [None])[0], - member_ids = [self.__dn_to_uid(x) for x in member_dns] - ) - - def __to_project(self, attr): - if attr == None: - return None - member_dns = attr.get('member', []) - return Project( - id = attr['cn'][0], - project_manager_id = self.__dn_to_uid(attr['projectManager'][0]), - description = attr.get('description', [None])[0], - member_ids = [self.__dn_to_uid(x) for x in member_dns] - ) - - def __dn_to_uid(self, dn): - return dn.split(',')[0].split('=')[1] - - def __uid_to_dn(self, dn): - return 'uid=%s,%s' % (dn, FLAGS.user_ldap_subtree) diff --git a/nova/cloudpipe/api.py b/nova/cloudpipe/api.py index a5f78a16d..0bffe9aa3 100644 --- 
a/nova/cloudpipe/api.py +++ b/nova/cloudpipe/api.py @@ -25,7 +25,7 @@ import tornado.web import urllib from nova import crypto -from nova.auth import users +from nova.auth import manager _log = logging.getLogger("api") diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py index 63f7ae222..5b0ed3471 100644 --- a/nova/cloudpipe/pipelib.py +++ b/nova/cloudpipe/pipelib.py @@ -31,7 +31,7 @@ import zipfile from nova import exception from nova import flags from nova import utils -from nova.auth import users +from nova.auth import manager from nova.endpoint import api @@ -44,7 +44,7 @@ flags.DEFINE_string('boot_script_template', class CloudPipe(object): def __init__(self, cloud_controller): self.controller = cloud_controller - self.manager = users.UserManager.instance() + self.manager = manager.AuthManager() def launch_vpn_instance(self, project_id): logging.debug( "Launching VPN for %s" % (project_id)) diff --git a/nova/compute/network.py b/nova/compute/network.py index 90d6b2dc6..370e2bf44 100644 --- a/nova/compute/network.py +++ b/nova/compute/network.py @@ -29,7 +29,7 @@ from nova import datastore from nova import exception from nova import flags from nova import utils -from nova.auth import users +from nova.auth import manager from nova.compute import exception as compute_exception from nova.compute import linux_net @@ -209,11 +209,11 @@ class BaseNetwork(datastore.BasicModel): @property def user(self): - return users.UserManager.instance().get_user(self['user_id']) + return manager.AuthManager().get_user(self['user_id']) @property def project(self): - return users.UserManager.instance().get_project(self['project_id']) + return manager.AuthManager().get_project(self['project_id']) @property def _hosts_key(self): @@ -511,7 +511,7 @@ def get_vlan_for_project(project_id): if not known_vlans.has_key(vstr): return Vlan.create(project_id, vnum) old_project_id = known_vlans[vstr] - if not users.UserManager.instance().get_project(old_project_id): + if not manager.AuthManager().get_project(old_project_id): vlan = Vlan.lookup(old_project_id) if vlan: # NOTE(todd): This doesn't check for vlan id match, because @@ -537,7 +537,7 @@ def get_network_by_interface(iface, security_group='default'): def get_network_by_address(address): logging.debug("Get Network By Address: %s" % address) - for project in users.UserManager.instance().get_projects(): + for project in manager.AuthManager().get_projects(): net = get_project_network(project.id) if address in net.assigned: logging.debug("Found %s in %s" % (address, project.id)) @@ -577,7 +577,7 @@ def get_project_network(project_id, security_group='default'): """ get a project's private network, allocating one if needed """ # TODO(todd): It looks goofy to get a project from a UserManager. # Refactor to still use the LDAP backend, but not User specific. - project = users.UserManager.instance().get_project(project_id) + project = manager.AuthManager().get_project(project_id) if not project: raise exception.Error("Project %s doesn't exist, uhoh." 
% project_id) @@ -587,5 +587,5 @@ def get_project_network(project_id, security_group='default'): def restart_nets(): """ Ensure the network for each user is enabled""" - for project in users.UserManager.instance().get_projects(): + for project in manager.AuthManager().get_projects(): get_project_network(project.id).express() diff --git a/nova/endpoint/admin.py b/nova/endpoint/admin.py index b97a6727f..55a8e4238 100644 --- a/nova/endpoint/admin.py +++ b/nova/endpoint/admin.py @@ -22,7 +22,7 @@ Admin API controller, exposed through http via the api worker. import base64 -from nova.auth import users +from nova.auth import manager from nova.compute import model def user_dict(user, base64_file=None): @@ -69,18 +69,18 @@ class AdminController(object): @admin_only def describe_user(self, _context, name, **_kwargs): """Returns user data, including access and secret keys.""" - return user_dict(users.UserManager.instance().get_user(name)) + return user_dict(manager.AuthManager().get_user(name)) @admin_only def describe_users(self, _context, **_kwargs): """Returns all users - should be changed to deal with a list.""" return {'userSet': - [user_dict(u) for u in users.UserManager.instance().get_users()] } + [user_dict(u) for u in manager.AuthManager().get_users()] } @admin_only def register_user(self, _context, name, **_kwargs): """Creates a new user, and returns generated credentials.""" - return user_dict(users.UserManager.instance().create_user(name)) + return user_dict(manager.AuthManager().create_user(name)) @admin_only def deregister_user(self, _context, name, **_kwargs): @@ -88,7 +88,7 @@ class AdminController(object): Should throw an exception if the user has instances, volumes, or buckets remaining. """ - users.UserManager.instance().delete_user(name) + manager.AuthManager().delete_user(name) return True @@ -100,8 +100,8 @@ class AdminController(object): """ if project is None: project = name - project = users.UserManager.instance().get_project(project) - user = users.UserManager.instance().get_user(name) + project = manager.AuthManager().get_project(project) + user = manager.AuthManager().get_user(name) return user_dict(user, base64.b64encode(project.get_credentials(user))) @admin_only diff --git a/nova/endpoint/api.py b/nova/endpoint/api.py index 79a2aaddb..78a18b9ea 100755 --- a/nova/endpoint/api.py +++ b/nova/endpoint/api.py @@ -35,7 +35,7 @@ from nova import crypto from nova import exception from nova import flags from nova import utils -from nova.auth import users +from nova.auth import manager import nova.cloudpipe.api from nova.endpoint import cloud @@ -266,7 +266,7 @@ class APIRequestHandler(tornado.web.RequestHandler): # Authenticate the request. 
try: - (user, project) = users.UserManager.instance().authenticate( + (user, project) = manager.AuthManager().authenticate( access, signature, auth_params, diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 3b7b4804b..8eac1ce4a 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -35,7 +35,7 @@ from nova import flags from nova import rpc from nova import utils from nova.auth import rbac -from nova.auth import users +from nova.auth import manager from nova.compute import model from nova.compute import network from nova.compute import node @@ -48,9 +48,9 @@ FLAGS = flags.FLAGS flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') def _gen_key(user_id, key_name): - """ Tuck this into UserManager """ + """ Tuck this into AuthManager """ try: - manager = users.UserManager.instance() + manager = manager.AuthManager() private_key, fingerprint = manager.generate_key_pair(user_id, key_name) except Exception as ex: return {'exception': ex} diff --git a/nova/endpoint/rackspace.py b/nova/endpoint/rackspace.py index 9208ddab7..605f9b8e0 100644 --- a/nova/endpoint/rackspace.py +++ b/nova/endpoint/rackspace.py @@ -34,7 +34,7 @@ from nova import exception from nova import flags from nova import rpc from nova import utils -from nova.auth import users +from nova.auth import manager from nova.compute import model from nova.compute import network from nova.endpoint import images @@ -78,11 +78,11 @@ class Api(object): def build_context(self, env): rv = {} if env.has_key("HTTP_X_AUTH_TOKEN"): - rv['user'] = users.UserManager.instance().get_user_from_access_key( + rv['user'] = manager.AuthManager().get_user_from_access_key( env['HTTP_X_AUTH_TOKEN'] ) if rv['user']: - rv['project'] = users.UserManager.instance().get_project( + rv['project'] = manager.AuthManager().get_project( rv['user'].name ) return rv diff --git a/nova/tests/access_unittest.py b/nova/tests/access_unittest.py index 8500dd0cb..832a4b279 100644 --- a/nova/tests/access_unittest.py +++ b/nova/tests/access_unittest.py @@ -22,7 +22,7 @@ import logging from nova import exception from nova import flags from nova import test -from nova.auth.users import UserManager +from nova.auth import manager from nova.auth import rbac @@ -35,7 +35,7 @@ class AccessTestCase(test.BaseTestCase): super(AccessTestCase, self).setUp() FLAGS.fake_libvirt = True FLAGS.fake_storage = True - um = UserManager.instance() + um = manager.AuthManager() # Make test users try: self.testadmin = um.create_user('testadmin') @@ -79,7 +79,7 @@ class AccessTestCase(test.BaseTestCase): #user is set in each test def tearDown(self): - um = UserManager.instance() + um = manager.AuthManager() # Delete the test project um.delete_project('testproj') # Delete the test user diff --git a/nova/tests/api_unittest.py b/nova/tests/api_unittest.py index e5e2afe26..5c26192bd 100644 --- a/nova/tests/api_unittest.py +++ b/nova/tests/api_unittest.py @@ -26,7 +26,7 @@ from twisted.internet import defer from nova import flags from nova import test -from nova.auth import users +from nova.auth import manager from nova.endpoint import api from nova.endpoint import cloud @@ -150,7 +150,7 @@ class ApiEc2TestCase(test.BaseTestCase): def setUp(self): super(ApiEc2TestCase, self).setUp() - self.users = users.UserManager.instance() + self.users = manager.AuthManager() self.cloud = cloud.CloudController() self.host = '127.0.0.1' diff --git a/nova/tests/auth_unittest.py b/nova/tests/auth_unittest.py new file mode 100644 index 000000000..000f6bf17 --- /dev/null +++ 
b/nova/tests/auth_unittest.py @@ -0,0 +1,207 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import logging +from M2Crypto import BIO +from M2Crypto import RSA +from M2Crypto import X509 +import unittest + +from nova import crypto +from nova import flags +from nova import test +from nova.auth import manager +from nova.endpoint import cloud + +FLAGS = flags.FLAGS + + +class AuthTestCase(test.BaseTestCase): + flush_db = False + def setUp(self): + super(AuthTestCase, self).setUp() + self.flags(fake_libvirt=True, + fake_storage=True) + self.users = manager.AuthManager() + + def test_001_can_create_users(self): + self.users.create_user('test1', 'access', 'secret') + self.users.create_user('test2') + + def test_002_can_get_user(self): + user = self.users.get_user('test1') + + def test_003_can_retreive_properties(self): + user = self.users.get_user('test1') + self.assertEqual('test1', user.id) + self.assertEqual('access', user.access) + self.assertEqual('secret', user.secret) + + def test_004_signature_is_valid(self): + #self.assertTrue(self.users.authenticate( **boto.generate_url ... ? ? ? 
)) + pass + #raise NotImplementedError + + def test_005_can_get_credentials(self): + return + credentials = self.users.get_user('test1').get_credentials() + self.assertEqual(credentials, + 'export EC2_ACCESS_KEY="access"\n' + + 'export EC2_SECRET_KEY="secret"\n' + + 'export EC2_URL="http://127.0.0.1:8773/services/Cloud"\n' + + 'export S3_URL="http://127.0.0.1:3333/"\n' + + 'export EC2_USER_ID="test1"\n') + + def test_006_test_key_storage(self): + user = self.users.get_user('test1') + user.create_key_pair('public', 'key', 'fingerprint') + key = user.get_key_pair('public') + self.assertEqual('key', key.public_key) + self.assertEqual('fingerprint', key.fingerprint) + + def test_007_test_key_generation(self): + user = self.users.get_user('test1') + private_key, fingerprint = user.generate_key_pair('public2') + key = RSA.load_key_string(private_key, callback=lambda: None) + bio = BIO.MemoryBuffer() + public_key = user.get_key_pair('public2').public_key + key.save_pub_key_bio(bio) + converted = crypto.ssl_pub_to_ssh_pub(bio.read()) + # assert key fields are equal + self.assertEqual(public_key.split(" ")[1].strip(), + converted.split(" ")[1].strip()) + + def test_008_can_list_key_pairs(self): + keys = self.users.get_user('test1').get_key_pairs() + self.assertTrue(filter(lambda k: k.name == 'public', keys)) + self.assertTrue(filter(lambda k: k.name == 'public2', keys)) + + def test_009_can_delete_key_pair(self): + self.users.get_user('test1').delete_key_pair('public') + keys = self.users.get_user('test1').get_key_pairs() + self.assertFalse(filter(lambda k: k.name == 'public', keys)) + + def test_010_can_list_users(self): + users = self.users.get_users() + logging.warn(users) + self.assertTrue(filter(lambda u: u.id == 'test1', users)) + + def test_101_can_add_user_role(self): + self.assertFalse(self.users.has_role('test1', 'itsec')) + self.users.add_role('test1', 'itsec') + self.assertTrue(self.users.has_role('test1', 'itsec')) + + def test_199_can_remove_user_role(self): + self.assertTrue(self.users.has_role('test1', 'itsec')) + self.users.remove_role('test1', 'itsec') + self.assertFalse(self.users.has_role('test1', 'itsec')) + + def test_201_can_create_project(self): + project = self.users.create_project('testproj', 'test1', 'A test project', ['test1']) + self.assertTrue(filter(lambda p: p.name == 'testproj', self.users.get_projects())) + self.assertEqual(project.name, 'testproj') + self.assertEqual(project.description, 'A test project') + self.assertEqual(project.project_manager_id, 'test1') + self.assertTrue(project.has_member('test1')) + + def test_202_user1_is_project_member(self): + self.assertTrue(self.users.get_user('test1').is_project_member('testproj')) + + def test_203_user2_is_not_project_member(self): + self.assertFalse(self.users.get_user('test2').is_project_member('testproj')) + + def test_204_user1_is_project_manager(self): + self.assertTrue(self.users.get_user('test1').is_project_manager('testproj')) + + def test_205_user2_is_not_project_manager(self): + self.assertFalse(self.users.get_user('test2').is_project_manager('testproj')) + + def test_206_can_add_user_to_project(self): + self.users.add_to_project('test2', 'testproj') + self.assertTrue(self.users.get_project('testproj').has_member('test2')) + + def test_208_can_remove_user_from_project(self): + self.users.remove_from_project('test2', 'testproj') + self.assertFalse(self.users.get_project('testproj').has_member('test2')) + + def test_209_can_generate_x509(self): + # MUST HAVE RUN CLOUD SETUP BY NOW + self.cloud = 
cloud.CloudController() + self.cloud.setup() + private_key, signed_cert_string = self.users.get_project('testproj').generate_x509_cert('test1') + logging.debug(signed_cert_string) + + # Need to verify that it's signed by the right intermediate CA + full_chain = crypto.fetch_ca(project_id='testproj', chain=True) + int_cert = crypto.fetch_ca(project_id='testproj', chain=False) + cloud_cert = crypto.fetch_ca() + logging.debug("CA chain:\n\n =====\n%s\n\n=====" % full_chain) + signed_cert = X509.load_cert_string(signed_cert_string) + chain_cert = X509.load_cert_string(full_chain) + int_cert = X509.load_cert_string(int_cert) + cloud_cert = X509.load_cert_string(cloud_cert) + self.assertTrue(signed_cert.verify(chain_cert.get_pubkey())) + self.assertTrue(signed_cert.verify(int_cert.get_pubkey())) + + if not FLAGS.use_intermediate_ca: + self.assertTrue(signed_cert.verify(cloud_cert.get_pubkey())) + else: + self.assertFalse(signed_cert.verify(cloud_cert.get_pubkey())) + + def test_210_can_add_project_role(self): + project = self.users.get_project('testproj') + self.assertFalse(project.has_role('test1', 'sysadmin')) + self.users.add_role('test1', 'sysadmin') + self.assertFalse(project.has_role('test1', 'sysadmin')) + project.add_role('test1', 'sysadmin') + self.assertTrue(project.has_role('test1', 'sysadmin')) + + def test_211_can_remove_project_role(self): + project = self.users.get_project('testproj') + self.assertTrue(project.has_role('test1', 'sysadmin')) + project.remove_role('test1', 'sysadmin') + self.assertFalse(project.has_role('test1', 'sysadmin')) + self.users.remove_role('test1', 'sysadmin') + self.assertFalse(project.has_role('test1', 'sysadmin')) + + def test_212_vpn_ip_and_port_looks_valid(self): + project = self.users.get_project('testproj') + self.assert_(project.vpn_ip) + self.assert_(project.vpn_port >= FLAGS.vpn_start_port) + self.assert_(project.vpn_port <= FLAGS.vpn_end_port) + + def test_213_too_many_vpns(self): + for i in xrange(manager.Vpn.num_ports_for_ip(FLAGS.vpn_ip)): + manager.Vpn.create("vpnuser%s" % i) + self.assertRaises(manager.NoMorePorts, manager.Vpn.create, "boom") + + def test_299_can_delete_project(self): + self.users.delete_project('testproj') + self.assertFalse(filter(lambda p: p.name == 'testproj', self.users.get_projects())) + + def test_999_can_delete_users(self): + self.users.delete_user('test1') + users = self.users.get_users() + self.assertFalse(filter(lambda u: u.id == 'test1', users)) + self.users.delete_user('test2') + self.assertEqual(self.users.get_user('test2'), None) + + +if __name__ == "__main__": + # TODO: Implement use_fake as an option + unittest.main() diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py index b8614fdc8..3abef28a1 100644 --- a/nova/tests/cloud_unittest.py +++ b/nova/tests/cloud_unittest.py @@ -27,7 +27,7 @@ from xml.etree import ElementTree from nova import flags from nova import rpc from nova import test -from nova.auth import users +from nova.auth import manager from nova.compute import node from nova.endpoint import api from nova.endpoint import cloud @@ -61,15 +61,15 @@ class CloudTestCase(test.BaseTestCase): self.injected.append(self.node_consumer.attach_to_tornado(self.ioloop)) try: - users.UserManager.instance().create_user('admin', 'admin', 'admin') + manager.AuthManager().create_user('admin', 'admin', 'admin') except: pass - admin = users.UserManager.instance().get_user('admin') - project = users.UserManager.instance().create_project('proj', 'admin', 'proj') + admin =
manager.AuthManager().get_user('admin') + project = manager.AuthManager().create_project('proj', 'admin', 'proj') self.context = api.APIRequestContext(handler=None,project=project,user=admin) def tearDown(self): - users.UserManager.instance().delete_project('proj') - users.UserManager.instance().delete_user('admin') + manager.AuthManager().delete_project('proj') + manager.AuthManager().delete_user('admin') def test_console_output(self): if FLAGS.fake_libvirt: diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index a822cc1d9..fd0e64724 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -26,7 +26,7 @@ from nova import test from nova import exception from nova.compute.exception import NoMoreAddresses from nova.compute import network -from nova.auth import users +from nova.auth import manager from nova import utils @@ -38,7 +38,7 @@ class NetworkTestCase(test.TrialTestCase): fake_network=True, network_size=32) logging.getLogger().setLevel(logging.DEBUG) - self.manager = users.UserManager.instance() + self.manager = manager.AuthManager() self.dnsmasq = FakeDNSMasq() try: self.manager.create_user('netuser', 'netuser', 'netuser') diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py index f47ca7f00..85bcd7c67 100644 --- a/nova/tests/objectstore_unittest.py +++ b/nova/tests/objectstore_unittest.py @@ -26,7 +26,7 @@ import tempfile from nova import flags from nova import objectstore from nova import test -from nova.auth import users +from nova.auth import manager FLAGS = flags.FLAGS @@ -57,7 +57,7 @@ class ObjectStoreTestCase(test.BaseTestCase): ca_path=os.path.join(os.path.dirname(__file__), 'CA')) logging.getLogger().setLevel(logging.DEBUG) - self.um = users.UserManager.instance() + self.um = manager.AuthManager() try: self.um.create_user('user1') except: pass @@ -177,7 +177,7 @@ class ObjectStoreTestCase(test.BaseTestCase): # FLAGS.images_path = os.path.join(tempdir, 'images') # FLAGS.ca_path = os.path.join(os.path.dirname(__file__), 'CA') # -# self.users = users.UserManager.instance() +# self.users = manager.AuthManager() # self.app = handler.Application(self.users) # # self.host = '127.0.0.1' diff --git a/nova/tests/users_unittest.py b/nova/tests/users_unittest.py deleted file mode 100644 index 301721075..000000000 --- a/nova/tests/users_unittest.py +++ /dev/null @@ -1,207 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import logging -from M2Crypto import BIO -from M2Crypto import RSA -from M2Crypto import X509 -import unittest - -from nova import crypto -from nova import flags -from nova import test -from nova.auth import users -from nova.endpoint import cloud - -FLAGS = flags.FLAGS - - -class UserTestCase(test.BaseTestCase): - flush_db = False - def setUp(self): - super(UserTestCase, self).setUp() - self.flags(fake_libvirt=True, - fake_storage=True) - self.users = users.UserManager.instance() - - def test_001_can_create_users(self): - self.users.create_user('test1', 'access', 'secret') - self.users.create_user('test2') - - def test_002_can_get_user(self): - user = self.users.get_user('test1') - - def test_003_can_retreive_properties(self): - user = self.users.get_user('test1') - self.assertEqual('test1', user.id) - self.assertEqual('access', user.access) - self.assertEqual('secret', user.secret) - - def test_004_signature_is_valid(self): - #self.assertTrue(self.users.authenticate( **boto.generate_url ... ? ? ? )) - pass - #raise NotImplementedError - - def test_005_can_get_credentials(self): - return - credentials = self.users.get_user('test1').get_credentials() - self.assertEqual(credentials, - 'export EC2_ACCESS_KEY="access"\n' + - 'export EC2_SECRET_KEY="secret"\n' + - 'export EC2_URL="http://127.0.0.1:8773/services/Cloud"\n' + - 'export S3_URL="http://127.0.0.1:3333/"\n' + - 'export EC2_USER_ID="test1"\n') - - def test_006_test_key_storage(self): - user = self.users.get_user('test1') - user.create_key_pair('public', 'key', 'fingerprint') - key = user.get_key_pair('public') - self.assertEqual('key', key.public_key) - self.assertEqual('fingerprint', key.fingerprint) - - def test_007_test_key_generation(self): - user = self.users.get_user('test1') - private_key, fingerprint = user.generate_key_pair('public2') - key = RSA.load_key_string(private_key, callback=lambda: None) - bio = BIO.MemoryBuffer() - public_key = user.get_key_pair('public2').public_key - key.save_pub_key_bio(bio) - converted = crypto.ssl_pub_to_ssh_pub(bio.read()) - # assert key fields are equal - self.assertEqual(public_key.split(" ")[1].strip(), - converted.split(" ")[1].strip()) - - def test_008_can_list_key_pairs(self): - keys = self.users.get_user('test1').get_key_pairs() - self.assertTrue(filter(lambda k: k.name == 'public', keys)) - self.assertTrue(filter(lambda k: k.name == 'public2', keys)) - - def test_009_can_delete_key_pair(self): - self.users.get_user('test1').delete_key_pair('public') - keys = self.users.get_user('test1').get_key_pairs() - self.assertFalse(filter(lambda k: k.name == 'public', keys)) - - def test_010_can_list_users(self): - users = self.users.get_users() - logging.warn(users) - self.assertTrue(filter(lambda u: u.id == 'test1', users)) - - def test_101_can_add_user_role(self): - self.assertFalse(self.users.has_role('test1', 'itsec')) - self.users.add_role('test1', 'itsec') - self.assertTrue(self.users.has_role('test1', 'itsec')) - - def test_199_can_remove_user_role(self): - self.assertTrue(self.users.has_role('test1', 'itsec')) - self.users.remove_role('test1', 'itsec') - self.assertFalse(self.users.has_role('test1', 'itsec')) - - def test_201_can_create_project(self): - project = self.users.create_project('testproj', 'test1', 'A test project', ['test1']) - self.assertTrue(filter(lambda p: p.name == 'testproj', self.users.get_projects())) - self.assertEqual(project.name, 'testproj') - self.assertEqual(project.description, 'A test project') - self.assertEqual(project.project_manager_id, 'test1') - 
self.assertTrue(project.has_member('test1')) - - def test_202_user1_is_project_member(self): - self.assertTrue(self.users.get_user('test1').is_project_member('testproj')) - - def test_203_user2_is_not_project_member(self): - self.assertFalse(self.users.get_user('test2').is_project_member('testproj')) - - def test_204_user1_is_project_manager(self): - self.assertTrue(self.users.get_user('test1').is_project_manager('testproj')) - - def test_205_user2_is_not_project_manager(self): - self.assertFalse(self.users.get_user('test2').is_project_manager('testproj')) - - def test_206_can_add_user_to_project(self): - self.users.add_to_project('test2', 'testproj') - self.assertTrue(self.users.get_project('testproj').has_member('test2')) - - def test_208_can_remove_user_from_project(self): - self.users.remove_from_project('test2', 'testproj') - self.assertFalse(self.users.get_project('testproj').has_member('test2')) - - def test_209_can_generate_x509(self): - # MUST HAVE RUN CLOUD SETUP BY NOW - self.cloud = cloud.CloudController() - self.cloud.setup() - private_key, signed_cert_string = self.users.get_project('testproj').generate_x509_cert('test1') - logging.debug(signed_cert_string) - - # Need to verify that it's signed by the right intermediate CA - full_chain = crypto.fetch_ca(project_id='testproj', chain=True) - int_cert = crypto.fetch_ca(project_id='testproj', chain=False) - cloud_cert = crypto.fetch_ca() - logging.debug("CA chain:\n\n =====\n%s\n\n=====" % full_chain) - signed_cert = X509.load_cert_string(signed_cert_string) - chain_cert = X509.load_cert_string(full_chain) - int_cert = X509.load_cert_string(int_cert) - cloud_cert = X509.load_cert_string(cloud_cert) - self.assertTrue(signed_cert.verify(chain_cert.get_pubkey())) - self.assertTrue(signed_cert.verify(int_cert.get_pubkey())) - - if not FLAGS.use_intermediate_ca: - self.assertTrue(signed_cert.verify(cloud_cert.get_pubkey())) - else: - self.assertFalse(signed_cert.verify(cloud_cert.get_pubkey())) - - def test_210_can_add_project_role(self): - project = self.users.get_project('testproj') - self.assertFalse(project.has_role('test1', 'sysadmin')) - self.users.add_role('test1', 'sysadmin') - self.assertFalse(project.has_role('test1', 'sysadmin')) - project.add_role('test1', 'sysadmin') - self.assertTrue(project.has_role('test1', 'sysadmin')) - - def test_211_can_remove_project_role(self): - project = self.users.get_project('testproj') - self.assertTrue(project.has_role('test1', 'sysadmin')) - project.remove_role('test1', 'sysadmin') - self.assertFalse(project.has_role('test1', 'sysadmin')) - self.users.remove_role('test1', 'sysadmin') - self.assertFalse(project.has_role('test1', 'sysadmin')) - - def test_212_vpn_ip_and_port_looks_valid(self): - project = self.users.get_project('testproj') - self.assert_(project.vpn_ip) - self.assert_(project.vpn_port >= FLAGS.vpn_start_port) - self.assert_(project.vpn_port <= FLAGS.vpn_end_port) - - def test_213_too_many_vpns(self): - for i in xrange(users.Vpn.num_ports_for_ip(FLAGS.vpn_ip)): - users.Vpn.create("vpnuser%s" % i) - self.assertRaises(users.NoMorePorts, users.Vpn.create, "boom") - - def test_299_can_delete_project(self): - self.users.delete_project('testproj') - self.assertFalse(filter(lambda p: p.name == 'testproj', self.users.get_projects())) - - def test_999_can_delete_users(self): - self.users.delete_user('test1') - users = self.users.get_users() - self.assertFalse(filter(lambda u: u.id == 'test1', users)) - self.users.delete_user('test2') - self.assertEqual(self.users.get_user('test2'), 
None) - - -if __name__ == "__main__": - # TODO: Implement use_fake as an option - unittest.main() diff --git a/run_tests.py b/run_tests.py index eb26459c5..f42d315e0 100644 --- a/run_tests.py +++ b/run_tests.py @@ -57,7 +57,7 @@ from nova.tests.node_unittest import * from nova.tests.objectstore_unittest import * from nova.tests.process_unittest import * from nova.tests.storage_unittest import * -from nova.tests.users_unittest import * +from nova.tests.auth_unittest import * from nova.tests.validator_unittest import * -- cgit From bc524d362391c22ece2c2b24d11239837fe5db39 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 19 Jul 2010 17:10:25 -0500 Subject: LdapDriver cleanup: docstrings and parameter ordering --- nova/auth/ldapdriver.py | 61 +++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 56 insertions(+), 5 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 49443c99a..21c87a576 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -67,6 +67,10 @@ flags.DEFINE_string('ldap_developer', class LdapDriver(object): + """Ldap Auth driver + + Defines enter and exit and therefore supports the with/as syntax. + """ def __enter__(self): """Creates the connection to LDAP""" if FLAGS.fake_users: @@ -86,43 +90,51 @@ class LdapDriver(object): return False def get_user(self, uid): + """Retrieve user by id""" attr = self.__find_object(self.__uid_to_dn(uid), '(objectclass=novaUser)') return self.__to_user(attr) def get_user_from_access_key(self, access): + """Retrieve user by access key""" query = '(accessKey=%s)' % access dn = FLAGS.ldap_user_subtree return self.__to_user(self.__find_object(dn, query)) def get_key_pair(self, uid, key_name): + """Retrieve key pair by uid and key name""" dn = 'cn=%s,%s' % (key_name, self.__uid_to_dn(uid)) attr = self.__find_object(dn, '(objectclass=novaKeyPair)') return self.__to_key_pair(uid, attr) def get_project(self, name): + """Retrieve project by name""" dn = 'cn=%s,%s' % (name, FLAGS.ldap_project_subtree) attr = self.__find_object(dn, '(objectclass=novaProject)') return self.__to_project(attr) def get_users(self): + """Retrieve list of users""" attrs = self.__find_objects(FLAGS.ldap_user_subtree, '(objectclass=novaUser)') return [self.__to_user(attr) for attr in attrs] def get_key_pairs(self, uid): + """Retrieve list of key pairs""" attrs = self.__find_objects(self.__uid_to_dn(uid), '(objectclass=novaKeyPair)') return [self.__to_key_pair(uid, attr) for attr in attrs] def get_projects(self): + """Retrieve list of projects""" attrs = self.__find_objects(FLAGS.ldap_project_subtree, '(objectclass=novaProject)') return [self.__to_project(attr) for attr in attrs] def create_user(self, name, access_key, secret_key, is_admin): + """Create a user""" if self.__user_exists(name): raise exception.Duplicate("LDAP user %s already exists" % name) attr = [ @@ -142,7 +154,7 @@ class LdapDriver(object): return self.__to_user(dict(attr)) def create_key_pair(self, uid, key_name, public_key, fingerprint): - """create's a public key in the directory underneath the user""" + """Create a key pair""" # TODO(vish): possibly refactor this to store keys in their own ou # and put dn reference in the user object attr = [ @@ -158,6 +170,7 @@ class LdapDriver(object): def create_project(self, name, manager_uid, description=None, member_uids=None): + """Create a project""" if self.__project_exists(name): raise exception.Duplicate("Project can't be created because " "project %s already exists" % name) @@ -189,22 +202,31 @@ 
class LdapDriver(object): return self.__to_project(dict(attr)) def add_to_project(self, uid, project_id): + """Add user to project""" dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree) return self.__add_to_group(uid, dn) def remove_from_project(self, uid, project_id): + """Remove user from project""" dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree) return self.__remove_from_group(uid, dn) def is_in_project(self, uid, project_id): + """Check if user is in project""" dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree) return self.__is_in_group(uid, dn) def has_role(self, uid, role, project_id=None): + """Check if user has role + + If project is specified, it checks for local role, otherwise it + checks for global role + """ role_dn = self.__role_to_dn(role, project_id) return self.__is_in_group(uid, role_dn) def add_role(self, uid, role, project_id=None): + """Add role for user (or user and project)""" role_dn = self.__role_to_dn(role, project_id) if not self.__group_exists(role_dn): # create the role if it doesn't exist @@ -214,10 +236,12 @@ class LdapDriver(object): return self.__add_to_group(uid, role_dn) def remove_role(self, uid, role, project_id=None): + """Remove role for user (or user and project)""" role_dn = self.__role_to_dn(role, project_id) return self.__remove_from_group(uid, role_dn) def delete_user(self, uid): + """Delete a user""" if not self.__user_exists(uid): raise exception.NotFound("User %s doesn't exist" % uid) self.__delete_key_pairs(uid) @@ -226,6 +250,7 @@ class LdapDriver(object): FLAGS.ldap_user_subtree)) def delete_key_pair(self, uid, key_name): + """Delete a key pair""" if not self.__key_pair_exists(uid, key_name): raise exception.NotFound("Key Pair %s doesn't exist for user %s" % (key_name, uid)) @@ -233,26 +258,33 @@ class LdapDriver(object): FLAGS.ldap_user_subtree)) def delete_project(self, name): + """Delete a project""" project_dn = 'cn=%s,%s' % (name, FLAGS.ldap_project_subtree) self.__delete_roles(project_dn) self.__delete_group(project_dn) def __user_exists(self, name): + """Check if user exists""" return self.get_user(name) != None def __key_pair_exists(self, uid, key_name): + """Check if key pair exists""" + return self.get_user(uid) != None return self.get_key_pair(uid, key_name) != None def __project_exists(self, name): + """Check if project exists""" return self.get_project(name) != None def __find_object(self, dn, query = None): + """Find an object by dn and query""" objects = self.__find_objects(dn, query) if len(objects) == 0: return None return objects[0] def __find_dns(self, dn, query=None): + """Find dns by query""" try: res = self.conn.search_s(dn, ldap.SCOPE_SUBTREE, query) except self.NO_SUCH_OBJECT: @@ -261,6 +293,7 @@ class LdapDriver(object): return [dn for dn, attributes in res] def __find_objects(self, dn, query = None): + """Find objects by query""" try: res = self.conn.search_s(dn, ldap.SCOPE_SUBTREE, query) except self.NO_SUCH_OBJECT: @@ -269,25 +302,30 @@ class LdapDriver(object): return [attributes for dn, attributes in res] def __find_role_dns(self, tree): + """Find dns of role objects in given tree""" return self.__find_dns(tree, '(&(objectclass=groupOfNames)(!(objectclass=novaProject)))') def __find_group_dns_with_member(self, tree, uid): + """Find dns of group objects in a given tree that contain member""" dns = self.__find_dns(tree, '(&(objectclass=groupOfNames)(member=%s))' % self.__uid_to_dn(uid)) return dns def __group_exists(self, dn): + """Check if group exists""" return self.__find_object(dn, 
'(objectclass=groupOfNames)') != None def __delete_key_pairs(self, uid): + """Delete all key pairs for user""" keys = self.get_key_pairs(uid) if keys != None: for key in keys: self.delete_key_pair(uid, key.name) def __role_to_dn(self, role, project_id=None): + """Convert role to corresponding dn""" if project_id == None: return FLAGS.__getitem__("ldap_%s" % role).value else: @@ -297,6 +335,7 @@ class LdapDriver(object): def __create_group(self, group_dn, name, uid, description, member_uids = None): + """Create a group""" if self.__group_exists(group_dn): raise exception.Duplicate("Group can't be created because " "group %s already exists" % name) @@ -319,6 +358,7 @@ class LdapDriver(object): self.conn.add_s(group_dn, attr) def __is_in_group(self, uid, group_dn): + """Check if user is in group""" if not self.__user_exists(uid): raise exception.NotFound("User %s can't be searched in group " "becuase the user doesn't exist" % (uid,)) @@ -329,6 +369,7 @@ class LdapDriver(object): return res != None def __add_to_group(self, uid, group_dn): + """Add user to group""" if not self.__user_exists(uid): raise exception.NotFound("User %s can't be added to the group " "becuase the user doesn't exist" % (uid,)) @@ -344,6 +385,7 @@ class LdapDriver(object): self.conn.modify_s(group_dn, attr) def __remove_from_group(self, uid, group_dn): + """Remove user from group""" if not self.__group_exists(group_dn): raise exception.NotFound("The group at dn %s doesn't exist" % (group_dn,)) @@ -353,9 +395,10 @@ class LdapDriver(object): if not self.__is_in_group(uid, group_dn): raise exception.NotFound("User %s is not a member of the group" % (uid,)) - self.__safe_remove_from_group(group_dn, uid) + self.__safe_remove_from_group(uid, group_dn) - def __safe_remove_from_group(self, group_dn, uid): + def __safe_remove_from_group(self, uid, group_dn): + """Remove user from group, deleting group if user is last member""" # FIXME(vish): what if deleted user is a project manager? 
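# ---------------------------------------------------------------------------
# Illustrative aside (not a line of the patch above or below): a standalone,
# hedged sketch of the "remove the member, drop the group once it empties"
# pattern that __safe_remove_from_group implements just below.  It uses plain
# python-ldap; the connection details and example DNs are assumptions for
# illustration only, not Nova's real configuration.
import ldap


def remove_member(conn, group_dn, member_dn):
    """Remove member_dn from group_dn, deleting the group if it empties.

    groupOfNames requires at least one member, so stripping the last member
    raises OBJECT_CLASS_VIOLATION; treat that as "delete the whole group".
    """
    try:
        conn.modify_s(group_dn, [(ldap.MOD_DELETE, 'member', member_dn)])
    except ldap.OBJECT_CLASS_VIOLATION:
        # Last member gone: the schema forbids an empty group, so remove it.
        conn.delete_s(group_dn)


# Example usage (assumes a reachable LDAP server and existing entries):
#   conn = ldap.initialize('ldap://localhost')
#   conn.simple_bind_s('cn=Manager,dc=example,dc=com', 'secret')
#   remove_member(conn, 'cn=devs,ou=Groups,dc=example,dc=com',
#                 'uid=alice,ou=Users,dc=example,dc=com')
# ---------------------------------------------------------------------------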
attr = [(ldap.MOD_DELETE, 'member', self.__uid_to_dn(uid))] try: @@ -366,6 +409,7 @@ class LdapDriver(object): self.__delete_group(group_dn) def __remove_from_all(self, uid): + """Remove user from all roles and projects""" if not self.__user_exists(uid): raise exception.NotFound("User %s can't be removed from all " "because the user doesn't exist" % (uid,)) @@ -373,22 +417,25 @@ class LdapDriver(object): role_dns = self.__find_group_dns_with_member( FLAGS.role_project_subtree, uid) for role_dn in role_dns: - self.__safe_remove_from_group(role_dn, uid) + self.__safe_remove_from_group(uid, role_dn) project_dns = self.__find_group_dns_with_member( FLAGS.ldap_project_subtree, uid) for project_dn in project_dns: - self.__safe_remove_from_group(project_dn, uid) + self.__safe_remove_from_group(uid, role_dn) def __delete_group(self, group_dn): + """Delete Group""" if not self.__group_exists(group_dn): raise exception.NotFound("Group at dn %s doesn't exist" % group_dn) self.conn.delete_s(group_dn) def __delete_roles(self, project_dn): + """Delete all roles for project""" for role_dn in self.__find_role_dns(project_dn): self.__delete_group(role_dn) def __to_user(self, attr): + """Convert ldap attributes to User object""" if attr == None: return None return manager.User( @@ -400,6 +447,7 @@ class LdapDriver(object): ) def __to_key_pair(self, owner, attr): + """Convert ldap attributes to KeyPair object""" if attr == None: return None return manager.KeyPair( @@ -410,6 +458,7 @@ class LdapDriver(object): ) def __to_project(self, attr): + """Convert ldap attributes to Project object""" if attr == None: return None member_dns = attr.get('member', []) @@ -421,8 +470,10 @@ class LdapDriver(object): ) def __dn_to_uid(self, dn): + """Convert user dn to uid""" return dn.split(',')[0].split('=')[1] def __uid_to_dn(self, dn): + """Convert uid to dn""" return 'uid=%s,%s' % (dn, FLAGS.ldap_user_subtree) -- cgit From 57ff625ec300bcc10b701b57aa75f989fbaf1679 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 19 Jul 2010 20:20:41 -0500 Subject: More docstrings, don't autocreate projects --- nova/auth/ldapdriver.py | 8 +++-- nova/auth/manager.py | 96 ++++++++++++++++++++++++------------------------- 2 files changed, 52 insertions(+), 52 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 21c87a576..89c4defda 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -108,9 +108,9 @@ class LdapDriver(object): attr = self.__find_object(dn, '(objectclass=novaKeyPair)') return self.__to_key_pair(uid, attr) - def get_project(self, name): - """Retrieve project by name""" - dn = 'cn=%s,%s' % (name, + def get_project(self, pid): + """Retrieve project by id""" + dn = 'cn=%s,%s' % (pid, FLAGS.ldap_project_subtree) attr = self.__find_object(dn, '(objectclass=novaProject)') return self.__to_project(attr) @@ -452,6 +452,7 @@ class LdapDriver(object): return None return manager.KeyPair( id = attr['cn'][0], + name = attr['cn'][0], owner_id = owner, public_key = attr['sshPublicKey'][0], fingerprint = attr['keyFingerprint'][0], @@ -464,6 +465,7 @@ class LdapDriver(object): member_dns = attr.get('member', []) return manager.Project( id = attr['cn'][0], + name = attr['cn'][0], project_manager_id = self.__dn_to_uid(attr['projectManager'][0]), description = attr.get('description', [None])[0], member_ids = [self.__dn_to_uid(x) for x in member_dns] diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 0b5039684..87cfd9a91 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py 
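# ---------------------------------------------------------------------------
# Illustrative aside (not code from this patch): a minimal sketch of the
# driver/manager shape the manager.py hunks below rely on.  LdapDriver
# defines __enter__/__exit__, so AuthManager opens a short-lived connection
# per call with "with driver_class() as drv".  The class and data below are
# simplified stand-ins, not Nova's actual implementation.
class FakeDriver(object):
    """Minimal context-manager driver: connect on enter, release on exit."""

    def __enter__(self):
        # Stand-in for ldap.initialize() + simple_bind_s()
        self.conn = {'users': {'test1': {'id': 'test1', 'admin': False}}}
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.conn = None
        return False  # propagate any exception raised inside the with block

    def get_user(self, uid):
        return self.conn['users'].get(uid)


class Manager(object):
    driver_class = FakeDriver

    def get_user(self, uid):
        # One connection per operation, released as soon as the block exits.
        with self.driver_class() as drv:
            return drv.get_user(uid)


# Usage:
#   user = Manager().get_user('test1')   # -> {'id': 'test1', 'admin': False}
# ---------------------------------------------------------------------------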
@@ -159,26 +159,27 @@ class KeyPair(AuthBase): Even though this object is named KeyPair, only the public key and fingerprint is stored. The user's private key is not saved. """ - def __init__(self, id, owner_id, public_key, fingerprint): + def __init__(self, id, name, owner_id, public_key, fingerprint): self.id = id - self.name = id + self.name = name self.owner_id = owner_id self.public_key = public_key self.fingerprint = fingerprint def __repr__(self): - return "KeyPair('%s', '%s', '%s', '%s')" % (self.id, - self.owner_id, - self.public_key, - self.fingerprint) + return "KeyPair('%s', '%s', '%s', '%s', '%s')" % (self.id, + self.name, + self.owner_id, + self.public_key, + self.fingerprint) class Project(AuthBase): """Represents a Project returned from the datastore""" - def __init__(self, id, project_manager_id, description, member_ids): - self.project_manager_id = project_manager_id + def __init__(self, id, name, project_manager_id, description, member_ids): self.id = id - self.name = id + self.name = name + self.project_manager_id = project_manager_id self.description = description self.member_ids = member_ids @@ -205,10 +206,11 @@ class Project(AuthBase): return AuthManager().get_credentials(user, self) def __repr__(self): - return "Project('%s', '%s', '%s', %s)" % (self.id, - self.project_manager_id, - self.description, - self.member_ids) + return "Project('%s', '%s', '%s', '%s', %s)" % (self.id, + self.name, + self.project_manager_id, + self.description, + self.member_ids) class NoMorePorts(exception.Error): @@ -223,10 +225,16 @@ class Vpn(datastore.BasicModel): @property def identifier(self): + """Identifier used for key in redis""" return self.project_id @classmethod def create(cls, project_id): + """Creates a vpn for project + + This method finds a free ip and port and stores the associated + values in the datastore. + """ # TODO(vish): get list of vpn ips from redis port = cls.find_free_port_for_ip(FLAGS.vpn_ip) vpn = cls(project_id) @@ -239,6 +247,7 @@ class Vpn(datastore.BasicModel): @classmethod def find_free_port_for_ip(cls, ip): + """Finds a free port for a given ip from the redis set""" # TODO(vish): these redis commands should be generalized and # placed into a base class. Conceptually, it is # similar to an association, but we are just @@ -260,21 +269,26 @@ class Vpn(datastore.BasicModel): @classmethod def num_ports_for_ip(cls, ip): + """Calculates the number of free ports for a given ip""" return datastore.Redis.instance().scard('ip:%s:ports' % ip) @property def ip(self): + """The ip assigned to the project""" return self['ip'] @property def port(self): + """The port assigned to the project""" return int(self['port']) def save(self): + """Saves the association to the given ip""" self.associate_with('ip', self.ip) super(Vpn, self).save() def destroy(self): + """Cleans up datastore and adds port back to pool""" self.unassociate_with('ip', self.ip) datastore.Redis.instance().sadd('ip:%s:ports' % self.ip, self.port) super(Vpn, self).destroy() @@ -345,19 +359,22 @@ class AuthManager(object): @return: User and project that the request represents. 
""" # TODO(vish): check for valid timestamp - (access_key, sep, project_name) = access.partition(':') + (access_key, sep, project_id) = access.partition(':') user = self.get_user_from_access_key(access_key) if user == None: raise exception.NotFound('No user found for access key %s' % access_key) - if project_name is '': - project_name = user.name - project = self.get_project(project_name) + # NOTE(vish): if we stop using project name as id we need better + # logic to find a default project for user + if project_id is '': + project_id = user.name + + project = self.get_project(project_id) if project == None: raise exception.NotFound('No project called %s could be found' % - project_name) + project_id) if not self.is_admin(user) and not self.is_project_member(user, project): raise exception.NotFound('User %s is not a member of project %s' % @@ -521,9 +538,9 @@ class AuthManager(object): Vpn.create(name) with self.driver_class() as drv: return drv.create_project(name, - User.safe_id(manager_user), - description, - member_users) + User.safe_id(manager_user), + description, + member_users) def get_projects(self): """Retrieves list of all projects""" @@ -531,10 +548,10 @@ class AuthManager(object): return drv.get_projects() - def get_project(self, project): + def get_project(self, pid): """Get project object by id""" with self.driver_class() as drv: - return drv.get_project(Project.safe_id(project)) + return drv.get_project(pid) def add_to_project(self, user, project): """Add user to project""" @@ -580,13 +597,11 @@ class AuthManager(object): with self.driver_class() as drv: return drv.get_users() - def create_user(self, user, access=None, secret=None, - admin=False, create_project=True): + def create_user(self, name, access=None, secret=None, admin=False): """Creates a user - @type user: str - @param name: Name of the user to create. The name will also be - used as the user id. + @type name: str + @param name: Name of the user to create. @type access: str @param access: Access Key (defaults to a random uuid) @@ -607,29 +622,12 @@ class AuthManager(object): if access == None: access = str(uuid.uuid4()) if secret == None: secret = str(uuid.uuid4()) with self.driver_class() as drv: - user = User.safe_id(user) - result = drv.create_user(user, access, secret, admin) - if create_project: - # NOTE(vish): if the project creation fails, we delete - # the user and return an exception - try: - drv.create_project(user, user, user) - except Exception: - with self.driver_class() as drv: - drv.delete_user(user) - raise - return result - - def delete_user(self, user, delete_project=True): + return drv.create_user(name, access, secret, admin) + + def delete_user(self, user): """Deletes a user""" with self.driver_class() as drv: - user = User.safe_id(user) - if delete_project: - try: - drv.delete_project(user) - except exception.NotFound: - pass - drv.delete_user(user) + drv.delete_user(User.safe_id(user)) def generate_key_pair(self, user, key_name): """Generates a key pair for a user -- cgit From 8b0c70cce2dd914f1ab4caca8883d616c7c669d6 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Mon, 19 Jul 2010 21:39:33 -0700 Subject: Map exceptions to 404 / 403 codes, as was done before the move to twisted. However, I don't think this is the right way to do this in Twisted. 
For example, exceptions thrown after the render method returns will not be mapped --- nova/objectstore/handler.py | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py index c670ee02f..c3c4486bf 100644 --- a/nova/objectstore/handler.py +++ b/nova/objectstore/handler.py @@ -116,7 +116,21 @@ def get_context(request): logging.debug("Authentication Failure: %s" % ex) raise exception.NotAuthorized -class S3(Resource): +class ErrorHandlingResource(Resource): + """Maps exceptions to 404 / 401 codes. Won't work for exceptions thrown after NOT_DONE_YET is returned.""" + # TODO: This needs to be plugged in to the right place in twisted... + # This doesn't look like it's the right place (consider exceptions in getChild; or after NOT_DONE_YET is returned + def render(self, request): + try: + return Resource.render(self, request) + except exception.NotFound: + request.setResponseCode(404) + return '' + except exception.NotAuthorized: + request.setResponseCode(403) + return '' + +class S3(ErrorHandlingResource): """Implementation of an S3-like storage server based on local files.""" def getChild(self, name, request): request.context = get_context(request) @@ -136,7 +150,7 @@ class S3(Resource): }}) return server.NOT_DONE_YET -class BucketResource(Resource): +class BucketResource(ErrorHandlingResource): def __init__(self, name): Resource.__init__(self) self.name = name @@ -186,7 +200,7 @@ class BucketResource(Resource): return '' -class ObjectResource(Resource): +class ObjectResource(ErrorHandlingResource): def __init__(self, bucket, name): Resource.__init__(self) self.bucket = bucket @@ -227,7 +241,7 @@ class ObjectResource(Resource): request.setResponseCode(204) return '' -class ImageResource(Resource): +class ImageResource(ErrorHandlingResource): isLeaf = True def getChild(self, name, request): -- cgit From 0f9be756f44e831545bf5c31606e0419b61d6ddd Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 20 Jul 2010 09:15:36 -0500 Subject: Test cleanup, make driver return dictionaries and construct objects in manager --- nova/auth/ldapdriver.py | 45 +++++++++--------- nova/auth/manager.py | 105 +++++++++++++++++++++++++---------------- nova/tests/api_unittest.py | 23 ++++----- nova/tests/auth_unittest.py | 95 +++++++++++++++++++------------------ nova/tests/network_unittest.py | 79 ++++++++++++++++--------------- 5 files changed, 187 insertions(+), 160 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 89c4defda..d330ae729 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -28,7 +28,6 @@ import logging from nova import exception from nova import flags -from nova.auth import manager try: import ldap @@ -322,7 +321,7 @@ class LdapDriver(object): keys = self.get_key_pairs(uid) if keys != None: for key in keys: - self.delete_key_pair(uid, key.name) + self.delete_key_pair(uid, key['name']) def __role_to_dn(self, role, project_id=None): """Convert role to corresponding dn""" @@ -438,38 +437,38 @@ class LdapDriver(object): """Convert ldap attributes to User object""" if attr == None: return None - return manager.User( - id = attr['uid'][0], - name = attr['cn'][0], - access = attr['accessKey'][0], - secret = attr['secretKey'][0], - admin = (attr['isAdmin'][0] == 'TRUE') - ) + return { + 'id': attr['uid'][0], + 'name': attr['cn'][0], + 'access': attr['accessKey'][0], + 'secret': attr['secretKey'][0], + 'admin': (attr['isAdmin'][0] == 'TRUE') + } def 
__to_key_pair(self, owner, attr): """Convert ldap attributes to KeyPair object""" if attr == None: return None - return manager.KeyPair( - id = attr['cn'][0], - name = attr['cn'][0], - owner_id = owner, - public_key = attr['sshPublicKey'][0], - fingerprint = attr['keyFingerprint'][0], - ) + return { + 'id': attr['cn'][0], + 'name': attr['cn'][0], + 'owner_id': owner, + 'public_key': attr['sshPublicKey'][0], + 'fingerprint': attr['keyFingerprint'][0], + } def __to_project(self, attr): """Convert ldap attributes to Project object""" if attr == None: return None member_dns = attr.get('member', []) - return manager.Project( - id = attr['cn'][0], - name = attr['cn'][0], - project_manager_id = self.__dn_to_uid(attr['projectManager'][0]), - description = attr.get('description', [None])[0], - member_ids = [self.__dn_to_uid(x) for x in member_dns] - ) + return { + 'id': attr['cn'][0], + 'name': attr['cn'][0], + 'project_manager_id': self.__dn_to_uid(attr['projectManager'][0]), + 'description': attr.get('description', [None])[0], + 'member_ids': [self.__dn_to_uid(x) for x in member_dns] + } def __dn_to_uid(self, dn): """Convert user dn to uid""" diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 87cfd9a91..2facffe51 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -33,9 +33,9 @@ from nova import datastore from nova import exception from nova import flags from nova import objectstore # for flags -from nova import signer from nova import utils from nova.auth import ldapdriver +from nova.auth import signer FLAGS = flags.FLAGS # NOTE(vish): a user with one of these roles will be a superuser and @@ -187,6 +187,14 @@ class Project(AuthBase): def project_manager(self): return AuthManager().get_user(self.project_manager_id) + @property + def vpn_ip(self): + return AuthManager().get_project_vpn_ip(self) + + @property + def vpn_port(self): + return AuthManager().get_project_vpn_port(self) + def has_manager(self, user): return AuthManager().is_project_manager(user, self) @@ -314,16 +322,6 @@ class AuthManager(object): def __init__(self, *args, **kwargs): self.driver_class = kwargs.get('driver_class', ldapdriver.LdapDriver) - if FLAGS.fake_tests: - try: - self.create_user('fake', 'fake', 'fake') - except: pass - try: - self.create_user('user', 'user', 'user') - except: pass - try: - self.create_user('admin', 'admin', 'admin', True) - except: pass def authenticate(self, access, signature, params, verb='GET', server_string='127.0.0.1:8773', path='/', @@ -508,6 +506,21 @@ class AuthManager(object): with self.driver_class() as drv: drv.remove_role(User.safe_id(user), role, Project.safe_id(project)) + def get_project(self, pid): + """Get project object by id""" + with self.driver_class() as drv: + project_dict = drv.get_project(pid) + if project_dict: + return Project(**project_dict) + + def get_projects(self): + """Retrieves list of all projects""" + with self.driver_class() as drv: + project_list = drv.get_projects() + if not project_list: + return [] + return [Project(**project_dict) for project_dict in project_list] + def create_project(self, name, manager_user, description=None, member_users=None): """Create a project @@ -532,26 +545,14 @@ class AuthManager(object): """ if member_users: member_users = [User.safe_id(u) for u in member_users] - # NOTE(vish): try to associate a vpn ip and port first because - # if it throws an exception, we save having to - # create and destroy a project - Vpn.create(name) with self.driver_class() as drv: - return drv.create_project(name, - 
User.safe_id(manager_user), - description, - member_users) - - def get_projects(self): - """Retrieves list of all projects""" - with self.driver_class() as drv: - return drv.get_projects() - - - def get_project(self, pid): - """Get project object by id""" - with self.driver_class() as drv: - return drv.get_project(pid) + project_dict = drv.create_project(name, + User.safe_id(manager_user), + description, + member_users) + if project_dict: + Vpn.create(project_dict['id']) + return Project(**project_dict) def add_to_project(self, user, project): """Add user to project""" @@ -577,6 +578,12 @@ class AuthManager(object): return drv.remove_from_project(User.safe_id(user), Project.safe_id(project)) + def get_project_vpn_ip(self, project): + return Vpn(Project.safe_id(project)).ip + + def get_project_vpn_port(self, project): + return Vpn(Project.safe_id(project)).port + def delete_project(self, project): """Deletes a project""" with self.driver_class() as drv: @@ -585,17 +592,24 @@ class AuthManager(object): def get_user(self, uid): """Retrieves a user by id""" with self.driver_class() as drv: - return drv.get_user(uid) + user_dict = drv.get_user(uid) + if user_dict: + return User(**user_dict) def get_user_from_access_key(self, access_key): """Retrieves a user by access key""" with self.driver_class() as drv: - return drv.get_user_from_access_key(access_key) + user_dict = drv.get_user_from_access_key(access_key) + if user_dict: + return User(**user_dict) def get_users(self): """Retrieves a list of all users""" with self.driver_class() as drv: - return drv.get_users() + user_list = drv.get_users() + if not user_list: + return [] + return [User(**user_dict) for user_dict in user_list] def create_user(self, name, access=None, secret=None, admin=False): """Creates a user @@ -622,7 +636,9 @@ class AuthManager(object): if access == None: access = str(uuid.uuid4()) if secret == None: secret = str(uuid.uuid4()) with self.driver_class() as drv: - return drv.create_user(name, access, secret, admin) + user_dict = drv.create_user(name, access, secret, admin) + if user_dict: + return User(**user_dict) def delete_user(self, user): """Deletes a user""" @@ -660,18 +676,27 @@ class AuthManager(object): def create_key_pair(self, user, key_name, public_key, fingerprint): """Creates a key pair for user""" with self.driver_class() as drv: - return drv.create_key_pair(User.safe_id(user), key_name, - public_key, fingerprint) + kp_dict = drv.create_key_pair(User.safe_id(user), + key_name, + public_key, + fingerprint) + if kp_dict: + return KeyPair(**kp_dict) def get_key_pair(self, user, key_name): """Retrieves a key pair for user""" with self.driver_class() as drv: - return drv.get_key_pair(User.safe_id(user), key_name) + kp_dict = drv.get_key_pair(User.safe_id(user), key_name) + if kp_dict: + return KeyPair(**kp_dict) def get_key_pairs(self, user): """Retrieves all key pairs for user""" with self.driver_class() as drv: - return drv.get_key_pairs(User.safe_id(user)) + kp_list = drv.get_key_pairs(User.safe_id(user)) + if not kp_list: + return [] + return [KeyPair(**kp_dict) for kp_dict in kp_list] def delete_key_pair(self, user, key_name): """Deletes a key pair for user""" @@ -686,7 +711,7 @@ class AuthManager(object): project = user.id pid = Project.safe_id(project) rc = self.__generate_rc(user.access, user.secret, pid) - private_key, signed_cert = self.__generate_x509_cert(user.id, pid) + private_key, signed_cert = self._generate_x509_cert(user.id, pid) vpn = Vpn(pid) configfile = open(FLAGS.vpn_client_template,"r") @@ 
-726,7 +751,7 @@ class AuthManager(object): } return rc - def __generate_x509_cert(self, uid, pid): + def _generate_x509_cert(self, uid, pid): """Generate x509 cert for user""" (private_key, csr) = crypto.generate_x509_cert( self.__cert_subject(uid)) diff --git a/nova/tests/api_unittest.py b/nova/tests/api_unittest.py index 5c26192bd..4477a1fe6 100644 --- a/nova/tests/api_unittest.py +++ b/nova/tests/api_unittest.py @@ -150,7 +150,7 @@ class ApiEc2TestCase(test.BaseTestCase): def setUp(self): super(ApiEc2TestCase, self).setUp() - self.users = manager.AuthManager() + self.manager = manager.AuthManager() self.cloud = cloud.CloudController() self.host = '127.0.0.1' @@ -175,25 +175,22 @@ class ApiEc2TestCase(test.BaseTestCase): def test_describe_instances(self): self.expect_http() self.mox.ReplayAll() - try: - self.users.create_user('fake', 'fake', 'fake') - except Exception, _err: - pass # User may already exist + user = self.manager.create_user('fake', 'fake', 'fake') + project = self.manager.create_project('fake', 'fake', 'fake') self.assertEqual(self.ec2.get_all_instances(), []) - self.users.delete_user('fake') + self.manager.delete_project(project) + self.manager.delete_user(user) def test_get_all_key_pairs(self): self.expect_http() self.mox.ReplayAll() keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd") for x in range(random.randint(4, 8))) - try: - self.users.create_user('fake', 'fake', 'fake') - except Exception, _err: - pass # User may already exist - self.users.generate_key_pair('fake', keyname) + user = self.manager.create_user('fake', 'fake', 'fake') + project = self.manager.create_project('fake', 'fake', 'fake') + self.manager.generate_key_pair(user.id, keyname) rv = self.ec2.get_all_key_pairs() self.assertTrue(filter(lambda k: k.name == keyname, rv)) - self.users.delete_user('fake') - + self.manager.delete_project(project) + self.manager.delete_user(user) diff --git a/nova/tests/auth_unittest.py b/nova/tests/auth_unittest.py index 000f6bf17..0cd377b70 100644 --- a/nova/tests/auth_unittest.py +++ b/nova/tests/auth_unittest.py @@ -37,29 +37,29 @@ class AuthTestCase(test.BaseTestCase): super(AuthTestCase, self).setUp() self.flags(fake_libvirt=True, fake_storage=True) - self.users = manager.AuthManager() + self.manager = manager.AuthManager() def test_001_can_create_users(self): - self.users.create_user('test1', 'access', 'secret') - self.users.create_user('test2') + self.manager.create_user('test1', 'access', 'secret') + self.manager.create_user('test2') def test_002_can_get_user(self): - user = self.users.get_user('test1') + user = self.manager.get_user('test1') def test_003_can_retreive_properties(self): - user = self.users.get_user('test1') + user = self.manager.get_user('test1') self.assertEqual('test1', user.id) self.assertEqual('access', user.access) self.assertEqual('secret', user.secret) def test_004_signature_is_valid(self): - #self.assertTrue(self.users.authenticate( **boto.generate_url ... ? ? ? )) + #self.assertTrue(self.manager.authenticate( **boto.generate_url ... ? ? ? 
)) pass #raise NotImplementedError def test_005_can_get_credentials(self): return - credentials = self.users.get_user('test1').get_credentials() + credentials = self.manager.get_user('test1').get_credentials() self.assertEqual(credentials, 'export EC2_ACCESS_KEY="access"\n' + 'export EC2_SECRET_KEY="secret"\n' + @@ -68,14 +68,14 @@ class AuthTestCase(test.BaseTestCase): 'export EC2_USER_ID="test1"\n') def test_006_test_key_storage(self): - user = self.users.get_user('test1') + user = self.manager.get_user('test1') user.create_key_pair('public', 'key', 'fingerprint') key = user.get_key_pair('public') self.assertEqual('key', key.public_key) self.assertEqual('fingerprint', key.fingerprint) def test_007_test_key_generation(self): - user = self.users.get_user('test1') + user = self.manager.get_user('test1') private_key, fingerprint = user.generate_key_pair('public2') key = RSA.load_key_string(private_key, callback=lambda: None) bio = BIO.MemoryBuffer() @@ -87,71 +87,71 @@ class AuthTestCase(test.BaseTestCase): converted.split(" ")[1].strip()) def test_008_can_list_key_pairs(self): - keys = self.users.get_user('test1').get_key_pairs() + keys = self.manager.get_user('test1').get_key_pairs() self.assertTrue(filter(lambda k: k.name == 'public', keys)) self.assertTrue(filter(lambda k: k.name == 'public2', keys)) def test_009_can_delete_key_pair(self): - self.users.get_user('test1').delete_key_pair('public') - keys = self.users.get_user('test1').get_key_pairs() + self.manager.get_user('test1').delete_key_pair('public') + keys = self.manager.get_user('test1').get_key_pairs() self.assertFalse(filter(lambda k: k.name == 'public', keys)) def test_010_can_list_users(self): - users = self.users.get_users() + users = self.manager.get_users() logging.warn(users) self.assertTrue(filter(lambda u: u.id == 'test1', users)) def test_101_can_add_user_role(self): - self.assertFalse(self.users.has_role('test1', 'itsec')) - self.users.add_role('test1', 'itsec') - self.assertTrue(self.users.has_role('test1', 'itsec')) + self.assertFalse(self.manager.has_role('test1', 'itsec')) + self.manager.add_role('test1', 'itsec') + self.assertTrue(self.manager.has_role('test1', 'itsec')) def test_199_can_remove_user_role(self): - self.assertTrue(self.users.has_role('test1', 'itsec')) - self.users.remove_role('test1', 'itsec') - self.assertFalse(self.users.has_role('test1', 'itsec')) + self.assertTrue(self.manager.has_role('test1', 'itsec')) + self.manager.remove_role('test1', 'itsec') + self.assertFalse(self.manager.has_role('test1', 'itsec')) def test_201_can_create_project(self): - project = self.users.create_project('testproj', 'test1', 'A test project', ['test1']) - self.assertTrue(filter(lambda p: p.name == 'testproj', self.users.get_projects())) + project = self.manager.create_project('testproj', 'test1', 'A test project', ['test1']) + self.assertTrue(filter(lambda p: p.name == 'testproj', self.manager.get_projects())) self.assertEqual(project.name, 'testproj') self.assertEqual(project.description, 'A test project') self.assertEqual(project.project_manager_id, 'test1') self.assertTrue(project.has_member('test1')) def test_202_user1_is_project_member(self): - self.assertTrue(self.users.get_user('test1').is_project_member('testproj')) + self.assertTrue(self.manager.get_user('test1').is_project_member('testproj')) def test_203_user2_is_not_project_member(self): - self.assertFalse(self.users.get_user('test2').is_project_member('testproj')) + self.assertFalse(self.manager.get_user('test2').is_project_member('testproj')) def 
test_204_user1_is_project_manager(self): - self.assertTrue(self.users.get_user('test1').is_project_manager('testproj')) + self.assertTrue(self.manager.get_user('test1').is_project_manager('testproj')) def test_205_user2_is_not_project_manager(self): - self.assertFalse(self.users.get_user('test2').is_project_manager('testproj')) + self.assertFalse(self.manager.get_user('test2').is_project_manager('testproj')) def test_206_can_add_user_to_project(self): - self.users.add_to_project('test2', 'testproj') - self.assertTrue(self.users.get_project('testproj').has_member('test2')) + self.manager.add_to_project('test2', 'testproj') + self.assertTrue(self.manager.get_project('testproj').has_member('test2')) def test_208_can_remove_user_from_project(self): - self.users.remove_from_project('test2', 'testproj') - self.assertFalse(self.users.get_project('testproj').has_member('test2')) + self.manager.remove_from_project('test2', 'testproj') + self.assertFalse(self.manager.get_project('testproj').has_member('test2')) def test_209_can_generate_x509(self): # MUST HAVE RUN CLOUD SETUP BY NOW self.cloud = cloud.CloudController() self.cloud.setup() - private_key, signed_cert_string = self.users.get_project('testproj').generate_x509_cert('test1') - logging.debug(signed_cert_string) + _key, cert_str = self.manager._generate_x509_cert('test1', 'testproj') + logging.debug(cert_str) # Need to verify that it's signed by the right intermediate CA full_chain = crypto.fetch_ca(project_id='testproj', chain=True) int_cert = crypto.fetch_ca(project_id='testproj', chain=False) cloud_cert = crypto.fetch_ca() logging.debug("CA chain:\n\n =====\n%s\n\n=====" % full_chain) - signed_cert = X509.load_cert_string(signed_cert_string) + signed_cert = X509.load_cert_string(cert_str) chain_cert = X509.load_cert_string(full_chain) int_cert = X509.load_cert_string(int_cert) cloud_cert = X509.load_cert_string(cloud_cert) @@ -164,42 +164,45 @@ class AuthTestCase(test.BaseTestCase): self.assertFalse(signed_cert.verify(cloud_cert.get_pubkey())) def test_210_can_add_project_role(self): - project = self.users.get_project('testproj') + project = self.manager.get_project('testproj') self.assertFalse(project.has_role('test1', 'sysadmin')) - self.users.add_role('test1', 'sysadmin') + self.manager.add_role('test1', 'sysadmin') self.assertFalse(project.has_role('test1', 'sysadmin')) project.add_role('test1', 'sysadmin') self.assertTrue(project.has_role('test1', 'sysadmin')) def test_211_can_remove_project_role(self): - project = self.users.get_project('testproj') + project = self.manager.get_project('testproj') self.assertTrue(project.has_role('test1', 'sysadmin')) project.remove_role('test1', 'sysadmin') self.assertFalse(project.has_role('test1', 'sysadmin')) - self.users.remove_role('test1', 'sysadmin') + self.manager.remove_role('test1', 'sysadmin') self.assertFalse(project.has_role('test1', 'sysadmin')) def test_212_vpn_ip_and_port_looks_valid(self): - project = self.users.get_project('testproj') + project = self.manager.get_project('testproj') self.assert_(project.vpn_ip) self.assert_(project.vpn_port >= FLAGS.vpn_start_port) self.assert_(project.vpn_port <= FLAGS.vpn_end_port) def test_213_too_many_vpns(self): - for i in xrange(users.Vpn.num_ports_for_ip(FLAGS.vpn_ip)): - users.Vpn.create("vpnuser%s" % i) - self.assertRaises(users.NoMorePorts, users.Vpn.create, "boom") + vpns = [] + for i in xrange(manager.Vpn.num_ports_for_ip(FLAGS.vpn_ip)): + vpns.append(manager.Vpn.create("vpnuser%s" % i)) + self.assertRaises(manager.NoMorePorts, 
manager.Vpn.create, "boom") + for vpn in vpns: + vpn.destroy() def test_299_can_delete_project(self): - self.users.delete_project('testproj') - self.assertFalse(filter(lambda p: p.name == 'testproj', self.users.get_projects())) + self.manager.delete_project('testproj') + self.assertFalse(filter(lambda p: p.name == 'testproj', self.manager.get_projects())) def test_999_can_delete_users(self): - self.users.delete_user('test1') - users = self.users.get_users() + self.manager.delete_user('test1') + users = self.manager.get_users() self.assertFalse(filter(lambda u: u.id == 'test1', users)) - self.users.delete_user('test2') - self.assertEqual(self.users.get_user('test2'), None) + self.manager.delete_user('test2') + self.assertEqual(self.manager.get_user('test2'), None) if __name__ == "__main__": diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index 68cd488be..0e1b55065 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -39,59 +39,61 @@ class NetworkTestCase(test.TrialTestCase): logging.getLogger().setLevel(logging.DEBUG) self.manager = manager.AuthManager() self.dnsmasq = FakeDNSMasq() - try: - self.manager.create_user('netuser', 'netuser', 'netuser') - except: pass + self.user = self.manager.create_user('netuser', 'netuser', 'netuser') + self.projects = [] + self.projects.append(self.manager.create_project('netuser', + 'netuser', + 'netuser')) for i in range(0, 6): name = 'project%s' % i - if not self.manager.get_project(name): - self.manager.create_project(name, 'netuser', name) + self.projects.append(self.manager.create_project(name, + 'netuser', + name)) self.network = network.PublicNetworkController() def tearDown(self): super(NetworkTestCase, self).tearDown() - for i in range(0, 6): - name = 'project%s' % i - self.manager.delete_project(name) - self.manager.delete_user('netuser') + for project in self.projects: + self.manager.delete_project(project) + self.manager.delete_user(self.user) def test_public_network_allocation(self): pubnet = IPy.IP(flags.FLAGS.public_range) - address = self.network.allocate_ip("netuser", "project0", "public") + address = self.network.allocate_ip(self.user.id, self.projects[0].id, "public") self.assertTrue(IPy.IP(address) in pubnet) self.assertTrue(IPy.IP(address) in self.network.network) def test_allocate_deallocate_ip(self): address = network.allocate_ip( - "netuser", "project0", utils.generate_mac()) + self.user.id, self.projects[0].id, utils.generate_mac()) logging.debug("Was allocated %s" % (address)) - net = network.get_project_network("project0", "default") - self.assertEqual(True, is_in_project(address, "project0")) + net = network.get_project_network(self.projects[0].id, "default") + self.assertEqual(True, is_in_project(address, self.projects[0].id)) mac = utils.generate_mac() hostname = "test-host" self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name) rv = network.deallocate_ip(address) # Doesn't go away until it's dhcp released - self.assertEqual(True, is_in_project(address, "project0")) + self.assertEqual(True, is_in_project(address, self.projects[0].id)) self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name) - self.assertEqual(False, is_in_project(address, "project0")) + self.assertEqual(False, is_in_project(address, self.projects[0].id)) def test_range_allocation(self): mac = utils.generate_mac() secondmac = utils.generate_mac() hostname = "test-host" address = network.allocate_ip( - "netuser", "project0", mac) + self.user.id, self.projects[0].id, mac) secondaddress = 
network.allocate_ip( - "netuser", "project1", secondmac) - net = network.get_project_network("project0", "default") - secondnet = network.get_project_network("project1", "default") + self.user, "project1", secondmac) + net = network.get_project_network(self.projects[0].id, "default") + secondnet = network.get_project_network(self.projects[1].id, "default") - self.assertEqual(True, is_in_project(address, "project0")) - self.assertEqual(True, is_in_project(secondaddress, "project1")) - self.assertEqual(False, is_in_project(address, "project1")) + self.assertEqual(True, is_in_project(address, self.projects[0].id)) + self.assertEqual(True, is_in_project(secondaddress, self.projects[1].id)) + self.assertEqual(False, is_in_project(address, self.projects[1].id)) # Addresses are allocated before they're issued self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name) @@ -100,34 +102,34 @@ class NetworkTestCase(test.TrialTestCase): rv = network.deallocate_ip(address) self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name) - self.assertEqual(False, is_in_project(address, "project0")) + self.assertEqual(False, is_in_project(address, self.projects[0].id)) # First address release shouldn't affect the second - self.assertEqual(True, is_in_project(secondaddress, "project1")) + self.assertEqual(True, is_in_project(secondaddress, self.projects[1].id)) rv = network.deallocate_ip(secondaddress) self.dnsmasq.release_ip(secondmac, secondaddress, hostname, secondnet.bridge_name) - self.assertEqual(False, is_in_project(secondaddress, "project1")) + self.assertEqual(False, is_in_project(secondaddress, self.projects[1].id)) def test_subnet_edge(self): - secondaddress = network.allocate_ip("netuser", "project0", + secondaddress = network.allocate_ip(self.user.id, self.projects[0].id, utils.generate_mac()) hostname = "toomany-hosts" - for project in range(1,5): - project_id = "project%s" % (project) + for i in range(1,5): + project_id = self.projects[i].id mac = utils.generate_mac() mac2 = utils.generate_mac() mac3 = utils.generate_mac() address = network.allocate_ip( - "netuser", project_id, mac) + self.user, project_id, mac) address2 = network.allocate_ip( - "netuser", project_id, mac2) + self.user, project_id, mac2) address3 = network.allocate_ip( - "netuser", project_id, mac3) - self.assertEqual(False, is_in_project(address, "project0")) - self.assertEqual(False, is_in_project(address2, "project0")) - self.assertEqual(False, is_in_project(address3, "project0")) + self.user, project_id, mac3) + self.assertEqual(False, is_in_project(address, self.projects[0].id)) + self.assertEqual(False, is_in_project(address2, self.projects[0].id)) + self.assertEqual(False, is_in_project(address3, self.projects[0].id)) rv = network.deallocate_ip(address) rv = network.deallocate_ip(address2) rv = network.deallocate_ip(address3) @@ -135,7 +137,7 @@ class NetworkTestCase(test.TrialTestCase): self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name) self.dnsmasq.release_ip(mac2, address2, hostname, net.bridge_name) self.dnsmasq.release_ip(mac3, address3, hostname, net.bridge_name) - net = network.get_project_network("project0", "default") + net = network.get_project_network(self.projects[0].id, "default") rv = network.deallocate_ip(secondaddress) self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name) @@ -150,22 +152,23 @@ class NetworkTestCase(test.TrialTestCase): Network size is 32, there are 5 addresses reserved for VPN. 
So we should get 23 usable addresses """ - net = network.get_project_network("project0", "default") + net = network.get_project_network(self.projects[0].id, "default") hostname = "toomany-hosts" macs = {} addresses = {} for i in range(0, 22): macs[i] = utils.generate_mac() - addresses[i] = network.allocate_ip("netuser", "project0", macs[i]) + addresses[i] = network.allocate_ip(self.user.id, self.projects[0].id, macs[i]) self.dnsmasq.issue_ip(macs[i], addresses[i], hostname, net.bridge_name) - self.assertRaises(NoMoreAddresses, network.allocate_ip, "netuser", "project0", utils.generate_mac()) + self.assertRaises(NoMoreAddresses, network.allocate_ip, self.user.id, self.projects[0].id, utils.generate_mac()) for i in range(0, 22): rv = network.deallocate_ip(addresses[i]) self.dnsmasq.release_ip(macs[i], addresses[i], hostname, net.bridge_name) def is_in_project(address, project_id): + print address, list(network.get_project_network(project_id).list_addresses()) return address in network.get_project_network(project_id).list_addresses() def _get_project_addresses(project_id): -- cgit From 79b5ab9a9e18fdee3d65311b6ff16cc39d7d2513 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 20 Jul 2010 09:22:53 -0500 Subject: network unittest clean up --- nova/tests/network_unittest.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index 0e1b55065..237750d7f 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -87,7 +87,7 @@ class NetworkTestCase(test.TrialTestCase): address = network.allocate_ip( self.user.id, self.projects[0].id, mac) secondaddress = network.allocate_ip( - self.user, "project1", secondmac) + self.user, self.projects[1].id, secondmac) net = network.get_project_network(self.projects[0].id, "default") secondnet = network.get_project_network(self.projects[1].id, "default") @@ -168,7 +168,6 @@ class NetworkTestCase(test.TrialTestCase): self.dnsmasq.release_ip(macs[i], addresses[i], hostname, net.bridge_name) def is_in_project(address, project_id): - print address, list(network.get_project_network(project_id).list_addresses()) return address in network.get_project_network(project_id).list_addresses() def _get_project_addresses(project_id): -- cgit From cb702cb1a88ec94577c5871ab0402471dac0ec7c Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 20 Jul 2010 14:09:53 -0500 Subject: Cleanup per suggestions Move ugly import statement to avoid try except Vpn ip and port returns none if vpn isn't allocated get_credentials returns exception if vpn isn't allocated Flag for using vpns --- nova/auth/ldapdriver.py | 32 +++++++++++--------------------- nova/auth/manager.py | 37 +++++++++++++++++++++++++++---------- 2 files changed, 38 insertions(+), 31 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index d330ae729..4ba09517c 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -29,14 +29,6 @@ import logging from nova import exception from nova import flags -try: - import ldap -except Exception, e: - from nova.auth import fakeldap as ldap -# NOTE(vish): this import is so we can use fakeldap even when real ldap -# is installed. 
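# ---------------------------------------------------------------------------
# Illustrative aside (not part of this hunk): the idea behind "move ugly
# import statement to avoid try except" is to choose the LDAP backend at
# connection time instead of module-import time, so tests can use the
# in-memory fake without python-ldap installed; the replacement code with a
# conditional import appears a little further on in this series.  The helper
# below is a hedged sketch of that pattern, not Nova's code; it assumes the
# nova tree (for fakeldap) or python-ldap is importable, and the flag name is
# a stand-in.
def load_ldap_backend(fake_users=True):
    """Return the fake in-memory backend for tests, real python-ldap otherwise."""
    if fake_users:
        from nova.auth import fakeldap as backend
    else:
        import ldap as backend
    return backend


# Usage: conn = load_ldap_backend(fake_users=False).initialize('ldap://localhost')
# ---------------------------------------------------------------------------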
-from nova.auth import fakeldap - FLAGS = flags.FLAGS flags.DEFINE_string('ldap_url', 'ldap://localhost', 'Point this at your ldap server') @@ -73,13 +65,11 @@ class LdapDriver(object): def __enter__(self): """Creates the connection to LDAP""" if FLAGS.fake_users: - self.NO_SUCH_OBJECT = fakeldap.NO_SUCH_OBJECT - self.OBJECT_CLASS_VIOLATION = fakeldap.OBJECT_CLASS_VIOLATION - self.conn = fakeldap.initialize(FLAGS.ldap_url) + from nova.auth import fakeldap as ldap else: - self.NO_SUCH_OBJECT = ldap.NO_SUCH_OBJECT - self.OBJECT_CLASS_VIOLATION = ldap.OBJECT_CLASS_VIOLATION - self.conn = ldap.initialize(FLAGS.ldap_url) + import ldap + self.ldap = ldap + self.conn = self.ldap.initialize(FLAGS.ldap_url) self.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) return self @@ -285,8 +275,8 @@ class LdapDriver(object): def __find_dns(self, dn, query=None): """Find dns by query""" try: - res = self.conn.search_s(dn, ldap.SCOPE_SUBTREE, query) - except self.NO_SUCH_OBJECT: + res = self.conn.search_s(dn, self.ldap.SCOPE_SUBTREE, query) + except self.ldap.NO_SUCH_OBJECT: return [] # just return the DNs return [dn for dn, attributes in res] @@ -294,8 +284,8 @@ class LdapDriver(object): def __find_objects(self, dn, query = None): """Find objects by query""" try: - res = self.conn.search_s(dn, ldap.SCOPE_SUBTREE, query) - except self.NO_SUCH_OBJECT: + res = self.conn.search_s(dn, self.ldap.SCOPE_SUBTREE, query) + except self.ldap.NO_SUCH_OBJECT: return [] # just return the attributes return [attributes for dn, attributes in res] @@ -379,7 +369,7 @@ class LdapDriver(object): raise exception.Duplicate("User %s is already a member of " "the group %s" % (uid, group_dn)) attr = [ - (ldap.MOD_ADD, 'member', self.__uid_to_dn(uid)) + (self.ldap.MOD_ADD, 'member', self.__uid_to_dn(uid)) ] self.conn.modify_s(group_dn, attr) @@ -399,10 +389,10 @@ class LdapDriver(object): def __safe_remove_from_group(self, uid, group_dn): """Remove user from group, deleting group if user is last member""" # FIXME(vish): what if deleted user is a project manager? - attr = [(ldap.MOD_DELETE, 'member', self.__uid_to_dn(uid))] + attr = [(self.ldap.MOD_DELETE, 'member', self.__uid_to_dn(uid))] try: self.conn.modify_s(group_dn, attr) - except self.OBJECT_CLASS_VIOLATION: + except self.ldap.OBJECT_CLASS_VIOLATION: logging.debug("Attempted to remove the last member of a group. " "Deleting the group at %s instead." 
% group_dn ) self.__delete_group(group_dn) diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 2facffe51..3496ea161 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -41,13 +41,15 @@ FLAGS = flags.FLAGS # NOTE(vish): a user with one of these roles will be a superuser and # have access to all api commands flags.DEFINE_list('superuser_roles', ['cloudadmin'], - 'roles that ignore rbac checking completely') + 'Roles that ignore rbac checking completely') # NOTE(vish): a user with one of these roles will have it for every # project, even if he or she is not a member of the project flags.DEFINE_list('global_roles', ['cloudadmin', 'itsec'], - 'roles that apply to all projects') + 'Roles that apply to all projects') + +flags.DEFINE_bool('use_vpn', True, 'Support per-project vpns') flags.DEFINE_string('credentials_template', utils.abspath('auth/novarc.template'), 'Template for creating users rc file') @@ -189,11 +191,13 @@ class Project(AuthBase): @property def vpn_ip(self): - return AuthManager().get_project_vpn_ip(self) + ip, port = AuthManager().get_project_vpn_data(self) + return ip @property def vpn_port(self): - return AuthManager().get_project_vpn_port(self) + ip, port = AuthManager().get_project_vpn_data(self) + return port def has_manager(self, user): return AuthManager().is_project_manager(user, self) @@ -551,7 +555,8 @@ class AuthManager(object): description, member_users) if project_dict: - Vpn.create(project_dict['id']) + if FLAGS.use_vpn: + Vpn.create(project_dict['id']) return Project(**project_dict) def add_to_project(self, user, project): @@ -578,11 +583,20 @@ class AuthManager(object): return drv.remove_from_project(User.safe_id(user), Project.safe_id(project)) - def get_project_vpn_ip(self, project): - return Vpn(Project.safe_id(project)).ip + def get_project_vpn_data(self, project): + """Gets vpn ip and port for project - def get_project_vpn_port(self, project): - return Vpn(Project.safe_id(project)).port + @type project: Project or project_id + @param project: Project from which to get associated vpn data + + @rvalue: tuple of (str, str) + @return: A tuple containing (ip, port) or None, None if vpn has + not been allocated for user. 
+ """ + vpn = Vpn.lookup(Project.safe_id(project)) + if not vpn: + return None, None + return (vpn.ip, vpn.port) def delete_project(self, project): """Deletes a project""" @@ -713,7 +727,10 @@ class AuthManager(object): rc = self.__generate_rc(user.access, user.secret, pid) private_key, signed_cert = self._generate_x509_cert(user.id, pid) - vpn = Vpn(pid) + vpn = Vpn.lookup(pid) + if not vpn: + raise exception.Error("No vpn data allocated for project %s" % + project.name) configfile = open(FLAGS.vpn_client_template,"r") s = string.Template(configfile.read()) configfile.close() -- cgit From aea63a32542ea2534513532b645491687e48367b Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 20 Jul 2010 14:29:49 -0500 Subject: Move self.ldap to global ldap to make changes easier if we ever implement settings --- nova/auth/ldapdriver.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 4ba09517c..a94b219d6 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -64,12 +64,12 @@ class LdapDriver(object): """ def __enter__(self): """Creates the connection to LDAP""" + global ldap if FLAGS.fake_users: from nova.auth import fakeldap as ldap else: import ldap - self.ldap = ldap - self.conn = self.ldap.initialize(FLAGS.ldap_url) + self.conn = ldap.initialize(FLAGS.ldap_url) self.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) return self @@ -275,8 +275,8 @@ class LdapDriver(object): def __find_dns(self, dn, query=None): """Find dns by query""" try: - res = self.conn.search_s(dn, self.ldap.SCOPE_SUBTREE, query) - except self.ldap.NO_SUCH_OBJECT: + res = self.conn.search_s(dn, ldap.SCOPE_SUBTREE, query) + except ldap.NO_SUCH_OBJECT: return [] # just return the DNs return [dn for dn, attributes in res] @@ -284,8 +284,8 @@ class LdapDriver(object): def __find_objects(self, dn, query = None): """Find objects by query""" try: - res = self.conn.search_s(dn, self.ldap.SCOPE_SUBTREE, query) - except self.ldap.NO_SUCH_OBJECT: + res = self.conn.search_s(dn, ldap.SCOPE_SUBTREE, query) + except ldap.NO_SUCH_OBJECT: return [] # just return the attributes return [attributes for dn, attributes in res] @@ -369,7 +369,7 @@ class LdapDriver(object): raise exception.Duplicate("User %s is already a member of " "the group %s" % (uid, group_dn)) attr = [ - (self.ldap.MOD_ADD, 'member', self.__uid_to_dn(uid)) + (ldap.MOD_ADD, 'member', self.__uid_to_dn(uid)) ] self.conn.modify_s(group_dn, attr) @@ -389,10 +389,10 @@ class LdapDriver(object): def __safe_remove_from_group(self, uid, group_dn): """Remove user from group, deleting group if user is last member""" # FIXME(vish): what if deleted user is a project manager? - attr = [(self.ldap.MOD_DELETE, 'member', self.__uid_to_dn(uid))] + attr = [(ldap.MOD_DELETE, 'member', self.__uid_to_dn(uid))] try: self.conn.modify_s(group_dn, attr) - except self.ldap.OBJECT_CLASS_VIOLATION: + except ldap.OBJECT_CLASS_VIOLATION: logging.debug("Attempted to remove the last member of a group. " "Deleting the group at %s instead." 
                          % group_dn )
            self.__delete_group(group_dn)
--
cgit

From fd25c2699867e16908aaadc3380236f84cc3cc5a Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya
Date: Tue, 20 Jul 2010 17:05:02 -0500
Subject: remove spaces from export statements in scripts relating to certs

---
 CA/geninter.sh | 2 +-
 nova/cloudpipe/bootscript.sh | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/CA/geninter.sh b/CA/geninter.sh
index 6c0528d1b..46f8f79ad 100755
--- a/CA/geninter.sh
+++ b/CA/geninter.sh
@@ -17,7 +17,7 @@
 # under the License.
 
 # ARG is the id of the user
-export SUBJ=/C=US/ST=California/L=Mountain View/O=Anso Labs/OU=Nova Dev/CN=customer-intCA-$3
+export SUBJ="/C=US/ST=California/L=MountainView/O=AnsoLabs/OU=NovaDev/CN=customer-intCA-$3"
 mkdir INTER/$1
 cd INTER/$1
 cp ../../openssl.cnf.tmpl openssl.cnf
diff --git a/nova/cloudpipe/bootscript.sh b/nova/cloudpipe/bootscript.sh
index 43fc2ecab..82ec2012a 100755
--- a/nova/cloudpipe/bootscript.sh
+++ b/nova/cloudpipe/bootscript.sh
@@ -24,7 +24,7 @@ export VPN_IP=`ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f2
 export BROADCAST=`ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f3 | awk '{print $1}'`
 export DHCP_MASK=`ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f4 | awk '{print $1}'`
 export GATEWAY=`netstat -r | grep default | cut -d' ' -f10`
-export SUBJ=/C=US/ST=California/L=Mountain View/O=Anso Labs/OU=Nova Dev/CN=customer-vpn-$VPN_IP
+export SUBJ="/C=US/ST=California/L=MountainView/O=AnsoLabs/OU=NovaDev/CN=customer-vpn-$VPN_IP"
 
 DHCP_LOWER=`echo $BROADCAST | awk -F. '{print $1"."$2"."$3"." $4 - 10 }'`
 DHCP_UPPER=`echo $BROADCAST | awk -F. '{print $1"."$2"."$3"." $4 - 1 }'`
--
cgit

From 0506cce7d934ad093f5808606627aa19f43428ef Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya
Date: Tue, 20 Jul 2010 17:55:38 -0500
Subject: Fixed the broken reference to

---
 CA/geninter.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CA/geninter.sh b/CA/geninter.sh
index 46f8f79ad..7d6c280d5 100755
--- a/CA/geninter.sh
+++ b/CA/geninter.sh
@@ -17,7 +17,7 @@
 # under the License.
# ARG is the id of the user -export SUBJ="/C=US/ST=California/L=MountainView/O=AnsoLabs/OU=NovaDev/CN=customer-intCA-$3" +export SUBJ="/C=US/ST=California/L=MountainView/O=AnsoLabs/OU=NovaDev/CN=customer-intCA-$1" mkdir INTER/$1 cd INTER/$1 cp ../../openssl.cnf.tmpl openssl.cnf -- cgit From 1b6efa80e19a60d71a762683fa1edee02645355c Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 20 Jul 2010 22:28:23 -0500 Subject: fix for describe addresses showing everyone's public ips --- nova/endpoint/cloud.py | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 3b7b4804b..4fa9b5afd 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -453,21 +453,21 @@ class CloudController(object): def format_addresses(self, context): addresses = [] - # TODO(vish): move authorization checking into network.py for address in self.network.host_objs: - #logging.debug(address_record) - address_rv = { - 'public_ip': address['address'], - 'instance_id' : address.get('instance_id', 'free') - } - if context.user.is_admin(): - address_rv['instance_id'] = "%s (%s, %s)" % ( - address['instance_id'], - address['user_id'], - address['project_id'], - ) + # TODO(vish): implement a by_project iterator for addresses + if (context.user.is_admin() or + address['project_id'] == self.project.id): + address_rv = { + 'public_ip': address['address'], + 'instance_id' : address.get('instance_id', 'free') + } + if context.user.is_admin(): + address_rv['instance_id'] = "%s (%s, %s)" % ( + address['instance_id'], + address['user_id'], + address['project_id'], + ) addresses.append(address_rv) - # logging.debug(addresses) return {'addressesSet': addresses} @rbac.allow('netadmin') -- cgit From 2d49a870fe89b3266f908a5711a5d412fa6d7a19 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 21 Jul 2010 08:46:53 -0500 Subject: remove all of the unused saved return values from attach_to_twisted --- bin/nova-compute | 4 ++-- bin/nova-volume | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bin/nova-compute b/bin/nova-compute index 4b559beb4..49710e1b3 100755 --- a/bin/nova-compute +++ b/bin/nova-compute @@ -74,8 +74,8 @@ def main(): pulse = task.LoopingCall(n.report_state, FLAGS.node_name, bin_name) pulse.start(interval=FLAGS.compute_report_state_interval, now=False) - injected = consumer_all.attach_to_twisted() - injected = consumer_node.attach_to_twisted() + consumer_all.attach_to_twisted() + consumer_node.attach_to_twisted() # This is the parent service that twistd will be looking for when it # parses this file, return it so that we can get it into globals below diff --git a/bin/nova-volume b/bin/nova-volume index 64b726627..7d4b65205 100755 --- a/bin/nova-volume +++ b/bin/nova-volume @@ -70,8 +70,8 @@ def main(): pulse = task.LoopingCall(bs.report_state, FLAGS.node_name, bin_name) pulse.start(interval=FLAGS.volume_report_state_interval, now=False) - injected = consumer_all.attach_to_twisted() - injected = consumer_node.attach_to_twisted() + consumer_all.attach_to_twisted() + consumer_node.attach_to_twisted() # This is the parent service that twistd will be looking for when it # parses this file, return it so that we can get it into globals below -- cgit From 302afc13da7a83dcdf8bde0d6370b675c9b14218 Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> Date: Wed, 21 Jul 2010 14:35:39 -0400 Subject: Fixes up Bucket to throw proper NotFound and NotEmpty exceptions in constructor and delete() method, and 
fixes up objectstore_unittest to properly use assertRaises() to check for proper exceptions and remove the assert_ calls. --- nova/exception.py | 3 +++ nova/objectstore/bucket.py | 4 ++-- nova/tests/objectstore_unittest.py | 35 ++++++++++++----------------------- 3 files changed, 17 insertions(+), 25 deletions(-) diff --git a/nova/exception.py b/nova/exception.py index bda002d1e..2108123de 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -44,6 +44,9 @@ class Duplicate(Error): class NotAuthorized(Error): pass +class NotEmpty(Error): + pass + def wrap_exception(f): def _wrap(*args, **kw): try: diff --git a/nova/objectstore/bucket.py b/nova/objectstore/bucket.py index 090ef4e61..b42a96233 100644 --- a/nova/objectstore/bucket.py +++ b/nova/objectstore/bucket.py @@ -107,7 +107,7 @@ class Bucket(object): try: return context.user.is_admin() or self.owner_id == context.project.id except Exception, e: - pass + return False def list_keys(self, prefix='', marker=None, max_keys=1000, terse=False): object_names = [] @@ -161,7 +161,7 @@ class Bucket(object): def delete(self): if len(os.listdir(self.path)) > 0: - raise exception.NotAuthorized() + raise exception.NotEmpty() os.rmdir(self.path) os.remove(self.path+'.json') diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py index f47ca7f00..c0b6e97a5 100644 --- a/nova/tests/objectstore_unittest.py +++ b/nova/tests/objectstore_unittest.py @@ -23,6 +23,7 @@ import os import shutil import tempfile +from nova.exception import NotEmpty, NotFound, NotAuthorized from nova import flags from nova import objectstore from nova import test @@ -96,49 +97,37 @@ class ObjectStoreTestCase(test.BaseTestCase): # another user is not authorized self.context.user = self.um.get_user('user2') self.context.project = self.um.get_project('proj2') - self.assert_(bucket.is_authorized(self.context) == False) + self.assertFalse(bucket.is_authorized(self.context)) # admin is authorized to use bucket self.context.user = self.um.get_user('admin_user') self.context.project = None - self.assert_(bucket.is_authorized(self.context)) + self.assertTrue(bucket.is_authorized(self.context)) # new buckets are empty - self.assert_(bucket.list_keys()['Contents'] == []) + self.assertTrue(bucket.list_keys()['Contents'] == []) # storing keys works bucket['foo'] = "bar" - self.assert_(len(bucket.list_keys()['Contents']) == 1) + self.assertEquals(len(bucket.list_keys()['Contents']), 1) - self.assert_(bucket['foo'].read() == 'bar') + self.assertEquals(bucket['foo'].read(), 'bar') # md5 of key works - self.assert_(bucket['foo'].md5 == hashlib.md5('bar').hexdigest()) - - # deleting non-empty bucket throws exception - exception = False - try: - bucket.delete() - except: - exception = True + self.assertEquals(bucket['foo'].md5, hashlib.md5('bar').hexdigest()) - self.assert_(exception) + # deleting non-empty bucket should throw a NotEmpty exception + self.assertRaises(NotEmpty, bucket.delete) # deleting key del bucket['foo'] - # deleting empty button + # deleting empty bucket bucket.delete() # accessing deleted bucket throws exception - exception = False - try: - objectstore.bucket.Bucket('new_bucket') - except: - exception = True - - self.assert_(exception) + self.assertRaises(NotFound, objectstore.bucket.Bucket, 'new_bucket') def test_images(self): self.context.user = self.um.get_user('user1') @@ -167,7 +156,7 @@ class ObjectStoreTestCase(test.BaseTestCase): # verify image permissions self.context.user = self.um.get_user('user2') self.context.project = 
self.um.get_project('proj2') - self.assert_(my_img.is_authorized(self.context) == False) + self.assertFalse(my_img.is_authorized(self.context)) # class ApiObjectStoreTestCase(test.BaseTestCase): # def setUp(self): -- cgit From 2c7e49ddeba2e9015c541712e5c52e0d902804b0 Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> Date: Wed, 21 Jul 2010 15:28:43 -0400 Subject: reorder import statement and remove commented-out test case that is the same as api_unittest in objectstore_unittest --- nova/tests/objectstore_unittest.py | 35 +---------------------------------- 1 file changed, 1 insertion(+), 34 deletions(-) diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py index c0b6e97a5..8ae1f6e78 100644 --- a/nova/tests/objectstore_unittest.py +++ b/nova/tests/objectstore_unittest.py @@ -23,11 +23,11 @@ import os import shutil import tempfile -from nova.exception import NotEmpty, NotFound, NotAuthorized from nova import flags from nova import objectstore from nova import test from nova.auth import users +from nova.exception import NotEmpty, NotFound, NotAuthorized FLAGS = flags.FLAGS @@ -157,36 +157,3 @@ class ObjectStoreTestCase(test.BaseTestCase): self.context.user = self.um.get_user('user2') self.context.project = self.um.get_project('proj2') self.assertFalse(my_img.is_authorized(self.context)) - -# class ApiObjectStoreTestCase(test.BaseTestCase): -# def setUp(self): -# super(ApiObjectStoreTestCase, self).setUp() -# FLAGS.fake_users = True -# FLAGS.buckets_path = os.path.join(tempdir, 'buckets') -# FLAGS.images_path = os.path.join(tempdir, 'images') -# FLAGS.ca_path = os.path.join(os.path.dirname(__file__), 'CA') -# -# self.users = users.UserManager.instance() -# self.app = handler.Application(self.users) -# -# self.host = '127.0.0.1' -# -# self.conn = boto.s3.connection.S3Connection( -# aws_access_key_id=user.access, -# aws_secret_access_key=user.secret, -# is_secure=False, -# calling_format=boto.s3.connection.OrdinaryCallingFormat(), -# port=FLAGS.s3_port, -# host=FLAGS.s3_host) -# -# self.mox.StubOutWithMock(self.ec2, 'new_http_connection') -# -# def tearDown(self): -# FLAGS.Reset() -# super(ApiObjectStoreTestCase, self).tearDown() -# -# def test_describe_instances(self): -# self.expect_http() -# self.mox.ReplayAll() -# -# self.assertEqual(self.ec2.get_all_instances(), []) -- cgit From 3b982f62bb7cb92cbe9e96a1bde4410b3d06f997 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 21 Jul 2010 14:42:22 -0500 Subject: refactor daemons to use common base class in preparation for network refactor --- bin/nova-compute | 68 +---- bin/nova-network | 32 +++ bin/nova-volume | 68 +---- nova/compute/computenode.py | 582 +++++++++++++++++++++++++++++++++++++++++ nova/compute/node.py | 581 ---------------------------------------- nova/endpoint/cloud.py | 18 +- nova/endpoint/rackspace.py | 1 - nova/flags.py | 4 +- nova/network/__init__.py | 32 +++ nova/network/networknode.py | 35 +++ nova/node.py | 103 ++++++++ nova/tests/cloud_unittest.py | 4 +- nova/tests/compute_unittest.py | 128 +++++++++ nova/tests/future_unittest.py | 75 ------ nova/tests/model_unittest.py | 1 - nova/tests/node_unittest.py | 128 --------- nova/tests/storage_unittest.py | 115 -------- nova/tests/volume_unittest.py | 115 ++++++++ nova/twistd.py | 12 +- nova/volume/storage.py | 321 ----------------------- nova/volume/volumenode.py | 305 +++++++++++++++++++++ run_tests.py | 4 +- 22 files changed, 1363 insertions(+), 1369 deletions(-) create mode 100644 bin/nova-network create mode 100644 
nova/compute/computenode.py delete mode 100644 nova/compute/node.py create mode 100644 nova/network/__init__.py create mode 100644 nova/network/networknode.py create mode 100644 nova/node.py create mode 100644 nova/tests/compute_unittest.py delete mode 100644 nova/tests/future_unittest.py delete mode 100644 nova/tests/node_unittest.py delete mode 100644 nova/tests/storage_unittest.py create mode 100644 nova/tests/volume_unittest.py delete mode 100644 nova/volume/storage.py create mode 100644 nova/volume/volumenode.py diff --git a/bin/nova-compute b/bin/nova-compute index 49710e1b3..67c93fcb8 100755 --- a/bin/nova-compute +++ b/bin/nova-compute @@ -19,80 +19,14 @@ """ Twistd daemon for the nova compute nodes. - Receives messages via AMQP, manages pool of worker threads - for async tasks. """ -import logging -import os -import sys - -# NOTE(termie): kludge so that we can run this from the bin directory in the -# checkout without having to screw with paths -NOVA_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'nova') -if os.path.exists(NOVA_PATH): - sys.path.insert(0, os.path.dirname(NOVA_PATH)) - -from twisted.internet import task -from twisted.application import service - -from nova import flags -from nova import rpc from nova import twistd from nova.compute import node -FLAGS = flags.FLAGS -# NOTE(termie): This file will necessarily be re-imported under different -# context when the twistd.serve() call is made below so any -# flags we define here will have to be conditionally defined, -# flags defined by imported modules are safe. -if 'compute_report_state_interval' not in FLAGS: - flags.DEFINE_integer('compute_report_state_interval', 10, - 'seconds between nodes reporting state to cloud', - lower_bound=1) -logging.getLogger().setLevel(logging.DEBUG) - -def main(): - logging.warn('Starting compute node') - n = node.Node() - d = n.adopt_instances() - d.addCallback(lambda x: logging.info('Adopted %d instances', x)) - - conn = rpc.Connection.instance() - consumer_all = rpc.AdapterConsumer( - connection=conn, - topic='%s' % FLAGS.compute_topic, - proxy=n) - - consumer_node = rpc.AdapterConsumer( - connection=conn, - topic='%s.%s' % (FLAGS.compute_topic, FLAGS.node_name), - proxy=n) - - bin_name = os.path.basename(__file__) - pulse = task.LoopingCall(n.report_state, FLAGS.node_name, bin_name) - pulse.start(interval=FLAGS.compute_report_state_interval, now=False) - - consumer_all.attach_to_twisted() - consumer_node.attach_to_twisted() - - # This is the parent service that twistd will be looking for when it - # parses this file, return it so that we can get it into globals below - application = service.Application(bin_name) - n.setServiceParent(application) - return application - - -# NOTE(termie): When this script is executed from the commandline what it will -# actually do is tell the twistd application runner that it -# should run this file as a twistd application (see below). if __name__ == '__main__': twistd.serve(__file__) -# NOTE(termie): When this script is loaded by the twistd application runner -# this code path will be executed and twistd will expect a -# variable named 'application' to be available, it will then -# handle starting it and stopping it. 
if __name__ == '__builtin__': - application = main() + application = node.ComputeNode.create() diff --git a/bin/nova-network b/bin/nova-network new file mode 100644 index 000000000..c69690081 --- /dev/null +++ b/bin/nova-network @@ -0,0 +1,32 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" + Twistd daemon for the nova network nodes. +""" + +from nova import twistd +from nova.network import node + + +if __name__ == '__main__': + twistd.serve(__file__) + +if __name__ == '__builtin__': + application = node.NetworkNode.create() diff --git a/bin/nova-volume b/bin/nova-volume index 7d4b65205..cdf2782bc 100755 --- a/bin/nova-volume +++ b/bin/nova-volume @@ -18,77 +18,15 @@ # under the License. """ - Tornado Storage daemon manages AoE volumes via AMQP messaging. + Twistd daemon for the nova volume nodes. """ -import logging -import os -import sys - -# NOTE(termie): kludge so that we can run this from the bin directory in the -# checkout without having to screw with paths -NOVA_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'nova') -if os.path.exists(NOVA_PATH): - sys.path.insert(0, os.path.dirname(NOVA_PATH)) - -from twisted.internet import task -from twisted.application import service - -from nova import flags -from nova import rpc from nova import twistd -from nova.volume import storage - - -FLAGS = flags.FLAGS -# NOTE(termie): This file will necessarily be re-imported under different -# context when the twistd.serve() call is made below so any -# flags we define here will have to be conditionally defined, -# flags defined by imported modules are safe. 
-if 'volume_report_state_interval' not in FLAGS: - flags.DEFINE_integer('volume_report_state_interval', 10, - 'seconds between nodes reporting state to cloud', - lower_bound=1) - - -def main(): - logging.warn('Starting volume node') - bs = storage.BlockStore() - - conn = rpc.Connection.instance() - consumer_all = rpc.AdapterConsumer( - connection=conn, - topic='%s' % FLAGS.storage_topic, - proxy=bs) - - consumer_node = rpc.AdapterConsumer( - connection=conn, - topic='%s.%s' % (FLAGS.storage_topic, FLAGS.node_name), - proxy=bs) - - bin_name = os.path.basename(__file__) - pulse = task.LoopingCall(bs.report_state, FLAGS.node_name, bin_name) - pulse.start(interval=FLAGS.volume_report_state_interval, now=False) - - consumer_all.attach_to_twisted() - consumer_node.attach_to_twisted() - - # This is the parent service that twistd will be looking for when it - # parses this file, return it so that we can get it into globals below - application = service.Application(bin_name) - bs.setServiceParent(application) - return application +from nova.volume import node -# NOTE(termie): When this script is executed from the commandline what it will -# actually do is tell the twistd application runner that it -# should run this file as a twistd application (see below). if __name__ == '__main__': twistd.serve(__file__) -# NOTE(termie): When this script is loaded by the twistd application runner -# this code path will be executed and twistd will expect a -# variable named 'application' to be available, it will then -# handle starting it and stopping it. if __name__ == '__builtin__': - application = main() + application = node.VolumeNode.create() diff --git a/nova/compute/computenode.py b/nova/compute/computenode.py new file mode 100644 index 000000000..f26da1996 --- /dev/null +++ b/nova/compute/computenode.py @@ -0,0 +1,582 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Compute Node: + + Runs on each compute node, managing the + hypervisor using libvirt. 
+ +""" + +import base64 +import json +import logging +import os +import shutil +import sys +from twisted.internet import defer +from twisted.internet import task +from twisted.application import service + + +try: + import libvirt +except Exception, err: + logging.warning('no libvirt found') + +from nova import exception +from nova import fakevirt +from nova import flags +from nova import node +from nova import process +from nova import utils +from nova.compute import disk +from nova.compute import model +from nova.compute import network +from nova.objectstore import image # for image_path flag +from nova.volume import volumenode + + +FLAGS = flags.FLAGS +flags.DEFINE_string('libvirt_xml_template', + utils.abspath('compute/libvirt.xml.template'), + 'Libvirt XML Template') +flags.DEFINE_bool('use_s3', True, + 'whether to get images from s3 or use local copy') +flags.DEFINE_string('instances_path', utils.abspath('../instances'), + 'where instances are stored on disk') + +INSTANCE_TYPES = {} +INSTANCE_TYPES['m1.tiny'] = {'memory_mb': 512, 'vcpus': 1, 'local_gb': 0} +INSTANCE_TYPES['m1.small'] = {'memory_mb': 1024, 'vcpus': 1, 'local_gb': 10} +INSTANCE_TYPES['m1.medium'] = {'memory_mb': 2048, 'vcpus': 2, 'local_gb': 10} +INSTANCE_TYPES['m1.large'] = {'memory_mb': 4096, 'vcpus': 4, 'local_gb': 10} +INSTANCE_TYPES['m1.xlarge'] = {'memory_mb': 8192, 'vcpus': 4, 'local_gb': 10} +INSTANCE_TYPES['c1.medium'] = {'memory_mb': 2048, 'vcpus': 4, 'local_gb': 10} + + +def _image_path(path=''): + return os.path.join(FLAGS.images_path, path) + + +def _image_url(path): + return "%s:%s/_images/%s" % (FLAGS.s3_host, FLAGS.s3_port, path) + + +class ComputeNode(node.Node): + """ + Manages the running instances. + """ + def __init__(self): + """ load configuration options for this node and connect to libvirt """ + super(ComputeNode, self).__init__() + self._instances = {} + self._conn = self._get_connection() + self.instdir = model.InstanceDirectory() + # TODO(joshua): This needs to ensure system state, specifically: modprobe aoe + + def _get_connection(self): + """ returns a libvirt connection object """ + # TODO(termie): maybe lazy load after initial check for permissions + # TODO(termie): check whether we can be disconnected + if FLAGS.fake_libvirt: + conn = fakevirt.FakeVirtConnection.instance() + else: + auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], + 'root', + None] + conn = libvirt.openAuth('qemu:///system', auth, 0) + if conn == None: + logging.error('Failed to open connection to the hypervisor') + sys.exit(1) + return conn + + def noop(self): + """ simple test of an AMQP message call """ + return defer.succeed('PONG') + + def get_instance(self, instance_id): + # inst = self.instdir.get(instance_id) + # return inst + if self.instdir.exists(instance_id): + return Instance.fromName(self._conn, instance_id) + return None + + @exception.wrap_exception + def adopt_instances(self): + """ if there are instances already running, adopt them """ + return defer.succeed(0) + instance_names = [self._conn.lookupByID(x).name() + for x in self._conn.listDomainsID()] + for name in instance_names: + try: + new_inst = Instance.fromName(self._conn, name) + new_inst.update_state() + except: + pass + return defer.succeed(len(self._instances)) + + @exception.wrap_exception + def describe_instances(self): + retval = {} + for inst in self.instdir.by_node(FLAGS.node_name): + retval[inst['instance_id']] = ( + Instance.fromName(self._conn, inst['instance_id'])) + return retval + + @defer.inlineCallbacks + def 
report_state(self, nodename, daemon): + # TODO(termie): make this pattern be more elegant. -todd + try: + record = model.Daemon(nodename, daemon) + record.heartbeat() + if getattr(self, "model_disconnected", False): + self.model_disconnected = False + logging.error("Recovered model server connection!") + + except model.ConnectionError, ex: + if not getattr(self, "model_disconnected", False): + self.model_disconnected = True + logging.exception("model server went away") + yield + + # @exception.wrap_exception + def run_instance(self, instance_id, **_kwargs): + """ launch a new instance with specified options """ + logging.debug("Starting instance %s..." % (instance_id)) + inst = self.instdir.get(instance_id) + if not FLAGS.simple_network: + # TODO: Get the real security group of launch in here + security_group = "default" + net = network.BridgedNetwork.get_network_for_project(inst['user_id'], + inst['project_id'], + security_group).express() + inst['node_name'] = FLAGS.node_name + inst.save() + # TODO(vish) check to make sure the availability zone matches + new_inst = Instance(self._conn, name=instance_id, data=inst) + logging.info("Instances current state is %s", new_inst.state) + if new_inst.is_running(): + raise exception.Error("Instance is already running") + d = new_inst.spawn() + return d + + @exception.wrap_exception + def terminate_instance(self, instance_id): + """ terminate an instance on this machine """ + logging.debug("Got told to terminate instance %s" % instance_id) + instance = self.get_instance(instance_id) + # inst = self.instdir.get(instance_id) + if not instance: + raise exception.Error( + 'trying to terminate unknown instance: %s' % instance_id) + d = instance.destroy() + # d.addCallback(lambda x: inst.destroy()) + return d + + @exception.wrap_exception + def reboot_instance(self, instance_id): + """ reboot an instance on this server + KVM doesn't support reboot, so we terminate and restart """ + instance = self.get_instance(instance_id) + if not instance: + raise exception.Error( + 'trying to reboot unknown instance: %s' % instance_id) + return instance.reboot() + + @defer.inlineCallbacks + @exception.wrap_exception + def get_console_output(self, instance_id): + """ send the console output for an instance """ + logging.debug("Getting console output for %s" % (instance_id)) + inst = self.instdir.get(instance_id) + instance = self.get_instance(instance_id) + if not instance: + raise exception.Error( + 'trying to get console log for unknown: %s' % instance_id) + rv = yield instance.console_output() + # TODO(termie): this stuff belongs in the API layer, no need to + # munge the data we send to ourselves + output = {"InstanceId" : instance_id, + "Timestamp" : "2", + "output" : base64.b64encode(rv)} + defer.returnValue(output) + + @defer.inlineCallbacks + @exception.wrap_exception + def attach_volume(self, instance_id = None, + volume_id = None, mountpoint = None): + volume = volumenode.get_volume(volume_id) + yield self._init_aoe() + yield process.simple_execute( + "sudo virsh attach-disk %s /dev/etherd/%s %s" % + (instance_id, + volume['aoe_device'], + mountpoint.rpartition('/dev/')[2])) + volume.finish_attach() + defer.returnValue(True) + + @defer.inlineCallbacks + def _init_aoe(self): + yield process.simple_execute("sudo aoe-discover") + yield process.simple_execute("sudo aoe-stat") + + @defer.inlineCallbacks + @exception.wrap_exception + def detach_volume(self, instance_id, volume_id): + """ detach a volume from an instance """ + # despite the documentation, virsh 
detach-disk just wants the device + # name without the leading /dev/ + volume = volumenode.get_volume(volume_id) + target = volume['mountpoint'].rpartition('/dev/')[2] + yield process.simple_execute( + "sudo virsh detach-disk %s %s " % (instance_id, target)) + volume.finish_detach() + defer.returnValue(True) + + +class Group(object): + def __init__(self, group_id): + self.group_id = group_id + + +class ProductCode(object): + def __init__(self, product_code): + self.product_code = product_code + + +class Instance(object): + + NOSTATE = 0x00 + RUNNING = 0x01 + BLOCKED = 0x02 + PAUSED = 0x03 + SHUTDOWN = 0x04 + SHUTOFF = 0x05 + CRASHED = 0x06 + + def __init__(self, conn, name, data): + """ spawn an instance with a given name """ + self._conn = conn + # TODO(vish): this can be removed after data has been updated + # data doesn't seem to have a working iterator so in doesn't work + if data.get('owner_id', None) is not None: + data['user_id'] = data['owner_id'] + data['project_id'] = data['owner_id'] + self.datamodel = data + + size = data.get('instance_type', FLAGS.default_instance_type) + if size not in INSTANCE_TYPES: + raise exception.Error('invalid instance type: %s' % size) + + self.datamodel.update(INSTANCE_TYPES[size]) + + self.datamodel['name'] = name + self.datamodel['instance_id'] = name + self.datamodel['basepath'] = data.get( + 'basepath', os.path.abspath( + os.path.join(FLAGS.instances_path, self.name))) + self.datamodel['memory_kb'] = int(self.datamodel['memory_mb']) * 1024 + self.datamodel.setdefault('image_id', FLAGS.default_image) + self.datamodel.setdefault('kernel_id', FLAGS.default_kernel) + self.datamodel.setdefault('ramdisk_id', FLAGS.default_ramdisk) + self.datamodel.setdefault('project_id', self.datamodel['user_id']) + self.datamodel.setdefault('bridge_name', None) + #self.datamodel.setdefault('key_data', None) + #self.datamodel.setdefault('key_name', None) + #self.datamodel.setdefault('addressing_type', None) + + # TODO(joshua) - The ugly non-flat ones + self.datamodel['groups'] = data.get('security_group', 'default') + # TODO(joshua): Support product codes somehow + self.datamodel.setdefault('product_codes', None) + + self.datamodel.save() + logging.debug("Finished init of Instance with id of %s" % name) + + def toXml(self): + # TODO(termie): cache? 
+ logging.debug("Starting the toXML method") + libvirt_xml = open(FLAGS.libvirt_xml_template).read() + xml_info = self.datamodel.copy() + # TODO(joshua): Make this xml express the attached disks as well + + # TODO(termie): lazy lazy hack because xml is annoying + xml_info['nova'] = json.dumps(self.datamodel.copy()) + libvirt_xml = libvirt_xml % xml_info + logging.debug("Finished the toXML method") + + return libvirt_xml + + @classmethod + def fromName(cls, conn, name): + """ use the saved data for reloading the instance """ + instdir = model.InstanceDirectory() + instance = instdir.get(name) + return cls(conn=conn, name=name, data=instance) + + def set_state(self, state_code, state_description=None): + self.datamodel['state'] = state_code + if not state_description: + state_description = STATE_NAMES[state_code] + self.datamodel['state_description'] = state_description + self.datamodel.save() + + @property + def state(self): + # it is a string in datamodel + return int(self.datamodel['state']) + + @property + def name(self): + return self.datamodel['name'] + + def is_pending(self): + return (self.state == Instance.NOSTATE or self.state == 'pending') + + def is_destroyed(self): + return self.state == Instance.SHUTOFF + + def is_running(self): + logging.debug("Instance state is: %s" % self.state) + return (self.state == Instance.RUNNING or self.state == 'running') + + def describe(self): + return self.datamodel + + def info(self): + logging.debug("Getting info for dom %s" % self.name) + virt_dom = self._conn.lookupByName(self.name) + (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info() + return {'state': state, + 'max_mem': max_mem, + 'mem': mem, + 'num_cpu': num_cpu, + 'cpu_time': cpu_time, + 'node_name': FLAGS.node_name} + + def basepath(self, path=''): + return os.path.abspath(os.path.join(self.datamodel['basepath'], path)) + + def update_state(self): + self.datamodel.update(self.info()) + self.set_state(self.state) + self.datamodel.save() # Extra, but harmless + + @exception.wrap_exception + def destroy(self): + if self.is_destroyed(): + self.datamodel.destroy() + raise exception.Error('trying to destroy already destroyed' + ' instance: %s' % self.name) + + self.set_state(Instance.NOSTATE, 'shutting_down') + try: + virt_dom = self._conn.lookupByName(self.name) + virt_dom.destroy() + except Exception, _err: + pass + # If the instance is already terminated, we're still happy + d = defer.Deferred() + d.addCallback(lambda x: self._cleanup()) + d.addCallback(lambda x: self.datamodel.destroy()) + # TODO(termie): short-circuit me for tests + # WE'LL save this for when we do shutdown, + # instead of destroy - but destroy returns immediately + timer = task.LoopingCall(f=None) + def _wait_for_shutdown(): + try: + self.update_state() + if self.state == Instance.SHUTDOWN: + timer.stop() + d.callback(None) + except Exception: + self.set_state(Instance.SHUTDOWN) + timer.stop() + d.callback(None) + timer.f = _wait_for_shutdown + timer.start(interval=0.5, now=True) + return d + + def _cleanup(self): + target = os.path.abspath(self.datamodel['basepath']) + logging.info("Deleting instance files at %s", target) + shutil.rmtree(target) + + @defer.inlineCallbacks + @exception.wrap_exception + def reboot(self): + if not self.is_running(): + raise exception.Error( + 'trying to reboot a non-running' + 'instance: %s (state: %s)' % (self.name, self.state)) + + logging.debug('rebooting instance %s' % self.name) + self.set_state(Instance.NOSTATE, 'rebooting') + yield 
self._conn.lookupByName(self.name).destroy() + self._conn.createXML(self.toXml(), 0) + + d = defer.Deferred() + timer = task.LoopingCall(f=None) + def _wait_for_reboot(): + try: + self.update_state() + if self.is_running(): + logging.debug('rebooted instance %s' % self.name) + timer.stop() + d.callback(None) + except Exception: + self.set_state(Instance.SHUTDOWN) + timer.stop() + d.callback(None) + timer.f = _wait_for_reboot + timer.start(interval=0.5, now=True) + yield d + + def _fetch_s3_image(self, image, path): + url = _image_url('%s/image' % image) + d = process.simple_execute( + 'curl --silent %s -o %s' % (url, path)) + return d + + def _fetch_local_image(self, image, path): + source = _image_path('%s/image' % image) + d = process.simple_execute('cp %s %s' % (source, path)) + return d + + @defer.inlineCallbacks + def _create_image(self, libvirt_xml): + # syntactic nicety + data = self.datamodel + basepath = self.basepath + + # ensure directories exist and are writable + yield process.simple_execute( + 'mkdir -p %s' % basepath()) + yield process.simple_execute( + 'chmod 0777 %s' % basepath()) + + + # TODO(termie): these are blocking calls, it would be great + # if they weren't. + logging.info('Creating image for: %s', data['instance_id']) + f = open(basepath('libvirt.xml'), 'w') + f.write(libvirt_xml) + f.close() + + if FLAGS.fake_libvirt: + logging.info('fake_libvirt, nothing to do for create_image') + raise defer.returnValue(None); + + if FLAGS.use_s3: + _fetch_file = self._fetch_s3_image + else: + _fetch_file = self._fetch_local_image + + if not os.path.exists(basepath('disk')): + yield _fetch_file(data['image_id'], basepath('disk-raw')) + if not os.path.exists(basepath('kernel')): + yield _fetch_file(data['kernel_id'], basepath('kernel')) + if not os.path.exists(basepath('ramdisk')): + yield _fetch_file(data['ramdisk_id'], basepath('ramdisk')) + + execute = lambda cmd, input=None: \ + process.simple_execute(cmd=cmd, + input=input, + error_ok=1) + + key = data['key_data'] + net = None + if FLAGS.simple_network: + with open(FLAGS.simple_network_template) as f: + net = f.read() % {'address': data['private_dns_name'], + 'network': FLAGS.simple_network_network, + 'netmask': FLAGS.simple_network_netmask, + 'gateway': FLAGS.simple_network_gateway, + 'broadcast': FLAGS.simple_network_broadcast, + 'dns': FLAGS.simple_network_dns} + if key or net: + logging.info('Injecting data into image %s', data['image_id']) + yield disk.inject_data(basepath('disk-raw'), key, net, execute=execute) + + if os.path.exists(basepath('disk')): + yield process.simple_execute( + 'rm -f %s' % basepath('disk')) + + bytes = (INSTANCE_TYPES[data['instance_type']]['local_gb'] + * 1024 * 1024 * 1024) + yield disk.partition( + basepath('disk-raw'), basepath('disk'), bytes, execute=execute) + + @defer.inlineCallbacks + @exception.wrap_exception + def spawn(self): + self.set_state(Instance.NOSTATE, 'spawning') + logging.debug("Starting spawn in Instance") + + xml = self.toXml() + self.set_state(Instance.NOSTATE, 'launching') + logging.info('self %s', self) + try: + yield self._create_image(xml) + self._conn.createXML(xml, 0) + # TODO(termie): this should actually register + # a callback to check for successful boot + logging.debug("Instance is running") + + local_d = defer.Deferred() + timer = task.LoopingCall(f=None) + def _wait_for_boot(): + try: + self.update_state() + if self.is_running(): + logging.debug('booted instance %s' % self.name) + timer.stop() + local_d.callback(None) + except Exception: + 
self.set_state(Instance.SHUTDOWN) + logging.error('Failed to boot instance %s' % self.name) + timer.stop() + local_d.callback(None) + timer.f = _wait_for_boot + timer.start(interval=0.5, now=True) + except Exception, ex: + logging.debug(ex) + self.set_state(Instance.SHUTDOWN) + + @exception.wrap_exception + def console_output(self): + if not FLAGS.fake_libvirt: + fname = os.path.abspath( + os.path.join(self.datamodel['basepath'], 'console.log')) + with open(fname, 'r') as f: + console = f.read() + else: + console = 'FAKE CONSOLE OUTPUT' + return defer.succeed(console) + +STATE_NAMES = { + Instance.NOSTATE : 'pending', + Instance.RUNNING : 'running', + Instance.BLOCKED : 'blocked', + Instance.PAUSED : 'paused', + Instance.SHUTDOWN : 'shutdown', + Instance.SHUTOFF : 'shutdown', + Instance.CRASHED : 'crashed', +} diff --git a/nova/compute/node.py b/nova/compute/node.py deleted file mode 100644 index 7cae86d02..000000000 --- a/nova/compute/node.py +++ /dev/null @@ -1,581 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Compute Node: - - Runs on each compute node, managing the - hypervisor using libvirt. - -""" - -import base64 -import json -import logging -import os -import shutil -import sys -from twisted.internet import defer -from twisted.internet import task -from twisted.application import service - - -try: - import libvirt -except Exception, err: - logging.warning('no libvirt found') - -from nova import exception -from nova import fakevirt -from nova import flags -from nova import process -from nova import utils -from nova.compute import disk -from nova.compute import model -from nova.compute import network -from nova.objectstore import image # for image_path flag -from nova.volume import storage - - -FLAGS = flags.FLAGS -flags.DEFINE_string('libvirt_xml_template', - utils.abspath('compute/libvirt.xml.template'), - 'Libvirt XML Template') -flags.DEFINE_bool('use_s3', True, - 'whether to get images from s3 or use local copy') -flags.DEFINE_string('instances_path', utils.abspath('../instances'), - 'where instances are stored on disk') - -INSTANCE_TYPES = {} -INSTANCE_TYPES['m1.tiny'] = {'memory_mb': 512, 'vcpus': 1, 'local_gb': 0} -INSTANCE_TYPES['m1.small'] = {'memory_mb': 1024, 'vcpus': 1, 'local_gb': 10} -INSTANCE_TYPES['m1.medium'] = {'memory_mb': 2048, 'vcpus': 2, 'local_gb': 10} -INSTANCE_TYPES['m1.large'] = {'memory_mb': 4096, 'vcpus': 4, 'local_gb': 10} -INSTANCE_TYPES['m1.xlarge'] = {'memory_mb': 8192, 'vcpus': 4, 'local_gb': 10} -INSTANCE_TYPES['c1.medium'] = {'memory_mb': 2048, 'vcpus': 4, 'local_gb': 10} - - -def _image_path(path=''): - return os.path.join(FLAGS.images_path, path) - - -def _image_url(path): - return "%s:%s/_images/%s" % (FLAGS.s3_host, FLAGS.s3_port, path) - - -class Node(object, service.Service): - """ - Manages the running instances. 
- """ - def __init__(self): - """ load configuration options for this node and connect to libvirt """ - super(Node, self).__init__() - self._instances = {} - self._conn = self._get_connection() - self.instdir = model.InstanceDirectory() - # TODO(joshua): This needs to ensure system state, specifically: modprobe aoe - - def _get_connection(self): - """ returns a libvirt connection object """ - # TODO(termie): maybe lazy load after initial check for permissions - # TODO(termie): check whether we can be disconnected - if FLAGS.fake_libvirt: - conn = fakevirt.FakeVirtConnection.instance() - else: - auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], - 'root', - None] - conn = libvirt.openAuth('qemu:///system', auth, 0) - if conn == None: - logging.error('Failed to open connection to the hypervisor') - sys.exit(1) - return conn - - def noop(self): - """ simple test of an AMQP message call """ - return defer.succeed('PONG') - - def get_instance(self, instance_id): - # inst = self.instdir.get(instance_id) - # return inst - if self.instdir.exists(instance_id): - return Instance.fromName(self._conn, instance_id) - return None - - @exception.wrap_exception - def adopt_instances(self): - """ if there are instances already running, adopt them """ - return defer.succeed(0) - instance_names = [self._conn.lookupByID(x).name() - for x in self._conn.listDomainsID()] - for name in instance_names: - try: - new_inst = Instance.fromName(self._conn, name) - new_inst.update_state() - except: - pass - return defer.succeed(len(self._instances)) - - @exception.wrap_exception - def describe_instances(self): - retval = {} - for inst in self.instdir.by_node(FLAGS.node_name): - retval[inst['instance_id']] = ( - Instance.fromName(self._conn, inst['instance_id'])) - return retval - - @defer.inlineCallbacks - def report_state(self, nodename, daemon): - # TODO(termie): make this pattern be more elegant. -todd - try: - record = model.Daemon(nodename, daemon) - record.heartbeat() - if getattr(self, "model_disconnected", False): - self.model_disconnected = False - logging.error("Recovered model server connection!") - - except model.ConnectionError, ex: - if not getattr(self, "model_disconnected", False): - self.model_disconnected = True - logging.exception("model server went away") - yield - - # @exception.wrap_exception - def run_instance(self, instance_id, **_kwargs): - """ launch a new instance with specified options """ - logging.debug("Starting instance %s..." 
% (instance_id)) - inst = self.instdir.get(instance_id) - if not FLAGS.simple_network: - # TODO: Get the real security group of launch in here - security_group = "default" - net = network.BridgedNetwork.get_network_for_project(inst['user_id'], - inst['project_id'], - security_group).express() - inst['node_name'] = FLAGS.node_name - inst.save() - # TODO(vish) check to make sure the availability zone matches - new_inst = Instance(self._conn, name=instance_id, data=inst) - logging.info("Instances current state is %s", new_inst.state) - if new_inst.is_running(): - raise exception.Error("Instance is already running") - d = new_inst.spawn() - return d - - @exception.wrap_exception - def terminate_instance(self, instance_id): - """ terminate an instance on this machine """ - logging.debug("Got told to terminate instance %s" % instance_id) - instance = self.get_instance(instance_id) - # inst = self.instdir.get(instance_id) - if not instance: - raise exception.Error( - 'trying to terminate unknown instance: %s' % instance_id) - d = instance.destroy() - # d.addCallback(lambda x: inst.destroy()) - return d - - @exception.wrap_exception - def reboot_instance(self, instance_id): - """ reboot an instance on this server - KVM doesn't support reboot, so we terminate and restart """ - instance = self.get_instance(instance_id) - if not instance: - raise exception.Error( - 'trying to reboot unknown instance: %s' % instance_id) - return instance.reboot() - - @defer.inlineCallbacks - @exception.wrap_exception - def get_console_output(self, instance_id): - """ send the console output for an instance """ - logging.debug("Getting console output for %s" % (instance_id)) - inst = self.instdir.get(instance_id) - instance = self.get_instance(instance_id) - if not instance: - raise exception.Error( - 'trying to get console log for unknown: %s' % instance_id) - rv = yield instance.console_output() - # TODO(termie): this stuff belongs in the API layer, no need to - # munge the data we send to ourselves - output = {"InstanceId" : instance_id, - "Timestamp" : "2", - "output" : base64.b64encode(rv)} - defer.returnValue(output) - - @defer.inlineCallbacks - @exception.wrap_exception - def attach_volume(self, instance_id = None, - volume_id = None, mountpoint = None): - volume = storage.get_volume(volume_id) - yield self._init_aoe() - yield process.simple_execute( - "sudo virsh attach-disk %s /dev/etherd/%s %s" % - (instance_id, - volume['aoe_device'], - mountpoint.rpartition('/dev/')[2])) - volume.finish_attach() - defer.returnValue(True) - - @defer.inlineCallbacks - def _init_aoe(self): - yield process.simple_execute("sudo aoe-discover") - yield process.simple_execute("sudo aoe-stat") - - @defer.inlineCallbacks - @exception.wrap_exception - def detach_volume(self, instance_id, volume_id): - """ detach a volume from an instance """ - # despite the documentation, virsh detach-disk just wants the device - # name without the leading /dev/ - volume = storage.get_volume(volume_id) - target = volume['mountpoint'].rpartition('/dev/')[2] - yield process.simple_execute( - "sudo virsh detach-disk %s %s " % (instance_id, target)) - volume.finish_detach() - defer.returnValue(True) - - -class Group(object): - def __init__(self, group_id): - self.group_id = group_id - - -class ProductCode(object): - def __init__(self, product_code): - self.product_code = product_code - - -class Instance(object): - - NOSTATE = 0x00 - RUNNING = 0x01 - BLOCKED = 0x02 - PAUSED = 0x03 - SHUTDOWN = 0x04 - SHUTOFF = 0x05 - CRASHED = 0x06 - - def __init__(self, 
conn, name, data): - """ spawn an instance with a given name """ - self._conn = conn - # TODO(vish): this can be removed after data has been updated - # data doesn't seem to have a working iterator so in doesn't work - if data.get('owner_id', None) is not None: - data['user_id'] = data['owner_id'] - data['project_id'] = data['owner_id'] - self.datamodel = data - - size = data.get('instance_type', FLAGS.default_instance_type) - if size not in INSTANCE_TYPES: - raise exception.Error('invalid instance type: %s' % size) - - self.datamodel.update(INSTANCE_TYPES[size]) - - self.datamodel['name'] = name - self.datamodel['instance_id'] = name - self.datamodel['basepath'] = data.get( - 'basepath', os.path.abspath( - os.path.join(FLAGS.instances_path, self.name))) - self.datamodel['memory_kb'] = int(self.datamodel['memory_mb']) * 1024 - self.datamodel.setdefault('image_id', FLAGS.default_image) - self.datamodel.setdefault('kernel_id', FLAGS.default_kernel) - self.datamodel.setdefault('ramdisk_id', FLAGS.default_ramdisk) - self.datamodel.setdefault('project_id', self.datamodel['user_id']) - self.datamodel.setdefault('bridge_name', None) - #self.datamodel.setdefault('key_data', None) - #self.datamodel.setdefault('key_name', None) - #self.datamodel.setdefault('addressing_type', None) - - # TODO(joshua) - The ugly non-flat ones - self.datamodel['groups'] = data.get('security_group', 'default') - # TODO(joshua): Support product codes somehow - self.datamodel.setdefault('product_codes', None) - - self.datamodel.save() - logging.debug("Finished init of Instance with id of %s" % name) - - def toXml(self): - # TODO(termie): cache? - logging.debug("Starting the toXML method") - libvirt_xml = open(FLAGS.libvirt_xml_template).read() - xml_info = self.datamodel.copy() - # TODO(joshua): Make this xml express the attached disks as well - - # TODO(termie): lazy lazy hack because xml is annoying - xml_info['nova'] = json.dumps(self.datamodel.copy()) - libvirt_xml = libvirt_xml % xml_info - logging.debug("Finished the toXML method") - - return libvirt_xml - - @classmethod - def fromName(cls, conn, name): - """ use the saved data for reloading the instance """ - instdir = model.InstanceDirectory() - instance = instdir.get(name) - return cls(conn=conn, name=name, data=instance) - - def set_state(self, state_code, state_description=None): - self.datamodel['state'] = state_code - if not state_description: - state_description = STATE_NAMES[state_code] - self.datamodel['state_description'] = state_description - self.datamodel.save() - - @property - def state(self): - # it is a string in datamodel - return int(self.datamodel['state']) - - @property - def name(self): - return self.datamodel['name'] - - def is_pending(self): - return (self.state == Instance.NOSTATE or self.state == 'pending') - - def is_destroyed(self): - return self.state == Instance.SHUTOFF - - def is_running(self): - logging.debug("Instance state is: %s" % self.state) - return (self.state == Instance.RUNNING or self.state == 'running') - - def describe(self): - return self.datamodel - - def info(self): - logging.debug("Getting info for dom %s" % self.name) - virt_dom = self._conn.lookupByName(self.name) - (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info() - return {'state': state, - 'max_mem': max_mem, - 'mem': mem, - 'num_cpu': num_cpu, - 'cpu_time': cpu_time, - 'node_name': FLAGS.node_name} - - def basepath(self, path=''): - return os.path.abspath(os.path.join(self.datamodel['basepath'], path)) - - def update_state(self): - 
self.datamodel.update(self.info()) - self.set_state(self.state) - self.datamodel.save() # Extra, but harmless - - @exception.wrap_exception - def destroy(self): - if self.is_destroyed(): - self.datamodel.destroy() - raise exception.Error('trying to destroy already destroyed' - ' instance: %s' % self.name) - - self.set_state(Instance.NOSTATE, 'shutting_down') - try: - virt_dom = self._conn.lookupByName(self.name) - virt_dom.destroy() - except Exception, _err: - pass - # If the instance is already terminated, we're still happy - d = defer.Deferred() - d.addCallback(lambda x: self._cleanup()) - d.addCallback(lambda x: self.datamodel.destroy()) - # TODO(termie): short-circuit me for tests - # WE'LL save this for when we do shutdown, - # instead of destroy - but destroy returns immediately - timer = task.LoopingCall(f=None) - def _wait_for_shutdown(): - try: - self.update_state() - if self.state == Instance.SHUTDOWN: - timer.stop() - d.callback(None) - except Exception: - self.set_state(Instance.SHUTDOWN) - timer.stop() - d.callback(None) - timer.f = _wait_for_shutdown - timer.start(interval=0.5, now=True) - return d - - def _cleanup(self): - target = os.path.abspath(self.datamodel['basepath']) - logging.info("Deleting instance files at %s", target) - shutil.rmtree(target) - - @defer.inlineCallbacks - @exception.wrap_exception - def reboot(self): - if not self.is_running(): - raise exception.Error( - 'trying to reboot a non-running' - 'instance: %s (state: %s)' % (self.name, self.state)) - - logging.debug('rebooting instance %s' % self.name) - self.set_state(Instance.NOSTATE, 'rebooting') - yield self._conn.lookupByName(self.name).destroy() - self._conn.createXML(self.toXml(), 0) - - d = defer.Deferred() - timer = task.LoopingCall(f=None) - def _wait_for_reboot(): - try: - self.update_state() - if self.is_running(): - logging.debug('rebooted instance %s' % self.name) - timer.stop() - d.callback(None) - except Exception: - self.set_state(Instance.SHUTDOWN) - timer.stop() - d.callback(None) - timer.f = _wait_for_reboot - timer.start(interval=0.5, now=True) - yield d - - def _fetch_s3_image(self, image, path): - url = _image_url('%s/image' % image) - d = process.simple_execute( - 'curl --silent %s -o %s' % (url, path)) - return d - - def _fetch_local_image(self, image, path): - source = _image_path('%s/image' % image) - d = process.simple_execute('cp %s %s' % (source, path)) - return d - - @defer.inlineCallbacks - def _create_image(self, libvirt_xml): - # syntactic nicety - data = self.datamodel - basepath = self.basepath - - # ensure directories exist and are writable - yield process.simple_execute( - 'mkdir -p %s' % basepath()) - yield process.simple_execute( - 'chmod 0777 %s' % basepath()) - - - # TODO(termie): these are blocking calls, it would be great - # if they weren't. 
- logging.info('Creating image for: %s', data['instance_id']) - f = open(basepath('libvirt.xml'), 'w') - f.write(libvirt_xml) - f.close() - - if FLAGS.fake_libvirt: - logging.info('fake_libvirt, nothing to do for create_image') - raise defer.returnValue(None); - - if FLAGS.use_s3: - _fetch_file = self._fetch_s3_image - else: - _fetch_file = self._fetch_local_image - - if not os.path.exists(basepath('disk')): - yield _fetch_file(data['image_id'], basepath('disk-raw')) - if not os.path.exists(basepath('kernel')): - yield _fetch_file(data['kernel_id'], basepath('kernel')) - if not os.path.exists(basepath('ramdisk')): - yield _fetch_file(data['ramdisk_id'], basepath('ramdisk')) - - execute = lambda cmd, input=None: \ - process.simple_execute(cmd=cmd, - input=input, - error_ok=1) - - key = data['key_data'] - net = None - if FLAGS.simple_network: - with open(FLAGS.simple_network_template) as f: - net = f.read() % {'address': data['private_dns_name'], - 'network': FLAGS.simple_network_network, - 'netmask': FLAGS.simple_network_netmask, - 'gateway': FLAGS.simple_network_gateway, - 'broadcast': FLAGS.simple_network_broadcast, - 'dns': FLAGS.simple_network_dns} - if key or net: - logging.info('Injecting data into image %s', data['image_id']) - yield disk.inject_data(basepath('disk-raw'), key, net, execute=execute) - - if os.path.exists(basepath('disk')): - yield process.simple_execute( - 'rm -f %s' % basepath('disk')) - - bytes = (INSTANCE_TYPES[data['instance_type']]['local_gb'] - * 1024 * 1024 * 1024) - yield disk.partition( - basepath('disk-raw'), basepath('disk'), bytes, execute=execute) - - @defer.inlineCallbacks - @exception.wrap_exception - def spawn(self): - self.set_state(Instance.NOSTATE, 'spawning') - logging.debug("Starting spawn in Instance") - - xml = self.toXml() - self.set_state(Instance.NOSTATE, 'launching') - logging.info('self %s', self) - try: - yield self._create_image(xml) - self._conn.createXML(xml, 0) - # TODO(termie): this should actually register - # a callback to check for successful boot - logging.debug("Instance is running") - - local_d = defer.Deferred() - timer = task.LoopingCall(f=None) - def _wait_for_boot(): - try: - self.update_state() - if self.is_running(): - logging.debug('booted instance %s' % self.name) - timer.stop() - local_d.callback(None) - except Exception: - self.set_state(Instance.SHUTDOWN) - logging.error('Failed to boot instance %s' % self.name) - timer.stop() - local_d.callback(None) - timer.f = _wait_for_boot - timer.start(interval=0.5, now=True) - except Exception, ex: - logging.debug(ex) - self.set_state(Instance.SHUTDOWN) - - @exception.wrap_exception - def console_output(self): - if not FLAGS.fake_libvirt: - fname = os.path.abspath( - os.path.join(self.datamodel['basepath'], 'console.log')) - with open(fname, 'r') as f: - console = f.read() - else: - console = 'FAKE CONSOLE OUTPUT' - return defer.succeed(console) - -STATE_NAMES = { - Instance.NOSTATE : 'pending', - Instance.RUNNING : 'running', - Instance.BLOCKED : 'blocked', - Instance.PAUSED : 'paused', - Instance.SHUTDOWN : 'shutdown', - Instance.SHUTOFF : 'shutdown', - Instance.CRASHED : 'crashed', -} diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 3b7b4804b..eaa608b1e 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -38,9 +38,9 @@ from nova.auth import rbac from nova.auth import users from nova.compute import model from nova.compute import network -from nova.compute import node +from nova.compute import computenode from nova.endpoint import images -from 
nova.volume import storage +from nova.volume import volumenode FLAGS = flags.FLAGS @@ -76,7 +76,7 @@ class CloudController(object): def volumes(self): """ returns a list of all volumes """ for volume_id in datastore.Redis.instance().smembers("volumes"): - volume = storage.get_volume(volume_id) + volume = volumenode.get_volume(volume_id) yield volume def __str__(self): @@ -103,7 +103,7 @@ class CloudController(object): result = {} for instance in self.instdir.all: if instance['project_id'] == project_id: - line = '%s slots=%d' % (instance['private_dns_name'], node.INSTANCE_TYPES[instance['instance_type']]['vcpus']) + line = '%s slots=%d' % (instance['private_dns_name'], computenode.INSTANCE_TYPES[instance['instance_type']]['vcpus']) if instance['key_name'] in result: result[instance['key_name']].append(line) else: @@ -296,8 +296,8 @@ class CloudController(object): @rbac.allow('projectmanager', 'sysadmin') def create_volume(self, context, size, **kwargs): - # TODO(vish): refactor this to create the volume object here and tell storage to create it - res = rpc.call(FLAGS.storage_topic, {"method": "create_volume", + # TODO(vish): refactor this to create the volume object here and tell volumenode to create it + res = rpc.call(FLAGS.volume_topic, {"method": "create_volume", "args" : {"size": size, "user_id": context.user.id, "project_id": context.project.id}}) @@ -331,7 +331,7 @@ class CloudController(object): raise exception.NotFound('Instance %s could not be found' % instance_id) def _get_volume(self, context, volume_id): - volume = storage.get_volume(volume_id) + volume = volumenode.get_volume(volume_id) if context.user.is_admin() or volume['project_id'] == context.project.id: return volume raise exception.NotFound('Volume %s could not be found' % volume_id) @@ -628,8 +628,8 @@ class CloudController(object): def delete_volume(self, context, volume_id, **kwargs): # TODO: return error if not authorized volume = self._get_volume(context, volume_id) - storage_node = volume['node_name'] - rpc.cast('%s.%s' % (FLAGS.storage_topic, storage_node), + volume_node = volume['node_name'] + rpc.cast('%s.%s' % (FLAGS.volume_topic, volume_node), {"method": "delete_volume", "args" : {"volume_id": volume_id}}) return defer.succeed(True) diff --git a/nova/endpoint/rackspace.py b/nova/endpoint/rackspace.py index 9208ddab7..08e435c5d 100644 --- a/nova/endpoint/rackspace.py +++ b/nova/endpoint/rackspace.py @@ -39,7 +39,6 @@ from nova.compute import model from nova.compute import network from nova.endpoint import images from nova.endpoint import wsgi -from nova.volume import storage FLAGS = flags.FLAGS diff --git a/nova/flags.py b/nova/flags.py index 06ea1e007..ffb395f13 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -40,7 +40,9 @@ DEFINE_integer('s3_port', 3333, 's3 port') DEFINE_string('s3_host', '127.0.0.1', 's3 host') #DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on') -DEFINE_string('storage_topic', 'storage', 'the topic storage nodes listen on') +DEFINE_string('volume_topic', 'volume', 'the topic volume nodes listen on') +DEFINE_string('network_topic', 'network', 'the topic network nodes listen on') + DEFINE_bool('fake_libvirt', False, 'whether to use a fake libvirt or not') DEFINE_bool('verbose', False, 'show debug output') diff --git a/nova/network/__init__.py b/nova/network/__init__.py new file mode 100644 index 000000000..dcc54db09 --- /dev/null +++ b/nova/network/__init__.py @@ -0,0 +1,32 @@ +# vim: tabstop=4 
shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +:mod:`nova.network` -- Network Nodes +===================================================== + +.. automodule:: nova.network + :platform: Unix + :synopsis: Network is responsible for managing networking +.. moduleauthor:: Jesse Andrews +.. moduleauthor:: Devin Carlen +.. moduleauthor:: Vishvananda Ishaya +.. moduleauthor:: Joshua McKenty +.. moduleauthor:: Manish Singh +.. moduleauthor:: Andy Smith +""" diff --git a/nova/network/networknode.py b/nova/network/networknode.py new file mode 100644 index 000000000..e5a346551 --- /dev/null +++ b/nova/network/networknode.py @@ -0,0 +1,35 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Network Nodes are responsible for allocating ips and setting up network +""" + +import logging + +from nova import flags +from nova import node + + +FLAGS = flags.FLAGS + +class NetworkNode(node.Node): + """Allocates ips and sets up networks""" + + def __init__(self): + logging.debug("Network node working") diff --git a/nova/node.py b/nova/node.py new file mode 100644 index 000000000..852344da9 --- /dev/null +++ b/nova/node.py @@ -0,0 +1,103 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
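As a rough illustration of the convention the cloud endpoint changes above rely on (and that the generic Node class introduced just below subscribes to): each service listens on a bare topic such as 'volume' plus a directed 'topic.nodename' queue, and every request is a plain dict with 'method' and 'args' keys. The helpers and sample values here are illustrative only and are not part of the patch.

def make_message(method, **args):
    """Build the {'method': ..., 'args': {...}} envelope passed to rpc.call/rpc.cast."""
    return {"method": method, "args": args}

def directed_topic(topic, node_name):
    """Queue name for operations that must run on the host owning the resource."""
    return '%s.%s' % (topic, node_name)

if __name__ == '__main__':
    print(make_message('create_volume', size='5', user_id='fake', project_id='fake'))
    print('%s: %s' % (directed_topic('volume', 'host1'),
                      make_message('delete_volume', volume_id='vol-fake')))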
+ +""" +Generic Node baseclass for all workers that run on hosts +""" + +import inspect +import logging +import os + +from twisted.internet import defer +from twisted.internet import task +from twisted.application import service + +from nova import datastore +from nova import flags +from nova import rpc +from nova.compute import model + + +FLAGS = flags.FLAGS + +flags.DEFINE_integer('report_interval', 10, + 'seconds between nodes reporting state to cloud', + lower_bound=1) + +class Node(object, service.Service): + """Base class for workers that run on hosts""" + + @classmethod + def create(cls, + report_interval=None, # defaults to flag + bin_name=None, # defaults to basename of executable + topic=None): # defaults to basename - "nova-" part + """Instantiates class and passes back application object""" + if not report_interval: + # NOTE(vish): set here because if it is set to flag in the + # parameter list, it wrongly uses the default + report_interval = FLAGS.report_interval + # NOTE(vish): magic to automatically determine bin_name and topic + if not bin_name: + bin_name = os.path.basename(inspect.stack()[-1][1]) + if not topic: + topic = bin_name.rpartition("nova-")[2] + logging.warn("Starting %s node" % topic) + node_instance = cls() + + conn = rpc.Connection.instance() + consumer_all = rpc.AdapterConsumer( + connection=conn, + topic='%s' % topic, + proxy=node_instance) + + consumer_node = rpc.AdapterConsumer( + connection=conn, + topic='%s.%s' % (topic, FLAGS.node_name), + proxy=node_instance) + + pulse = task.LoopingCall(node_instance.report_state, + FLAGS.node_name, + bin_name) + pulse.start(interval=report_interval, now=False) + + consumer_all.attach_to_twisted() + consumer_node.attach_to_twisted() + + # This is the parent service that twistd will be looking for when it + # parses this file, return it so that we can get it into globals below + application = service.Application(bin_name) + node_instance.setServiceParent(application) + return application + + @defer.inlineCallbacks + def report_state(self, nodename, daemon): + # TODO(termie): make this pattern be more elegant. 
-todd + try: + record = model.Daemon(nodename, daemon) + record.heartbeat() + if getattr(self, "model_disconnected", False): + self.model_disconnected = False + logging.error("Recovered model server connection!") + + except datastore.ConnectionError, ex: + if not getattr(self, "model_disconnected", False): + self.model_disconnected = True + logging.exception("model server went away") + yield diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py index b8614fdc8..7ab2c257a 100644 --- a/nova/tests/cloud_unittest.py +++ b/nova/tests/cloud_unittest.py @@ -28,7 +28,7 @@ from nova import flags from nova import rpc from nova import test from nova.auth import users -from nova.compute import node +from nova.compute import computenode from nova.endpoint import api from nova.endpoint import cloud @@ -54,7 +54,7 @@ class CloudTestCase(test.BaseTestCase): self.injected.append(self.cloud_consumer.attach_to_tornado(self.ioloop)) # set up a node - self.node = node.Node() + self.node = computenode.ComputeNode() self.node_consumer = rpc.AdapterConsumer(connection=self.conn, topic=FLAGS.compute_topic, proxy=self.node) diff --git a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py new file mode 100644 index 000000000..4c0f1afb3 --- /dev/null +++ b/nova/tests/compute_unittest.py @@ -0,0 +1,128 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
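A minimal, self-contained sketch of the heartbeat wiring done in Node.create() above: a twisted LoopingCall invokes report_state every report_interval seconds, and because report_state is an inlineCallbacks generator the next tick waits for its Deferred. The worker below is a stand-in that only counts beats; the real report_state writes a Daemon record to the datastore.

from twisted.internet import defer, reactor, task

class FakeWorker(object):
    """Stand-in for Node; no datastore, just a beat counter."""
    def __init__(self):
        self.beats = 0

    @defer.inlineCallbacks
    def report_state(self, nodename, daemon):
        self.beats += 1
        print('%s/%s heartbeat #%d' % (nodename, daemon, self.beats))
        yield  # keep the generator form used by the real report_state

if __name__ == '__main__':
    worker = FakeWorker()
    pulse = task.LoopingCall(worker.report_state, 'host1', 'nova-fake')
    pulse.start(interval=1.0, now=True)
    reactor.callLater(3.5, reactor.stop)
    reactor.run()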
+ +import logging +import time +from twisted.internet import defer +from xml.etree import ElementTree + +from nova import exception +from nova import flags +from nova import test +from nova import utils +from nova.compute import model +from nova.compute import computenode + + +FLAGS = flags.FLAGS + + +class InstanceXmlTestCase(test.TrialTestCase): + # @defer.inlineCallbacks + def test_serialization(self): + # TODO: Reimplement this, it doesn't make sense in redis-land + return + + # instance_id = 'foo' + # first_node = node.Node() + # inst = yield first_node.run_instance(instance_id) + # + # # force the state so that we can verify that it changes + # inst._s['state'] = node.Instance.NOSTATE + # xml = inst.toXml() + # self.assert_(ElementTree.parse(StringIO.StringIO(xml))) + # + # second_node = node.Node() + # new_inst = node.Instance.fromXml(second_node._conn, pool=second_node._pool, xml=xml) + # self.assertEqual(new_inst.state, node.Instance.RUNNING) + # rv = yield first_node.terminate_instance(instance_id) + + +class ComputeConnectionTestCase(test.TrialTestCase): + def setUp(self): + logging.getLogger().setLevel(logging.DEBUG) + super(ComputeConnectionTestCase, self).setUp() + self.flags(fake_libvirt=True, + fake_storage=True, + fake_users=True) + self.node = computenode.ComputeNode() + + def create_instance(self): + instdir = model.InstanceDirectory() + inst = instdir.new() + # TODO(ja): add ami, ari, aki, user_data + inst['reservation_id'] = 'r-fakeres' + inst['launch_time'] = '10' + inst['user_id'] = 'fake' + inst['project_id'] = 'fake' + inst['instance_type'] = 'm1.tiny' + inst['node_name'] = FLAGS.node_name + inst['mac_address'] = utils.generate_mac() + inst['ami_launch_index'] = 0 + inst.save() + return inst['instance_id'] + + @defer.inlineCallbacks + def test_run_describe_terminate(self): + instance_id = self.create_instance() + + rv = yield self.node.run_instance(instance_id) + + rv = yield self.node.describe_instances() + logging.info("Running instances: %s", rv) + self.assertEqual(rv[instance_id].name, instance_id) + + rv = yield self.node.terminate_instance(instance_id) + + rv = yield self.node.describe_instances() + logging.info("After terminating instances: %s", rv) + self.assertEqual(rv, {}) + + @defer.inlineCallbacks + def test_reboot(self): + instance_id = self.create_instance() + rv = yield self.node.run_instance(instance_id) + + rv = yield self.node.describe_instances() + self.assertEqual(rv[instance_id].name, instance_id) + + yield self.node.reboot_instance(instance_id) + + rv = yield self.node.describe_instances() + self.assertEqual(rv[instance_id].name, instance_id) + rv = yield self.node.terminate_instance(instance_id) + + @defer.inlineCallbacks + def test_console_output(self): + instance_id = self.create_instance() + rv = yield self.node.run_instance(instance_id) + + console = yield self.node.get_console_output(instance_id) + self.assert_(console) + rv = yield self.node.terminate_instance(instance_id) + + @defer.inlineCallbacks + def test_run_instance_existing(self): + instance_id = self.create_instance() + rv = yield self.node.run_instance(instance_id) + + rv = yield self.node.describe_instances() + self.assertEqual(rv[instance_id].name, instance_id) + + self.assertRaises(exception.Error, self.node.run_instance, instance_id) + rv = yield self.node.terminate_instance(instance_id) diff --git a/nova/tests/future_unittest.py b/nova/tests/future_unittest.py deleted file mode 100644 index da5470ffe..000000000 --- a/nova/tests/future_unittest.py +++ /dev/null @@ -1,75 
+0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging -import mox -import StringIO -import time -from tornado import ioloop -from twisted.internet import defer -import unittest -from xml.etree import ElementTree - -from nova import cloud -from nova import exception -from nova import flags -from nova import node -from nova import rpc -from nova import test - - -FLAGS = flags.FLAGS - - -class AdminTestCase(test.BaseTestCase): - def setUp(self): - super(AdminTestCase, self).setUp() - self.flags(fake_libvirt=True, - fake_rabbit=True) - - self.conn = rpc.Connection.instance() - - logging.getLogger().setLevel(logging.INFO) - - # set up our cloud - self.cloud = cloud.CloudController() - self.cloud_consumer = rpc.AdapterConsumer(connection=self.conn, - topic=FLAGS.cloud_topic, - proxy=self.cloud) - self.injected.append(self.cloud_consumer.attach_to_tornado(self.ioloop)) - - # set up a node - self.node = node.Node() - self.node_consumer = rpc.AdapterConsumer(connection=self.conn, - topic=FLAGS.compute_topic, - proxy=self.node) - self.injected.append(self.node_consumer.attach_to_tornado(self.ioloop)) - - def test_flush_terminated(self): - # Launch an instance - - # Wait until it's running - - # Terminate it - - # Wait until it's terminated - - # Flush terminated nodes - - # ASSERT that it's gone - pass diff --git a/nova/tests/model_unittest.py b/nova/tests/model_unittest.py index 1bd7e527f..f84b6d11c 100644 --- a/nova/tests/model_unittest.py +++ b/nova/tests/model_unittest.py @@ -25,7 +25,6 @@ from nova import flags from nova import test from nova import utils from nova.compute import model -from nova.compute import node FLAGS = flags.FLAGS diff --git a/nova/tests/node_unittest.py b/nova/tests/node_unittest.py deleted file mode 100644 index 93942d79e..000000000 --- a/nova/tests/node_unittest.py +++ /dev/null @@ -1,128 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
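The renamed compute tests above lean on one pattern worth spelling out: twisted.trial runs test methods that return Deferreds, so decorating them with defer.inlineCallbacks lets each yield wait on the node under test. A stripped-down version with a fake node (no Nova imports; class and instance names here are invented for illustration):

from twisted.internet import defer
from twisted.trial import unittest

class FakeNode(object):
    """Stands in for ComputeNode; every call returns an already-fired Deferred."""
    def __init__(self):
        self.instances = {}

    def run_instance(self, instance_id):
        self.instances[instance_id] = 'running'
        return defer.succeed(instance_id)

    def describe_instances(self):
        return defer.succeed(dict(self.instances))

    def terminate_instance(self, instance_id):
        self.instances.pop(instance_id, None)
        return defer.succeed(True)

class FakeNodeTestCase(unittest.TestCase):
    @defer.inlineCallbacks
    def test_run_describe_terminate(self):
        node = FakeNode()
        yield node.run_instance('i-1')
        rv = yield node.describe_instances()
        self.assertEqual(rv, {'i-1': 'running'})
        yield node.terminate_instance('i-1')
        rv = yield node.describe_instances()
        self.assertEqual(rv, {})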
- -import logging -import time -from twisted.internet import defer -from xml.etree import ElementTree - -from nova import exception -from nova import flags -from nova import test -from nova import utils -from nova.compute import model -from nova.compute import node - - -FLAGS = flags.FLAGS - - -class InstanceXmlTestCase(test.TrialTestCase): - # @defer.inlineCallbacks - def test_serialization(self): - # TODO: Reimplement this, it doesn't make sense in redis-land - return - - # instance_id = 'foo' - # first_node = node.Node() - # inst = yield first_node.run_instance(instance_id) - # - # # force the state so that we can verify that it changes - # inst._s['state'] = node.Instance.NOSTATE - # xml = inst.toXml() - # self.assert_(ElementTree.parse(StringIO.StringIO(xml))) - # - # second_node = node.Node() - # new_inst = node.Instance.fromXml(second_node._conn, pool=second_node._pool, xml=xml) - # self.assertEqual(new_inst.state, node.Instance.RUNNING) - # rv = yield first_node.terminate_instance(instance_id) - - -class NodeConnectionTestCase(test.TrialTestCase): - def setUp(self): - logging.getLogger().setLevel(logging.DEBUG) - super(NodeConnectionTestCase, self).setUp() - self.flags(fake_libvirt=True, - fake_storage=True, - fake_users=True) - self.node = node.Node() - - def create_instance(self): - instdir = model.InstanceDirectory() - inst = instdir.new() - # TODO(ja): add ami, ari, aki, user_data - inst['reservation_id'] = 'r-fakeres' - inst['launch_time'] = '10' - inst['user_id'] = 'fake' - inst['project_id'] = 'fake' - inst['instance_type'] = 'm1.tiny' - inst['node_name'] = FLAGS.node_name - inst['mac_address'] = utils.generate_mac() - inst['ami_launch_index'] = 0 - inst.save() - return inst['instance_id'] - - @defer.inlineCallbacks - def test_run_describe_terminate(self): - instance_id = self.create_instance() - - rv = yield self.node.run_instance(instance_id) - - rv = yield self.node.describe_instances() - logging.info("Running instances: %s", rv) - self.assertEqual(rv[instance_id].name, instance_id) - - rv = yield self.node.terminate_instance(instance_id) - - rv = yield self.node.describe_instances() - logging.info("After terminating instances: %s", rv) - self.assertEqual(rv, {}) - - @defer.inlineCallbacks - def test_reboot(self): - instance_id = self.create_instance() - rv = yield self.node.run_instance(instance_id) - - rv = yield self.node.describe_instances() - self.assertEqual(rv[instance_id].name, instance_id) - - yield self.node.reboot_instance(instance_id) - - rv = yield self.node.describe_instances() - self.assertEqual(rv[instance_id].name, instance_id) - rv = yield self.node.terminate_instance(instance_id) - - @defer.inlineCallbacks - def test_console_output(self): - instance_id = self.create_instance() - rv = yield self.node.run_instance(instance_id) - - console = yield self.node.get_console_output(instance_id) - self.assert_(console) - rv = yield self.node.terminate_instance(instance_id) - - @defer.inlineCallbacks - def test_run_instance_existing(self): - instance_id = self.create_instance() - rv = yield self.node.run_instance(instance_id) - - rv = yield self.node.describe_instances() - self.assertEqual(rv[instance_id].name, instance_id) - - self.assertRaises(exception.Error, self.node.run_instance, instance_id) - rv = yield self.node.terminate_instance(instance_id) diff --git a/nova/tests/storage_unittest.py b/nova/tests/storage_unittest.py deleted file mode 100644 index 60576d74f..000000000 --- a/nova/tests/storage_unittest.py +++ /dev/null @@ -1,115 +0,0 @@ -# vim: tabstop=4 
shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging - -from nova import exception -from nova import flags -from nova import test -from nova.compute import node -from nova.volume import storage - - -FLAGS = flags.FLAGS - - -class StorageTestCase(test.TrialTestCase): - def setUp(self): - logging.getLogger().setLevel(logging.DEBUG) - super(StorageTestCase, self).setUp() - self.mynode = node.Node() - self.mystorage = None - self.flags(fake_libvirt=True, - fake_storage=True) - self.mystorage = storage.BlockStore() - - def test_run_create_volume(self): - vol_size = '0' - user_id = 'fake' - project_id = 'fake' - volume_id = self.mystorage.create_volume(vol_size, user_id, project_id) - # TODO(termie): get_volume returns differently than create_volume - self.assertEqual(volume_id, - storage.get_volume(volume_id)['volume_id']) - - rv = self.mystorage.delete_volume(volume_id) - self.assertRaises(exception.Error, - storage.get_volume, - volume_id) - - def test_too_big_volume(self): - vol_size = '1001' - user_id = 'fake' - project_id = 'fake' - self.assertRaises(TypeError, - self.mystorage.create_volume, - vol_size, user_id, project_id) - - def test_too_many_volumes(self): - vol_size = '1' - user_id = 'fake' - project_id = 'fake' - num_shelves = FLAGS.last_shelf_id - FLAGS.first_shelf_id + 1 - total_slots = FLAGS.slots_per_shelf * num_shelves - vols = [] - for i in xrange(total_slots): - vid = self.mystorage.create_volume(vol_size, user_id, project_id) - vols.append(vid) - self.assertRaises(storage.NoMoreVolumes, - self.mystorage.create_volume, - vol_size, user_id, project_id) - for id in vols: - self.mystorage.delete_volume(id) - - def test_run_attach_detach_volume(self): - # Create one volume and one node to test with - instance_id = "storage-test" - vol_size = "5" - user_id = "fake" - project_id = 'fake' - mountpoint = "/dev/sdf" - volume_id = self.mystorage.create_volume(vol_size, user_id, project_id) - - volume_obj = storage.get_volume(volume_id) - volume_obj.start_attach(instance_id, mountpoint) - rv = yield self.mynode.attach_volume(volume_id, - instance_id, - mountpoint) - self.assertEqual(volume_obj['status'], "in-use") - self.assertEqual(volume_obj['attachStatus'], "attached") - self.assertEqual(volume_obj['instance_id'], instance_id) - self.assertEqual(volume_obj['mountpoint'], mountpoint) - - self.assertRaises(exception.Error, - self.mystorage.delete_volume, - volume_id) - - rv = yield self.mystorage.detach_volume(volume_id) - volume_obj = storage.get_volume(volume_id) - self.assertEqual(volume_obj['status'], "available") - - rv = self.mystorage.delete_volume(volume_id) - self.assertRaises(exception.Error, - storage.get_volume, - volume_id) - - def test_multi_node(self): - # TODO(termie): Figure out how to test with two nodes, - # each of them having a different FLAG for storage_node - # This 
will allow us to test cross-node interactions - pass diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py new file mode 100644 index 000000000..c176453d8 --- /dev/null +++ b/nova/tests/volume_unittest.py @@ -0,0 +1,115 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import logging + +from nova import exception +from nova import flags +from nova import test +from nova.compute import computenode +from nova.volume import volumenode + + +FLAGS = flags.FLAGS + + +class VolumeTestCase(test.TrialTestCase): + def setUp(self): + logging.getLogger().setLevel(logging.DEBUG) + super(VolumeTestCase, self).setUp() + self.mynode = computenode.ComputeNode() + self.mystorage = None + self.flags(fake_libvirt=True, + fake_storage=True) + self.mystorage = volumenode.VolumeNode() + + def test_run_create_volume(self): + vol_size = '0' + user_id = 'fake' + project_id = 'fake' + volume_id = self.mystorage.create_volume(vol_size, user_id, project_id) + # TODO(termie): get_volume returns differently than create_volume + self.assertEqual(volume_id, + volumenode.get_volume(volume_id)['volume_id']) + + rv = self.mystorage.delete_volume(volume_id) + self.assertRaises(exception.Error, + volumenode.get_volume, + volume_id) + + def test_too_big_volume(self): + vol_size = '1001' + user_id = 'fake' + project_id = 'fake' + self.assertRaises(TypeError, + self.mystorage.create_volume, + vol_size, user_id, project_id) + + def test_too_many_volumes(self): + vol_size = '1' + user_id = 'fake' + project_id = 'fake' + num_shelves = FLAGS.last_shelf_id - FLAGS.first_shelf_id + 1 + total_slots = FLAGS.slots_per_shelf * num_shelves + vols = [] + for i in xrange(total_slots): + vid = self.mystorage.create_volume(vol_size, user_id, project_id) + vols.append(vid) + self.assertRaises(volumenode.NoMoreVolumes, + self.mystorage.create_volume, + vol_size, user_id, project_id) + for id in vols: + self.mystorage.delete_volume(id) + + def test_run_attach_detach_volume(self): + # Create one volume and one node to test with + instance_id = "storage-test" + vol_size = "5" + user_id = "fake" + project_id = 'fake' + mountpoint = "/dev/sdf" + volume_id = self.mystorage.create_volume(vol_size, user_id, project_id) + + volume_obj = volumenode.get_volume(volume_id) + volume_obj.start_attach(instance_id, mountpoint) + rv = yield self.mynode.attach_volume(volume_id, + instance_id, + mountpoint) + self.assertEqual(volume_obj['status'], "in-use") + self.assertEqual(volume_obj['attachStatus'], "attached") + self.assertEqual(volume_obj['instance_id'], instance_id) + self.assertEqual(volume_obj['mountpoint'], mountpoint) + + self.assertRaises(exception.Error, + self.mystorage.delete_volume, + volume_id) + + rv = yield self.mystorage.detach_volume(volume_id) + volume_obj = volumenode.get_volume(volume_id) + 
self.assertEqual(volume_obj['status'], "available") + + rv = self.mystorage.delete_volume(volume_id) + self.assertRaises(exception.Error, + volumenode.get_volume, + volume_id) + + def test_multi_node(self): + # TODO(termie): Figure out how to test with two nodes, + # each of them having a different FLAG for storage_node + # This will allow us to test cross-node interactions + pass diff --git a/nova/twistd.py b/nova/twistd.py index 32a46ce03..fc7dad26a 100644 --- a/nova/twistd.py +++ b/nova/twistd.py @@ -32,7 +32,6 @@ from twisted.python import log from twisted.python import reflect from twisted.python import runtime from twisted.python import usage -import UserDict from nova import flags @@ -161,6 +160,13 @@ def WrapTwistedOptions(wrapped): except (AttributeError, KeyError): self._data[key] = value + def get(self, key, default): + key = key.replace('-', '_') + try: + return getattr(FLAGS, key) + except (AttributeError, KeyError): + self._data.get(key, default) + return TwistedOptionsToFlags @@ -210,8 +216,12 @@ def serve(filename): elif FLAGS.pidfile.endswith('twistd.pid'): FLAGS.pidfile = FLAGS.pidfile.replace('twistd.pid', '%s.pid' % name) + print FLAGS.logfile if not FLAGS.logfile: FLAGS.logfile = '%s.log' % name + elif FLAGS.logfile.endswith('twistd.log'): + FLAGS.logfile = FLAGS.logfile.replace('twistd.log', '%s.log' % name) + print FLAGS.logfile action = 'start' if len(argv) > 1: diff --git a/nova/volume/storage.py b/nova/volume/storage.py deleted file mode 100644 index 121bc01e6..000000000 --- a/nova/volume/storage.py +++ /dev/null @@ -1,321 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Nova Storage manages creating, attaching, detaching, and -destroying persistent storage volumes, ala EBS. -Currently uses Ata-over-Ethernet. 
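A hedged sketch of the per-service file naming the nova/twistd.py hunk above implements: when twistd's generic defaults ('twistd.pid' / 'twistd.log') are in effect, they are rewritten to carry the service name, e.g. nova-volume.pid and nova-volume.log. The paths below are examples only, not values from the patch.

def apply_service_names(name, pidfile=None, logfile=None):
    """Mirror of the pidfile/logfile rewriting in nova.twistd.serve()."""
    if not pidfile:
        pidfile = '%s.pid' % name
    elif pidfile.endswith('twistd.pid'):
        pidfile = pidfile.replace('twistd.pid', '%s.pid' % name)
    if not logfile:
        logfile = '%s.log' % name
    elif logfile.endswith('twistd.log'):
        logfile = logfile.replace('twistd.log', '%s.log' % name)
    return pidfile, logfile

if __name__ == '__main__':
    print(apply_service_names('nova-volume',
                              pidfile='/var/run/twistd.pid',
                              logfile='/var/log/twistd.log'))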
-""" - -import glob -import logging -import os -import shutil -import socket -import tempfile - -from twisted.application import service -from twisted.internet import defer - -from nova import datastore -from nova import exception -from nova import flags -from nova import process -from nova import utils -from nova import validate -from nova.compute import model - - -FLAGS = flags.FLAGS -flags.DEFINE_string('storage_dev', '/dev/sdb', - 'Physical device to use for volumes') -flags.DEFINE_string('volume_group', 'nova-volumes', - 'Name for the VG that will contain exported volumes') -flags.DEFINE_string('aoe_eth_dev', 'eth0', - 'Which device to export the volumes on') -flags.DEFINE_string('storage_name', - socket.gethostname(), - 'name of this node') -flags.DEFINE_integer('first_shelf_id', - utils.last_octet(utils.get_my_ip()) * 10, - 'AoE starting shelf_id for this node') -flags.DEFINE_integer('last_shelf_id', - utils.last_octet(utils.get_my_ip()) * 10 + 9, - 'AoE starting shelf_id for this node') -flags.DEFINE_string('aoe_export_dir', - '/var/lib/vblade-persist/vblades', - 'AoE directory where exports are created') -flags.DEFINE_integer('slots_per_shelf', - 16, - 'Number of AoE slots per shelf') -flags.DEFINE_string('storage_availability_zone', - 'nova', - 'availability zone of this node') -flags.DEFINE_boolean('fake_storage', False, - 'Should we make real storage volumes to attach?') - - -class NoMoreVolumes(exception.Error): - pass - -def get_volume(volume_id): - """ Returns a redis-backed volume object """ - volume_class = Volume - if FLAGS.fake_storage: - volume_class = FakeVolume - if datastore.Redis.instance().sismember('volumes', volume_id): - return volume_class(volume_id=volume_id) - raise exception.Error("Volume does not exist") - -class BlockStore(object, service.Service): - """ - There is one BlockStore running on each volume node. - However, each BlockStore can report on the state of - *all* volumes in the cluster. - """ - def __init__(self): - super(BlockStore, self).__init__() - self.volume_class = Volume - if FLAGS.fake_storage: - FLAGS.aoe_export_dir = tempfile.mkdtemp() - self.volume_class = FakeVolume - self._init_volume_group() - - def __del__(self): - # TODO(josh): Get rid of this destructor, volumes destroy themselves - if FLAGS.fake_storage: - try: - shutil.rmtree(FLAGS.aoe_export_dir) - except Exception, err: - pass - - @defer.inlineCallbacks - def report_state(self, nodename, daemon): - # TODO(termie): make this pattern be more elegant. -todd - try: - record = model.Daemon(nodename, daemon) - record.heartbeat() - if getattr(self, "model_disconnected", False): - self.model_disconnected = False - logging.error("Recovered model server connection!") - - except model.ConnectionError, ex: - if not getattr(self, "model_disconnected", False): - self.model_disconnected = True - logging.exception("model server went away") - yield - - @validate.rangetest(size=(0, 1000)) - def create_volume(self, size, user_id, project_id): - """ - Creates an exported volume (fake or real), - restarts exports to make it available. - Volume at this point has size, owner, and zone. 
- """ - logging.debug("Creating volume of size: %s" % (size)) - vol = self.volume_class.create(size, user_id, project_id) - datastore.Redis.instance().sadd('volumes', vol['volume_id']) - datastore.Redis.instance().sadd('volumes:%s' % (FLAGS.storage_name), vol['volume_id']) - self._restart_exports() - return vol['volume_id'] - - def by_node(self, node_id): - """ returns a list of volumes for a node """ - for volume_id in datastore.Redis.instance().smembers('volumes:%s' % (node_id)): - yield self.volume_class(volume_id=volume_id) - - @property - def all(self): - """ returns a list of all volumes """ - for volume_id in datastore.Redis.instance().smembers('volumes'): - yield self.volume_class(volume_id=volume_id) - - def delete_volume(self, volume_id): - logging.debug("Deleting volume with id of: %s" % (volume_id)) - vol = get_volume(volume_id) - if vol['status'] == "attached": - raise exception.Error("Volume is still attached") - if vol['node_name'] != FLAGS.storage_name: - raise exception.Error("Volume is not local to this node") - vol.destroy() - datastore.Redis.instance().srem('volumes', vol['volume_id']) - datastore.Redis.instance().srem('volumes:%s' % (FLAGS.storage_name), vol['volume_id']) - return True - - @defer.inlineCallbacks - def _restart_exports(self): - if FLAGS.fake_storage: - return - yield process.simple_execute( - "sudo vblade-persist auto all") - yield process.simple_execute( - "sudo vblade-persist start all") - - @defer.inlineCallbacks - def _init_volume_group(self): - if FLAGS.fake_storage: - return - yield process.simple_execute( - "sudo pvcreate %s" % (FLAGS.storage_dev)) - yield process.simple_execute( - "sudo vgcreate %s %s" % (FLAGS.volume_group, - FLAGS.storage_dev)) - -class Volume(datastore.BasicModel): - - def __init__(self, volume_id=None): - self.volume_id = volume_id - super(Volume, self).__init__() - - @property - def identifier(self): - return self.volume_id - - def default_state(self): - return {"volume_id": self.volume_id} - - @classmethod - def create(cls, size, user_id, project_id): - volume_id = utils.generate_uid('vol') - vol = cls(volume_id) - vol['node_name'] = FLAGS.storage_name - vol['size'] = size - vol['user_id'] = user_id - vol['project_id'] = project_id - vol['availability_zone'] = FLAGS.storage_availability_zone - vol["instance_id"] = 'none' - vol["mountpoint"] = 'none' - vol['attach_time'] = 'none' - vol['status'] = "creating" # creating | available | in-use - vol['attach_status'] = "detached" # attaching | attached | detaching | detached - vol['delete_on_termination'] = 'False' - vol.save() - vol.create_lv() - vol._setup_export() - # TODO(joshua) - We need to trigger a fanout message for aoe-discover on all the nodes - # TODO(joshua - vol['status'] = "available" - vol.save() - return vol - - def start_attach(self, instance_id, mountpoint): - """ """ - self['instance_id'] = instance_id - self['mountpoint'] = mountpoint - self['status'] = "in-use" - self['attach_status'] = "attaching" - self['attach_time'] = utils.isotime() - self['delete_on_termination'] = 'False' - self.save() - - def finish_attach(self): - """ """ - self['attach_status'] = "attached" - self.save() - - def start_detach(self): - """ """ - self['attach_status'] = "detaching" - self.save() - - def finish_detach(self): - self['instance_id'] = None - self['mountpoint'] = None - self['status'] = "available" - self['attach_status'] = "detached" - self.save() - - def destroy(self): - try: - self._remove_export() - except: - pass - self._delete_lv() - super(Volume, self).destroy() - 
- @defer.inlineCallbacks - def create_lv(self): - if str(self['size']) == '0': - sizestr = '100M' - else: - sizestr = '%sG' % self['size'] - yield process.simple_execute( - "sudo lvcreate -L %s -n %s %s" % (sizestr, - self['volume_id'], - FLAGS.volume_group)) - - @defer.inlineCallbacks - def _delete_lv(self): - yield process.simple_execute( - "sudo lvremove -f %s/%s" % (FLAGS.volume_group, - self['volume_id'])) - - def _setup_export(self): - (shelf_id, blade_id) = get_next_aoe_numbers() - self['aoe_device'] = "e%s.%s" % (shelf_id, blade_id) - self['shelf_id'] = shelf_id - self['blade_id'] = blade_id - self.save() - self._exec_export() - - @defer.inlineCallbacks - def _exec_export(self): - yield process.simple_execute( - "sudo vblade-persist setup %s %s %s /dev/%s/%s" % - (self['shelf_id'], - self['blade_id'], - FLAGS.aoe_eth_dev, - FLAGS.volume_group, - self['volume_id'])) - - @defer.inlineCallbacks - def _remove_export(self): - yield process.simple_execute( - "sudo vblade-persist stop %s %s" % (self['shelf_id'], - self['blade_id'])) - yield process.simple_execute( - "sudo vblade-persist destroy %s %s" % (self['shelf_id'], - self['blade_id'])) - - -class FakeVolume(Volume): - def create_lv(self): - pass - - def _exec_export(self): - fname = os.path.join(FLAGS.aoe_export_dir, self['aoe_device']) - f = file(fname, "w") - f.close() - - def _remove_export(self): - pass - - def _delete_lv(self): - pass - -def get_next_aoe_numbers(): - for shelf_id in xrange(FLAGS.first_shelf_id, FLAGS.last_shelf_id + 1): - aoes = glob.glob("%s/e%s.*" % (FLAGS.aoe_export_dir, shelf_id)) - if not aoes: - blade_id = 0 - else: - blade_id = int(max([int(a.rpartition('.')[2]) for a in aoes])) + 1 - if blade_id < FLAGS.slots_per_shelf: - logging.debug("Next shelf.blade is %s.%s", shelf_id, blade_id) - return (shelf_id, blade_id) - raise NoMoreVolumes() diff --git a/nova/volume/volumenode.py b/nova/volume/volumenode.py new file mode 100644 index 000000000..6b4ad0d87 --- /dev/null +++ b/nova/volume/volumenode.py @@ -0,0 +1,305 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Nova Storage manages creating, attaching, detaching, and +destroying persistent storage volumes, ala EBS. +Currently uses Ata-over-Ethernet. 
+""" + +import glob +import logging +import os +import shutil +import socket +import tempfile + +from twisted.application import service +from twisted.internet import defer + +from nova import datastore +from nova import exception +from nova import flags +from nova import node +from nova import process +from nova import utils +from nova import validate + + +FLAGS = flags.FLAGS +flags.DEFINE_string('storage_dev', '/dev/sdb', + 'Physical device to use for volumes') +flags.DEFINE_string('volume_group', 'nova-volumes', + 'Name for the VG that will contain exported volumes') +flags.DEFINE_string('aoe_eth_dev', 'eth0', + 'Which device to export the volumes on') +flags.DEFINE_string('storage_name', + socket.gethostname(), + 'name of this node') +flags.DEFINE_integer('first_shelf_id', + utils.last_octet(utils.get_my_ip()) * 10, + 'AoE starting shelf_id for this node') +flags.DEFINE_integer('last_shelf_id', + utils.last_octet(utils.get_my_ip()) * 10 + 9, + 'AoE starting shelf_id for this node') +flags.DEFINE_string('aoe_export_dir', + '/var/lib/vblade-persist/vblades', + 'AoE directory where exports are created') +flags.DEFINE_integer('slots_per_shelf', + 16, + 'Number of AoE slots per shelf') +flags.DEFINE_string('storage_availability_zone', + 'nova', + 'availability zone of this node') +flags.DEFINE_boolean('fake_storage', False, + 'Should we make real storage volumes to attach?') + + +class NoMoreVolumes(exception.Error): + pass + +def get_volume(volume_id): + """ Returns a redis-backed volume object """ + volume_class = Volume + if FLAGS.fake_storage: + volume_class = FakeVolume + if datastore.Redis.instance().sismember('volumes', volume_id): + return volume_class(volume_id=volume_id) + raise exception.Error("Volume does not exist") + +class VolumeNode(node.Node): + """ + There is one VolumeNode running on each host. + However, each VolumeNode can report on the state of + *all* volumes in the cluster. + """ + def __init__(self): + super(VolumeNode, self).__init__() + self.volume_class = Volume + if FLAGS.fake_storage: + FLAGS.aoe_export_dir = tempfile.mkdtemp() + self.volume_class = FakeVolume + self._init_volume_group() + + def __del__(self): + # TODO(josh): Get rid of this destructor, volumes destroy themselves + if FLAGS.fake_storage: + try: + shutil.rmtree(FLAGS.aoe_export_dir) + except Exception, err: + pass + + @validate.rangetest(size=(0, 1000)) + def create_volume(self, size, user_id, project_id): + """ + Creates an exported volume (fake or real), + restarts exports to make it available. + Volume at this point has size, owner, and zone. 
+ """ + logging.debug("Creating volume of size: %s" % (size)) + vol = self.volume_class.create(size, user_id, project_id) + datastore.Redis.instance().sadd('volumes', vol['volume_id']) + datastore.Redis.instance().sadd('volumes:%s' % (FLAGS.storage_name), vol['volume_id']) + self._restart_exports() + return vol['volume_id'] + + def by_node(self, node_id): + """ returns a list of volumes for a node """ + for volume_id in datastore.Redis.instance().smembers('volumes:%s' % (node_id)): + yield self.volume_class(volume_id=volume_id) + + @property + def all(self): + """ returns a list of all volumes """ + for volume_id in datastore.Redis.instance().smembers('volumes'): + yield self.volume_class(volume_id=volume_id) + + def delete_volume(self, volume_id): + logging.debug("Deleting volume with id of: %s" % (volume_id)) + vol = get_volume(volume_id) + if vol['status'] == "attached": + raise exception.Error("Volume is still attached") + if vol['node_name'] != FLAGS.storage_name: + raise exception.Error("Volume is not local to this node") + vol.destroy() + datastore.Redis.instance().srem('volumes', vol['volume_id']) + datastore.Redis.instance().srem('volumes:%s' % (FLAGS.storage_name), vol['volume_id']) + return True + + @defer.inlineCallbacks + def _restart_exports(self): + if FLAGS.fake_storage: + return + yield process.simple_execute( + "sudo vblade-persist auto all") + yield process.simple_execute( + "sudo vblade-persist start all") + + @defer.inlineCallbacks + def _init_volume_group(self): + if FLAGS.fake_storage: + return + yield process.simple_execute( + "sudo pvcreate %s" % (FLAGS.storage_dev)) + yield process.simple_execute( + "sudo vgcreate %s %s" % (FLAGS.volume_group, + FLAGS.storage_dev)) + +class Volume(datastore.BasicModel): + + def __init__(self, volume_id=None): + self.volume_id = volume_id + super(Volume, self).__init__() + + @property + def identifier(self): + return self.volume_id + + def default_state(self): + return {"volume_id": self.volume_id} + + @classmethod + def create(cls, size, user_id, project_id): + volume_id = utils.generate_uid('vol') + vol = cls(volume_id) + vol['node_name'] = FLAGS.storage_name + vol['size'] = size + vol['user_id'] = user_id + vol['project_id'] = project_id + vol['availability_zone'] = FLAGS.storage_availability_zone + vol["instance_id"] = 'none' + vol["mountpoint"] = 'none' + vol['attach_time'] = 'none' + vol['status'] = "creating" # creating | available | in-use + vol['attach_status'] = "detached" # attaching | attached | detaching | detached + vol['delete_on_termination'] = 'False' + vol.save() + vol.create_lv() + vol._setup_export() + # TODO(joshua) - We need to trigger a fanout message for aoe-discover on all the nodes + # TODO(joshua + vol['status'] = "available" + vol.save() + return vol + + def start_attach(self, instance_id, mountpoint): + """ """ + self['instance_id'] = instance_id + self['mountpoint'] = mountpoint + self['status'] = "in-use" + self['attach_status'] = "attaching" + self['attach_time'] = utils.isotime() + self['delete_on_termination'] = 'False' + self.save() + + def finish_attach(self): + """ """ + self['attach_status'] = "attached" + self.save() + + def start_detach(self): + """ """ + self['attach_status'] = "detaching" + self.save() + + def finish_detach(self): + self['instance_id'] = None + self['mountpoint'] = None + self['status'] = "available" + self['attach_status'] = "detached" + self.save() + + def destroy(self): + try: + self._remove_export() + except: + pass + self._delete_lv() + super(Volume, self).destroy() + 
+ @defer.inlineCallbacks + def create_lv(self): + if str(self['size']) == '0': + sizestr = '100M' + else: + sizestr = '%sG' % self['size'] + yield process.simple_execute( + "sudo lvcreate -L %s -n %s %s" % (sizestr, + self['volume_id'], + FLAGS.volume_group)) + + @defer.inlineCallbacks + def _delete_lv(self): + yield process.simple_execute( + "sudo lvremove -f %s/%s" % (FLAGS.volume_group, + self['volume_id'])) + + def _setup_export(self): + (shelf_id, blade_id) = get_next_aoe_numbers() + self['aoe_device'] = "e%s.%s" % (shelf_id, blade_id) + self['shelf_id'] = shelf_id + self['blade_id'] = blade_id + self.save() + self._exec_export() + + @defer.inlineCallbacks + def _exec_export(self): + yield process.simple_execute( + "sudo vblade-persist setup %s %s %s /dev/%s/%s" % + (self['shelf_id'], + self['blade_id'], + FLAGS.aoe_eth_dev, + FLAGS.volume_group, + self['volume_id'])) + + @defer.inlineCallbacks + def _remove_export(self): + yield process.simple_execute( + "sudo vblade-persist stop %s %s" % (self['shelf_id'], + self['blade_id'])) + yield process.simple_execute( + "sudo vblade-persist destroy %s %s" % (self['shelf_id'], + self['blade_id'])) + + +class FakeVolume(Volume): + def create_lv(self): + pass + + def _exec_export(self): + fname = os.path.join(FLAGS.aoe_export_dir, self['aoe_device']) + f = file(fname, "w") + f.close() + + def _remove_export(self): + pass + + def _delete_lv(self): + pass + +def get_next_aoe_numbers(): + for shelf_id in xrange(FLAGS.first_shelf_id, FLAGS.last_shelf_id + 1): + aoes = glob.glob("%s/e%s.*" % (FLAGS.aoe_export_dir, shelf_id)) + if not aoes: + blade_id = 0 + else: + blade_id = int(max([int(a.rpartition('.')[2]) for a in aoes])) + 1 + if blade_id < FLAGS.slots_per_shelf: + logging.debug("Next shelf.blade is %s.%s", shelf_id, blade_id) + return (shelf_id, blade_id) + raise NoMoreVolumes() diff --git a/run_tests.py b/run_tests.py index db8a582ea..ae2874f58 100644 --- a/run_tests.py +++ b/run_tests.py @@ -52,14 +52,14 @@ from nova import twistd from nova.tests.access_unittest import * from nova.tests.api_unittest import * from nova.tests.cloud_unittest import * +from nova.tests.compute_unittest import * from nova.tests.model_unittest import * from nova.tests.network_unittest import * -from nova.tests.node_unittest import * from nova.tests.objectstore_unittest import * from nova.tests.process_unittest import * -from nova.tests.storage_unittest import * from nova.tests.users_unittest import * from nova.tests.validator_unittest import * +from nova.tests.volume_unittest import * FLAGS = flags.FLAGS -- cgit From 80d03bbeb1f1acb82c495747dcf5cc9390df025a Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 21 Jul 2010 14:55:16 -0500 Subject: make nova-network executable --- bin/nova-network | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 bin/nova-network diff --git a/bin/nova-network b/bin/nova-network old mode 100644 new mode 100755 -- cgit From 898102508a1c2a1087b3ffce36b3fb890f5d3775 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 21 Jul 2010 19:56:08 -0500 Subject: refactoring of imports for fakeldapdriver --- nova/auth/fakeldapdriver.py | 32 ++++++++++++++++++++++++++ nova/auth/ldapdriver.py | 27 +++++++++++----------- nova/auth/manager.py | 47 ++++++++++++++++++++------------------ nova/flags.py | 1 - nova/tests/cloud_unittest.py | 3 +-- nova/tests/fake_flags.py | 2 +- nova/tests/model_unittest.py | 3 +-- nova/tests/node_unittest.py | 3 +-- nova/tests/objectstore_unittest.py | 3 +-- 
nova/tests/real_flags.py | 1 - 10 files changed, 75 insertions(+), 47 deletions(-) create mode 100644 nova/auth/fakeldapdriver.py diff --git a/nova/auth/fakeldapdriver.py b/nova/auth/fakeldapdriver.py new file mode 100644 index 000000000..833548c79 --- /dev/null +++ b/nova/auth/fakeldapdriver.py @@ -0,0 +1,32 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Fake Auth driver for ldap + +""" + +from nova.auth import ldapdriver + +class AuthDriver(ldapdriver.AuthDriver): + """Ldap Auth driver + + Defines enter and exit and therefore supports the with/as syntax. + """ + def __init__(self): + self.ldap = __import__('nova.auth.fakeldap', fromlist=True) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index a94b219d6..beab97e49 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -57,19 +57,18 @@ flags.DEFINE_string('ldap_developer', 'cn=developers,ou=Groups,dc=example,dc=com', 'cn for Developers') -class LdapDriver(object): +class AuthDriver(object): """Ldap Auth driver Defines enter and exit and therefore supports the with/as syntax. """ + def __init__(self): + """Imports the LDAP module""" + self.ldap = __import__('ldap') + def __enter__(self): """Creates the connection to LDAP""" - global ldap - if FLAGS.fake_users: - from nova.auth import fakeldap as ldap - else: - import ldap - self.conn = ldap.initialize(FLAGS.ldap_url) + self.conn = self.ldap.initialize(FLAGS.ldap_url) self.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) return self @@ -275,8 +274,8 @@ class LdapDriver(object): def __find_dns(self, dn, query=None): """Find dns by query""" try: - res = self.conn.search_s(dn, ldap.SCOPE_SUBTREE, query) - except ldap.NO_SUCH_OBJECT: + res = self.conn.search_s(dn, self.ldap.SCOPE_SUBTREE, query) + except self.ldap.NO_SUCH_OBJECT: return [] # just return the DNs return [dn for dn, attributes in res] @@ -284,8 +283,8 @@ class LdapDriver(object): def __find_objects(self, dn, query = None): """Find objects by query""" try: - res = self.conn.search_s(dn, ldap.SCOPE_SUBTREE, query) - except ldap.NO_SUCH_OBJECT: + res = self.conn.search_s(dn, self.ldap.SCOPE_SUBTREE, query) + except self.ldap.NO_SUCH_OBJECT: return [] # just return the attributes return [attributes for dn, attributes in res] @@ -369,7 +368,7 @@ class LdapDriver(object): raise exception.Duplicate("User %s is already a member of " "the group %s" % (uid, group_dn)) attr = [ - (ldap.MOD_ADD, 'member', self.__uid_to_dn(uid)) + (self.ldap.MOD_ADD, 'member', self.__uid_to_dn(uid)) ] self.conn.modify_s(group_dn, attr) @@ -389,10 +388,10 @@ class LdapDriver(object): def __safe_remove_from_group(self, uid, group_dn): """Remove user from group, deleting group if user is last member""" # FIXME(vish): what if deleted user is a project manager? 
- attr = [(ldap.MOD_DELETE, 'member', self.__uid_to_dn(uid))] + attr = [(self.ldap.MOD_DELETE, 'member', self.__uid_to_dn(uid))] try: self.conn.modify_s(group_dn, attr) - except ldap.OBJECT_CLASS_VIOLATION: + except self.ldap.OBJECT_CLASS_VIOLATION: logging.debug("Attempted to remove the last member of a group. " "Deleting the group at %s instead." % group_dn ) self.__delete_group(group_dn) diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 3496ea161..130bed7c2 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -34,7 +34,6 @@ from nova import exception from nova import flags from nova import objectstore # for flags from nova import utils -from nova.auth import ldapdriver from nova.auth import signer FLAGS = flags.FLAGS @@ -76,6 +75,8 @@ flags.DEFINE_string('credential_cert_subject', flags.DEFINE_string('vpn_ip', '127.0.0.1', 'Public IP for the cloudpipe VPN servers') +flags.DEFINE_string('auth_driver', 'fakeldapdriver', + 'Driver that auth manager uses') class AuthBase(object): """Base class for objects relating to auth @@ -312,7 +313,7 @@ class AuthManager(object): Methods accept objects or ids. AuthManager uses a driver object to make requests to the data backend. - See ldapdriver.LdapDriver for reference. + See ldapdriver for reference. AuthManager also manages associated data related to Auth objects that need to be more accessible, such as vpn ips and ports. @@ -325,7 +326,9 @@ class AuthManager(object): return cls._instance def __init__(self, *args, **kwargs): - self.driver_class = kwargs.get('driver_class', ldapdriver.LdapDriver) + """Imports the driver module and saves the Driver class""" + mod = __import__(FLAGS.auth_driver, fromlist=True) + self.driver = mod.AuthDriver def authenticate(self, access, signature, params, verb='GET', server_string='127.0.0.1:8773', path='/', @@ -451,7 +454,7 @@ class AuthManager(object): @rtype: bool @return: True if the user has the role. """ - with self.driver_class() as drv: + with self.driver() as drv: if role == 'projectmanager': if not project: raise exception.Error("Must specify project") @@ -487,7 +490,7 @@ class AuthManager(object): @type project: Project or project_id @param project: Project in which to add local role. """ - with self.driver_class() as drv: + with self.driver() as drv: drv.add_role(User.safe_id(user), role, Project.safe_id(project)) def remove_role(self, user, role, project=None): @@ -507,19 +510,19 @@ class AuthManager(object): @type project: Project or project_id @param project: Project in which to remove local role. 
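The auth refactor above swaps the hard-coded LdapDriver for a driver resolved at runtime: AuthManager.__init__() calls __import__(FLAGS.auth_driver, fromlist=True) and grabs mod.AuthDriver, and fakeldapdriver pulls in nova.auth.fakeldap the same way. A generic sketch of the mechanism; the stdlib logging.handlers module merely demonstrates that a dotted path resolves to the submodule, and the class name default is a placeholder.

def load_driver(dotted_path, class_name='AuthDriver'):
    """Resolve 'package.module' to module.class_name at runtime.

    A non-empty fromlist makes __import__ return the rightmost module instead
    of the top-level package; the patch passes fromlist=True, which works for
    the same reason (only truthiness of fromlist is checked).
    """
    module = __import__(dotted_path, fromlist=[class_name])
    return getattr(module, class_name)

if __name__ == '__main__':
    # stdlib example standing in for something like 'nova.auth.fakeldapdriver'
    print(load_driver('logging.handlers', class_name='MemoryHandler'))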
""" - with self.driver_class() as drv: + with self.driver() as drv: drv.remove_role(User.safe_id(user), role, Project.safe_id(project)) def get_project(self, pid): """Get project object by id""" - with self.driver_class() as drv: + with self.driver() as drv: project_dict = drv.get_project(pid) if project_dict: return Project(**project_dict) def get_projects(self): """Retrieves list of all projects""" - with self.driver_class() as drv: + with self.driver() as drv: project_list = drv.get_projects() if not project_list: return [] @@ -549,7 +552,7 @@ class AuthManager(object): """ if member_users: member_users = [User.safe_id(u) for u in member_users] - with self.driver_class() as drv: + with self.driver() as drv: project_dict = drv.create_project(name, User.safe_id(manager_user), description, @@ -561,7 +564,7 @@ class AuthManager(object): def add_to_project(self, user, project): """Add user to project""" - with self.driver_class() as drv: + with self.driver() as drv: return drv.add_to_project(User.safe_id(user), Project.safe_id(project)) @@ -579,7 +582,7 @@ class AuthManager(object): def remove_from_project(self, user, project): """Removes a user from a project""" - with self.driver_class() as drv: + with self.driver() as drv: return drv.remove_from_project(User.safe_id(user), Project.safe_id(project)) @@ -600,26 +603,26 @@ class AuthManager(object): def delete_project(self, project): """Deletes a project""" - with self.driver_class() as drv: + with self.driver() as drv: return drv.delete_project(Project.safe_id(project)) def get_user(self, uid): """Retrieves a user by id""" - with self.driver_class() as drv: + with self.driver() as drv: user_dict = drv.get_user(uid) if user_dict: return User(**user_dict) def get_user_from_access_key(self, access_key): """Retrieves a user by access key""" - with self.driver_class() as drv: + with self.driver() as drv: user_dict = drv.get_user_from_access_key(access_key) if user_dict: return User(**user_dict) def get_users(self): """Retrieves a list of all users""" - with self.driver_class() as drv: + with self.driver() as drv: user_list = drv.get_users() if not user_list: return [] @@ -649,14 +652,14 @@ class AuthManager(object): """ if access == None: access = str(uuid.uuid4()) if secret == None: secret = str(uuid.uuid4()) - with self.driver_class() as drv: + with self.driver() as drv: user_dict = drv.create_user(name, access, secret, admin) if user_dict: return User(**user_dict) def delete_user(self, user): """Deletes a user""" - with self.driver_class() as drv: + with self.driver() as drv: drv.delete_user(User.safe_id(user)) def generate_key_pair(self, user, key_name): @@ -677,7 +680,7 @@ class AuthManager(object): # NOTE(vish): generating key pair is slow so check for legal # creation before creating keypair uid = User.safe_id(user) - with self.driver_class() as drv: + with self.driver() as drv: if not drv.get_user(uid): raise exception.NotFound("User %s doesn't exist" % user) if drv.get_key_pair(uid, key_name): @@ -689,7 +692,7 @@ class AuthManager(object): def create_key_pair(self, user, key_name, public_key, fingerprint): """Creates a key pair for user""" - with self.driver_class() as drv: + with self.driver() as drv: kp_dict = drv.create_key_pair(User.safe_id(user), key_name, public_key, @@ -699,14 +702,14 @@ class AuthManager(object): def get_key_pair(self, user, key_name): """Retrieves a key pair for user""" - with self.driver_class() as drv: + with self.driver() as drv: kp_dict = drv.get_key_pair(User.safe_id(user), key_name) if kp_dict: return 
KeyPair(**kp_dict) def get_key_pairs(self, user): """Retrieves all key pairs for user""" - with self.driver_class() as drv: + with self.driver() as drv: kp_list = drv.get_key_pairs(User.safe_id(user)) if not kp_list: return [] @@ -714,7 +717,7 @@ class AuthManager(object): def delete_key_pair(self, user, key_name): """Deletes a key pair for user""" - with self.driver_class() as drv: + with self.driver() as drv: drv.delete_key_pair(User.safe_id(user), key_name) def get_credentials(self, user, project=None): diff --git a/nova/flags.py b/nova/flags.py index 06ea1e007..3ad6a3ad5 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -46,7 +46,6 @@ DEFINE_bool('fake_libvirt', False, DEFINE_bool('verbose', False, 'show debug output') DEFINE_boolean('fake_rabbit', False, 'use a fake rabbit') DEFINE_bool('fake_network', False, 'should we use fake network devices and addresses') -DEFINE_bool('fake_users', False, 'use fake users') DEFINE_string('rabbit_host', 'localhost', 'rabbit host') DEFINE_integer('rabbit_port', 5672, 'rabbit port') DEFINE_string('rabbit_userid', 'guest', 'rabbit userid') diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py index 3abef28a1..741973201 100644 --- a/nova/tests/cloud_unittest.py +++ b/nova/tests/cloud_unittest.py @@ -40,8 +40,7 @@ class CloudTestCase(test.BaseTestCase): def setUp(self): super(CloudTestCase, self).setUp() self.flags(fake_libvirt=True, - fake_storage=True, - fake_users=True) + fake_storage=True) self.conn = rpc.Connection.instance() logging.getLogger().setLevel(logging.DEBUG) diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py index d32f40d8f..57575b44b 100644 --- a/nova/tests/fake_flags.py +++ b/nova/tests/fake_flags.py @@ -24,5 +24,5 @@ FLAGS.fake_libvirt = True FLAGS.fake_storage = True FLAGS.fake_rabbit = True FLAGS.fake_network = True -FLAGS.fake_users = True +FLAGS.auth_driver = 'nova.auth.fakeldapdriver' FLAGS.verbose = True diff --git a/nova/tests/model_unittest.py b/nova/tests/model_unittest.py index 1bd7e527f..1b94e5798 100644 --- a/nova/tests/model_unittest.py +++ b/nova/tests/model_unittest.py @@ -35,8 +35,7 @@ class ModelTestCase(test.TrialTestCase): def setUp(self): super(ModelTestCase, self).setUp() self.flags(fake_libvirt=True, - fake_storage=True, - fake_users=True) + fake_storage=True) def tearDown(self): model.Instance('i-test').destroy() diff --git a/nova/tests/node_unittest.py b/nova/tests/node_unittest.py index 93942d79e..55c957696 100644 --- a/nova/tests/node_unittest.py +++ b/nova/tests/node_unittest.py @@ -58,8 +58,7 @@ class NodeConnectionTestCase(test.TrialTestCase): logging.getLogger().setLevel(logging.DEBUG) super(NodeConnectionTestCase, self).setUp() self.flags(fake_libvirt=True, - fake_storage=True, - fake_users=True) + fake_storage=True) self.node = node.Node() def create_instance(self): diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py index 85bcd7c67..1703adb62 100644 --- a/nova/tests/objectstore_unittest.py +++ b/nova/tests/objectstore_unittest.py @@ -51,8 +51,7 @@ os.makedirs(os.path.join(oss_tempdir, 'buckets')) class ObjectStoreTestCase(test.BaseTestCase): def setUp(self): super(ObjectStoreTestCase, self).setUp() - self.flags(fake_users=True, - buckets_path=os.path.join(oss_tempdir, 'buckets'), + self.flags(buckets_path=os.path.join(oss_tempdir, 'buckets'), images_path=os.path.join(oss_tempdir, 'images'), ca_path=os.path.join(os.path.dirname(__file__), 'CA')) logging.getLogger().setLevel(logging.DEBUG) diff --git a/nova/tests/real_flags.py 
b/nova/tests/real_flags.py index 9e106f227..f054a8f19 100644 --- a/nova/tests/real_flags.py +++ b/nova/tests/real_flags.py @@ -24,5 +24,4 @@ FLAGS.fake_libvirt = False FLAGS.fake_storage = False FLAGS.fake_rabbit = False FLAGS.fake_network = False -FLAGS.fake_users = False FLAGS.verbose = False -- cgit From 5066e1f55fa672f6b6eec1523b5334e6fe9609a2 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 21 Jul 2010 21:54:50 -0500 Subject: added todo for ABC --- nova/auth/ldapdriver.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index beab97e49..0535977af 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -57,6 +57,10 @@ flags.DEFINE_string('ldap_developer', 'cn=developers,ou=Groups,dc=example,dc=com', 'cn for Developers') +# TODO(vish): make an abstract base class with the same public methods +# to define a set interface for AuthDrivers. I'm delaying +# creating this now because I'm expecting an auth refactor +# in which we may want to change the interface a bit more. class AuthDriver(object): """Ldap Auth driver -- cgit From b03f05a0eb321350297e2f86a05ec4593bfc4049 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 22 Jul 2010 07:51:03 -0500 Subject: typo fixes and extra print statements removed --- bin/nova-compute | 4 ++-- bin/nova-network | 4 ++-- bin/nova-volume | 4 ++-- nova/twistd.py | 2 -- 4 files changed, 6 insertions(+), 8 deletions(-) diff --git a/bin/nova-compute b/bin/nova-compute index 67c93fcb8..1d5fa709d 100755 --- a/bin/nova-compute +++ b/bin/nova-compute @@ -22,11 +22,11 @@ """ from nova import twistd -from nova.compute import node +from nova.compute import computenode if __name__ == '__main__': twistd.serve(__file__) if __name__ == '__builtin__': - application = node.ComputeNode.create() + application = computenode.ComputeNode.create() diff --git a/bin/nova-network b/bin/nova-network index c69690081..db9d4b970 100755 --- a/bin/nova-network +++ b/bin/nova-network @@ -22,11 +22,11 @@ """ from nova import twistd -from nova.network import node +from nova.network import networknode if __name__ == '__main__': twistd.serve(__file__) if __name__ == '__builtin__': - application = node.NetworkNode.create() + application = networknode.NetworkNode.create() diff --git a/bin/nova-volume b/bin/nova-volume index cdf2782bc..2e9b530a7 100755 --- a/bin/nova-volume +++ b/bin/nova-volume @@ -22,11 +22,11 @@ """ from nova import twistd -from nova.volume import node +from nova.volume import volumenode if __name__ == '__main__': twistd.serve(__file__) if __name__ == '__builtin__': - application = node.VolumeNode.create() + application = volumenode.VolumeNode.create() diff --git a/nova/twistd.py b/nova/twistd.py index fc7dad26a..909b23590 100644 --- a/nova/twistd.py +++ b/nova/twistd.py @@ -216,12 +216,10 @@ def serve(filename): elif FLAGS.pidfile.endswith('twistd.pid'): FLAGS.pidfile = FLAGS.pidfile.replace('twistd.pid', '%s.pid' % name) - print FLAGS.logfile if not FLAGS.logfile: FLAGS.logfile = '%s.log' % name elif FLAGS.logfile.endswith('twistd.log'): FLAGS.logfile = FLAGS.logfile.replace('twistd.log', '%s.log' % name) - print FLAGS.logfile action = 'start' if len(argv) > 1: -- cgit From 700f668b58911afe2b98ae8cab79910ae817657b Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 22 Jul 2010 09:03:28 -0500 Subject: syslog changes --- nova/twistd.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/nova/twistd.py b/nova/twistd.py index 909b23590..b389a73b5 100644 
--- a/nova/twistd.py +++ b/nova/twistd.py @@ -215,11 +215,14 @@ def serve(filename): FLAGS.pidfile = '%s.pid' % name elif FLAGS.pidfile.endswith('twistd.pid'): FLAGS.pidfile = FLAGS.pidfile.replace('twistd.pid', '%s.pid' % name) - if not FLAGS.logfile: FLAGS.logfile = '%s.log' % name elif FLAGS.logfile.endswith('twistd.log'): FLAGS.logfile = FLAGS.logfile.replace('twistd.log', '%s.log' % name) + if not FLAGS.prefix: + FLAGS.prefix = name + elif FLAGS.prefix.endswith('twisted'): + FLAGS.prefix = FLAGS.prefix.replace('twisted', name) action = 'start' if len(argv) > 1: @@ -237,7 +240,7 @@ def serve(filename): sys.exit(1) formatter = logging.Formatter( - name + '(%(name)s): %(levelname)s %(message)s') + '(%(name)s): %(levelname)s %(message)r') handler = logging.StreamHandler(log.StdioOnnaStick()) handler.setFormatter(formatter) logging.getLogger().addHandler(handler) @@ -247,11 +250,6 @@ def serve(filename): else: logging.getLogger().setLevel(logging.WARNING) - if FLAGS.syslog: - syslog = logging.handlers.SysLogHandler(address='/dev/log') - syslog.setFormatter(formatter) - logging.getLogger().addHandler(syslog) - logging.debug("Full set of FLAGS:") for flag in FLAGS: logging.debug("%s : %s" % (flag, FLAGS.get(flag, None))) -- cgit From f6ae05f993016f45af2c19718a6e84e50e4a775e Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Thu, 22 Jul 2010 11:49:13 -0700 Subject: Nobody wants to take on this twisted cleanup. It works for now, but could be much nicer if twisted has a nice hook-point for exception mapping --- nova/objectstore/handler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py index c3c4486bf..098e7a167 100644 --- a/nova/objectstore/handler.py +++ b/nova/objectstore/handler.py @@ -118,7 +118,7 @@ def get_context(request): class ErrorHandlingResource(Resource): """Maps exceptions to 404 / 401 codes. Won't work for exceptions thrown after NOT_DONE_YET is returned.""" - # TODO: This needs to be plugged in to the right place in twisted... + # TODO(unassigned) (calling-all-twisted-experts): This needs to be plugged in to the right place in twisted... # This doesn't look like it's the right place (consider exceptions in getChild; or after NOT_DONE_YET is returned def render(self, request): try: -- cgit From d0f3ad141b848a927c452ba6c71180b21047bef8 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 22 Jul 2010 17:45:18 -0500 Subject: Fix syslogging of exceptions by stripping newlines from the exception info --- nova/twistd.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/nova/twistd.py b/nova/twistd.py index b389a73b5..ecb6e2892 100644 --- a/nova/twistd.py +++ b/nova/twistd.py @@ -22,7 +22,6 @@ manage pid files and support syslogging. 
""" import logging -import logging.handlers import os import signal import sys @@ -239,8 +238,16 @@ def serve(filename): print 'usage: %s [options] [start|stop|restart]' % argv[0] sys.exit(1) - formatter = logging.Formatter( - '(%(name)s): %(levelname)s %(message)r') + class NoNewlineFormatter(logging.Formatter): + """Strips newlines from default formatter""" + def format(self, record): + """Grabs default formatter's output and strips newlines""" + data = logging.Formatter.format(self, record) + return data.replace("\n", "--") + + # NOTE(vish): syslog-ng doesn't handle newlines from trackbacks very well + formatter = NoNewlineFormatter( + '(%(name)s): %(levelname)s %(message)s') handler = logging.StreamHandler(log.StdioOnnaStick()) handler.setFormatter(formatter) logging.getLogger().addHandler(handler) -- cgit From a766736b79f794f745438dd597f2aa529ad62ddc Mon Sep 17 00:00:00 2001 From: Andy Smith Date: Fri, 23 Jul 2010 04:44:23 +0200 Subject: Adds a Makefile to fill dependencies for testing. Depends upon pip being installed, but pip is pretty much the standard nowadays and is just an easy_install away if it isn't there. The only dependency installed on to the system is virtualenv which is used to make the other dependencies local to the current environment. Does not remove the need to install redis by hand, though I am in favor of making that possible (using aptitude on linux and brew on os x) I look forward to cutting away at some of these dependencies in further commits. --- Makefile | 27 +++++++++++++++++++++++++++ tools/pip-requires | 14 ++++++++++++++ 2 files changed, 41 insertions(+) create mode 100644 Makefile create mode 100644 tools/pip-requires diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..da69f2b72 --- /dev/null +++ b/Makefile @@ -0,0 +1,27 @@ +venv=.venv +with_venv=source $(venv)/bin/activate +installed=$(venv)/lib/python2.6/site-packages +twisted=$(installed)/twisted/__init__.py + + +test: python-dependencies $(twisted) + $(with_venv) && python run_tests.py + +clean: + rm -rf _trial_temp + rm -rf keys + rm -rf instances + rm -rf networks + +clean-all: clean + rm -rf $(venv) + +python-dependencies: $(venv) + pip install -q -E $(venv) -r tools/pip-requires + +$(venv): + pip install -q virtualenv + virtualenv -q --no-site-packages $(venv) + +$(twisted): + pip install -q -E $(venv) http://nova.openstack.org/Twisted-10.0.0Nova.tar.gz diff --git a/tools/pip-requires b/tools/pip-requires new file mode 100644 index 000000000..edb5fc01c --- /dev/null +++ b/tools/pip-requires @@ -0,0 +1,14 @@ +IPy==0.70 +M2Crypto==0.20.2 +amqplib==0.6.1 +anyjson==0.2.4 +boto==2.0b1 +carrot==0.10.5 +lockfile==0.8 +mox==0.5.0 +python-daemon==1.5.5 +python-gflags==1.3 +redis==2.0.0 +tornado==1.0 +wsgiref==0.1.2 +zope.interface==3.6.1 -- cgit From f3350750a7c68b191c3a85ac9caa5eff59b182a6 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Fri, 23 Jul 2010 08:03:26 -0500 Subject: Check signature for S3 requests. 
--- nova/auth/signer.py | 8 ++++++++ nova/auth/users.py | 13 +++++++++++-- nova/objectstore/handler.py | 14 +++++++------- 3 files changed, 26 insertions(+), 9 deletions(-) diff --git a/nova/auth/signer.py b/nova/auth/signer.py index 83831bfac..7d7471575 100644 --- a/nova/auth/signer.py +++ b/nova/auth/signer.py @@ -48,6 +48,7 @@ import hashlib import hmac import logging import urllib +import boto.utils from nova.exception import Error @@ -59,6 +60,13 @@ class Signer(object): if hashlib.sha256: self.hmac_256 = hmac.new(secret_key, digestmod=hashlib.sha256) + def s3_authorization(self, headers, verb, path): + c_string = boto.utils.canonical_string(verb, path, headers) + hmac = self.hmac.copy() + hmac.update(c_string) + b64_hmac = base64.encodestring(hmac.digest()).strip() + return b64_hmac + def generate(self, params, verb, server_string, path): if params['SignatureVersion'] == '0': return self._calc_signature_0(params) diff --git a/nova/auth/users.py b/nova/auth/users.py index fc08dc34d..0e9ca4eeb 100644 --- a/nova/auth/users.py +++ b/nova/auth/users.py @@ -395,11 +395,13 @@ class UserManager(object): def authenticate(self, access, signature, params, verb='GET', server_string='127.0.0.1:8773', path='/', - verify_signature=True): + check_type='ec2', headers=None): # TODO: Check for valid timestamp (access_key, sep, project_name) = access.partition(':') + logging.info('Looking up user: %r', access_key) user = self.get_user_from_access_key(access_key) + logging.info('user: %r', user) if user == None: raise exception.NotFound('No user found for access key %s' % access_key) @@ -413,7 +415,14 @@ class UserManager(object): if not user.is_admin() and not project.has_member(user): raise exception.NotFound('User %s is not a member of project %s' % (user.id, project.id)) - if verify_signature: + if check_type == 's3': + expected_signature = signer.Signer(user.secret.encode()).s3_authorization(headers, verb, path) + logging.debug('user.secret: %s', user.secret) + logging.debug('expected_signature: %s', expected_signature) + logging.debug('signature: %s', signature) + if signature != expected_signature: + raise exception.NotAuthorized('Signature does not match') + elif check_type == 'ec2': # NOTE(vish): hmac can't handle unicode, so encode ensures that # secret isn't unicode expected_signature = signer.Signer(user.secret.encode()).generate( diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py index b2ed3d482..655cab752 100644 --- a/nova/objectstore/handler.py +++ b/nova/objectstore/handler.py @@ -106,8 +106,8 @@ def get_context(request): access, sep, secret = request.getHeader('Authorization').split(' ')[1].rpartition(':') um = users.UserManager.instance() print 'um %s' % um - (user, project) = um.authenticate(access, secret, {}, request.method, request.host, request.uri, False) - # FIXME: check signature here! 
+ + (user, project) = um.authenticate(access, secret, {}, request.method, request.getRequestHostname(), request.uri, headers=request.getAllHeaders(), check_type='s3') return api.APIRequestContext(None, user, project) except exception.Error, ex: logging.debug("Authentication Failure: %s" % ex) @@ -117,7 +117,6 @@ class S3(Resource): """Implementation of an S3-like storage server based on local files.""" def getChild(self, name, request): request.context = get_context(request) - if name == '': return self elif name == '_images': @@ -126,6 +125,7 @@ class S3(Resource): return BucketResource(name) def render_GET(self, request): + logging.debug('List of buckets requested') buckets = [b for b in bucket.Bucket.all() if b.is_authorized(request.context)] render_xml(request, {"ListAllMyBucketsResult": { @@ -169,7 +169,8 @@ class BucketResource(Resource): logging.exception(e) logging.debug("calling bucket.Bucket.create(%r, %r)" % (self.name, request.context)) bucket.Bucket.create(self.name, request.context) - return '' + request.finish() + return server.NOT_DONE_YET def render_DELETE(self, request): logging.debug("Deleting bucket %s" % (self.name)) @@ -225,8 +226,6 @@ class ObjectResource(Resource): return '' class ImageResource(Resource): - isLeaf = True - def getChild(self, name, request): if name == '': return self @@ -239,9 +238,10 @@ class ImageResource(Resource): """ returns a json listing of all images that a user has permissions to see """ - images = [i for i in image.Image.all() if i.is_authorized(self.context)] + images = [i for i in image.Image.all() if i.is_authorized(request.context)] request.write(json.dumps([i.metadata for i in images])) + request.finish() return server.NOT_DONE_YET def render_PUT(self, request): -- cgit From abac2033b4aeb399786735a94ce2ddf5c64e6e9e Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Fri, 23 Jul 2010 23:55:39 +0200 Subject: Add (completely untested) code to include an Authorization header for the S3 request to fetch an image. 
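The idea is for the compute node to fetch images from the objectstore with the
same S3-style Authorization header that the handler now verifies, by handing
the header to curl. As the subject says, this is untested: the hunk below
builds auth_header but the format string still has only two placeholders, so
the -H flag is not wired in yet. Roughly, the intended invocation would look
like this (illustrative sketch only, reusing the names from the hunk below):

    auth_header = 'Authorization: %s:%s' % (user_id, auth)
    d = process.simple_execute(
        'curl --silent -H "%s" %s -o %s' % (auth_header, url, path))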
--- nova/compute/node.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/nova/compute/node.py b/nova/compute/node.py index 4683f1c8d..7c09d9583 100644 --- a/nova/compute/node.py +++ b/nova/compute/node.py @@ -25,6 +25,7 @@ Compute Node: """ import base64 +import boto.utils import json import logging import os @@ -45,11 +46,13 @@ from nova import fakevirt from nova import flags from nova import process from nova import utils +from nova.auth import signer from nova.compute import disk from nova.compute import model from nova.compute import network from nova.objectstore import image # for image_path flag from nova.volume import storage +from nova.users import UserManager FLAGS = flags.FLAGS @@ -446,8 +449,12 @@ class Instance(object): def _fetch_s3_image(self, image, path): url = _image_url('%s/image' % image) + user_id = self.datamodel['user_id'] + user = UserManager.instance().get_user(user_id) + auth = signer.Signer(user.secret.encode()).s3_authorization({}, 'GET', url) + auth_header = 'Authorization: %s:%s' % (user_id, auth) d = process.simple_execute( - 'curl --silent %s -o %s' % (url, path)) + 'curl --silent %s -o "%s"' % (url, auth_header, path)) return d def _fetch_local_image(self, image, path): -- cgit From 35fda702abf91792d3c4753a1bbccdb119eaf6eb Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 23 Jul 2010 15:27:18 -0700 Subject: renamed xxxnode to xxservice --- bin/nova-compute | 4 +- bin/nova-network | 4 +- bin/nova-volume | 4 +- nova/compute/computenode.py | 582 ----------------------------------------- nova/compute/computeservice.py | 581 ++++++++++++++++++++++++++++++++++++++++ nova/endpoint/cloud.py | 15 +- nova/network/networknode.py | 35 --- nova/network/networkservice.py | 35 +++ nova/node.py | 103 -------- nova/service.py | 103 ++++++++ nova/test.py | 8 +- nova/tests/cloud_unittest.py | 18 +- nova/tests/compute_unittest.py | 36 +-- nova/tests/volume_unittest.py | 46 ++-- nova/volume/volumenode.py | 305 --------------------- nova/volume/volumeservice.py | 304 +++++++++++++++++++++ 16 files changed, 1090 insertions(+), 1093 deletions(-) delete mode 100644 nova/compute/computenode.py create mode 100644 nova/compute/computeservice.py delete mode 100644 nova/network/networknode.py create mode 100644 nova/network/networkservice.py delete mode 100644 nova/node.py create mode 100644 nova/service.py delete mode 100644 nova/volume/volumenode.py create mode 100644 nova/volume/volumeservice.py diff --git a/bin/nova-compute b/bin/nova-compute index 1d5fa709d..7ef5d074a 100755 --- a/bin/nova-compute +++ b/bin/nova-compute @@ -22,11 +22,11 @@ """ from nova import twistd -from nova.compute import computenode +from nova.compute import computeservice if __name__ == '__main__': twistd.serve(__file__) if __name__ == '__builtin__': - application = computenode.ComputeNode.create() + application = computeservice.ComputeService.create() diff --git a/bin/nova-network b/bin/nova-network index db9d4b970..0d3aa0002 100755 --- a/bin/nova-network +++ b/bin/nova-network @@ -22,11 +22,11 @@ """ from nova import twistd -from nova.network import networknode +from nova.network import networkservice if __name__ == '__main__': twistd.serve(__file__) if __name__ == '__builtin__': - application = networknode.NetworkNode.create() + application = networkservice.NetworkService.create() diff --git a/bin/nova-volume b/bin/nova-volume index 2e9b530a7..c1c0163cf 100755 --- a/bin/nova-volume +++ b/bin/nova-volume @@ -22,11 +22,11 @@ """ from nova import twistd -from nova.volume 
import volumenode +from nova.volume import volumeservice if __name__ == '__main__': twistd.serve(__file__) if __name__ == '__builtin__': - application = volumenode.VolumeNode.create() + application = volumeservice.VolumeService.create() diff --git a/nova/compute/computenode.py b/nova/compute/computenode.py deleted file mode 100644 index f26da1996..000000000 --- a/nova/compute/computenode.py +++ /dev/null @@ -1,582 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Compute Node: - - Runs on each compute node, managing the - hypervisor using libvirt. - -""" - -import base64 -import json -import logging -import os -import shutil -import sys -from twisted.internet import defer -from twisted.internet import task -from twisted.application import service - - -try: - import libvirt -except Exception, err: - logging.warning('no libvirt found') - -from nova import exception -from nova import fakevirt -from nova import flags -from nova import node -from nova import process -from nova import utils -from nova.compute import disk -from nova.compute import model -from nova.compute import network -from nova.objectstore import image # for image_path flag -from nova.volume import volumenode - - -FLAGS = flags.FLAGS -flags.DEFINE_string('libvirt_xml_template', - utils.abspath('compute/libvirt.xml.template'), - 'Libvirt XML Template') -flags.DEFINE_bool('use_s3', True, - 'whether to get images from s3 or use local copy') -flags.DEFINE_string('instances_path', utils.abspath('../instances'), - 'where instances are stored on disk') - -INSTANCE_TYPES = {} -INSTANCE_TYPES['m1.tiny'] = {'memory_mb': 512, 'vcpus': 1, 'local_gb': 0} -INSTANCE_TYPES['m1.small'] = {'memory_mb': 1024, 'vcpus': 1, 'local_gb': 10} -INSTANCE_TYPES['m1.medium'] = {'memory_mb': 2048, 'vcpus': 2, 'local_gb': 10} -INSTANCE_TYPES['m1.large'] = {'memory_mb': 4096, 'vcpus': 4, 'local_gb': 10} -INSTANCE_TYPES['m1.xlarge'] = {'memory_mb': 8192, 'vcpus': 4, 'local_gb': 10} -INSTANCE_TYPES['c1.medium'] = {'memory_mb': 2048, 'vcpus': 4, 'local_gb': 10} - - -def _image_path(path=''): - return os.path.join(FLAGS.images_path, path) - - -def _image_url(path): - return "%s:%s/_images/%s" % (FLAGS.s3_host, FLAGS.s3_port, path) - - -class ComputeNode(node.Node): - """ - Manages the running instances. 
- """ - def __init__(self): - """ load configuration options for this node and connect to libvirt """ - super(ComputeNode, self).__init__() - self._instances = {} - self._conn = self._get_connection() - self.instdir = model.InstanceDirectory() - # TODO(joshua): This needs to ensure system state, specifically: modprobe aoe - - def _get_connection(self): - """ returns a libvirt connection object """ - # TODO(termie): maybe lazy load after initial check for permissions - # TODO(termie): check whether we can be disconnected - if FLAGS.fake_libvirt: - conn = fakevirt.FakeVirtConnection.instance() - else: - auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], - 'root', - None] - conn = libvirt.openAuth('qemu:///system', auth, 0) - if conn == None: - logging.error('Failed to open connection to the hypervisor') - sys.exit(1) - return conn - - def noop(self): - """ simple test of an AMQP message call """ - return defer.succeed('PONG') - - def get_instance(self, instance_id): - # inst = self.instdir.get(instance_id) - # return inst - if self.instdir.exists(instance_id): - return Instance.fromName(self._conn, instance_id) - return None - - @exception.wrap_exception - def adopt_instances(self): - """ if there are instances already running, adopt them """ - return defer.succeed(0) - instance_names = [self._conn.lookupByID(x).name() - for x in self._conn.listDomainsID()] - for name in instance_names: - try: - new_inst = Instance.fromName(self._conn, name) - new_inst.update_state() - except: - pass - return defer.succeed(len(self._instances)) - - @exception.wrap_exception - def describe_instances(self): - retval = {} - for inst in self.instdir.by_node(FLAGS.node_name): - retval[inst['instance_id']] = ( - Instance.fromName(self._conn, inst['instance_id'])) - return retval - - @defer.inlineCallbacks - def report_state(self, nodename, daemon): - # TODO(termie): make this pattern be more elegant. -todd - try: - record = model.Daemon(nodename, daemon) - record.heartbeat() - if getattr(self, "model_disconnected", False): - self.model_disconnected = False - logging.error("Recovered model server connection!") - - except model.ConnectionError, ex: - if not getattr(self, "model_disconnected", False): - self.model_disconnected = True - logging.exception("model server went away") - yield - - # @exception.wrap_exception - def run_instance(self, instance_id, **_kwargs): - """ launch a new instance with specified options """ - logging.debug("Starting instance %s..." 
% (instance_id)) - inst = self.instdir.get(instance_id) - if not FLAGS.simple_network: - # TODO: Get the real security group of launch in here - security_group = "default" - net = network.BridgedNetwork.get_network_for_project(inst['user_id'], - inst['project_id'], - security_group).express() - inst['node_name'] = FLAGS.node_name - inst.save() - # TODO(vish) check to make sure the availability zone matches - new_inst = Instance(self._conn, name=instance_id, data=inst) - logging.info("Instances current state is %s", new_inst.state) - if new_inst.is_running(): - raise exception.Error("Instance is already running") - d = new_inst.spawn() - return d - - @exception.wrap_exception - def terminate_instance(self, instance_id): - """ terminate an instance on this machine """ - logging.debug("Got told to terminate instance %s" % instance_id) - instance = self.get_instance(instance_id) - # inst = self.instdir.get(instance_id) - if not instance: - raise exception.Error( - 'trying to terminate unknown instance: %s' % instance_id) - d = instance.destroy() - # d.addCallback(lambda x: inst.destroy()) - return d - - @exception.wrap_exception - def reboot_instance(self, instance_id): - """ reboot an instance on this server - KVM doesn't support reboot, so we terminate and restart """ - instance = self.get_instance(instance_id) - if not instance: - raise exception.Error( - 'trying to reboot unknown instance: %s' % instance_id) - return instance.reboot() - - @defer.inlineCallbacks - @exception.wrap_exception - def get_console_output(self, instance_id): - """ send the console output for an instance """ - logging.debug("Getting console output for %s" % (instance_id)) - inst = self.instdir.get(instance_id) - instance = self.get_instance(instance_id) - if not instance: - raise exception.Error( - 'trying to get console log for unknown: %s' % instance_id) - rv = yield instance.console_output() - # TODO(termie): this stuff belongs in the API layer, no need to - # munge the data we send to ourselves - output = {"InstanceId" : instance_id, - "Timestamp" : "2", - "output" : base64.b64encode(rv)} - defer.returnValue(output) - - @defer.inlineCallbacks - @exception.wrap_exception - def attach_volume(self, instance_id = None, - volume_id = None, mountpoint = None): - volume = volumenode.get_volume(volume_id) - yield self._init_aoe() - yield process.simple_execute( - "sudo virsh attach-disk %s /dev/etherd/%s %s" % - (instance_id, - volume['aoe_device'], - mountpoint.rpartition('/dev/')[2])) - volume.finish_attach() - defer.returnValue(True) - - @defer.inlineCallbacks - def _init_aoe(self): - yield process.simple_execute("sudo aoe-discover") - yield process.simple_execute("sudo aoe-stat") - - @defer.inlineCallbacks - @exception.wrap_exception - def detach_volume(self, instance_id, volume_id): - """ detach a volume from an instance """ - # despite the documentation, virsh detach-disk just wants the device - # name without the leading /dev/ - volume = volumenode.get_volume(volume_id) - target = volume['mountpoint'].rpartition('/dev/')[2] - yield process.simple_execute( - "sudo virsh detach-disk %s %s " % (instance_id, target)) - volume.finish_detach() - defer.returnValue(True) - - -class Group(object): - def __init__(self, group_id): - self.group_id = group_id - - -class ProductCode(object): - def __init__(self, product_code): - self.product_code = product_code - - -class Instance(object): - - NOSTATE = 0x00 - RUNNING = 0x01 - BLOCKED = 0x02 - PAUSED = 0x03 - SHUTDOWN = 0x04 - SHUTOFF = 0x05 - CRASHED = 0x06 - - def 
__init__(self, conn, name, data): - """ spawn an instance with a given name """ - self._conn = conn - # TODO(vish): this can be removed after data has been updated - # data doesn't seem to have a working iterator so in doesn't work - if data.get('owner_id', None) is not None: - data['user_id'] = data['owner_id'] - data['project_id'] = data['owner_id'] - self.datamodel = data - - size = data.get('instance_type', FLAGS.default_instance_type) - if size not in INSTANCE_TYPES: - raise exception.Error('invalid instance type: %s' % size) - - self.datamodel.update(INSTANCE_TYPES[size]) - - self.datamodel['name'] = name - self.datamodel['instance_id'] = name - self.datamodel['basepath'] = data.get( - 'basepath', os.path.abspath( - os.path.join(FLAGS.instances_path, self.name))) - self.datamodel['memory_kb'] = int(self.datamodel['memory_mb']) * 1024 - self.datamodel.setdefault('image_id', FLAGS.default_image) - self.datamodel.setdefault('kernel_id', FLAGS.default_kernel) - self.datamodel.setdefault('ramdisk_id', FLAGS.default_ramdisk) - self.datamodel.setdefault('project_id', self.datamodel['user_id']) - self.datamodel.setdefault('bridge_name', None) - #self.datamodel.setdefault('key_data', None) - #self.datamodel.setdefault('key_name', None) - #self.datamodel.setdefault('addressing_type', None) - - # TODO(joshua) - The ugly non-flat ones - self.datamodel['groups'] = data.get('security_group', 'default') - # TODO(joshua): Support product codes somehow - self.datamodel.setdefault('product_codes', None) - - self.datamodel.save() - logging.debug("Finished init of Instance with id of %s" % name) - - def toXml(self): - # TODO(termie): cache? - logging.debug("Starting the toXML method") - libvirt_xml = open(FLAGS.libvirt_xml_template).read() - xml_info = self.datamodel.copy() - # TODO(joshua): Make this xml express the attached disks as well - - # TODO(termie): lazy lazy hack because xml is annoying - xml_info['nova'] = json.dumps(self.datamodel.copy()) - libvirt_xml = libvirt_xml % xml_info - logging.debug("Finished the toXML method") - - return libvirt_xml - - @classmethod - def fromName(cls, conn, name): - """ use the saved data for reloading the instance """ - instdir = model.InstanceDirectory() - instance = instdir.get(name) - return cls(conn=conn, name=name, data=instance) - - def set_state(self, state_code, state_description=None): - self.datamodel['state'] = state_code - if not state_description: - state_description = STATE_NAMES[state_code] - self.datamodel['state_description'] = state_description - self.datamodel.save() - - @property - def state(self): - # it is a string in datamodel - return int(self.datamodel['state']) - - @property - def name(self): - return self.datamodel['name'] - - def is_pending(self): - return (self.state == Instance.NOSTATE or self.state == 'pending') - - def is_destroyed(self): - return self.state == Instance.SHUTOFF - - def is_running(self): - logging.debug("Instance state is: %s" % self.state) - return (self.state == Instance.RUNNING or self.state == 'running') - - def describe(self): - return self.datamodel - - def info(self): - logging.debug("Getting info for dom %s" % self.name) - virt_dom = self._conn.lookupByName(self.name) - (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info() - return {'state': state, - 'max_mem': max_mem, - 'mem': mem, - 'num_cpu': num_cpu, - 'cpu_time': cpu_time, - 'node_name': FLAGS.node_name} - - def basepath(self, path=''): - return os.path.abspath(os.path.join(self.datamodel['basepath'], path)) - - def update_state(self): - 
self.datamodel.update(self.info()) - self.set_state(self.state) - self.datamodel.save() # Extra, but harmless - - @exception.wrap_exception - def destroy(self): - if self.is_destroyed(): - self.datamodel.destroy() - raise exception.Error('trying to destroy already destroyed' - ' instance: %s' % self.name) - - self.set_state(Instance.NOSTATE, 'shutting_down') - try: - virt_dom = self._conn.lookupByName(self.name) - virt_dom.destroy() - except Exception, _err: - pass - # If the instance is already terminated, we're still happy - d = defer.Deferred() - d.addCallback(lambda x: self._cleanup()) - d.addCallback(lambda x: self.datamodel.destroy()) - # TODO(termie): short-circuit me for tests - # WE'LL save this for when we do shutdown, - # instead of destroy - but destroy returns immediately - timer = task.LoopingCall(f=None) - def _wait_for_shutdown(): - try: - self.update_state() - if self.state == Instance.SHUTDOWN: - timer.stop() - d.callback(None) - except Exception: - self.set_state(Instance.SHUTDOWN) - timer.stop() - d.callback(None) - timer.f = _wait_for_shutdown - timer.start(interval=0.5, now=True) - return d - - def _cleanup(self): - target = os.path.abspath(self.datamodel['basepath']) - logging.info("Deleting instance files at %s", target) - shutil.rmtree(target) - - @defer.inlineCallbacks - @exception.wrap_exception - def reboot(self): - if not self.is_running(): - raise exception.Error( - 'trying to reboot a non-running' - 'instance: %s (state: %s)' % (self.name, self.state)) - - logging.debug('rebooting instance %s' % self.name) - self.set_state(Instance.NOSTATE, 'rebooting') - yield self._conn.lookupByName(self.name).destroy() - self._conn.createXML(self.toXml(), 0) - - d = defer.Deferred() - timer = task.LoopingCall(f=None) - def _wait_for_reboot(): - try: - self.update_state() - if self.is_running(): - logging.debug('rebooted instance %s' % self.name) - timer.stop() - d.callback(None) - except Exception: - self.set_state(Instance.SHUTDOWN) - timer.stop() - d.callback(None) - timer.f = _wait_for_reboot - timer.start(interval=0.5, now=True) - yield d - - def _fetch_s3_image(self, image, path): - url = _image_url('%s/image' % image) - d = process.simple_execute( - 'curl --silent %s -o %s' % (url, path)) - return d - - def _fetch_local_image(self, image, path): - source = _image_path('%s/image' % image) - d = process.simple_execute('cp %s %s' % (source, path)) - return d - - @defer.inlineCallbacks - def _create_image(self, libvirt_xml): - # syntactic nicety - data = self.datamodel - basepath = self.basepath - - # ensure directories exist and are writable - yield process.simple_execute( - 'mkdir -p %s' % basepath()) - yield process.simple_execute( - 'chmod 0777 %s' % basepath()) - - - # TODO(termie): these are blocking calls, it would be great - # if they weren't. 
- logging.info('Creating image for: %s', data['instance_id']) - f = open(basepath('libvirt.xml'), 'w') - f.write(libvirt_xml) - f.close() - - if FLAGS.fake_libvirt: - logging.info('fake_libvirt, nothing to do for create_image') - raise defer.returnValue(None); - - if FLAGS.use_s3: - _fetch_file = self._fetch_s3_image - else: - _fetch_file = self._fetch_local_image - - if not os.path.exists(basepath('disk')): - yield _fetch_file(data['image_id'], basepath('disk-raw')) - if not os.path.exists(basepath('kernel')): - yield _fetch_file(data['kernel_id'], basepath('kernel')) - if not os.path.exists(basepath('ramdisk')): - yield _fetch_file(data['ramdisk_id'], basepath('ramdisk')) - - execute = lambda cmd, input=None: \ - process.simple_execute(cmd=cmd, - input=input, - error_ok=1) - - key = data['key_data'] - net = None - if FLAGS.simple_network: - with open(FLAGS.simple_network_template) as f: - net = f.read() % {'address': data['private_dns_name'], - 'network': FLAGS.simple_network_network, - 'netmask': FLAGS.simple_network_netmask, - 'gateway': FLAGS.simple_network_gateway, - 'broadcast': FLAGS.simple_network_broadcast, - 'dns': FLAGS.simple_network_dns} - if key or net: - logging.info('Injecting data into image %s', data['image_id']) - yield disk.inject_data(basepath('disk-raw'), key, net, execute=execute) - - if os.path.exists(basepath('disk')): - yield process.simple_execute( - 'rm -f %s' % basepath('disk')) - - bytes = (INSTANCE_TYPES[data['instance_type']]['local_gb'] - * 1024 * 1024 * 1024) - yield disk.partition( - basepath('disk-raw'), basepath('disk'), bytes, execute=execute) - - @defer.inlineCallbacks - @exception.wrap_exception - def spawn(self): - self.set_state(Instance.NOSTATE, 'spawning') - logging.debug("Starting spawn in Instance") - - xml = self.toXml() - self.set_state(Instance.NOSTATE, 'launching') - logging.info('self %s', self) - try: - yield self._create_image(xml) - self._conn.createXML(xml, 0) - # TODO(termie): this should actually register - # a callback to check for successful boot - logging.debug("Instance is running") - - local_d = defer.Deferred() - timer = task.LoopingCall(f=None) - def _wait_for_boot(): - try: - self.update_state() - if self.is_running(): - logging.debug('booted instance %s' % self.name) - timer.stop() - local_d.callback(None) - except Exception: - self.set_state(Instance.SHUTDOWN) - logging.error('Failed to boot instance %s' % self.name) - timer.stop() - local_d.callback(None) - timer.f = _wait_for_boot - timer.start(interval=0.5, now=True) - except Exception, ex: - logging.debug(ex) - self.set_state(Instance.SHUTDOWN) - - @exception.wrap_exception - def console_output(self): - if not FLAGS.fake_libvirt: - fname = os.path.abspath( - os.path.join(self.datamodel['basepath'], 'console.log')) - with open(fname, 'r') as f: - console = f.read() - else: - console = 'FAKE CONSOLE OUTPUT' - return defer.succeed(console) - -STATE_NAMES = { - Instance.NOSTATE : 'pending', - Instance.RUNNING : 'running', - Instance.BLOCKED : 'blocked', - Instance.PAUSED : 'paused', - Instance.SHUTDOWN : 'shutdown', - Instance.SHUTOFF : 'shutdown', - Instance.CRASHED : 'crashed', -} diff --git a/nova/compute/computeservice.py b/nova/compute/computeservice.py new file mode 100644 index 000000000..5568e3888 --- /dev/null +++ b/nova/compute/computeservice.py @@ -0,0 +1,581 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. 
+# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Compute Service: + + Runs on each compute host, managing the + hypervisor using libvirt. + +""" + +import base64 +import json +import logging +import os +import shutil +import sys +from twisted.internet import defer +from twisted.internet import task + + +try: + import libvirt +except Exception, err: + logging.warning('no libvirt found') + +from nova import exception +from nova import fakevirt +from nova import flags +from nova import process +from nova import service +from nova import utils +from nova.compute import disk +from nova.compute import model +from nova.compute import network +from nova.objectstore import image # for image_path flag +from nova.volume import volumeservice + + +FLAGS = flags.FLAGS +flags.DEFINE_string('libvirt_xml_template', + utils.abspath('compute/libvirt.xml.template'), + 'Libvirt XML Template') +flags.DEFINE_bool('use_s3', True, + 'whether to get images from s3 or use local copy') +flags.DEFINE_string('instances_path', utils.abspath('../instances'), + 'where instances are stored on disk') + +INSTANCE_TYPES = {} +INSTANCE_TYPES['m1.tiny'] = {'memory_mb': 512, 'vcpus': 1, 'local_gb': 0} +INSTANCE_TYPES['m1.small'] = {'memory_mb': 1024, 'vcpus': 1, 'local_gb': 10} +INSTANCE_TYPES['m1.medium'] = {'memory_mb': 2048, 'vcpus': 2, 'local_gb': 10} +INSTANCE_TYPES['m1.large'] = {'memory_mb': 4096, 'vcpus': 4, 'local_gb': 10} +INSTANCE_TYPES['m1.xlarge'] = {'memory_mb': 8192, 'vcpus': 4, 'local_gb': 10} +INSTANCE_TYPES['c1.medium'] = {'memory_mb': 2048, 'vcpus': 4, 'local_gb': 10} + + +def _image_path(path=''): + return os.path.join(FLAGS.images_path, path) + + +def _image_url(path): + return "%s:%s/_images/%s" % (FLAGS.s3_host, FLAGS.s3_port, path) + + +class ComputeService(service.Service): + """ + Manages the running instances. 
+ """ + def __init__(self): + """ load configuration options for this node and connect to libvirt """ + super(ComputeService, self).__init__() + self._instances = {} + self._conn = self._get_connection() + self.instdir = model.InstanceDirectory() + # TODO(joshua): This needs to ensure system state, specifically: modprobe aoe + + def _get_connection(self): + """ returns a libvirt connection object """ + # TODO(termie): maybe lazy load after initial check for permissions + # TODO(termie): check whether we can be disconnected + if FLAGS.fake_libvirt: + conn = fakevirt.FakeVirtConnection.instance() + else: + auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], + 'root', + None] + conn = libvirt.openAuth('qemu:///system', auth, 0) + if conn == None: + logging.error('Failed to open connection to the hypervisor') + sys.exit(1) + return conn + + def noop(self): + """ simple test of an AMQP message call """ + return defer.succeed('PONG') + + def get_instance(self, instance_id): + # inst = self.instdir.get(instance_id) + # return inst + if self.instdir.exists(instance_id): + return Instance.fromName(self._conn, instance_id) + return None + + @exception.wrap_exception + def adopt_instances(self): + """ if there are instances already running, adopt them """ + return defer.succeed(0) + instance_names = [self._conn.lookupByID(x).name() + for x in self._conn.listDomainsID()] + for name in instance_names: + try: + new_inst = Instance.fromName(self._conn, name) + new_inst.update_state() + except: + pass + return defer.succeed(len(self._instances)) + + @exception.wrap_exception + def describe_instances(self): + retval = {} + for inst in self.instdir.by_node(FLAGS.node_name): + retval[inst['instance_id']] = ( + Instance.fromName(self._conn, inst['instance_id'])) + return retval + + @defer.inlineCallbacks + def report_state(self, nodename, daemon): + # TODO(termie): make this pattern be more elegant. -todd + try: + record = model.Daemon(nodename, daemon) + record.heartbeat() + if getattr(self, "model_disconnected", False): + self.model_disconnected = False + logging.error("Recovered model server connection!") + + except model.ConnectionError, ex: + if not getattr(self, "model_disconnected", False): + self.model_disconnected = True + logging.exception("model server went away") + yield + + # @exception.wrap_exception + def run_instance(self, instance_id, **_kwargs): + """ launch a new instance with specified options """ + logging.debug("Starting instance %s..." 
% (instance_id)) + inst = self.instdir.get(instance_id) + if not FLAGS.simple_network: + # TODO: Get the real security group of launch in here + security_group = "default" + net = network.BridgedNetwork.get_network_for_project(inst['user_id'], + inst['project_id'], + security_group).express() + inst['node_name'] = FLAGS.node_name + inst.save() + # TODO(vish) check to make sure the availability zone matches + new_inst = Instance(self._conn, name=instance_id, data=inst) + logging.info("Instances current state is %s", new_inst.state) + if new_inst.is_running(): + raise exception.Error("Instance is already running") + d = new_inst.spawn() + return d + + @exception.wrap_exception + def terminate_instance(self, instance_id): + """ terminate an instance on this machine """ + logging.debug("Got told to terminate instance %s" % instance_id) + instance = self.get_instance(instance_id) + # inst = self.instdir.get(instance_id) + if not instance: + raise exception.Error( + 'trying to terminate unknown instance: %s' % instance_id) + d = instance.destroy() + # d.addCallback(lambda x: inst.destroy()) + return d + + @exception.wrap_exception + def reboot_instance(self, instance_id): + """ reboot an instance on this server + KVM doesn't support reboot, so we terminate and restart """ + instance = self.get_instance(instance_id) + if not instance: + raise exception.Error( + 'trying to reboot unknown instance: %s' % instance_id) + return instance.reboot() + + @defer.inlineCallbacks + @exception.wrap_exception + def get_console_output(self, instance_id): + """ send the console output for an instance """ + logging.debug("Getting console output for %s" % (instance_id)) + inst = self.instdir.get(instance_id) + instance = self.get_instance(instance_id) + if not instance: + raise exception.Error( + 'trying to get console log for unknown: %s' % instance_id) + rv = yield instance.console_output() + # TODO(termie): this stuff belongs in the API layer, no need to + # munge the data we send to ourselves + output = {"InstanceId" : instance_id, + "Timestamp" : "2", + "output" : base64.b64encode(rv)} + defer.returnValue(output) + + @defer.inlineCallbacks + @exception.wrap_exception + def attach_volume(self, instance_id = None, + volume_id = None, mountpoint = None): + volume = volumeservice.get_volume(volume_id) + yield self._init_aoe() + yield process.simple_execute( + "sudo virsh attach-disk %s /dev/etherd/%s %s" % + (instance_id, + volume['aoe_device'], + mountpoint.rpartition('/dev/')[2])) + volume.finish_attach() + defer.returnValue(True) + + @defer.inlineCallbacks + def _init_aoe(self): + yield process.simple_execute("sudo aoe-discover") + yield process.simple_execute("sudo aoe-stat") + + @defer.inlineCallbacks + @exception.wrap_exception + def detach_volume(self, instance_id, volume_id): + """ detach a volume from an instance """ + # despite the documentation, virsh detach-disk just wants the device + # name without the leading /dev/ + volume = volumeservice.get_volume(volume_id) + target = volume['mountpoint'].rpartition('/dev/')[2] + yield process.simple_execute( + "sudo virsh detach-disk %s %s " % (instance_id, target)) + volume.finish_detach() + defer.returnValue(True) + + +class Group(object): + def __init__(self, group_id): + self.group_id = group_id + + +class ProductCode(object): + def __init__(self, product_code): + self.product_code = product_code + + +class Instance(object): + + NOSTATE = 0x00 + RUNNING = 0x01 + BLOCKED = 0x02 + PAUSED = 0x03 + SHUTDOWN = 0x04 + SHUTOFF = 0x05 + CRASHED = 0x06 + + def 
__init__(self, conn, name, data): + """ spawn an instance with a given name """ + self._conn = conn + # TODO(vish): this can be removed after data has been updated + # data doesn't seem to have a working iterator so in doesn't work + if data.get('owner_id', None) is not None: + data['user_id'] = data['owner_id'] + data['project_id'] = data['owner_id'] + self.datamodel = data + + size = data.get('instance_type', FLAGS.default_instance_type) + if size not in INSTANCE_TYPES: + raise exception.Error('invalid instance type: %s' % size) + + self.datamodel.update(INSTANCE_TYPES[size]) + + self.datamodel['name'] = name + self.datamodel['instance_id'] = name + self.datamodel['basepath'] = data.get( + 'basepath', os.path.abspath( + os.path.join(FLAGS.instances_path, self.name))) + self.datamodel['memory_kb'] = int(self.datamodel['memory_mb']) * 1024 + self.datamodel.setdefault('image_id', FLAGS.default_image) + self.datamodel.setdefault('kernel_id', FLAGS.default_kernel) + self.datamodel.setdefault('ramdisk_id', FLAGS.default_ramdisk) + self.datamodel.setdefault('project_id', self.datamodel['user_id']) + self.datamodel.setdefault('bridge_name', None) + #self.datamodel.setdefault('key_data', None) + #self.datamodel.setdefault('key_name', None) + #self.datamodel.setdefault('addressing_type', None) + + # TODO(joshua) - The ugly non-flat ones + self.datamodel['groups'] = data.get('security_group', 'default') + # TODO(joshua): Support product codes somehow + self.datamodel.setdefault('product_codes', None) + + self.datamodel.save() + logging.debug("Finished init of Instance with id of %s" % name) + + def toXml(self): + # TODO(termie): cache? + logging.debug("Starting the toXML method") + libvirt_xml = open(FLAGS.libvirt_xml_template).read() + xml_info = self.datamodel.copy() + # TODO(joshua): Make this xml express the attached disks as well + + # TODO(termie): lazy lazy hack because xml is annoying + xml_info['nova'] = json.dumps(self.datamodel.copy()) + libvirt_xml = libvirt_xml % xml_info + logging.debug("Finished the toXML method") + + return libvirt_xml + + @classmethod + def fromName(cls, conn, name): + """ use the saved data for reloading the instance """ + instdir = model.InstanceDirectory() + instance = instdir.get(name) + return cls(conn=conn, name=name, data=instance) + + def set_state(self, state_code, state_description=None): + self.datamodel['state'] = state_code + if not state_description: + state_description = STATE_NAMES[state_code] + self.datamodel['state_description'] = state_description + self.datamodel.save() + + @property + def state(self): + # it is a string in datamodel + return int(self.datamodel['state']) + + @property + def name(self): + return self.datamodel['name'] + + def is_pending(self): + return (self.state == Instance.NOSTATE or self.state == 'pending') + + def is_destroyed(self): + return self.state == Instance.SHUTOFF + + def is_running(self): + logging.debug("Instance state is: %s" % self.state) + return (self.state == Instance.RUNNING or self.state == 'running') + + def describe(self): + return self.datamodel + + def info(self): + logging.debug("Getting info for dom %s" % self.name) + virt_dom = self._conn.lookupByName(self.name) + (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info() + return {'state': state, + 'max_mem': max_mem, + 'mem': mem, + 'num_cpu': num_cpu, + 'cpu_time': cpu_time, + 'node_name': FLAGS.node_name} + + def basepath(self, path=''): + return os.path.abspath(os.path.join(self.datamodel['basepath'], path)) + + def update_state(self): + 
self.datamodel.update(self.info()) + self.set_state(self.state) + self.datamodel.save() # Extra, but harmless + + @exception.wrap_exception + def destroy(self): + if self.is_destroyed(): + self.datamodel.destroy() + raise exception.Error('trying to destroy already destroyed' + ' instance: %s' % self.name) + + self.set_state(Instance.NOSTATE, 'shutting_down') + try: + virt_dom = self._conn.lookupByName(self.name) + virt_dom.destroy() + except Exception, _err: + pass + # If the instance is already terminated, we're still happy + d = defer.Deferred() + d.addCallback(lambda x: self._cleanup()) + d.addCallback(lambda x: self.datamodel.destroy()) + # TODO(termie): short-circuit me for tests + # WE'LL save this for when we do shutdown, + # instead of destroy - but destroy returns immediately + timer = task.LoopingCall(f=None) + def _wait_for_shutdown(): + try: + self.update_state() + if self.state == Instance.SHUTDOWN: + timer.stop() + d.callback(None) + except Exception: + self.set_state(Instance.SHUTDOWN) + timer.stop() + d.callback(None) + timer.f = _wait_for_shutdown + timer.start(interval=0.5, now=True) + return d + + def _cleanup(self): + target = os.path.abspath(self.datamodel['basepath']) + logging.info("Deleting instance files at %s", target) + shutil.rmtree(target) + + @defer.inlineCallbacks + @exception.wrap_exception + def reboot(self): + if not self.is_running(): + raise exception.Error( + 'trying to reboot a non-running' + 'instance: %s (state: %s)' % (self.name, self.state)) + + logging.debug('rebooting instance %s' % self.name) + self.set_state(Instance.NOSTATE, 'rebooting') + yield self._conn.lookupByName(self.name).destroy() + self._conn.createXML(self.toXml(), 0) + + d = defer.Deferred() + timer = task.LoopingCall(f=None) + def _wait_for_reboot(): + try: + self.update_state() + if self.is_running(): + logging.debug('rebooted instance %s' % self.name) + timer.stop() + d.callback(None) + except Exception: + self.set_state(Instance.SHUTDOWN) + timer.stop() + d.callback(None) + timer.f = _wait_for_reboot + timer.start(interval=0.5, now=True) + yield d + + def _fetch_s3_image(self, image, path): + url = _image_url('%s/image' % image) + d = process.simple_execute( + 'curl --silent %s -o %s' % (url, path)) + return d + + def _fetch_local_image(self, image, path): + source = _image_path('%s/image' % image) + d = process.simple_execute('cp %s %s' % (source, path)) + return d + + @defer.inlineCallbacks + def _create_image(self, libvirt_xml): + # syntactic nicety + data = self.datamodel + basepath = self.basepath + + # ensure directories exist and are writable + yield process.simple_execute( + 'mkdir -p %s' % basepath()) + yield process.simple_execute( + 'chmod 0777 %s' % basepath()) + + + # TODO(termie): these are blocking calls, it would be great + # if they weren't. 
+ logging.info('Creating image for: %s', data['instance_id']) + f = open(basepath('libvirt.xml'), 'w') + f.write(libvirt_xml) + f.close() + + if FLAGS.fake_libvirt: + logging.info('fake_libvirt, nothing to do for create_image') + raise defer.returnValue(None); + + if FLAGS.use_s3: + _fetch_file = self._fetch_s3_image + else: + _fetch_file = self._fetch_local_image + + if not os.path.exists(basepath('disk')): + yield _fetch_file(data['image_id'], basepath('disk-raw')) + if not os.path.exists(basepath('kernel')): + yield _fetch_file(data['kernel_id'], basepath('kernel')) + if not os.path.exists(basepath('ramdisk')): + yield _fetch_file(data['ramdisk_id'], basepath('ramdisk')) + + execute = lambda cmd, input=None: \ + process.simple_execute(cmd=cmd, + input=input, + error_ok=1) + + key = data['key_data'] + net = None + if FLAGS.simple_network: + with open(FLAGS.simple_network_template) as f: + net = f.read() % {'address': data['private_dns_name'], + 'network': FLAGS.simple_network_network, + 'netmask': FLAGS.simple_network_netmask, + 'gateway': FLAGS.simple_network_gateway, + 'broadcast': FLAGS.simple_network_broadcast, + 'dns': FLAGS.simple_network_dns} + if key or net: + logging.info('Injecting data into image %s', data['image_id']) + yield disk.inject_data(basepath('disk-raw'), key, net, execute=execute) + + if os.path.exists(basepath('disk')): + yield process.simple_execute( + 'rm -f %s' % basepath('disk')) + + bytes = (INSTANCE_TYPES[data['instance_type']]['local_gb'] + * 1024 * 1024 * 1024) + yield disk.partition( + basepath('disk-raw'), basepath('disk'), bytes, execute=execute) + + @defer.inlineCallbacks + @exception.wrap_exception + def spawn(self): + self.set_state(Instance.NOSTATE, 'spawning') + logging.debug("Starting spawn in Instance") + + xml = self.toXml() + self.set_state(Instance.NOSTATE, 'launching') + logging.info('self %s', self) + try: + yield self._create_image(xml) + self._conn.createXML(xml, 0) + # TODO(termie): this should actually register + # a callback to check for successful boot + logging.debug("Instance is running") + + local_d = defer.Deferred() + timer = task.LoopingCall(f=None) + def _wait_for_boot(): + try: + self.update_state() + if self.is_running(): + logging.debug('booted instance %s' % self.name) + timer.stop() + local_d.callback(None) + except Exception: + self.set_state(Instance.SHUTDOWN) + logging.error('Failed to boot instance %s' % self.name) + timer.stop() + local_d.callback(None) + timer.f = _wait_for_boot + timer.start(interval=0.5, now=True) + except Exception, ex: + logging.debug(ex) + self.set_state(Instance.SHUTDOWN) + + @exception.wrap_exception + def console_output(self): + if not FLAGS.fake_libvirt: + fname = os.path.abspath( + os.path.join(self.datamodel['basepath'], 'console.log')) + with open(fname, 'r') as f: + console = f.read() + else: + console = 'FAKE CONSOLE OUTPUT' + return defer.succeed(console) + +STATE_NAMES = { + Instance.NOSTATE : 'pending', + Instance.RUNNING : 'running', + Instance.BLOCKED : 'blocked', + Instance.PAUSED : 'paused', + Instance.SHUTDOWN : 'shutdown', + Instance.SHUTOFF : 'shutdown', + Instance.CRASHED : 'crashed', +} diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index eaa608b1e..6e9bdead8 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -23,7 +23,6 @@ datastore. 
""" import base64 -import json import logging import os import time @@ -38,9 +37,9 @@ from nova.auth import rbac from nova.auth import users from nova.compute import model from nova.compute import network -from nova.compute import computenode +from nova.compute import computeservice from nova.endpoint import images -from nova.volume import volumenode +from nova.volume import volumeservice FLAGS = flags.FLAGS @@ -76,7 +75,7 @@ class CloudController(object): def volumes(self): """ returns a list of all volumes """ for volume_id in datastore.Redis.instance().smembers("volumes"): - volume = volumenode.get_volume(volume_id) + volume = volumeservice.get_volume(volume_id) yield volume def __str__(self): @@ -103,7 +102,7 @@ class CloudController(object): result = {} for instance in self.instdir.all: if instance['project_id'] == project_id: - line = '%s slots=%d' % (instance['private_dns_name'], computenode.INSTANCE_TYPES[instance['instance_type']]['vcpus']) + line = '%s slots=%d' % (instance['private_dns_name'], computeservice.INSTANCE_TYPES[instance['instance_type']]['vcpus']) if instance['key_name'] in result: result[instance['key_name']].append(line) else: @@ -296,7 +295,7 @@ class CloudController(object): @rbac.allow('projectmanager', 'sysadmin') def create_volume(self, context, size, **kwargs): - # TODO(vish): refactor this to create the volume object here and tell volumenode to create it + # TODO(vish): refactor this to create the volume object here and tell volumeservice to create it res = rpc.call(FLAGS.volume_topic, {"method": "create_volume", "args" : {"size": size, "user_id": context.user.id, @@ -331,7 +330,7 @@ class CloudController(object): raise exception.NotFound('Instance %s could not be found' % instance_id) def _get_volume(self, context, volume_id): - volume = volumenode.get_volume(volume_id) + volume = volumeservice.get_volume(volume_id) if context.user.is_admin() or volume['project_id'] == context.project.id: return volume raise exception.NotFound('Volume %s could not be found' % volume_id) @@ -578,7 +577,7 @@ class CloudController(object): "args": {"instance_id" : inst.instance_id}}) logging.debug("Casting to node for %s's instance with IP of %s" % (context.user.name, inst['private_dns_name'])) - # TODO: Make the NetworkComputeNode figure out the network name from ip. + # TODO: Make Network figure out the network name from ip. return defer.succeed(self._format_instances( context, reservation_id)) diff --git a/nova/network/networknode.py b/nova/network/networknode.py deleted file mode 100644 index e5a346551..000000000 --- a/nova/network/networknode.py +++ /dev/null @@ -1,35 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Network Nodes are responsible for allocating ips and setting up network -""" - -import logging - -from nova import flags -from nova import node - - -FLAGS = flags.FLAGS - -class NetworkNode(node.Node): - """Allocates ips and sets up networks""" - - def __init__(self): - logging.debug("Network node working") diff --git a/nova/network/networkservice.py b/nova/network/networkservice.py new file mode 100644 index 000000000..9d87e05e6 --- /dev/null +++ b/nova/network/networkservice.py @@ -0,0 +1,35 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Network Nodes are responsible for allocating ips and setting up network +""" + +import logging + +from nova import flags +from nova import service + + +FLAGS = flags.FLAGS + +class NetworkService(service.Service): + """Allocates ips and sets up networks""" + + def __init__(self): + logging.debug("Network node working") diff --git a/nova/node.py b/nova/node.py deleted file mode 100644 index 852344da9..000000000 --- a/nova/node.py +++ /dev/null @@ -1,103 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Generic Node baseclass for all workers that run on hosts -""" - -import inspect -import logging -import os - -from twisted.internet import defer -from twisted.internet import task -from twisted.application import service - -from nova import datastore -from nova import flags -from nova import rpc -from nova.compute import model - - -FLAGS = flags.FLAGS - -flags.DEFINE_integer('report_interval', 10, - 'seconds between nodes reporting state to cloud', - lower_bound=1) - -class Node(object, service.Service): - """Base class for workers that run on hosts""" - - @classmethod - def create(cls, - report_interval=None, # defaults to flag - bin_name=None, # defaults to basename of executable - topic=None): # defaults to basename - "nova-" part - """Instantiates class and passes back application object""" - if not report_interval: - # NOTE(vish): set here because if it is set to flag in the - # parameter list, it wrongly uses the default - report_interval = FLAGS.report_interval - # NOTE(vish): magic to automatically determine bin_name and topic - if not bin_name: - bin_name = os.path.basename(inspect.stack()[-1][1]) - if not topic: - topic = bin_name.rpartition("nova-")[2] - logging.warn("Starting %s node" % topic) - node_instance = cls() - - conn = rpc.Connection.instance() - consumer_all = rpc.AdapterConsumer( - connection=conn, - topic='%s' % topic, - proxy=node_instance) - - consumer_node = rpc.AdapterConsumer( - connection=conn, - topic='%s.%s' % (topic, FLAGS.node_name), - proxy=node_instance) - - pulse = task.LoopingCall(node_instance.report_state, - FLAGS.node_name, - bin_name) - pulse.start(interval=report_interval, now=False) - - consumer_all.attach_to_twisted() - consumer_node.attach_to_twisted() - - # This is the parent service that twistd will be looking for when it - # parses this file, return it so that we can get it into globals below - application = service.Application(bin_name) - node_instance.setServiceParent(application) - return application - - @defer.inlineCallbacks - def report_state(self, nodename, daemon): - # TODO(termie): make this pattern be more elegant. -todd - try: - record = model.Daemon(nodename, daemon) - record.heartbeat() - if getattr(self, "model_disconnected", False): - self.model_disconnected = False - logging.error("Recovered model server connection!") - - except datastore.ConnectionError, ex: - if not getattr(self, "model_disconnected", False): - self.model_disconnected = True - logging.exception("model server went away") - yield diff --git a/nova/service.py b/nova/service.py new file mode 100644 index 000000000..96281bc6b --- /dev/null +++ b/nova/service.py @@ -0,0 +1,103 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
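# Editor's sketch (not part of the patch): how a twistd-style binary is
# expected to use the create() classmethod defined in this module.  The
# module and variable names below are illustrative; only the requirement
# that a global named 'application' exists comes from the code comments.
#
#     # e.g. in a bin/nova-compute style file parsed by twistd:
#     from nova.compute import computeservice
#
#     # create() wires up the AMQP consumers and the report_state
#     # heartbeat, then returns the twisted Application object.
#     application = computeservice.ComputeService.create(
#         bin_name='nova-compute', topic='compute')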
+ +""" +Generic Node baseclass for all workers that run on hosts +""" + +import inspect +import logging +import os + +from twisted.internet import defer +from twisted.internet import task +from twisted.application import service + +from nova import datastore +from nova import flags +from nova import rpc +from nova.compute import model + + +FLAGS = flags.FLAGS + +flags.DEFINE_integer('report_interval', 10, + 'seconds between nodes reporting state to cloud', + lower_bound=1) + +class Service(object, service.Service): + """Base class for workers that run on hosts""" + + @classmethod + def create(cls, + report_interval=None, # defaults to flag + bin_name=None, # defaults to basename of executable + topic=None): # defaults to basename - "nova-" part + """Instantiates class and passes back application object""" + if not report_interval: + # NOTE(vish): set here because if it is set to flag in the + # parameter list, it wrongly uses the default + report_interval = FLAGS.report_interval + # NOTE(vish): magic to automatically determine bin_name and topic + if not bin_name: + bin_name = os.path.basename(inspect.stack()[-1][1]) + if not topic: + topic = bin_name.rpartition("nova-")[2] + logging.warn("Starting %s node" % topic) + node_instance = cls() + + conn = rpc.Connection.instance() + consumer_all = rpc.AdapterConsumer( + connection=conn, + topic='%s' % topic, + proxy=node_instance) + + consumer_node = rpc.AdapterConsumer( + connection=conn, + topic='%s.%s' % (topic, FLAGS.node_name), + proxy=node_instance) + + pulse = task.LoopingCall(node_instance.report_state, + FLAGS.node_name, + bin_name) + pulse.start(interval=report_interval, now=False) + + consumer_all.attach_to_twisted() + consumer_node.attach_to_twisted() + + # This is the parent service that twistd will be looking for when it + # parses this file, return it so that we can get it into globals below + application = service.Application(bin_name) + node_instance.setServiceParent(application) + return application + + @defer.inlineCallbacks + def report_state(self, nodename, daemon): + # TODO(termie): make this pattern be more elegant. -todd + try: + record = model.Daemon(nodename, daemon) + record.heartbeat() + if getattr(self, "model_disconnected", False): + self.model_disconnected = False + logging.error("Recovered model server connection!") + + except datastore.ConnectionError, ex: + if not getattr(self, "model_disconnected", False): + self.model_disconnected = True + logging.exception("model server went away") + yield diff --git a/nova/test.py b/nova/test.py index 5dcf0b9b0..6fbcab5e4 100644 --- a/nova/test.py +++ b/nova/test.py @@ -156,9 +156,9 @@ class BaseTestCase(TrialTestCase): Example (callback chain, ugly): - d = self.node.terminate_instance(instance_id) # a Deferred instance + d = self.compute.terminate_instance(instance_id) # a Deferred instance def _describe(_): - d_desc = self.node.describe_instances() # another Deferred instance + d_desc = self.compute.describe_instances() # another Deferred instance return d_desc def _checkDescribe(rv): self.assertEqual(rv, []) @@ -169,8 +169,8 @@ class BaseTestCase(TrialTestCase): Example (inline callbacks! 
yay!): - yield self.node.terminate_instance(instance_id) - rv = yield self.node.describe_instances() + yield self.compute.terminate_instance(instance_id) + rv = yield self.compute.describe_instances() self.assertEqual(rv, []) If the test fits the Inline Callbacks pattern we will automatically diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py index 7ab2c257a..38f4de8d9 100644 --- a/nova/tests/cloud_unittest.py +++ b/nova/tests/cloud_unittest.py @@ -28,7 +28,7 @@ from nova import flags from nova import rpc from nova import test from nova.auth import users -from nova.compute import computenode +from nova.compute import computeservice from nova.endpoint import api from nova.endpoint import cloud @@ -53,12 +53,12 @@ class CloudTestCase(test.BaseTestCase): proxy=self.cloud) self.injected.append(self.cloud_consumer.attach_to_tornado(self.ioloop)) - # set up a node - self.node = computenode.ComputeNode() - self.node_consumer = rpc.AdapterConsumer(connection=self.conn, + # set up a service + self.compute = computeservice.ComputeService() + self.compute_consumer = rpc.AdapterConsumer(connection=self.conn, topic=FLAGS.compute_topic, - proxy=self.node) - self.injected.append(self.node_consumer.attach_to_tornado(self.ioloop)) + proxy=self.compute) + self.injected.append(self.compute_consumer.attach_to_tornado(self.ioloop)) try: users.UserManager.instance().create_user('admin', 'admin', 'admin') @@ -76,11 +76,11 @@ class CloudTestCase(test.BaseTestCase): logging.debug("Can't test instances without a real virtual env.") return instance_id = 'foo' - inst = yield self.node.run_instance(instance_id) + inst = yield self.compute.run_instance(instance_id) output = yield self.cloud.get_console_output(self.context, [instance_id]) logging.debug(output) self.assert_(output) - rv = yield self.node.terminate_instance(instance_id) + rv = yield self.compute.terminate_instance(instance_id) def test_run_instances(self): if FLAGS.fake_libvirt: @@ -112,7 +112,7 @@ class CloudTestCase(test.BaseTestCase): # for instance in reservations[res_id]: for instance in reservations[reservations.keys()[0]]: logging.debug("Terminating instance %s" % instance['instance_id']) - rv = yield self.node.terminate_instance(instance['instance_id']) + rv = yield self.compute.terminate_instance(instance['instance_id']) def test_instance_update_state(self): def instance(num): diff --git a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py index 4c0f1afb3..db08308bb 100644 --- a/nova/tests/compute_unittest.py +++ b/nova/tests/compute_unittest.py @@ -26,7 +26,7 @@ from nova import flags from nova import test from nova import utils from nova.compute import model -from nova.compute import computenode +from nova.compute import computeservice FLAGS = flags.FLAGS @@ -60,7 +60,7 @@ class ComputeConnectionTestCase(test.TrialTestCase): self.flags(fake_libvirt=True, fake_storage=True, fake_users=True) - self.node = computenode.ComputeNode() + self.compute = computeservice.ComputeService() def create_instance(self): instdir = model.InstanceDirectory() @@ -81,48 +81,48 @@ class ComputeConnectionTestCase(test.TrialTestCase): def test_run_describe_terminate(self): instance_id = self.create_instance() - rv = yield self.node.run_instance(instance_id) + rv = yield self.compute.run_instance(instance_id) - rv = yield self.node.describe_instances() + rv = yield self.compute.describe_instances() logging.info("Running instances: %s", rv) self.assertEqual(rv[instance_id].name, instance_id) - rv = yield 
self.node.terminate_instance(instance_id) + rv = yield self.compute.terminate_instance(instance_id) - rv = yield self.node.describe_instances() + rv = yield self.compute.describe_instances() logging.info("After terminating instances: %s", rv) self.assertEqual(rv, {}) @defer.inlineCallbacks def test_reboot(self): instance_id = self.create_instance() - rv = yield self.node.run_instance(instance_id) + rv = yield self.compute.run_instance(instance_id) - rv = yield self.node.describe_instances() + rv = yield self.compute.describe_instances() self.assertEqual(rv[instance_id].name, instance_id) - yield self.node.reboot_instance(instance_id) + yield self.compute.reboot_instance(instance_id) - rv = yield self.node.describe_instances() + rv = yield self.compute.describe_instances() self.assertEqual(rv[instance_id].name, instance_id) - rv = yield self.node.terminate_instance(instance_id) + rv = yield self.compute.terminate_instance(instance_id) @defer.inlineCallbacks def test_console_output(self): instance_id = self.create_instance() - rv = yield self.node.run_instance(instance_id) + rv = yield self.compute.run_instance(instance_id) - console = yield self.node.get_console_output(instance_id) + console = yield self.compute.get_console_output(instance_id) self.assert_(console) - rv = yield self.node.terminate_instance(instance_id) + rv = yield self.compute.terminate_instance(instance_id) @defer.inlineCallbacks def test_run_instance_existing(self): instance_id = self.create_instance() - rv = yield self.node.run_instance(instance_id) + rv = yield self.compute.run_instance(instance_id) - rv = yield self.node.describe_instances() + rv = yield self.compute.describe_instances() self.assertEqual(rv[instance_id].name, instance_id) - self.assertRaises(exception.Error, self.node.run_instance, instance_id) - rv = yield self.node.terminate_instance(instance_id) + self.assertRaises(exception.Error, self.compute.run_instance, instance_id) + rv = yield self.compute.terminate_instance(instance_id) diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py index c176453d8..568b199a0 100644 --- a/nova/tests/volume_unittest.py +++ b/nova/tests/volume_unittest.py @@ -21,8 +21,8 @@ import logging from nova import exception from nova import flags from nova import test -from nova.compute import computenode -from nova.volume import volumenode +from nova.compute import computeservice +from nova.volume import volumeservice FLAGS = flags.FLAGS @@ -32,24 +32,24 @@ class VolumeTestCase(test.TrialTestCase): def setUp(self): logging.getLogger().setLevel(logging.DEBUG) super(VolumeTestCase, self).setUp() - self.mynode = computenode.ComputeNode() - self.mystorage = None + self.compute = computeservice.ComputeService() + self.volume = None self.flags(fake_libvirt=True, fake_storage=True) - self.mystorage = volumenode.VolumeNode() + self.volume = volumeservice.VolumeService() def test_run_create_volume(self): vol_size = '0' user_id = 'fake' project_id = 'fake' - volume_id = self.mystorage.create_volume(vol_size, user_id, project_id) + volume_id = self.volume.create_volume(vol_size, user_id, project_id) # TODO(termie): get_volume returns differently than create_volume self.assertEqual(volume_id, - volumenode.get_volume(volume_id)['volume_id']) + volumeservice.get_volume(volume_id)['volume_id']) - rv = self.mystorage.delete_volume(volume_id) + rv = self.volume.delete_volume(volume_id) self.assertRaises(exception.Error, - volumenode.get_volume, + volumeservice.get_volume, volume_id) def test_too_big_volume(self): @@ -57,7 
+57,7 @@ class VolumeTestCase(test.TrialTestCase): user_id = 'fake' project_id = 'fake' self.assertRaises(TypeError, - self.mystorage.create_volume, + self.volume.create_volume, vol_size, user_id, project_id) def test_too_many_volumes(self): @@ -68,26 +68,26 @@ class VolumeTestCase(test.TrialTestCase): total_slots = FLAGS.slots_per_shelf * num_shelves vols = [] for i in xrange(total_slots): - vid = self.mystorage.create_volume(vol_size, user_id, project_id) + vid = self.volume.create_volume(vol_size, user_id, project_id) vols.append(vid) - self.assertRaises(volumenode.NoMoreVolumes, - self.mystorage.create_volume, + self.assertRaises(volumeservice.NoMoreVolumes, + self.volume.create_volume, vol_size, user_id, project_id) for id in vols: - self.mystorage.delete_volume(id) + self.volume.delete_volume(id) def test_run_attach_detach_volume(self): - # Create one volume and one node to test with + # Create one volume and one compute to test with instance_id = "storage-test" vol_size = "5" user_id = "fake" project_id = 'fake' mountpoint = "/dev/sdf" - volume_id = self.mystorage.create_volume(vol_size, user_id, project_id) + volume_id = self.volume.create_volume(vol_size, user_id, project_id) - volume_obj = volumenode.get_volume(volume_id) + volume_obj = volumeservice.get_volume(volume_id) volume_obj.start_attach(instance_id, mountpoint) - rv = yield self.mynode.attach_volume(volume_id, + rv = yield self.compute.attach_volume(volume_id, instance_id, mountpoint) self.assertEqual(volume_obj['status'], "in-use") @@ -96,16 +96,16 @@ class VolumeTestCase(test.TrialTestCase): self.assertEqual(volume_obj['mountpoint'], mountpoint) self.assertRaises(exception.Error, - self.mystorage.delete_volume, + self.volume.delete_volume, volume_id) - rv = yield self.mystorage.detach_volume(volume_id) - volume_obj = volumenode.get_volume(volume_id) + rv = yield self.volume.detach_volume(volume_id) + volume_obj = volumeservice.get_volume(volume_id) self.assertEqual(volume_obj['status'], "available") - rv = self.mystorage.delete_volume(volume_id) + rv = self.volume.delete_volume(volume_id) self.assertRaises(exception.Error, - volumenode.get_volume, + volumeservice.get_volume, volume_id) def test_multi_node(self): diff --git a/nova/volume/volumenode.py b/nova/volume/volumenode.py deleted file mode 100644 index 6b4ad0d87..000000000 --- a/nova/volume/volumenode.py +++ /dev/null @@ -1,305 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Nova Storage manages creating, attaching, detaching, and -destroying persistent storage volumes, ala EBS. -Currently uses Ata-over-Ethernet. 
-""" - -import glob -import logging -import os -import shutil -import socket -import tempfile - -from twisted.application import service -from twisted.internet import defer - -from nova import datastore -from nova import exception -from nova import flags -from nova import node -from nova import process -from nova import utils -from nova import validate - - -FLAGS = flags.FLAGS -flags.DEFINE_string('storage_dev', '/dev/sdb', - 'Physical device to use for volumes') -flags.DEFINE_string('volume_group', 'nova-volumes', - 'Name for the VG that will contain exported volumes') -flags.DEFINE_string('aoe_eth_dev', 'eth0', - 'Which device to export the volumes on') -flags.DEFINE_string('storage_name', - socket.gethostname(), - 'name of this node') -flags.DEFINE_integer('first_shelf_id', - utils.last_octet(utils.get_my_ip()) * 10, - 'AoE starting shelf_id for this node') -flags.DEFINE_integer('last_shelf_id', - utils.last_octet(utils.get_my_ip()) * 10 + 9, - 'AoE starting shelf_id for this node') -flags.DEFINE_string('aoe_export_dir', - '/var/lib/vblade-persist/vblades', - 'AoE directory where exports are created') -flags.DEFINE_integer('slots_per_shelf', - 16, - 'Number of AoE slots per shelf') -flags.DEFINE_string('storage_availability_zone', - 'nova', - 'availability zone of this node') -flags.DEFINE_boolean('fake_storage', False, - 'Should we make real storage volumes to attach?') - - -class NoMoreVolumes(exception.Error): - pass - -def get_volume(volume_id): - """ Returns a redis-backed volume object """ - volume_class = Volume - if FLAGS.fake_storage: - volume_class = FakeVolume - if datastore.Redis.instance().sismember('volumes', volume_id): - return volume_class(volume_id=volume_id) - raise exception.Error("Volume does not exist") - -class VolumeNode(node.Node): - """ - There is one VolumeNode running on each host. - However, each VolumeNode can report on the state of - *all* volumes in the cluster. - """ - def __init__(self): - super(VolumeNode, self).__init__() - self.volume_class = Volume - if FLAGS.fake_storage: - FLAGS.aoe_export_dir = tempfile.mkdtemp() - self.volume_class = FakeVolume - self._init_volume_group() - - def __del__(self): - # TODO(josh): Get rid of this destructor, volumes destroy themselves - if FLAGS.fake_storage: - try: - shutil.rmtree(FLAGS.aoe_export_dir) - except Exception, err: - pass - - @validate.rangetest(size=(0, 1000)) - def create_volume(self, size, user_id, project_id): - """ - Creates an exported volume (fake or real), - restarts exports to make it available. - Volume at this point has size, owner, and zone. 
- """ - logging.debug("Creating volume of size: %s" % (size)) - vol = self.volume_class.create(size, user_id, project_id) - datastore.Redis.instance().sadd('volumes', vol['volume_id']) - datastore.Redis.instance().sadd('volumes:%s' % (FLAGS.storage_name), vol['volume_id']) - self._restart_exports() - return vol['volume_id'] - - def by_node(self, node_id): - """ returns a list of volumes for a node """ - for volume_id in datastore.Redis.instance().smembers('volumes:%s' % (node_id)): - yield self.volume_class(volume_id=volume_id) - - @property - def all(self): - """ returns a list of all volumes """ - for volume_id in datastore.Redis.instance().smembers('volumes'): - yield self.volume_class(volume_id=volume_id) - - def delete_volume(self, volume_id): - logging.debug("Deleting volume with id of: %s" % (volume_id)) - vol = get_volume(volume_id) - if vol['status'] == "attached": - raise exception.Error("Volume is still attached") - if vol['node_name'] != FLAGS.storage_name: - raise exception.Error("Volume is not local to this node") - vol.destroy() - datastore.Redis.instance().srem('volumes', vol['volume_id']) - datastore.Redis.instance().srem('volumes:%s' % (FLAGS.storage_name), vol['volume_id']) - return True - - @defer.inlineCallbacks - def _restart_exports(self): - if FLAGS.fake_storage: - return - yield process.simple_execute( - "sudo vblade-persist auto all") - yield process.simple_execute( - "sudo vblade-persist start all") - - @defer.inlineCallbacks - def _init_volume_group(self): - if FLAGS.fake_storage: - return - yield process.simple_execute( - "sudo pvcreate %s" % (FLAGS.storage_dev)) - yield process.simple_execute( - "sudo vgcreate %s %s" % (FLAGS.volume_group, - FLAGS.storage_dev)) - -class Volume(datastore.BasicModel): - - def __init__(self, volume_id=None): - self.volume_id = volume_id - super(Volume, self).__init__() - - @property - def identifier(self): - return self.volume_id - - def default_state(self): - return {"volume_id": self.volume_id} - - @classmethod - def create(cls, size, user_id, project_id): - volume_id = utils.generate_uid('vol') - vol = cls(volume_id) - vol['node_name'] = FLAGS.storage_name - vol['size'] = size - vol['user_id'] = user_id - vol['project_id'] = project_id - vol['availability_zone'] = FLAGS.storage_availability_zone - vol["instance_id"] = 'none' - vol["mountpoint"] = 'none' - vol['attach_time'] = 'none' - vol['status'] = "creating" # creating | available | in-use - vol['attach_status'] = "detached" # attaching | attached | detaching | detached - vol['delete_on_termination'] = 'False' - vol.save() - vol.create_lv() - vol._setup_export() - # TODO(joshua) - We need to trigger a fanout message for aoe-discover on all the nodes - # TODO(joshua - vol['status'] = "available" - vol.save() - return vol - - def start_attach(self, instance_id, mountpoint): - """ """ - self['instance_id'] = instance_id - self['mountpoint'] = mountpoint - self['status'] = "in-use" - self['attach_status'] = "attaching" - self['attach_time'] = utils.isotime() - self['delete_on_termination'] = 'False' - self.save() - - def finish_attach(self): - """ """ - self['attach_status'] = "attached" - self.save() - - def start_detach(self): - """ """ - self['attach_status'] = "detaching" - self.save() - - def finish_detach(self): - self['instance_id'] = None - self['mountpoint'] = None - self['status'] = "available" - self['attach_status'] = "detached" - self.save() - - def destroy(self): - try: - self._remove_export() - except: - pass - self._delete_lv() - super(Volume, self).destroy() - 
- @defer.inlineCallbacks - def create_lv(self): - if str(self['size']) == '0': - sizestr = '100M' - else: - sizestr = '%sG' % self['size'] - yield process.simple_execute( - "sudo lvcreate -L %s -n %s %s" % (sizestr, - self['volume_id'], - FLAGS.volume_group)) - - @defer.inlineCallbacks - def _delete_lv(self): - yield process.simple_execute( - "sudo lvremove -f %s/%s" % (FLAGS.volume_group, - self['volume_id'])) - - def _setup_export(self): - (shelf_id, blade_id) = get_next_aoe_numbers() - self['aoe_device'] = "e%s.%s" % (shelf_id, blade_id) - self['shelf_id'] = shelf_id - self['blade_id'] = blade_id - self.save() - self._exec_export() - - @defer.inlineCallbacks - def _exec_export(self): - yield process.simple_execute( - "sudo vblade-persist setup %s %s %s /dev/%s/%s" % - (self['shelf_id'], - self['blade_id'], - FLAGS.aoe_eth_dev, - FLAGS.volume_group, - self['volume_id'])) - - @defer.inlineCallbacks - def _remove_export(self): - yield process.simple_execute( - "sudo vblade-persist stop %s %s" % (self['shelf_id'], - self['blade_id'])) - yield process.simple_execute( - "sudo vblade-persist destroy %s %s" % (self['shelf_id'], - self['blade_id'])) - - -class FakeVolume(Volume): - def create_lv(self): - pass - - def _exec_export(self): - fname = os.path.join(FLAGS.aoe_export_dir, self['aoe_device']) - f = file(fname, "w") - f.close() - - def _remove_export(self): - pass - - def _delete_lv(self): - pass - -def get_next_aoe_numbers(): - for shelf_id in xrange(FLAGS.first_shelf_id, FLAGS.last_shelf_id + 1): - aoes = glob.glob("%s/e%s.*" % (FLAGS.aoe_export_dir, shelf_id)) - if not aoes: - blade_id = 0 - else: - blade_id = int(max([int(a.rpartition('.')[2]) for a in aoes])) + 1 - if blade_id < FLAGS.slots_per_shelf: - logging.debug("Next shelf.blade is %s.%s", shelf_id, blade_id) - return (shelf_id, blade_id) - raise NoMoreVolumes() diff --git a/nova/volume/volumeservice.py b/nova/volume/volumeservice.py new file mode 100644 index 000000000..87a47f40a --- /dev/null +++ b/nova/volume/volumeservice.py @@ -0,0 +1,304 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Nova Storage manages creating, attaching, detaching, and +destroying persistent storage volumes, ala EBS. +Currently uses Ata-over-Ethernet. 
+""" + +import glob +import logging +import os +import shutil +import socket +import tempfile + +from twisted.internet import defer + +from nova import datastore +from nova import exception +from nova import flags +from nova import process +from nova import service +from nova import utils +from nova import validate + + +FLAGS = flags.FLAGS +flags.DEFINE_string('storage_dev', '/dev/sdb', + 'Physical device to use for volumes') +flags.DEFINE_string('volume_group', 'nova-volumes', + 'Name for the VG that will contain exported volumes') +flags.DEFINE_string('aoe_eth_dev', 'eth0', + 'Which device to export the volumes on') +flags.DEFINE_string('storage_name', + socket.gethostname(), + 'name of this service') +flags.DEFINE_integer('first_shelf_id', + utils.last_octet(utils.get_my_ip()) * 10, + 'AoE starting shelf_id for this service') +flags.DEFINE_integer('last_shelf_id', + utils.last_octet(utils.get_my_ip()) * 10 + 9, + 'AoE starting shelf_id for this service') +flags.DEFINE_string('aoe_export_dir', + '/var/lib/vblade-persist/vblades', + 'AoE directory where exports are created') +flags.DEFINE_integer('slots_per_shelf', + 16, + 'Number of AoE slots per shelf') +flags.DEFINE_string('storage_availability_zone', + 'nova', + 'availability zone of this service') +flags.DEFINE_boolean('fake_storage', False, + 'Should we make real storage volumes to attach?') + + +class NoMoreVolumes(exception.Error): + pass + +def get_volume(volume_id): + """ Returns a redis-backed volume object """ + volume_class = Volume + if FLAGS.fake_storage: + volume_class = FakeVolume + if datastore.Redis.instance().sismember('volumes', volume_id): + return volume_class(volume_id=volume_id) + raise exception.Error("Volume does not exist") + +class VolumeService(service.Service): + """ + There is one VolumeNode running on each host. + However, each VolumeNode can report on the state of + *all* volumes in the cluster. + """ + def __init__(self): + super(VolumeService, self).__init__() + self.volume_class = Volume + if FLAGS.fake_storage: + FLAGS.aoe_export_dir = tempfile.mkdtemp() + self.volume_class = FakeVolume + self._init_volume_group() + + def __del__(self): + # TODO(josh): Get rid of this destructor, volumes destroy themselves + if FLAGS.fake_storage: + try: + shutil.rmtree(FLAGS.aoe_export_dir) + except Exception, err: + pass + + @validate.rangetest(size=(0, 1000)) + def create_volume(self, size, user_id, project_id): + """ + Creates an exported volume (fake or real), + restarts exports to make it available. + Volume at this point has size, owner, and zone. 
+ """ + logging.debug("Creating volume of size: %s" % (size)) + vol = self.volume_class.create(size, user_id, project_id) + datastore.Redis.instance().sadd('volumes', vol['volume_id']) + datastore.Redis.instance().sadd('volumes:%s' % (FLAGS.storage_name), vol['volume_id']) + self._restart_exports() + return vol['volume_id'] + + def by_node(self, node_id): + """ returns a list of volumes for a node """ + for volume_id in datastore.Redis.instance().smembers('volumes:%s' % (node_id)): + yield self.volume_class(volume_id=volume_id) + + @property + def all(self): + """ returns a list of all volumes """ + for volume_id in datastore.Redis.instance().smembers('volumes'): + yield self.volume_class(volume_id=volume_id) + + def delete_volume(self, volume_id): + logging.debug("Deleting volume with id of: %s" % (volume_id)) + vol = get_volume(volume_id) + if vol['status'] == "attached": + raise exception.Error("Volume is still attached") + if vol['node_name'] != FLAGS.storage_name: + raise exception.Error("Volume is not local to this node") + vol.destroy() + datastore.Redis.instance().srem('volumes', vol['volume_id']) + datastore.Redis.instance().srem('volumes:%s' % (FLAGS.storage_name), vol['volume_id']) + return True + + @defer.inlineCallbacks + def _restart_exports(self): + if FLAGS.fake_storage: + return + yield process.simple_execute( + "sudo vblade-persist auto all") + yield process.simple_execute( + "sudo vblade-persist start all") + + @defer.inlineCallbacks + def _init_volume_group(self): + if FLAGS.fake_storage: + return + yield process.simple_execute( + "sudo pvcreate %s" % (FLAGS.storage_dev)) + yield process.simple_execute( + "sudo vgcreate %s %s" % (FLAGS.volume_group, + FLAGS.storage_dev)) + +class Volume(datastore.BasicModel): + + def __init__(self, volume_id=None): + self.volume_id = volume_id + super(Volume, self).__init__() + + @property + def identifier(self): + return self.volume_id + + def default_state(self): + return {"volume_id": self.volume_id} + + @classmethod + def create(cls, size, user_id, project_id): + volume_id = utils.generate_uid('vol') + vol = cls(volume_id) + vol['node_name'] = FLAGS.storage_name + vol['size'] = size + vol['user_id'] = user_id + vol['project_id'] = project_id + vol['availability_zone'] = FLAGS.storage_availability_zone + vol["instance_id"] = 'none' + vol["mountpoint"] = 'none' + vol['attach_time'] = 'none' + vol['status'] = "creating" # creating | available | in-use + vol['attach_status'] = "detached" # attaching | attached | detaching | detached + vol['delete_on_termination'] = 'False' + vol.save() + vol.create_lv() + vol._setup_export() + # TODO(joshua) - We need to trigger a fanout message for aoe-discover on all the nodes + # TODO(joshua + vol['status'] = "available" + vol.save() + return vol + + def start_attach(self, instance_id, mountpoint): + """ """ + self['instance_id'] = instance_id + self['mountpoint'] = mountpoint + self['status'] = "in-use" + self['attach_status'] = "attaching" + self['attach_time'] = utils.isotime() + self['delete_on_termination'] = 'False' + self.save() + + def finish_attach(self): + """ """ + self['attach_status'] = "attached" + self.save() + + def start_detach(self): + """ """ + self['attach_status'] = "detaching" + self.save() + + def finish_detach(self): + self['instance_id'] = None + self['mountpoint'] = None + self['status'] = "available" + self['attach_status'] = "detached" + self.save() + + def destroy(self): + try: + self._remove_export() + except: + pass + self._delete_lv() + super(Volume, self).destroy() + 
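# Editor's sketch (not part of the patch): the volume lifecycle as the
# state helpers above are meant to be driven.  The argument values are
# taken from the unit tests; the status strings are the ones this class
# sets.
#
#     vol = Volume.create(size='5', user_id='fake', project_id='fake')
#     # status: 'creating' -> 'available' once the LV and AoE export exist
#
#     vol.start_attach('storage-test', '/dev/sdf')
#     # status 'in-use', attach_status 'attaching'; the compute service
#     # attaches the device and then calls finish_attach()
#     vol.finish_attach()            # attach_status 'attached'
#
#     vol.start_detach()             # attach_status 'detaching'
#     vol.finish_detach()            # status 'available', 'detached'
#
#     vol.destroy()                  # removes the export and the LV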
+ @defer.inlineCallbacks + def create_lv(self): + if str(self['size']) == '0': + sizestr = '100M' + else: + sizestr = '%sG' % self['size'] + yield process.simple_execute( + "sudo lvcreate -L %s -n %s %s" % (sizestr, + self['volume_id'], + FLAGS.volume_group)) + + @defer.inlineCallbacks + def _delete_lv(self): + yield process.simple_execute( + "sudo lvremove -f %s/%s" % (FLAGS.volume_group, + self['volume_id'])) + + def _setup_export(self): + (shelf_id, blade_id) = get_next_aoe_numbers() + self['aoe_device'] = "e%s.%s" % (shelf_id, blade_id) + self['shelf_id'] = shelf_id + self['blade_id'] = blade_id + self.save() + self._exec_export() + + @defer.inlineCallbacks + def _exec_export(self): + yield process.simple_execute( + "sudo vblade-persist setup %s %s %s /dev/%s/%s" % + (self['shelf_id'], + self['blade_id'], + FLAGS.aoe_eth_dev, + FLAGS.volume_group, + self['volume_id'])) + + @defer.inlineCallbacks + def _remove_export(self): + yield process.simple_execute( + "sudo vblade-persist stop %s %s" % (self['shelf_id'], + self['blade_id'])) + yield process.simple_execute( + "sudo vblade-persist destroy %s %s" % (self['shelf_id'], + self['blade_id'])) + + +class FakeVolume(Volume): + def create_lv(self): + pass + + def _exec_export(self): + fname = os.path.join(FLAGS.aoe_export_dir, self['aoe_device']) + f = file(fname, "w") + f.close() + + def _remove_export(self): + pass + + def _delete_lv(self): + pass + +def get_next_aoe_numbers(): + for shelf_id in xrange(FLAGS.first_shelf_id, FLAGS.last_shelf_id + 1): + aoes = glob.glob("%s/e%s.*" % (FLAGS.aoe_export_dir, shelf_id)) + if not aoes: + blade_id = 0 + else: + blade_id = int(max([int(a.rpartition('.')[2]) for a in aoes])) + 1 + if blade_id < FLAGS.slots_per_shelf: + logging.debug("Next shelf.blade is %s.%s", shelf_id, blade_id) + return (shelf_id, blade_id) + raise NoMoreVolumes() -- cgit From a0c29a822aaed756728f2619e176d8c54bb1d4e9 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 23 Jul 2010 17:20:21 -0700 Subject: fixed bug where partition code was sometimes failing due to initial dd not being yielded properly --- nova/compute/disk.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/compute/disk.py b/nova/compute/disk.py index 08a22556e..7e31498e5 100644 --- a/nova/compute/disk.py +++ b/nova/compute/disk.py @@ -64,8 +64,8 @@ def partition(infile, outfile, local_bytes=0, local_type='ext2', execute=None): last_sector = local_last # e # create an empty file - execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d' - % (outfile, last_sector, sector_size)) + yield execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d' + % (outfile, last_sector, sector_size)) # make mbr partition yield execute('parted --script %s mklabel msdos' % outfile) -- cgit From 4c536de1732c531bfb87018826a92de2744e8d1a Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Sat, 24 Jul 2010 14:45:35 +0100 Subject: Add missing import following merge from trunk (cset 150). --- nova/virt/images.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/virt/images.py b/nova/virt/images.py index fd74349b1..12338fd80 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -24,6 +24,7 @@ Handling of VM disk images. import os.path from nova import flags +from nova import process FLAGS = flags.FLAGS -- cgit From eb10c8f1ea41564b5ee2d19054eeb8b65bfc0b33 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Sat, 24 Jul 2010 16:22:17 -0700 Subject: Updated URLs in the README file to point to current locations. 
--- README | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/README b/README index f7d21f400..851bca9db 100644 --- a/README +++ b/README @@ -6,15 +6,19 @@ The Choose Your Own Adventure README for Nova: To monitor it from a distance: follow @novacc on twitter -To tame it for use in your own cloud: read http://docs.novacc.org/getting.started.html +To tame it for use in your own cloud: read http://nova.openstack.org/getting.started.html -To study its anatomy: read http://docs.novacc.org/architecture.html +To study its anatomy: read http://nova.openstack.org/architecture.html -To disect it in detail: visit http://github.com/nova/cc +To disect it in detail: visit http://code.launchpad.net/nova -To taunt it with its weaknesses: use http://github.com/nova/cc/issues +To taunt it with its weaknesses: use http://bugs.launchpad.net/nova + +To watch it: http://hudson.openstack.org To hack at it: read HACKING -To watch it: http://test.novacc.org/waterfall +To laugh at its PEP8 problems: http://hudson.openstack.org/job/nova-pep8/violations + +To cry over its pylint problems: http://hudson.openstack.org/job/nova-pylint/violations -- cgit From 87e27afec0c7b683ee35f842abdaccea954f2fba Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Sat, 24 Jul 2010 18:06:22 -0700 Subject: Updated sphinx layout to a two-dir layout like swift. Updated a doc string to get rid of a Sphinx warning. --- doc/.gitignore | 1 + doc/build/.gitignore | 1 + doc/source/Makefile | 89 ++++++++++++++++ doc/source/_static/.gitignore | 0 doc/source/_templates/.gitignore | 0 doc/source/architecture.rst | 48 +++++++++ doc/source/auth.rst | 215 +++++++++++++++++++++++++++++++++++++++ doc/source/binaries.rst | 31 ++++++ doc/source/compute.rst | 74 ++++++++++++++ doc/source/conf.py | 202 ++++++++++++++++++++++++++++++++++++ doc/source/endpoint.rst | 91 +++++++++++++++++ doc/source/fakes.rst | 43 ++++++++ doc/source/getting.started.rst | 148 +++++++++++++++++++++++++++ doc/source/index.rst | 55 ++++++++++ doc/source/modules.rst | 34 +++++++ doc/source/network.rst | 88 ++++++++++++++++ doc/source/nova.rst | 91 +++++++++++++++++ doc/source/objectstore.rst | 66 ++++++++++++ doc/source/packages.rst | 29 ++++++ doc/source/storage.rst | 31 ++++++ doc/source/volume.rst | 45 ++++++++ docs/.gitignore | 1 - docs/Makefile | 89 ---------------- docs/_build/.gitignore | 1 - docs/_static/.gitignore | 0 docs/_templates/.gitignore | 0 docs/architecture.rst | 48 --------- docs/auth.rst | 215 --------------------------------------- docs/binaries.rst | 31 ------ docs/compute.rst | 74 -------------- docs/conf.py | 202 ------------------------------------ docs/endpoint.rst | 91 ----------------- docs/fakes.rst | 43 -------- docs/getting.started.rst | 148 --------------------------- docs/index.rst | 56 ---------- docs/modules.rst | 34 ------- docs/network.rst | 88 ---------------- docs/nova.rst | 91 ----------------- docs/objectstore.rst | 66 ------------ docs/packages.rst | 29 ------ docs/storage.rst | 31 ------ docs/volume.rst | 45 -------- nova/compute/disk.py | 3 +- setup.cfg | 4 +- 44 files changed, 1386 insertions(+), 1386 deletions(-) create mode 100644 doc/.gitignore create mode 100644 doc/build/.gitignore create mode 100644 doc/source/Makefile create mode 100644 doc/source/_static/.gitignore create mode 100644 doc/source/_templates/.gitignore create mode 100644 doc/source/architecture.rst create mode 100644 doc/source/auth.rst create mode 100644 doc/source/binaries.rst create mode 100644 doc/source/compute.rst create mode 
100644 doc/source/conf.py create mode 100644 doc/source/endpoint.rst create mode 100644 doc/source/fakes.rst create mode 100644 doc/source/getting.started.rst create mode 100644 doc/source/index.rst create mode 100644 doc/source/modules.rst create mode 100644 doc/source/network.rst create mode 100644 doc/source/nova.rst create mode 100644 doc/source/objectstore.rst create mode 100644 doc/source/packages.rst create mode 100644 doc/source/storage.rst create mode 100644 doc/source/volume.rst delete mode 100644 docs/.gitignore delete mode 100644 docs/Makefile delete mode 100644 docs/_build/.gitignore delete mode 100644 docs/_static/.gitignore delete mode 100644 docs/_templates/.gitignore delete mode 100644 docs/architecture.rst delete mode 100644 docs/auth.rst delete mode 100644 docs/binaries.rst delete mode 100644 docs/compute.rst delete mode 100644 docs/conf.py delete mode 100644 docs/endpoint.rst delete mode 100644 docs/fakes.rst delete mode 100644 docs/getting.started.rst delete mode 100644 docs/index.rst delete mode 100644 docs/modules.rst delete mode 100644 docs/network.rst delete mode 100644 docs/nova.rst delete mode 100644 docs/objectstore.rst delete mode 100644 docs/packages.rst delete mode 100644 docs/storage.rst delete mode 100644 docs/volume.rst diff --git a/doc/.gitignore b/doc/.gitignore new file mode 100644 index 000000000..88f9974bd --- /dev/null +++ b/doc/.gitignore @@ -0,0 +1 @@ +_build/* diff --git a/doc/build/.gitignore b/doc/build/.gitignore new file mode 100644 index 000000000..72e8ffc0d --- /dev/null +++ b/doc/build/.gitignore @@ -0,0 +1 @@ +* diff --git a/doc/source/Makefile b/doc/source/Makefile new file mode 100644 index 000000000..b2f74e85a --- /dev/null +++ b/doc/source/Makefile @@ -0,0 +1,89 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = +BUILDDIR = _build + +# Internal variables. +PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . + +.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest + +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + +clean: + -rm -rf $(BUILDDIR)/* + +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." 
+ +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." + +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/nova.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/nova.qhc" + +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ + "run these through (pdf)latex." + +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." diff --git a/doc/source/_static/.gitignore b/doc/source/_static/.gitignore new file mode 100644 index 000000000..e69de29bb diff --git a/doc/source/_templates/.gitignore b/doc/source/_templates/.gitignore new file mode 100644 index 000000000..e69de29bb diff --git a/doc/source/architecture.rst b/doc/source/architecture.rst new file mode 100644 index 000000000..11813d2c8 --- /dev/null +++ b/doc/source/architecture.rst @@ -0,0 +1,48 @@ +.. + Copyright 2010 United States Government as represented by the + Administrator of the National Aeronautics and Space Administration. + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +nova System Architecture +======================== + +Nova is built on a shared-nothing, messaging-based architecture. All of the major nova components can be run on multiple servers. This means that most component to component communication must go via message queue. In order to avoid blocking each component while waiting for a response, we use deferred objects, with a callback that gets triggered when a response is received. + +In order to achieve shared-nothing with multiple copies of the same component (especially when the component is an API server that needs to reply with state information in a timely fashion), we need to keep all of our system state in a distributed data system. Updates to system state are written into this system, using atomic transactions when necessary. Requests for state are read out of this system. In limited cases, these read calls are memoized within controllers for short periods of time. (Such a limited case would be, for instance, the current list of system users.) 
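A minimal sketch of the non-blocking pattern described above, assuming the
Twisted deferred plumbing used elsewhere in this tree (``rpc.call`` returning
a Deferred, as in nova/endpoint/cloud.py; the topic and method names here are
illustrative)::

    from twisted.internet import defer
    from nova import rpc

    @defer.inlineCallbacks
    def volume_count(project_id):
        # The caller is not blocked here; the Deferred fires when a
        # volume worker answers on the message queue.
        rv = yield rpc.call('volume', {"method": "describe_volumes",
                                       "args": {"project_id": project_id}})
        defer.returnValue(len(rv))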
+ + +Components +---------- + +Below you will find a helpful explanation. + +:: + + [ User Manager ] ---- ( LDAP ) + | + | / [ Storage ] - ( ATAoE ) + [ API server ] -> [ Cloud ] < AMQP > + | \ [ Nodes ] - ( libvirt/kvm ) + < HTTP > + | + [ S3 ] + + +* API: receives http requests from boto, converts commands to/from API format, and sending requests to cloud controller +* Cloud Controller: global state of system, talks to ldap, s3, and node/storage workers through a queue +* Nodes: worker that spawns instances +* S3: tornado based http/s3 server +* User Manager: create/manage users, which are stored in ldap +* Network Controller: allocate and deallocate IPs and VLANs diff --git a/doc/source/auth.rst b/doc/source/auth.rst new file mode 100644 index 000000000..70aca704a --- /dev/null +++ b/doc/source/auth.rst @@ -0,0 +1,215 @@ +.. + Copyright 2010 United States Government as represented by the + Administrator of the National Aeronautics and Space Administration. + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Auth Documentation +================== + +Nova provides RBAC (Role-based access control) of the AWS-type APIs. We define the following roles: + +Roles-Based Access Control of AWS-style APIs using SAML Assertions +“Achieving FIPS 199 Moderate certification of a hybrid cloud environment using CloudAudit and declarative C.I.A. classifications” + +Introduction +-------------- + +We will investigate one method for integrating an AWS-style API with US eAuthentication-compatible federated authentication systems, to achieve access controls and limits based on traditional operational roles. +Additionally, we will look at how combining this approach, with an implementation of the CloudAudit APIs, will allow us to achieve a certification under FIPS 199 Moderate classification for a hybrid cloud environment. + +Relationship of US eAuth to RBAC +-------------------------------- + +Typical implementations of US eAuth authentication systems are structured as follows:: + + [ MS Active Directory or other federated LDAP user store ] + --> backends to… + [ SUN Identity Manager or other SAML Policy Controller ] + --> maps URLs to groups… + [ Apache Policy Agent in front of eAuth-secured Web Application ] + +In more ideal implementations, the remainder of the application-specific account information is stored either in extended schema on the LDAP server itself, via the use of a translucent LDAP proxy, or in an independent datastore keyed off of the UID provided via SAML assertion. + +Basic AWS API call structure +---------------------------- + +AWS API calls are traditionally secured via Access and Secret Keys, which are used to sign API calls, along with traditional timestamps to prevent replay attacks. The APIs can be logically grouped into sets that align with five typical roles: + +* System User +* System Administrator +* Network Administrator +* Project Manager +* Cloud Administrator +* (IT-Sec?) 
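Stepping back briefly to the request signing mentioned under "Basic AWS API
call structure" above, before continuing with the role breakdown: an
AWS-style signature is an HMAC over the canonicalized request.  A generic
sketch only (Nova's actual implementation lives in ``nova.auth.signer`` and
may differ in details such as URL-encoding)::

    import base64
    import hashlib
    import hmac

    def sign(secret_key, verb, host, path, params):
        # Canonicalize the query string: sorted parameters joined by '&'.
        query = '&'.join('%s=%s' % (k, params[k]) for k in sorted(params))
        to_sign = '\n'.join([verb, host, path, query])
        digest = hmac.new(secret_key, to_sign, hashlib.sha256).digest()
        return base64.b64encode(digest)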
+ +There is an additional, conceptual end-user that may or may not have API access: + +* (EXTERNAL) End-user / Third-party User + +Basic operations are available to any System User: + +* Launch Instance +* Terminate Instance (their own) +* Create keypair +* Delete keypair +* Create, Upload, Delete: Buckets and Keys (Object Store) – their own +* Create, Attach, Delete Volume (Block Store) – their own + +System Administrators: + +* Register/Unregister Machine Image (project-wide) +* Change Machine Image properties (public / private) +* Request / Review CloudAudit Scans + +Network Administrator: + +* Change Firewall Rules, define Security Groups +* Allocate, Associate, Deassociate Public IP addresses + +Project Manager: + +* Launch and Terminate Instances (project-wide) +* CRUD of Object and Block store (project-wide) + +Cloud Administrator: + +* Register / Unregister Kernel and Ramdisk Images +* Register / Unregister Machine Image (any) + +Enhancements +------------ + +* SAML Token passing +* REST interfaces +* SOAP interfaces + +Wrapping the SAML token into the API calls. +Then store the UID (fetched via backchannel) into the instance metadata, providing end-to-end auditability of ownership and responsibility, without PII. + +CloudAudit APIs +--------------- + +* Request formats +* Response formats +* Stateless asynchronous queries + +CloudAudit queries may spawn long-running processes (similar to launching instances, etc.) They need to return a ReservationId in the same fashion, which can be returned in further queries for updates. +RBAC of CloudAudit API calls is critical, since detailed system information is a system vulnerability. + +Type declarations +--------------------- +* Data declarations – Volumes and Objects +* System declarations – Instances + +Existing API calls to launch instances specific a single, combined “type” flag. We propose to extend this with three additional type declarations, mapping to the “Confidentiality, Integrity, Availability” classifications of FIPS 199. An example API call would look like:: + + RunInstances type=m1.large number=1 secgroup=default key=mykey confidentiality=low integrity=low availability=low + +These additional parameters would also apply to creation of block storage volumes (along with the existing parameter of ‘size’), and creation of object storage ‘buckets’. (C.I.A. classifications on a bucket would be inherited by the keys within this bucket.) + +Request Brokering +----------------- + + * Cloud Interop + * IMF Registration / PubSub + * Digital C&A + +Establishing declarative semantics for individual API calls will allow the cloud environment to seamlessly proxy these API calls to external, third-party vendors – when the requested CIA levels match. + +See related work within the Infrastructure 2.0 working group for more information on how the IMF Metadata specification could be utilized to manage registration of these vendors and their C&A credentials. + +Dirty Cloud – Hybrid Data Centers +--------------------------------- + +* CloudAudit bridge interfaces +* Anything in the ARP table + +A hybrid cloud environment provides dedicated, potentially co-located physical hardware with a network interconnect to the project or users’ cloud virtual network. + +This interconnect is typically a bridged VPN connection. 
Any machines that can be bridged into a hybrid environment in this fashion (at Layer 2) must implement a minimum version of the CloudAudit spec, such that they can be queried to provide a complete picture of the IT-sec runtime environment. + +Network discovery protocols (ARP, CDP) can be applied in this case, and existing protocols (SNMP location data, DNS LOC records) overloaded to provide CloudAudit information. + +The Details +----------- + + * Preliminary Roles Definitions + * Categorization of available API calls + * SAML assertion vocabulary + +System limits +------------- + +The following limits need to be defined and enforced: + +* Total number of instances allowed (user / project) +* Total number of instances, per instance type (user / project) +* Total number of volumes (user / project) +* Maximum size of volume +* Cumulative size of all volumes +* Total use of object storage (GB) +* Total number of Public IPs + + +Further Challenges +------------------ + * Prioritization of users / jobs in shared computing environments + * Incident response planning + * Limit launch of instances to specific security groups based on AMI + * Store AMIs in LDAP for added property control + + + +The :mod:`rbac` Module +-------------------------- + +.. automodule:: nova.auth.rbac + :members: + :undoc-members: + :show-inheritance: + +The :mod:`signer` Module +------------------------ + +.. automodule:: nova.auth.signer + :members: + :undoc-members: + :show-inheritance: + +The :mod:`users` Module +----------------------- + +.. automodule:: nova.auth.users + :members: + :undoc-members: + :show-inheritance: + +The :mod:`users_unittest` Module +-------------------------------- + +.. automodule:: nova.tests.users_unittest + :members: + :undoc-members: + :show-inheritance: + +The :mod:`access_unittest` Module +--------------------------------- + +.. automodule:: nova.tests.access_unittest + :members: + :undoc-members: + :show-inheritance: + + diff --git a/doc/source/binaries.rst b/doc/source/binaries.rst new file mode 100644 index 000000000..90a9581f7 --- /dev/null +++ b/doc/source/binaries.rst @@ -0,0 +1,31 @@ +.. + Copyright 2010 United States Government as represented by the + Administrator of the National Aeronautics and Space Administration. + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Nova Binaries +=============== + +* nova-api +* nova-compute +* nova-manage +* nova-objectstore +* nova-volume + +The configuration of these binaries relies on "flagfiles" using the google +gflags package. If present, the nova.conf file will be used as the flagfile +- otherwise, it must be specified on the command line:: + + $ python node_worker.py --flagfile flagfile diff --git a/doc/source/compute.rst b/doc/source/compute.rst new file mode 100644 index 000000000..5b08dbd5b --- /dev/null +++ b/doc/source/compute.rst @@ -0,0 +1,74 @@ +.. + Copyright 2010 United States Government as represented by the + Administrator of the National Aeronautics and Space Administration. + All Rights Reserved. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Compute Documentation +===================== + +This page contains the Compute Package documentation. + + +The :mod:`disk` Module +---------------------- + +.. automodule:: nova.compute.disk + :members: + :undoc-members: + :show-inheritance: + +The :mod:`exception` Module +--------------------------- + +.. automodule:: nova.compute.exception + :members: + :undoc-members: + :show-inheritance: + +The :mod:`model` Module +------------------------- + +.. automodule:: nova.compute.model + :members: + :undoc-members: + :show-inheritance: + +The :mod:`network` Module +------------------------- + +.. automodule:: nova.compute.network + :members: + :undoc-members: + :show-inheritance: + +The :mod:`node` Module +---------------------- + +.. automodule:: nova.compute.node + :members: + :undoc-members: + :show-inheritance: + +RELATED TESTS +--------------- + +The :mod:`node_unittest` Module +------------------------------- + +.. automodule:: nova.tests.node_unittest + :members: + :undoc-members: + :show-inheritance: + diff --git a/doc/source/conf.py b/doc/source/conf.py new file mode 100644 index 000000000..1c1ae7f48 --- /dev/null +++ b/doc/source/conf.py @@ -0,0 +1,202 @@ +# -*- coding: utf-8 -*- +# +# nova documentation build configuration file, created by +# sphinx-quickstart on Sat May 1 15:17:47 2010. +# +# This file is execfile()d with the current directory set to its containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys, os + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +sys.path.append([os.path.abspath('../nova'), os.path.abspath('..'), os.path.abspath('../bin')]) + + +# -- General configuration ----------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig'] +todo_include_todos = True + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'nova' +copyright = u'2010, United States Government as represented by the Administrator of the National Aeronautics and Space Administration.' 
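+
+# NOTE: sys.path.append() takes a single entry, so passing a list (as the
+# call near the top of this file does) appends the list object itself as one
+# element instead of putting ../nova, .. and ../bin on the import path.
+# A sketch of the intended setup, using the same directories, would be:
+#
+#   sys.path.extend([os.path.abspath('../nova'),
+#                    os.path.abspath('..'),
+#                    os.path.abspath('../bin')])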
+ +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = '0.42' +# The full version, including alpha/beta/rc tags. +release = '0.42' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of documents that shouldn't be included in the build. +#unused_docs = [] + +# List of directories, relative to source directory, that shouldn't be searched +# for source files. +exclude_trees = [] + +# The reST default role (used for this markup: `text`) to use for all documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +modindex_common_prefix = ['nova.'] + + +# -- Options for HTML output --------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. Major themes that come with +# Sphinx are currently 'default' and 'sphinxdoc'. +html_theme = 'default' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_use_modindex = True + +# If false, no index is generated. 
+#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = '' + +# Output file base name for HTML help builder. +htmlhelp_basename = 'novadoc' + + +# -- Options for LaTeX output -------------------------------------------------- + +# The paper size ('letter' or 'a4'). +#latex_paper_size = 'letter' + +# The font size ('10pt', '11pt' or '12pt'). +#latex_font_size = '10pt' + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass [howto/manual]). +latex_documents = [ + ('index', 'Nova.tex', u'Nova Documentation', + u'Anso Labs, LLC', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# Additional stuff for the LaTeX preamble. +#latex_preamble = '' + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_use_modindex = True + + +# Example configuration for intersphinx: refer to the Python standard library. +intersphinx_mapping = {'python': ('http://docs.python.org/', None), + 'swift': ('http://swift.openstack.org', None)} + diff --git a/doc/source/endpoint.rst b/doc/source/endpoint.rst new file mode 100644 index 000000000..399df4161 --- /dev/null +++ b/doc/source/endpoint.rst @@ -0,0 +1,91 @@ +.. + Copyright 2010 United States Government as represented by the + Administrator of the National Aeronautics and Space Administration. + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Endpoint Documentation +====================== + +This page contains the Endpoint Package documentation. + +The :mod:`admin` Module +----------------------- + +.. automodule:: nova.endpoint.admin + :members: + :undoc-members: + :show-inheritance: + +The :mod:`api` Module +--------------------- + +.. automodule:: nova.endpoint.api + :members: + :undoc-members: + :show-inheritance: + +The :mod:`cloud` Module +----------------------- + +.. automodule:: nova.endpoint.cloud + :members: + :undoc-members: + :show-inheritance: + +The :mod:`images` Module +------------------------ + +.. automodule:: nova.endpoint.images + :members: + :undoc-members: + :show-inheritance: + + +RELATED TESTS +-------------- + +The :mod:`api_unittest` Module +------------------------------ + +.. 
automodule:: nova.tests.api_unittest + :members: + :undoc-members: + :show-inheritance: + +The :mod:`api_integration` Module +--------------------------------- + +.. automodule:: nova.tests.api_integration + :members: + :undoc-members: + :show-inheritance: + +The :mod:`cloud_unittest` Module +-------------------------------- + +.. automodule:: nova.tests.cloud_unittest + :members: + :undoc-members: + :show-inheritance: + +The :mod:`network_unittest` Module +---------------------------------- + +.. automodule:: nova.tests.network_unittest + :members: + :undoc-members: + :show-inheritance: + + diff --git a/doc/source/fakes.rst b/doc/source/fakes.rst new file mode 100644 index 000000000..bea8bc4e9 --- /dev/null +++ b/doc/source/fakes.rst @@ -0,0 +1,43 @@ +.. + Copyright 2010 United States Government as represented by the + Administrator of the National Aeronautics and Space Administration. + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Nova Fakes +========== + +The :mod:`fakevirt` Module +-------------------------- + +.. automodule:: nova.fakevirt + :members: + :undoc-members: + :show-inheritance: + +The :mod:`fakeldap` Module +-------------------------- + +.. automodule:: nova.auth.fakeldap + :members: + :undoc-members: + :show-inheritance: + +The :mod:`fakerabbit` Module +---------------------------- + +.. automodule:: nova.fakerabbit + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/getting.started.rst b/doc/source/getting.started.rst new file mode 100644 index 000000000..3eadd0882 --- /dev/null +++ b/doc/source/getting.started.rst @@ -0,0 +1,148 @@ +.. + Copyright 2010 United States Government as represented by the + Administrator of the National Aeronautics and Space Administration. + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Getting Started with Nova +========================= + + +GOTTA HAVE A nova.pth file added or it WONT WORK (will write setup.py file soon) + +Create a file named nova.pth in your python libraries directory +(usually /usr/local/lib/python2.6/dist-packages) with a single line that points +to the directory where you checked out the source (that contains the nova/ +directory). 
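+
+For example, assuming the source was checked out to /opt/nova (adjust the
+path to wherever your checkout actually lives), the file can be created with
+a single command::
+
+    $ echo "/opt/nova" | sudo tee /usr/local/lib/python2.6/dist-packages/nova.pth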
+ +DEPENDENCIES +------------ + +Related servers we rely on + +* RabbitMQ: messaging queue, used for all communication between components +* OpenLDAP: users, groups (maybe cut) +* ReDIS: Remote Dictionary Store (for fast, shared state data) +* nginx: HTTP server to handle serving large files (because Tornado can't) + +Python libraries we don't vendor + +* M2Crypto: python library interface for openssl +* curl + +Vendored python libaries (don't require any installation) + +* Tornado: scalable non blocking web server for api requests +* Twisted: just for the twisted.internet.defer package +* boto: python api for aws api +* IPy: library for managing ip addresses + +Recommended +----------------- + +* euca2ools: python implementation of aws ec2-tools and ami tools +* build tornado to use C module for evented section + + +Installation +-------------- +:: + + # system libraries and tools + apt-get install -y aoetools vlan curl + modprobe aoe + + # python libraries + apt-get install -y python-setuptools python-dev python-pycurl python-m2crypto + + # ON THE CLOUD CONTROLLER + apt-get install -y rabbitmq-server dnsmasq nginx + # build redis from 2.0.0-rc1 source + # setup ldap (slap.sh as root will remove ldap and reinstall it) + NOVA_PATH/nova/auth/slap.sh + /etc/init.d/rabbitmq-server start + + # ON VOLUME NODE: + apt-get install -y vblade-persist + + # ON THE COMPUTE NODE: + apt-get install -y python-libvirt + apt-get install -y kpartx kvm libvirt-bin + modprobe kvm + + # optional packages + apt-get install -y euca2ools + +Configuration +--------------- + +ON CLOUD CONTROLLER + +* Add yourself to the libvirtd group, log out, and log back in +* fix hardcoded ec2 metadata/userdata uri ($IP is the IP of the cloud), and masqurade all traffic from launched instances +:: + + iptables -t nat -A PREROUTING -s 0.0.0.0/0 -d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j DNAT --to-destination $IP:8773 + iptables --table nat --append POSTROUTING --out-interface $PUBLICIFACE -j MASQUERADE + + +* Configure NginX proxy (/etc/nginx/sites-enabled/default) + +:: + + server { + listen 3333 default; + server-name localhost; + client_max_body_size 10m; + + access_log /var/log/nginx/localhost.access.log; + + location ~ /_images/.+ { + root NOVA_PATH/images; + rewrite ^/_images/(.*)$ /$1 break; + } + + location / { + proxy_pass http://localhost:3334/; + } + } + +ON VOLUME NODE + +* create a filesystem (you can use an actual disk if you have one spare, default is /dev/sdb) + +:: + + # This creates a 1GB file to create volumes out of + dd if=/dev/zero of=MY_FILE_PATH bs=100M count=10 + losetup --show -f MY_FILE_PATH + # replace loop0 below with whatever losetup returns + echo "--storage_dev=/dev/loop0" >> NOVA_PATH/bin/nova.conf + +Running +--------- + +Launch servers + +* rabbitmq +* redis +* slapd +* nginx + +Launch nova components + +* nova-api +* nova-compute +* nova-objectstore +* nova-volume diff --git a/doc/source/index.rst b/doc/source/index.rst new file mode 100644 index 000000000..6627fe066 --- /dev/null +++ b/doc/source/index.rst @@ -0,0 +1,55 @@ +.. + Copyright 2010 United States Government as represented by the + Administrator of the National Aeronautics and Space Administration. + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. 
You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Welcome to nova's documentation! +================================ + +Nova is a cloud computing fabric controller (the main part of an IaaS system) built to match the popular AWS EC2 and S3 APIs. +It is written in Python, using the Tornado and Twisted frameworks, and relies on the standard AMQP messaging protocol, +and the Redis distributed KVS. +Nova is intended to be easy to extend, and adapt. For example, it currently uses +an LDAP server for users and groups, but also includes a fake LDAP server, +that stores data in Redis. It has extensive test coverage, and uses the +Sphinx toolkit (the same as Python itself) for code and user documentation. +While Nova is currently in Beta use within several organizations, the codebase +is very much under active development - there are bugs! + +Contents: + +.. toctree:: + :maxdepth: 2 + + getting.started + architecture + network + storage + auth + compute + endpoint + nova + fakes + binaries + modules + packages + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` + diff --git a/doc/source/modules.rst b/doc/source/modules.rst new file mode 100644 index 000000000..82c61f008 --- /dev/null +++ b/doc/source/modules.rst @@ -0,0 +1,34 @@ +.. + Copyright 2010 United States Government as represented by the + Administrator of the National Aeronautics and Space Administration. + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Nova Documentation +================== + +This page contains the Nova Modules documentation. + +Modules: +-------- + +.. toctree:: + :maxdepth: 4 + + auth + compute + endpoint + fakes + nova + volume diff --git a/doc/source/network.rst b/doc/source/network.rst new file mode 100644 index 000000000..357a0517f --- /dev/null +++ b/doc/source/network.rst @@ -0,0 +1,88 @@ +.. + Copyright 2010 United States Government as represented by the + Administrator of the National Aeronautics and Space Administration. + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. 
+ +nova Networking +================ + +The nova networking components manage private networks, public IP addressing, VPN connectivity, and firewall rules. + +Components +---------- +There are several key components: + +* NetworkController (Manages address and vlan allocation) +* RoutingNode (NATs public IPs to private IPs, and enforces firewall rules) +* AddressingNode (runs DHCP services for private networks) +* BridgingNode (a subclass of the basic nova ComputeNode) +* TunnelingNode (provides VPN connectivity) + +Component Diagram +----------------- + +Overview:: + + (PUBLIC INTERNET) + | \ + / \ / \ + [RoutingNode] ... [RN] [TunnelingNode] ... [TN] + | \ / | | + | < AMQP > | | + [AddressingNode]-- (VLAN) ... | (VLAN)... (VLAN) --- [AddressingNode] + \ | \ / + / \ / \ / \ / \ + [BridgingNode] ... [BridgingNode] + + + [NetworkController] ... [NetworkController] + \ / + < AMQP > + | + / \ + [CloudController]...[CloudController] + +While this diagram may not make this entirely clear, nodes and controllers communicate exclusively across the message bus (AMQP, currently). + +State Model +----------- +Network State consists of the following facts: + +* VLAN assignment (to a project) +* Private Subnet assignment (to a security group) in a VLAN +* Private IP assignments (to running instances) +* Public IP allocations (to a project) +* Public IP associations (to a private IP / running instance) + +While copies of this state exist in many places (expressed in IPTables rule chains, DHCP hosts files, etc), the controllers rely only on the distributed "fact engine" for state, queried over RPC (currently AMQP). The NetworkController inserts most records into this datastore (allocating addresses, etc) - however, individual nodes update state e.g. when running instances crash. + +The Public Traffic Path +----------------------- + +Public Traffic:: + + (PUBLIC INTERNET) + | + <-- [RoutingNode] + | + [AddressingNode] --> | + ( VLAN ) + | <-- [BridgingNode] + | + + +The RoutingNode is currently implemented using IPTables rules, which implement both NATing of public IP addresses, and the appropriate firewall chains. We are also looking at using Netomata / Clusto to manage NATting within a switch or router, and/or to manage firewall rules within a hardware firewall appliance. + +Similarly, the AddressingNode currently manages running DNSMasq instances for DHCP services. However, we could run an internal DHCP server (using Scapy ala Clusto), or even switch to static addressing by inserting the private address into the disk image the same way we insert the SSH keys. (See compute for more details). diff --git a/doc/source/nova.rst b/doc/source/nova.rst new file mode 100644 index 000000000..4b9c44a5f --- /dev/null +++ b/doc/source/nova.rst @@ -0,0 +1,91 @@ +.. + Copyright 2010 United States Government as represented by the + Administrator of the National Aeronautics and Space Administration. + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. 
+ +NOVA Libraries +=============== + +The :mod:`crypto` Module +------------------------ + +.. automodule:: nova.crypto + :members: + :undoc-members: + :show-inheritance: + +The :mod:`adminclient` Module +----------------------------- + +.. automodule:: nova.adminclient + :members: + :undoc-members: + :show-inheritance: + +The :mod:`datastore` Module +--------------------------- + +.. automodule:: nova.datastore + :members: + :undoc-members: + :show-inheritance: + +The :mod:`exception` Module +--------------------------- + +.. automodule:: nova.exception + :members: + :undoc-members: + :show-inheritance: + +The :mod:`flags` Module +--------------------------- + +.. automodule:: nova.flags + :members: + :undoc-members: + :show-inheritance: + +The :mod:`rpc` Module +--------------------------- + +.. automodule:: nova.rpc + :members: + :undoc-members: + :show-inheritance: + +The :mod:`server` Module +--------------------------- + +.. automodule:: nova.server + :members: + :undoc-members: + :show-inheritance: + +The :mod:`test` Module +--------------------------- + +.. automodule:: nova.test + :members: + :undoc-members: + :show-inheritance: + +The :mod:`utils` Module +--------------------------- + +.. automodule:: nova.utils + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/objectstore.rst b/doc/source/objectstore.rst new file mode 100644 index 000000000..6b8d293f4 --- /dev/null +++ b/doc/source/objectstore.rst @@ -0,0 +1,66 @@ +.. + Copyright 2010 United States Government as represented by the + Administrator of the National Aeronautics and Space Administration. + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Objectstore Documentation +========================= + +This page contains the Objectstore Package documentation. + + +The :mod:`bucket` Module +------------------------ + +.. automodule:: nova.objectstore.bucket + :members: + :undoc-members: + :show-inheritance: + +The :mod:`handler` Module +------------------------- + +.. automodule:: nova.objectstore.handler + :members: + :undoc-members: + :show-inheritance: + +The :mod:`image` Module +----------------------- + +.. automodule:: nova.objectstore.image + :members: + :undoc-members: + :show-inheritance: + +The :mod:`stored` Module +------------------------ + +.. automodule:: nova.objectstore.stored + :members: + :undoc-members: + :show-inheritance: + +RELATED TESTS +------------- + +The :mod:`objectstore_unittest` Module +-------------------------------------- + +.. automodule:: nova.tests.objectstore_unittest + :members: + :undoc-members: + :show-inheritance: + diff --git a/doc/source/packages.rst b/doc/source/packages.rst new file mode 100644 index 000000000..6029ad7d7 --- /dev/null +++ b/doc/source/packages.rst @@ -0,0 +1,29 @@ +.. + Copyright 2010 United States Government as represented by the + Administrator of the National Aeronautics and Space Administration. + All Rights Reserved. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +nova Packages & Dependencies +============================ + +Nova is being built on Ubuntu Lucid. + +The following packages are required: + + apt-get install python-ipy, python-libvirt, python-boto, python-pycurl, python-twisted, python-daemon, python-redis, python-carrot, python-lockfile + +In addition you need to install python: + + * python-gflags - http://code.google.com/p/python-gflags/ diff --git a/doc/source/storage.rst b/doc/source/storage.rst new file mode 100644 index 000000000..f77e5f0e5 --- /dev/null +++ b/doc/source/storage.rst @@ -0,0 +1,31 @@ +.. + Copyright 2010 United States Government as represented by the + Administrator of the National Aeronautics and Space Administration. + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Storage in the Nova Cloud +========================= + +There are three primary classes of storage in a nova cloud environment: + +* Ephemeral Storage (local disk within an instance) +* Volume Storage (network-attached FS) +* Object Storage (redundant KVS with locality and MR) + +.. toctree:: + :maxdepth: 2 + + volume + objectstore diff --git a/doc/source/volume.rst b/doc/source/volume.rst new file mode 100644 index 000000000..619968458 --- /dev/null +++ b/doc/source/volume.rst @@ -0,0 +1,45 @@ +.. + Copyright 2010 United States Government as represented by the + Administrator of the National Aeronautics and Space Administration. + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Volume Documentation +==================== + +Nova uses ata-over-ethernet (AoE) to export storage volumes from multiple storage nodes. These AoE exports are attached (using libvirt) directly to running instances. + +Nova volumes are exported over the primary system VLAN (usually VLAN 1), and not over individual VLANs. + +AoE exports are numbered according to a "shelf and blade" syntax. In order to avoid collisions, we currently perform an AoE-discover of existing exports, and then grab the next unused number. 
(This obviously has race condition problems, and should be replaced by allocating a shelf-id to each storage node.) + +The underlying volumes are LVM logical volumes, created on demand within a single large volume group. + + +The :mod:`storage` Module +------------------------- + +.. automodule:: nova.volume.storage + :members: + :undoc-members: + :show-inheritance: + +The :mod:`storage_unittest` Module +---------------------------------- + +.. automodule:: nova.tests.storage_unittest + :members: + :undoc-members: + :show-inheritance: + diff --git a/docs/.gitignore b/docs/.gitignore deleted file mode 100644 index 88f9974bd..000000000 --- a/docs/.gitignore +++ /dev/null @@ -1 +0,0 @@ -_build/* diff --git a/docs/Makefile b/docs/Makefile deleted file mode 100644 index b2f74e85a..000000000 --- a/docs/Makefile +++ /dev/null @@ -1,89 +0,0 @@ -# Makefile for Sphinx documentation -# - -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = sphinx-build -PAPER = -BUILDDIR = _build - -# Internal variables. -PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . - -.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest - -help: - @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" - -clean: - -rm -rf $(BUILDDIR)/* - -html: - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." - -dirhtml: - $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." - -pickle: - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle - @echo - @echo "Build finished; now you can process the pickle files." - -json: - $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json - @echo - @echo "Build finished; now you can process the JSON files." - -htmlhelp: - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in $(BUILDDIR)/htmlhelp." - -qthelp: - $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp - @echo - @echo "Build finished; now you can run "qcollectiongenerator" with the" \ - ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/nova.qhcp" - @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/nova.qhc" - -latex: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo - @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ - "run these through (pdf)latex." - -changes: - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes - @echo - @echo "The overview file is in $(BUILDDIR)/changes." 
- -linkcheck: - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in $(BUILDDIR)/linkcheck/output.txt." - -doctest: - $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest - @echo "Testing of doctests in the sources finished, look at the " \ - "results in $(BUILDDIR)/doctest/output.txt." diff --git a/docs/_build/.gitignore b/docs/_build/.gitignore deleted file mode 100644 index 72e8ffc0d..000000000 --- a/docs/_build/.gitignore +++ /dev/null @@ -1 +0,0 @@ -* diff --git a/docs/_static/.gitignore b/docs/_static/.gitignore deleted file mode 100644 index e69de29bb..000000000 diff --git a/docs/_templates/.gitignore b/docs/_templates/.gitignore deleted file mode 100644 index e69de29bb..000000000 diff --git a/docs/architecture.rst b/docs/architecture.rst deleted file mode 100644 index 11813d2c8..000000000 --- a/docs/architecture.rst +++ /dev/null @@ -1,48 +0,0 @@ -.. - Copyright 2010 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -nova System Architecture -======================== - -Nova is built on a shared-nothing, messaging-based architecture. All of the major nova components can be run on multiple servers. This means that most component to component communication must go via message queue. In order to avoid blocking each component while waiting for a response, we use deferred objects, with a callback that gets triggered when a response is received. - -In order to achieve shared-nothing with multiple copies of the same component (especially when the component is an API server that needs to reply with state information in a timely fashion), we need to keep all of our system state in a distributed data system. Updates to system state are written into this system, using atomic transactions when necessary. Requests for state are read out of this system. In limited cases, these read calls are memoized within controllers for short periods of time. (Such a limited case would be, for instance, the current list of system users.) - - -Components ----------- - -Below you will find a helpful explanation. - -:: - - [ User Manager ] ---- ( LDAP ) - | - | / [ Storage ] - ( ATAoE ) - [ API server ] -> [ Cloud ] < AMQP > - | \ [ Nodes ] - ( libvirt/kvm ) - < HTTP > - | - [ S3 ] - - -* API: receives http requests from boto, converts commands to/from API format, and sending requests to cloud controller -* Cloud Controller: global state of system, talks to ldap, s3, and node/storage workers through a queue -* Nodes: worker that spawns instances -* S3: tornado based http/s3 server -* User Manager: create/manage users, which are stored in ldap -* Network Controller: allocate and deallocate IPs and VLANs diff --git a/docs/auth.rst b/docs/auth.rst deleted file mode 100644 index 70aca704a..000000000 --- a/docs/auth.rst +++ /dev/null @@ -1,215 +0,0 @@ -.. 
- Copyright 2010 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Auth Documentation -================== - -Nova provides RBAC (Role-based access control) of the AWS-type APIs. We define the following roles: - -Roles-Based Access Control of AWS-style APIs using SAML Assertions -“Achieving FIPS 199 Moderate certification of a hybrid cloud environment using CloudAudit and declarative C.I.A. classifications” - -Introduction --------------- - -We will investigate one method for integrating an AWS-style API with US eAuthentication-compatible federated authentication systems, to achieve access controls and limits based on traditional operational roles. -Additionally, we will look at how combining this approach, with an implementation of the CloudAudit APIs, will allow us to achieve a certification under FIPS 199 Moderate classification for a hybrid cloud environment. - -Relationship of US eAuth to RBAC --------------------------------- - -Typical implementations of US eAuth authentication systems are structured as follows:: - - [ MS Active Directory or other federated LDAP user store ] - --> backends to… - [ SUN Identity Manager or other SAML Policy Controller ] - --> maps URLs to groups… - [ Apache Policy Agent in front of eAuth-secured Web Application ] - -In more ideal implementations, the remainder of the application-specific account information is stored either in extended schema on the LDAP server itself, via the use of a translucent LDAP proxy, or in an independent datastore keyed off of the UID provided via SAML assertion. - -Basic AWS API call structure ----------------------------- - -AWS API calls are traditionally secured via Access and Secret Keys, which are used to sign API calls, along with traditional timestamps to prevent replay attacks. The APIs can be logically grouped into sets that align with five typical roles: - -* System User -* System Administrator -* Network Administrator -* Project Manager -* Cloud Administrator -* (IT-Sec?) 
- -There is an additional, conceptual end-user that may or may not have API access: - -* (EXTERNAL) End-user / Third-party User - -Basic operations are available to any System User: - -* Launch Instance -* Terminate Instance (their own) -* Create keypair -* Delete keypair -* Create, Upload, Delete: Buckets and Keys (Object Store) – their own -* Create, Attach, Delete Volume (Block Store) – their own - -System Administrators: - -* Register/Unregister Machine Image (project-wide) -* Change Machine Image properties (public / private) -* Request / Review CloudAudit Scans - -Network Administrator: - -* Change Firewall Rules, define Security Groups -* Allocate, Associate, Deassociate Public IP addresses - -Project Manager: - -* Launch and Terminate Instances (project-wide) -* CRUD of Object and Block store (project-wide) - -Cloud Administrator: - -* Register / Unregister Kernel and Ramdisk Images -* Register / Unregister Machine Image (any) - -Enhancements ------------- - -* SAML Token passing -* REST interfaces -* SOAP interfaces - -Wrapping the SAML token into the API calls. -Then store the UID (fetched via backchannel) into the instance metadata, providing end-to-end auditability of ownership and responsibility, without PII. - -CloudAudit APIs ---------------- - -* Request formats -* Response formats -* Stateless asynchronous queries - -CloudAudit queries may spawn long-running processes (similar to launching instances, etc.) They need to return a ReservationId in the same fashion, which can be returned in further queries for updates. -RBAC of CloudAudit API calls is critical, since detailed system information is a system vulnerability. - -Type declarations ---------------------- -* Data declarations – Volumes and Objects -* System declarations – Instances - -Existing API calls to launch instances specific a single, combined “type” flag. We propose to extend this with three additional type declarations, mapping to the “Confidentiality, Integrity, Availability” classifications of FIPS 199. An example API call would look like:: - - RunInstances type=m1.large number=1 secgroup=default key=mykey confidentiality=low integrity=low availability=low - -These additional parameters would also apply to creation of block storage volumes (along with the existing parameter of ‘size’), and creation of object storage ‘buckets’. (C.I.A. classifications on a bucket would be inherited by the keys within this bucket.) - -Request Brokering ------------------ - - * Cloud Interop - * IMF Registration / PubSub - * Digital C&A - -Establishing declarative semantics for individual API calls will allow the cloud environment to seamlessly proxy these API calls to external, third-party vendors – when the requested CIA levels match. - -See related work within the Infrastructure 2.0 working group for more information on how the IMF Metadata specification could be utilized to manage registration of these vendors and their C&A credentials. - -Dirty Cloud – Hybrid Data Centers ---------------------------------- - -* CloudAudit bridge interfaces -* Anything in the ARP table - -A hybrid cloud environment provides dedicated, potentially co-located physical hardware with a network interconnect to the project or users’ cloud virtual network. - -This interconnect is typically a bridged VPN connection. 
Any machines that can be bridged into a hybrid environment in this fashion (at Layer 2) must implement a minimum version of the CloudAudit spec, such that they can be queried to provide a complete picture of the IT-sec runtime environment. - -Network discovery protocols (ARP, CDP) can be applied in this case, and existing protocols (SNMP location data, DNS LOC records) overloaded to provide CloudAudit information. - -The Details ------------ - - * Preliminary Roles Definitions - * Categorization of available API calls - * SAML assertion vocabulary - -System limits -------------- - -The following limits need to be defined and enforced: - -* Total number of instances allowed (user / project) -* Total number of instances, per instance type (user / project) -* Total number of volumes (user / project) -* Maximum size of volume -* Cumulative size of all volumes -* Total use of object storage (GB) -* Total number of Public IPs - - -Further Challenges ------------------- - * Prioritization of users / jobs in shared computing environments - * Incident response planning - * Limit launch of instances to specific security groups based on AMI - * Store AMIs in LDAP for added property control - - - -The :mod:`rbac` Module --------------------------- - -.. automodule:: nova.auth.rbac - :members: - :undoc-members: - :show-inheritance: - -The :mod:`signer` Module ------------------------- - -.. automodule:: nova.auth.signer - :members: - :undoc-members: - :show-inheritance: - -The :mod:`users` Module ------------------------ - -.. automodule:: nova.auth.users - :members: - :undoc-members: - :show-inheritance: - -The :mod:`users_unittest` Module --------------------------------- - -.. automodule:: nova.tests.users_unittest - :members: - :undoc-members: - :show-inheritance: - -The :mod:`access_unittest` Module ---------------------------------- - -.. automodule:: nova.tests.access_unittest - :members: - :undoc-members: - :show-inheritance: - - diff --git a/docs/binaries.rst b/docs/binaries.rst deleted file mode 100644 index 90a9581f7..000000000 --- a/docs/binaries.rst +++ /dev/null @@ -1,31 +0,0 @@ -.. - Copyright 2010 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Nova Binaries -=============== - -* nova-api -* nova-compute -* nova-manage -* nova-objectstore -* nova-volume - -The configuration of these binaries relies on "flagfiles" using the google -gflags package. If present, the nova.conf file will be used as the flagfile -- otherwise, it must be specified on the command line:: - - $ python node_worker.py --flagfile flagfile diff --git a/docs/compute.rst b/docs/compute.rst deleted file mode 100644 index 5b08dbd5b..000000000 --- a/docs/compute.rst +++ /dev/null @@ -1,74 +0,0 @@ -.. - Copyright 2010 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. 
- - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Compute Documentation -===================== - -This page contains the Compute Package documentation. - - -The :mod:`disk` Module ----------------------- - -.. automodule:: nova.compute.disk - :members: - :undoc-members: - :show-inheritance: - -The :mod:`exception` Module ---------------------------- - -.. automodule:: nova.compute.exception - :members: - :undoc-members: - :show-inheritance: - -The :mod:`model` Module -------------------------- - -.. automodule:: nova.compute.model - :members: - :undoc-members: - :show-inheritance: - -The :mod:`network` Module -------------------------- - -.. automodule:: nova.compute.network - :members: - :undoc-members: - :show-inheritance: - -The :mod:`node` Module ----------------------- - -.. automodule:: nova.compute.node - :members: - :undoc-members: - :show-inheritance: - -RELATED TESTS ---------------- - -The :mod:`node_unittest` Module -------------------------------- - -.. automodule:: nova.tests.node_unittest - :members: - :undoc-members: - :show-inheritance: - diff --git a/docs/conf.py b/docs/conf.py deleted file mode 100644 index fb3fd1a30..000000000 --- a/docs/conf.py +++ /dev/null @@ -1,202 +0,0 @@ -# -*- coding: utf-8 -*- -# -# nova documentation build configuration file, created by -# sphinx-quickstart on Sat May 1 15:17:47 2010. -# -# This file is execfile()d with the current directory set to its containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys, os - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.append(os.path.abspath('/Users/jmckenty/Projects/cc')) -sys.path.append([os.path.abspath('../nova'),os.path.abspath('../'),os.path.abspath('../vendor')]) - - -# -- General configuration ----------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be extensions -# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig'] -#sphinx_to_github = False -todo_include_todos = True - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -#source_encoding = 'utf-8' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'nova' -copyright = u'2010, United States Government as represented by the Administrator of the National Aeronautics and Space Administration.' 
- -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = '0.42' -# The full version, including alpha/beta/rc tags. -release = '0.42' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -#language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' - -# List of documents that shouldn't be included in the build. -#unused_docs = [] - -# List of directories, relative to source directory, that shouldn't be searched -# for source files. -exclude_trees = ['_build'] - -# The reST default role (used for this markup: `text`) to use for all documents. -#default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -#add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -modindex_common_prefix = ['nova.'] - - -# -- Options for HTML output --------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -html_theme = 'default' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -#html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -#html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -#html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -#html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -#html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -#html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -#html_additional_pages = {} - -# If false, no module index is generated. -#html_use_modindex = True - -# If false, no index is generated. 
-#html_use_index = True - -# If true, the index is split into individual pages for each letter. -#html_split_index = False - -# If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -#html_use_opensearch = '' - -# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = '' - -# Output file base name for HTML help builder. -htmlhelp_basename = 'novadoc' - - -# -- Options for LaTeX output -------------------------------------------------- - -# The paper size ('letter' or 'a4'). -#latex_paper_size = 'letter' - -# The font size ('10pt', '11pt' or '12pt'). -#latex_font_size = '10pt' - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass [howto/manual]). -latex_documents = [ - ('index', 'nova.tex', u'nova Documentation', - u'Anso Labs, LLC', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -#latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -#latex_use_parts = False - -# Additional stuff for the LaTeX preamble. -#latex_preamble = '' - -# Documents to append as an appendix to all manuals. -#latex_appendices = [] - -# If false, no module index is generated. -#latex_use_modindex = True - - -# Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = {'http://docs.python.org/': None} diff --git a/docs/endpoint.rst b/docs/endpoint.rst deleted file mode 100644 index 399df4161..000000000 --- a/docs/endpoint.rst +++ /dev/null @@ -1,91 +0,0 @@ -.. - Copyright 2010 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Endpoint Documentation -====================== - -This page contains the Endpoint Package documentation. - -The :mod:`admin` Module ------------------------ - -.. automodule:: nova.endpoint.admin - :members: - :undoc-members: - :show-inheritance: - -The :mod:`api` Module ---------------------- - -.. automodule:: nova.endpoint.api - :members: - :undoc-members: - :show-inheritance: - -The :mod:`cloud` Module ------------------------ - -.. automodule:: nova.endpoint.cloud - :members: - :undoc-members: - :show-inheritance: - -The :mod:`images` Module ------------------------- - -.. automodule:: nova.endpoint.images - :members: - :undoc-members: - :show-inheritance: - - -RELATED TESTS --------------- - -The :mod:`api_unittest` Module ------------------------------- - -.. automodule:: nova.tests.api_unittest - :members: - :undoc-members: - :show-inheritance: - -The :mod:`api_integration` Module ---------------------------------- - -.. 
automodule:: nova.tests.api_integration - :members: - :undoc-members: - :show-inheritance: - -The :mod:`cloud_unittest` Module --------------------------------- - -.. automodule:: nova.tests.cloud_unittest - :members: - :undoc-members: - :show-inheritance: - -The :mod:`network_unittest` Module ----------------------------------- - -.. automodule:: nova.tests.network_unittest - :members: - :undoc-members: - :show-inheritance: - - diff --git a/docs/fakes.rst b/docs/fakes.rst deleted file mode 100644 index bea8bc4e9..000000000 --- a/docs/fakes.rst +++ /dev/null @@ -1,43 +0,0 @@ -.. - Copyright 2010 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Nova Fakes -========== - -The :mod:`fakevirt` Module --------------------------- - -.. automodule:: nova.fakevirt - :members: - :undoc-members: - :show-inheritance: - -The :mod:`fakeldap` Module --------------------------- - -.. automodule:: nova.auth.fakeldap - :members: - :undoc-members: - :show-inheritance: - -The :mod:`fakerabbit` Module ----------------------------- - -.. automodule:: nova.fakerabbit - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/getting.started.rst b/docs/getting.started.rst deleted file mode 100644 index 3eadd0882..000000000 --- a/docs/getting.started.rst +++ /dev/null @@ -1,148 +0,0 @@ -.. - Copyright 2010 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Getting Started with Nova -========================= - - -GOTTA HAVE A nova.pth file added or it WONT WORK (will write setup.py file soon) - -Create a file named nova.pth in your python libraries directory -(usually /usr/local/lib/python2.6/dist-packages) with a single line that points -to the directory where you checked out the source (that contains the nova/ -directory). 
- -DEPENDENCIES ------------- - -Related servers we rely on - -* RabbitMQ: messaging queue, used for all communication between components -* OpenLDAP: users, groups (maybe cut) -* ReDIS: Remote Dictionary Store (for fast, shared state data) -* nginx: HTTP server to handle serving large files (because Tornado can't) - -Python libraries we don't vendor - -* M2Crypto: python library interface for openssl -* curl - -Vendored python libaries (don't require any installation) - -* Tornado: scalable non blocking web server for api requests -* Twisted: just for the twisted.internet.defer package -* boto: python api for aws api -* IPy: library for managing ip addresses - -Recommended ------------------ - -* euca2ools: python implementation of aws ec2-tools and ami tools -* build tornado to use C module for evented section - - -Installation --------------- -:: - - # system libraries and tools - apt-get install -y aoetools vlan curl - modprobe aoe - - # python libraries - apt-get install -y python-setuptools python-dev python-pycurl python-m2crypto - - # ON THE CLOUD CONTROLLER - apt-get install -y rabbitmq-server dnsmasq nginx - # build redis from 2.0.0-rc1 source - # setup ldap (slap.sh as root will remove ldap and reinstall it) - NOVA_PATH/nova/auth/slap.sh - /etc/init.d/rabbitmq-server start - - # ON VOLUME NODE: - apt-get install -y vblade-persist - - # ON THE COMPUTE NODE: - apt-get install -y python-libvirt - apt-get install -y kpartx kvm libvirt-bin - modprobe kvm - - # optional packages - apt-get install -y euca2ools - -Configuration ---------------- - -ON CLOUD CONTROLLER - -* Add yourself to the libvirtd group, log out, and log back in -* fix hardcoded ec2 metadata/userdata uri ($IP is the IP of the cloud), and masqurade all traffic from launched instances -:: - - iptables -t nat -A PREROUTING -s 0.0.0.0/0 -d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j DNAT --to-destination $IP:8773 - iptables --table nat --append POSTROUTING --out-interface $PUBLICIFACE -j MASQUERADE - - -* Configure NginX proxy (/etc/nginx/sites-enabled/default) - -:: - - server { - listen 3333 default; - server-name localhost; - client_max_body_size 10m; - - access_log /var/log/nginx/localhost.access.log; - - location ~ /_images/.+ { - root NOVA_PATH/images; - rewrite ^/_images/(.*)$ /$1 break; - } - - location / { - proxy_pass http://localhost:3334/; - } - } - -ON VOLUME NODE - -* create a filesystem (you can use an actual disk if you have one spare, default is /dev/sdb) - -:: - - # This creates a 1GB file to create volumes out of - dd if=/dev/zero of=MY_FILE_PATH bs=100M count=10 - losetup --show -f MY_FILE_PATH - # replace loop0 below with whatever losetup returns - echo "--storage_dev=/dev/loop0" >> NOVA_PATH/bin/nova.conf - -Running ---------- - -Launch servers - -* rabbitmq -* redis -* slapd -* nginx - -Launch nova components - -* nova-api -* nova-compute -* nova-objectstore -* nova-volume diff --git a/docs/index.rst b/docs/index.rst deleted file mode 100644 index ef2e8f63e..000000000 --- a/docs/index.rst +++ /dev/null @@ -1,56 +0,0 @@ -.. - Copyright 2010 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. 
You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Welcome to nova's documentation! -================================ - -Nova is a cloud computing fabric controller (the main part of an IaaS system) built to match the popular AWS EC2 and S3 APIs. -It is written in Python, using the Tornado and Twisted frameworks, and relies on the standard AMQP messaging protocol, -and the Redis distributed KVS. -Nova is intended to be easy to extend, and adapt. For example, it currently uses -an LDAP server for users and groups, but also includes a fake LDAP server, -that stores data in Redis. It has extensive test coverage, and uses the -Sphinx toolkit (the same as Python itself) for code and user documentation. -While Nova is currently in Beta use within several organizations, the codebase -is very much under active development - there are bugs! - -Contents: - -.. toctree:: - :maxdepth: 2 - - getting.started - architecture - network - storage - auth - compute - endpoint - nova - fakes - binaries - todo - modules - packages - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` - diff --git a/docs/modules.rst b/docs/modules.rst deleted file mode 100644 index 82c61f008..000000000 --- a/docs/modules.rst +++ /dev/null @@ -1,34 +0,0 @@ -.. - Copyright 2010 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Nova Documentation -================== - -This page contains the Nova Modules documentation. - -Modules: --------- - -.. toctree:: - :maxdepth: 4 - - auth - compute - endpoint - fakes - nova - volume diff --git a/docs/network.rst b/docs/network.rst deleted file mode 100644 index 357a0517f..000000000 --- a/docs/network.rst +++ /dev/null @@ -1,88 +0,0 @@ -.. - Copyright 2010 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. 
- -nova Networking -================ - -The nova networking components manage private networks, public IP addressing, VPN connectivity, and firewall rules. - -Components ----------- -There are several key components: - -* NetworkController (Manages address and vlan allocation) -* RoutingNode (NATs public IPs to private IPs, and enforces firewall rules) -* AddressingNode (runs DHCP services for private networks) -* BridgingNode (a subclass of the basic nova ComputeNode) -* TunnelingNode (provides VPN connectivity) - -Component Diagram ------------------ - -Overview:: - - (PUBLIC INTERNET) - | \ - / \ / \ - [RoutingNode] ... [RN] [TunnelingNode] ... [TN] - | \ / | | - | < AMQP > | | - [AddressingNode]-- (VLAN) ... | (VLAN)... (VLAN) --- [AddressingNode] - \ | \ / - / \ / \ / \ / \ - [BridgingNode] ... [BridgingNode] - - - [NetworkController] ... [NetworkController] - \ / - < AMQP > - | - / \ - [CloudController]...[CloudController] - -While this diagram may not make this entirely clear, nodes and controllers communicate exclusively across the message bus (AMQP, currently). - -State Model ------------ -Network State consists of the following facts: - -* VLAN assignment (to a project) -* Private Subnet assignment (to a security group) in a VLAN -* Private IP assignments (to running instances) -* Public IP allocations (to a project) -* Public IP associations (to a private IP / running instance) - -While copies of this state exist in many places (expressed in IPTables rule chains, DHCP hosts files, etc), the controllers rely only on the distributed "fact engine" for state, queried over RPC (currently AMQP). The NetworkController inserts most records into this datastore (allocating addresses, etc) - however, individual nodes update state e.g. when running instances crash. - -The Public Traffic Path ------------------------ - -Public Traffic:: - - (PUBLIC INTERNET) - | - <-- [RoutingNode] - | - [AddressingNode] --> | - ( VLAN ) - | <-- [BridgingNode] - | - - -The RoutingNode is currently implemented using IPTables rules, which implement both NATing of public IP addresses, and the appropriate firewall chains. We are also looking at using Netomata / Clusto to manage NATting within a switch or router, and/or to manage firewall rules within a hardware firewall appliance. - -Similarly, the AddressingNode currently manages running DNSMasq instances for DHCP services. However, we could run an internal DHCP server (using Scapy ala Clusto), or even switch to static addressing by inserting the private address into the disk image the same way we insert the SSH keys. (See compute for more details). diff --git a/docs/nova.rst b/docs/nova.rst deleted file mode 100644 index 4b9c44a5f..000000000 --- a/docs/nova.rst +++ /dev/null @@ -1,91 +0,0 @@ -.. - Copyright 2010 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. 
- -NOVA Libraries -=============== - -The :mod:`crypto` Module ------------------------- - -.. automodule:: nova.crypto - :members: - :undoc-members: - :show-inheritance: - -The :mod:`adminclient` Module ------------------------------ - -.. automodule:: nova.adminclient - :members: - :undoc-members: - :show-inheritance: - -The :mod:`datastore` Module ---------------------------- - -.. automodule:: nova.datastore - :members: - :undoc-members: - :show-inheritance: - -The :mod:`exception` Module ---------------------------- - -.. automodule:: nova.exception - :members: - :undoc-members: - :show-inheritance: - -The :mod:`flags` Module ---------------------------- - -.. automodule:: nova.flags - :members: - :undoc-members: - :show-inheritance: - -The :mod:`rpc` Module ---------------------------- - -.. automodule:: nova.rpc - :members: - :undoc-members: - :show-inheritance: - -The :mod:`server` Module ---------------------------- - -.. automodule:: nova.server - :members: - :undoc-members: - :show-inheritance: - -The :mod:`test` Module ---------------------------- - -.. automodule:: nova.test - :members: - :undoc-members: - :show-inheritance: - -The :mod:`utils` Module ---------------------------- - -.. automodule:: nova.utils - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/objectstore.rst b/docs/objectstore.rst deleted file mode 100644 index 6b8d293f4..000000000 --- a/docs/objectstore.rst +++ /dev/null @@ -1,66 +0,0 @@ -.. - Copyright 2010 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Objectstore Documentation -========================= - -This page contains the Objectstore Package documentation. - - -The :mod:`bucket` Module ------------------------- - -.. automodule:: nova.objectstore.bucket - :members: - :undoc-members: - :show-inheritance: - -The :mod:`handler` Module -------------------------- - -.. automodule:: nova.objectstore.handler - :members: - :undoc-members: - :show-inheritance: - -The :mod:`image` Module ------------------------ - -.. automodule:: nova.objectstore.image - :members: - :undoc-members: - :show-inheritance: - -The :mod:`stored` Module ------------------------- - -.. automodule:: nova.objectstore.stored - :members: - :undoc-members: - :show-inheritance: - -RELATED TESTS -------------- - -The :mod:`objectstore_unittest` Module --------------------------------------- - -.. automodule:: nova.tests.objectstore_unittest - :members: - :undoc-members: - :show-inheritance: - diff --git a/docs/packages.rst b/docs/packages.rst deleted file mode 100644 index 6029ad7d7..000000000 --- a/docs/packages.rst +++ /dev/null @@ -1,29 +0,0 @@ -.. - Copyright 2010 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. 
You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -nova Packages & Dependencies -============================ - -Nova is being built on Ubuntu Lucid. - -The following packages are required: - - apt-get install python-ipy, python-libvirt, python-boto, python-pycurl, python-twisted, python-daemon, python-redis, python-carrot, python-lockfile - -In addition you need to install python: - - * python-gflags - http://code.google.com/p/python-gflags/ diff --git a/docs/storage.rst b/docs/storage.rst deleted file mode 100644 index f77e5f0e5..000000000 --- a/docs/storage.rst +++ /dev/null @@ -1,31 +0,0 @@ -.. - Copyright 2010 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Storage in the Nova Cloud -========================= - -There are three primary classes of storage in a nova cloud environment: - -* Ephemeral Storage (local disk within an instance) -* Volume Storage (network-attached FS) -* Object Storage (redundant KVS with locality and MR) - -.. toctree:: - :maxdepth: 2 - - volume - objectstore diff --git a/docs/volume.rst b/docs/volume.rst deleted file mode 100644 index 619968458..000000000 --- a/docs/volume.rst +++ /dev/null @@ -1,45 +0,0 @@ -.. - Copyright 2010 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Volume Documentation -==================== - -Nova uses ata-over-ethernet (AoE) to export storage volumes from multiple storage nodes. These AoE exports are attached (using libvirt) directly to running instances. - -Nova volumes are exported over the primary system VLAN (usually VLAN 1), and not over individual VLANs. - -AoE exports are numbered according to a "shelf and blade" syntax. In order to avoid collisions, we currently perform an AoE-discover of existing exports, and then grab the next unused number. (This obviously has race condition problems, and should be replaced by allocating a shelf-id to each storage node.) 
- -The underlying volumes are LVM logical volumes, created on demand within a single large volume group. - - -The :mod:`storage` Module -------------------------- - -.. automodule:: nova.volume.storage - :members: - :undoc-members: - :show-inheritance: - -The :mod:`storage_unittest` Module ----------------------------------- - -.. automodule:: nova.tests.storage_unittest - :members: - :undoc-members: - :show-inheritance: - diff --git a/nova/compute/disk.py b/nova/compute/disk.py index 08a22556e..5749d4c6a 100644 --- a/nova/compute/disk.py +++ b/nova/compute/disk.py @@ -40,7 +40,8 @@ def partition(infile, outfile, local_bytes=0, local_type='ext2', execute=None): formatted as ext2. In the diagram below, dashes represent drive sectors. - 0 a b c d e + +-----+------. . .-------+------. . .------+ + | 0 a| b c|d e| +-----+------. . .-------+------. . .------+ | mbr | primary partiton | local partition | +-----+------. . .-------+------. . .------+ diff --git a/setup.cfg b/setup.cfg index 278586962..839472544 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,4 +1,4 @@ [build_sphinx] -source-dir = docs -build-dir = docs/_build +source-dir = doc/source +build-dir = doc/build all_files = 1 -- cgit From 01dffdd79098429a448cf283119d9026ae1231c2 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Sun, 25 Jul 2010 14:29:20 +0200 Subject: Always make sure to set a Date headers, since it's needed to calculate the S3 Auth header. --- nova/compute/node.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/nova/compute/node.py b/nova/compute/node.py index 7c09d9583..a01cb3070 100644 --- a/nova/compute/node.py +++ b/nova/compute/node.py @@ -47,12 +47,12 @@ from nova import flags from nova import process from nova import utils from nova.auth import signer +from nova.auth.users import UserManager from nova.compute import disk from nova.compute import model from nova.compute import network from nova.objectstore import image # for image_path flag from nova.volume import storage -from nova.users import UserManager FLAGS = flags.FLAGS @@ -449,12 +449,18 @@ class Instance(object): def _fetch_s3_image(self, image, path): url = _image_url('%s/image' % image) + headers = {} + headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) + user_id = self.datamodel['user_id'] user = UserManager.instance().get_user(user_id) - auth = signer.Signer(user.secret.encode()).s3_authorization({}, 'GET', url) - auth_header = 'Authorization: %s:%s' % (user_id, auth) - d = process.simple_execute( - 'curl --silent %s -o "%s"' % (url, auth_header, path)) + auth = signer.Signer(user.secret.encode()).s3_authorization(headers, 'GET', url) + headers['Authorization'] = auth + + headers_opt_string = ' '.join(['-H %s:%s' % (k,v) for (k,v) in headers.iteritems()]) + d = process.simple_execute('curl --silent %s ' + '%s -o "%s"' % (url, headers_opt_string, + path)) return d def _fetch_local_image(self, image, path): -- cgit From 1a53eaeed901f3c789ebdb867b73996ccac608c3 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Sun, 25 Jul 2010 15:00:37 +0100 Subject: Fix assertion "Someone released me too many times: too many tokens!" when more than one process was running at the same time. This was caused by the override of SharedPool.__new__ not stopping ProcessPool.__init__ from being run whenever process.simple_execute is called. When __init__ ran for the second time, the DeferredSemaphore was replaced, and this meant that we ended up releasing a different semaphore to the one that was acquired. 
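The failure described above is a general Python pitfall rather than anything specific to the process pool: overriding __new__ to hand back a cached instance does not stop __init__ from running again on every call, so per-instance state (here the DeferredSemaphore) is silently rebuilt between acquire and release. A minimal, self-contained sketch of that behaviour (illustrative names only, not the Nova classes)::

    class Pool(object):
        _instance = None

        def __new__(cls):
            if cls._instance is None:
                cls._instance = super(Pool, cls).__new__(cls)
            return cls._instance

        def __init__(self):
            # Runs on *every* Pool() call, even though __new__ keeps
            # returning the same object; this is what replaced the
            # semaphore in the bug described above.
            self.semaphore = object()

    first = Pool()
    token = first.semaphore
    second = Pool()
    assert first is second                  # same instance ...
    assert second.semaphore is not token    # ... but its state was rebuilt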
--- nova/process.py | 13 ++++++------- nova/tests/process_unittest.py | 7 +++++++ 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/nova/process.py b/nova/process.py index d3558ed2e..8ecef1584 100644 --- a/nova/process.py +++ b/nova/process.py @@ -205,13 +205,12 @@ class ProcessPool(object): self._pool.release() return rv -class SharedPool(ProcessPool): - _instance = None - def __new__(cls, *args, **kwargs): - if not cls._instance: - cls._instance = super(SharedPool, cls).__new__( - cls, *args, **kwargs) - return cls._instance +_instance = None +def SharedPool(): + global _instance + if _instance is None: + _instance = ProcessPool() + return _instance def simple_execute(cmd, **kwargs): return SharedPool().simple_execute(cmd, **kwargs) diff --git a/nova/tests/process_unittest.py b/nova/tests/process_unittest.py index 1c15b69a0..c96bb5913 100644 --- a/nova/tests/process_unittest.py +++ b/nova/tests/process_unittest.py @@ -120,3 +120,10 @@ class ProcessTestCase(test.TrialTestCase): pool2 = process.SharedPool() self.assert_(id(pool1) == id(pool2)) + def test_shared_pool_works_as_singleton(self): + d1 = process.simple_execute('sleep 1') + d2 = process.simple_execute('sleep 0.005') + # lp609749: would have failed with + # exceptions.AssertionError: Someone released me too many times: + # too many tokens! + return d1 -- cgit From b2d769cb92dce5be26288c8e389491cf554b5703 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Sun, 25 Jul 2010 15:08:48 +0100 Subject: Removed creation of process pools. We don't use these any more now that we're using process.simple_execute. --- nova/virt/libvirt_conn.py | 1 - nova/virt/xenapi.py | 1 - 2 files changed, 2 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 39ed9bd78..30a182057 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -68,7 +68,6 @@ class LibvirtConnection(object): self._conn = libvirt.openReadOnly('qemu:///system') else: self._conn = libvirt.openAuth('qemu:///system', auth, 0) - self._pool = process.ProcessPool() def list_instances(self): diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index 46ff9c5e4..58fcd79c5 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -48,7 +48,6 @@ class XenAPIConnection(object): def __init__(self, url, user, pw): self._conn = XenAPI.Session(url) self._conn.login_with_password(user, pw) - self._pool = process.Pool() def list_instances(self): result = [self._conn.xenapi.VM.get_name_label(vm) \ -- cgit From dd17c810029d142abaa8f0c97e30a8ae1dc13b5c Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Sun, 25 Jul 2010 16:17:55 +0200 Subject: Return a 404 when attempting to access a bucket that does not exist. 
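In outline, the handler change in the diff below catches the datastore's "not found" error and renders Twisted's stock NoResource page, instead of letting the exception surface as a 500 internal error. A trimmed sketch of that shape (the lookup callable and resource wiring here are illustrative; the real change, including the authorization check, is in nova/objectstore/handler.py)::

    from twisted.web import error, resource

    class BucketResource(resource.Resource):
        def __init__(self, name, lookup):
            resource.Resource.__init__(self)
            self.name = name
            self._lookup = lookup   # assumed to raise KeyError when the bucket is missing

        def render_GET(self, request):
            try:
                bucket_object = self._lookup(self.name)
            except KeyError:
                # Render a 404 body rather than bubbling up as an internal error.
                return error.NoResource(message="No such bucket").render(request)
            return "bucket %s\n" % bucket_object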
--- nova/objectstore/handler.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py index 558b04881..1a54f8a98 100644 --- a/nova/objectstore/handler.py +++ b/nova/objectstore/handler.py @@ -47,7 +47,7 @@ import urllib from twisted.application import internet, service from twisted.web.resource import Resource -from twisted.web import server, static +from twisted.web import server, static, error from nova import exception @@ -150,7 +150,10 @@ class BucketResource(Resource): def render_GET(self, request): logging.debug("List keys for bucket %s" % (self.name)) - bucket_object = bucket.Bucket(self.name) + try: + bucket_object = bucket.Bucket(self.name) + except exception.NotFound, e: + return error.NoResource(message="No such bucket").render(request) if not bucket_object.is_authorized(request.context): raise exception.NotAuthorized -- cgit From 480d1b6d5dd12490298b2b4d3e62f40917390bde Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Sun, 25 Jul 2010 15:32:10 +0100 Subject: Add an import so that nova-compute sees the images_path flag, so that it can be used on the command line. --- bin/nova-compute | 1 + 1 file changed, 1 insertion(+) diff --git a/bin/nova-compute b/bin/nova-compute index 5635efbaf..63d57a765 100755 --- a/bin/nova-compute +++ b/bin/nova-compute @@ -43,6 +43,7 @@ from nova import flags from nova import rpc from nova import twistd from nova.compute import node +from nova.objectstore import image # For the images_path flag FLAGS = flags.FLAGS -- cgit From fdea01a233e72551e750a5beaca0739ec8173ac3 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Sun, 25 Jul 2010 17:28:39 +0100 Subject: Set durable=False on TopicPublisher, so that it matches the flag on TopicConsumer. This ensures that either redeclaration of the control_exchange will use the same flag, and avoid AMQPChannelException. --- nova/rpc.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/rpc.py b/nova/rpc.py index ef463e84b..5a2f4b3ad 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -151,6 +151,7 @@ class TopicPublisher(Publisher): def __init__(self, connection=None, topic="broadcast"): self.routing_key = topic self.exchange = FLAGS.control_exchange + self.durable = False super(TopicPublisher, self).__init__(connection=connection) -- cgit From ad2250ac0080ca35b1fd2747e3f4d0ff07bc90be Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Sun, 25 Jul 2010 17:40:41 +0100 Subject: Replace hardcoded "nova" with FLAGS.control_exchange. --- nova/rpc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/rpc.py b/nova/rpc.py index ef463e84b..5610ea124 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -242,7 +242,7 @@ def send_message(topic, message, wait=True): consumer.register_callback(generic_response) publisher = messaging.Publisher(connection=Connection.instance(), - exchange="nova", + exchange=FLAGS.control_exchange, exchange_type="topic", routing_key=topic) publisher.send(message) -- cgit From a8c8aed28ce5d1d9eadcbecab03f6bc3bec8e622 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Sun, 25 Jul 2010 19:09:12 +0100 Subject: Fix references to get_argument, fixing internal error when calling euca-deregister. 
--- nova/objectstore/handler.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py index c670ee02f..fd1ed848c 100644 --- a/nova/objectstore/handler.py +++ b/nova/objectstore/handler.py @@ -273,8 +273,8 @@ class ImageResource(Resource): def render_POST(self, request): """ update image attributes: public/private """ - image_id = self.get_argument('image_id', u'') - operation = self.get_argument('operation', u'') + image_id = get_argument(request, 'image_id', u'') + operation = get_argument(request, 'operation', u'') image_object = image.Image(image_id) @@ -287,7 +287,7 @@ class ImageResource(Resource): def render_DELETE(self, request): """ delete a registered image """ - image_id = self.get_argument("image_id", u"") + image_id = get_argument(request, "image_id", u"") image_object = image.Image(image_id) if not image.is_authorized(request.context): -- cgit From f7962c73aa9835c76857005ab56f512fbc9eebfd Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sun, 25 Jul 2010 11:20:09 -0700 Subject: More Cleanup of code Moved code in AuthManager init to new so it isn't called multiple times Changed AuthManager flag to specify class name as well as module name Added exception for missing auth_driver Changed import to use "recommended" style for nested imports http://docs.python.org/dev/library/functions.html#__import__ --- bin/nova-dhcpbridge | 2 +- nova/auth/fakeldapdriver.py | 32 -------------------------------- nova/auth/ldapdriver.py | 12 ++++++++++-- nova/auth/manager.py | 22 ++++++++++++++++------ nova/tests/fake_flags.py | 2 +- nova/tests/network_unittest.py | 2 +- 6 files changed, 29 insertions(+), 43 deletions(-) delete mode 100644 nova/auth/fakeldapdriver.py diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index ece7ffc8c..c519c6ccb 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -78,7 +78,7 @@ def main(): FLAGS.network_size = 32 FLAGS.fake_libvirt=True FLAGS.fake_network=True - FLAGS.auth_driver='nova.auth.fakeldapdriver' + FLAGS.auth_driver='nova.auth.ldapdriver.FakeLdapDriver' action = argv[1] if action in ['add','del','old']: mac = argv[2] diff --git a/nova/auth/fakeldapdriver.py b/nova/auth/fakeldapdriver.py deleted file mode 100644 index 833548c79..000000000 --- a/nova/auth/fakeldapdriver.py +++ /dev/null @@ -1,32 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Fake Auth driver for ldap - -""" - -from nova.auth import ldapdriver - -class AuthDriver(ldapdriver.AuthDriver): - """Ldap Auth driver - - Defines enter and exit and therefore supports the with/as syntax. 
- """ - def __init__(self): - self.ldap = __import__('nova.auth.fakeldap', fromlist=True) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 0535977af..1591c88e9 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -17,7 +17,7 @@ # under the License. """ -Auth driver for ldap +Auth driver for ldap. Includes FakeLdapDriver. It should be easy to create a replacement for this driver supporting other backends by creating another class that exposes the same @@ -25,6 +25,7 @@ public methods. """ import logging +import sys from nova import exception from nova import flags @@ -61,7 +62,7 @@ flags.DEFINE_string('ldap_developer', # to define a set interface for AuthDrivers. I'm delaying # creating this now because I'm expecting an auth refactor # in which we may want to change the interface a bit more. -class AuthDriver(object): +class LdapDriver(object): """Ldap Auth driver Defines enter and exit and therefore supports the with/as syntax. @@ -471,3 +472,10 @@ class AuthDriver(object): """Convert uid to dn""" return 'uid=%s,%s' % (dn, FLAGS.ldap_user_subtree) + +class FakeLdapDriver(LdapDriver): + """Fake Ldap Auth driver""" + def __init__(self): + __import__('nova.auth.fakeldap') + self.ldap = sys.modules['nova.auth.fakeldap'] + diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 130bed7c2..32c2f9e02 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -24,6 +24,7 @@ import logging import os import shutil import string +import sys import tempfile import uuid import zipfile @@ -75,7 +76,7 @@ flags.DEFINE_string('credential_cert_subject', flags.DEFINE_string('vpn_ip', '127.0.0.1', 'Public IP for the cloudpipe VPN servers') -flags.DEFINE_string('auth_driver', 'fakeldapdriver', +flags.DEFINE_string('auth_driver', 'nova.auth.ldapdriver.AuthDriver', 'Driver that auth manager uses') class AuthBase(object): @@ -320,16 +321,25 @@ class AuthManager(object): """ _instance=None def __new__(cls, *args, **kwargs): + """Returns the AuthManager singleton with driver set + + __init__ is run every time AuthManager() is called, so we need to do + any constructor related stuff here. The driver that is specified + in the flagfile is loaded here. 
+ """ if not cls._instance: cls._instance = super(AuthManager, cls).__new__( cls, *args, **kwargs) + mod_str, sep, driver_str = FLAGS.auth_driver.rpartition('.') + try: + mod = __import__(mod_str) + cls._instance.driver = getattr(sys.modules[mod_str], + driver_str) + except (ImportError, AttributeError): + raise exception.Error('Auth driver %s cannot be found' + % FLAGS.auth_driver) return cls._instance - def __init__(self, *args, **kwargs): - """Imports the driver module and saves the Driver class""" - mod = __import__(FLAGS.auth_driver, fromlist=True) - self.driver = mod.AuthDriver - def authenticate(self, access, signature, params, verb='GET', server_string='127.0.0.1:8773', path='/', verify_signature=True): diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py index 57575b44b..304f24841 100644 --- a/nova/tests/fake_flags.py +++ b/nova/tests/fake_flags.py @@ -24,5 +24,5 @@ FLAGS.fake_libvirt = True FLAGS.fake_storage = True FLAGS.fake_rabbit = True FLAGS.fake_network = True -FLAGS.auth_driver = 'nova.auth.fakeldapdriver' +FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver' FLAGS.verbose = True diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index 12840e736..9e17bf155 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -37,7 +37,7 @@ class NetworkTestCase(test.TrialTestCase): self.flags(fake_libvirt=True, fake_storage=True, fake_network=True, - auth_driver='nova.auth.fakeldapdriver', + auth_driver='nova.auth.ldapdriver.FakeLdapDriver', network_size=32) logging.getLogger().setLevel(logging.DEBUG) self.manager = manager.AuthManager() -- cgit From 3233f7a964564fba9ec88c277d566eebed50d12a Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sun, 25 Jul 2010 11:23:24 -0700 Subject: removed unused assignment --- nova/auth/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 32c2f9e02..93330790b 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -332,7 +332,7 @@ class AuthManager(object): cls, *args, **kwargs) mod_str, sep, driver_str = FLAGS.auth_driver.rpartition('.') try: - mod = __import__(mod_str) + __import__(mod_str) cls._instance.driver = getattr(sys.modules[mod_str], driver_str) except (ImportError, AttributeError): -- cgit From 0278767e0dc41444b889f904e6e49d26be5a54c4 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Sun, 25 Jul 2010 19:25:42 +0100 Subject: Fix references to image_object. This caused an internal error when using euca-deregister. --- nova/objectstore/handler.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py index fd1ed848c..ae3ffa0eb 100644 --- a/nova/objectstore/handler.py +++ b/nova/objectstore/handler.py @@ -278,7 +278,7 @@ class ImageResource(Resource): image_object = image.Image(image_id) - if not image.is_authorized(request.context): + if not image_object.is_authorized(request.context): raise exception.NotAuthorized image_object.set_public(operation=='add') @@ -290,7 +290,7 @@ class ImageResource(Resource): image_id = get_argument(request, "image_id", u"") image_object = image.Image(image_id) - if not image.is_authorized(request.context): + if not image_object.is_authorized(request.context): raise exception.NotAuthorized image_object.delete() -- cgit From 6d636cd416d4a0f8a778ea9cb04c41de6299714e Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Sun, 25 Jul 2010 20:31:53 +0100 Subject: Fix instance cleanup. 
--- nova/virt/libvirt_conn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 30a182057..2c34711bc 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -83,7 +83,7 @@ class LibvirtConnection(object): pass # If the instance is already terminated, we're still happy d = defer.Deferred() - d.addCallback(lambda x: self._cleanup()) + d.addCallback(lambda _: self._cleanup(instance)) # FIXME: What does this comment mean? # TODO(termie): short-circuit me for tests # WE'LL save this for when we do shutdown, -- cgit From c5edaa2186add12947185cb1fd47e0a48eccafa9 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Sun, 25 Jul 2010 20:32:33 +0100 Subject: Replace hardcoded example URL, username, and password with flags called xenapi_connection_url, xenapi_connection_username, xenapi_connection_password. --- nova/virt/xenapi.py | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index 58fcd79c5..dc372e3e3 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -30,6 +30,17 @@ from nova.compute import power_state XenAPI = None +FLAGS = flags.FLAGS +flags.DEFINE_string('xenapi_connection_url', + None, + 'URL for connection to XenServer/Xen Cloud Platform. Required if connection_type=xenapi.') +flags.DEFINE_string('xenapi_connection_username', + 'root', + 'Username for connection to XenServer/Xen Cloud Platform. Used only if connection_type=xenapi.') +flags.DEFINE_string('xenapi_connection_password', + None, + 'Password for connection to XenServer/Xen Cloud Platform. Used only if connection_type=xenapi.') + def get_connection(_): """Note that XenAPI doesn't have a read-only connection mode, so @@ -37,10 +48,14 @@ def get_connection(_): # This is loaded late so that there's no need to install this # library when not using XenAPI. global XenAPI - if XenAPI is not None: + if XenAPI is None: XenAPI = __import__('XenAPI') - return XenAPIConnection('http://eli.testdev.hq.xensource.com', - 'root', 'xensource') + url = FLAGS.xenapi_connection_url + username = FLAGS.xenapi_connection_username + password = FLAGS.xenapi_connection_password + if not url or password is None: + raise Exception('Must specify xenapi_connection_url, xenapi_connection_username (optionally), and xenapi_connection_password to use connection_type=xenapi') + return XenAPIConnection(url, username, password) class XenAPIConnection(object): -- cgit From d5051bf90909f693a74ea0f2bdc3db341460a5a3 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Mon, 26 Jul 2010 16:03:23 +0200 Subject: Add a simple set of tests for S3 API (using boto). 
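The tests added in the diff below drive the objectstore with a real, blocking boto S3Connection while the Twisted site under test runs in the same process and on the same reactor. Each boto call is therefore pushed onto a worker thread with threads.deferToThread, keeping the reactor free to serve the request, and the resulting Deferred is returned to Trial. A condensed sketch of that pattern (bucket name and assertion are illustrative; the full version is in nova/tests/objectstore_unittest.py)::

    from twisted.internet import threads

    def test_create_and_list_bucket(self):
        # self.conn is a blocking boto S3Connection pointed at the
        # in-process twisted.web site; deferToThread keeps the reactor
        # free to actually serve each request.
        d = threads.deferToThread(self.conn.create_bucket, 'testbucket')
        d.addCallback(lambda _: threads.deferToThread(self.conn.get_all_buckets))
        d.addCallback(lambda buckets: self.assertEqual(buckets[0].name, 'testbucket'))
        return d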
--- nova/objectstore/handler.py | 14 ++-- nova/tests/objectstore_unittest.py | 131 ++++++++++++++++++++++++++++--------- 2 files changed, 105 insertions(+), 40 deletions(-) diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py index 1a54f8a98..45eeef163 100644 --- a/nova/objectstore/handler.py +++ b/nova/objectstore/handler.py @@ -108,8 +108,6 @@ def get_context(request): raise exception.NotAuthorized access, sep, secret = authorization_header.split(' ')[1].rpartition(':') um = users.UserManager.instance() - print 'um %s' % um - (user, project) = um.authenticate(access, secret, {}, request.method, request.getRequestHostname(), request.uri, headers=request.getAllHeaders(), check_type='s3') return api.APIRequestContext(None, user, project) except exception.Error as ex: @@ -169,10 +167,6 @@ class BucketResource(Resource): def render_PUT(self, request): logging.debug("Creating bucket %s" % (self.name)) - try: - print 'user is %s' % request.context - except Exception as e: - logging.exception(e) logging.debug("calling bucket.Bucket.create(%r, %r)" % (self.name, request.context)) bucket.Bucket.create(self.name, request.context) request.finish() @@ -300,9 +294,13 @@ class ImageResource(Resource): request.setResponseCode(204) return '' -def get_application(): +def get_site(): root = S3() - factory = server.Site(root) + site = server.Site(root) + return site + +def get_application(): + factory = get_site() application = service.Application("objectstore") objectStoreService = internet.TCPServer(FLAGS.s3_port, factory) objectStoreService.setServiceParent(application) diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py index f47ca7f00..ef1a477ff 100644 --- a/nova/tests/objectstore_unittest.py +++ b/nova/tests/objectstore_unittest.py @@ -16,6 +16,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+import boto import glob import hashlib import logging @@ -27,7 +28,11 @@ from nova import flags from nova import objectstore from nova import test from nova.auth import users +from nova.objectstore.handler import S3 +from boto.s3.connection import S3Connection, OrdinaryCallingFormat +from twisted.internet import reactor, threads, defer +from twisted.web import http, server FLAGS = flags.FLAGS @@ -169,35 +174,97 @@ class ObjectStoreTestCase(test.BaseTestCase): self.context.project = self.um.get_project('proj2') self.assert_(my_img.is_authorized(self.context) == False) -# class ApiObjectStoreTestCase(test.BaseTestCase): -# def setUp(self): -# super(ApiObjectStoreTestCase, self).setUp() -# FLAGS.fake_users = True -# FLAGS.buckets_path = os.path.join(tempdir, 'buckets') -# FLAGS.images_path = os.path.join(tempdir, 'images') -# FLAGS.ca_path = os.path.join(os.path.dirname(__file__), 'CA') -# -# self.users = users.UserManager.instance() -# self.app = handler.Application(self.users) -# -# self.host = '127.0.0.1' -# -# self.conn = boto.s3.connection.S3Connection( -# aws_access_key_id=user.access, -# aws_secret_access_key=user.secret, -# is_secure=False, -# calling_format=boto.s3.connection.OrdinaryCallingFormat(), -# port=FLAGS.s3_port, -# host=FLAGS.s3_host) -# -# self.mox.StubOutWithMock(self.ec2, 'new_http_connection') -# -# def tearDown(self): -# FLAGS.Reset() -# super(ApiObjectStoreTestCase, self).tearDown() -# -# def test_describe_instances(self): -# self.expect_http() -# self.mox.ReplayAll() -# -# self.assertEqual(self.ec2.get_all_instances(), []) + +class TestHTTPChannel(http.HTTPChannel): + # Otherwise we end up with an unclean reactor + def checkPersistence(self, _, __): + return False + + +class TestSite(server.Site): + protocol = TestHTTPChannel + + +class S3APITestCase(test.TrialTestCase): + def setUp(self): + super(S3APITestCase, self).setUp() + FLAGS.fake_users = True + FLAGS.buckets_path = os.path.join(oss_tempdir, 'buckets') + + shutil.rmtree(FLAGS.buckets_path) + os.mkdir(FLAGS.buckets_path) + + root = S3() + self.site = TestSite(root) + self.listening_port = reactor.listenTCP(0, self.site, interface='127.0.0.1') + self.tcp_port = self.listening_port.getHost().port + + + boto.config.set('Boto', 'num_retries', '0') + self.conn = S3Connection(aws_access_key_id='admin', + aws_secret_access_key='admin', + host='127.0.0.1', + port=self.tcp_port, + is_secure=False, + calling_format=OrdinaryCallingFormat()) + + # Don't attempt to reuse connections + def get_http_connection(host, is_secure): + return self.conn.new_http_connection(host, is_secure) + self.conn.get_http_connection = get_http_connection + + def _ensure_empty_list(self, l): + self.assertEquals(len(l), 0, "List was not empty") + return True + + def _ensure_only_bucket(self, l, name): + self.assertEquals(len(l), 1, "List didn't have exactly one element in it") + self.assertEquals(l[0].name, name, "Wrong name") + + def test_000_list_buckets(self): + d = threads.deferToThread(self.conn.get_all_buckets) + d.addCallback(self._ensure_empty_list) + return d + + def test_001_create_and_delete_bucket(self): + bucket_name = 'testbucket' + + d = threads.deferToThread(self.conn.create_bucket, bucket_name) + d.addCallback(lambda _:threads.deferToThread(self.conn.get_all_buckets)) + + def ensure_only_bucket(l, name): + self.assertEquals(len(l), 1, "List didn't have exactly one element in it") + self.assertEquals(l[0].name, name, "Wrong name") + d.addCallback(ensure_only_bucket, bucket_name) + + d.addCallback(lambda 
_:threads.deferToThread(self.conn.delete_bucket, bucket_name)) + d.addCallback(lambda _:threads.deferToThread(self.conn.get_all_buckets)) + d.addCallback(self._ensure_empty_list) + return d + + def test_002_create_bucket_and_key_and_delete_key_again(self): + bucket_name = 'testbucket' + key_name = 'somekey' + key_contents = 'somekey' + + d = threads.deferToThread(self.conn.create_bucket, bucket_name) + d.addCallback(lambda b:threads.deferToThread(b.new_key, key_name)) + d.addCallback(lambda k:threads.deferToThread(k.set_contents_from_string, key_contents)) + def ensure_key_contents(bucket_name, key_name, contents): + bucket = self.conn.get_bucket(bucket_name) + key = bucket.get_key(key_name) + self.assertEquals(key.get_contents_as_string(), contents, "Bad contents") + d.addCallback(lambda _:threads.deferToThread(ensure_key_contents, bucket_name, key_name, key_contents)) + def delete_key(bucket_name, key_name): + bucket = self.conn.get_bucket(bucket_name) + key = bucket.get_key(key_name) + key.delete() + d.addCallback(lambda _:threads.deferToThread(delete_key, bucket_name, key_name)) + d.addCallback(lambda _:threads.deferToThread(self.conn.get_bucket, bucket_name)) + d.addCallback(lambda b:threads.deferToThread(b.get_all_keys)) + d.addCallback(self._ensure_empty_list) + return d + + def tearDown(self): + super(S3APITestCase, self).tearDown() + return defer.DeferredList([defer.maybeDeferred(self.listening_port.stopListening)]) -- cgit From d6e74751fa156f3879ff2136caccf2a40d4b9e8c Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Mon, 26 Jul 2010 15:01:42 -0400 Subject: Basic standup of SessionToken model for shortlived auth tokens. --- nova/compute/model.py | 36 ++++++++++++++++++++++++++++++ nova/exception.py | 3 +++ nova/tests/model_unittest.py | 52 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 91 insertions(+) diff --git a/nova/compute/model.py b/nova/compute/model.py index cda188183..331b68349 100644 --- a/nova/compute/model.py +++ b/nova/compute/model.py @@ -43,6 +43,7 @@ True import logging import time import redis +import uuid from nova import datastore from nova import exception @@ -228,6 +229,41 @@ class Daemon(datastore.BasicModel): for x in cls.associated_to("host", hostname): yield x +class SessionToken(datastore.BasicModel): + """This is a short-lived auth token that is passed through web requests""" + + def __init__(self, session_token): + self.token = session_token + super(SessionToken, self).__init__() + + @property + def identifier(self): + return self.token + + def default_state(self): + return {'user': None, 'session_type': None, 'token': self.token} + + @classmethod + def generate(cls, userid, session_type=None): + token = str(uuid.uuid4()) + while cls.lookup(token): + token = str(uuid.uuid4()) + instance = cls(token) + instance['user'] = userid + instance['session_type'] = session_type + instance.save() + return instance + + def save(self): + """Call into superclass to save object, then save associations""" + if not self['user']: + raise exception.Invalid("SessionToken requires a User association") + success = super(SessionToken, self).save() + if success: + self.associate_with("user", self['user']) + return True + + if __name__ == "__main__": import doctest doctest.testmod() diff --git a/nova/exception.py b/nova/exception.py index 2108123de..52497a19e 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -47,6 +47,9 @@ class NotAuthorized(Error): class NotEmpty(Error): pass +class Invalid(Error): + pass + def wrap_exception(f): def _wrap(*args, **kw): 
try: diff --git a/nova/tests/model_unittest.py b/nova/tests/model_unittest.py index 1bd7e527f..7823991b9 100644 --- a/nova/tests/model_unittest.py +++ b/nova/tests/model_unittest.py @@ -66,6 +66,12 @@ class ModelTestCase(test.TrialTestCase): daemon.save() return daemon + def create_session_token(self): + session_token = model.SessionToken('tk12341234') + session_token['user'] = 'testuser' + session_token.save() + return session_token + @defer.inlineCallbacks def test_create_instance(self): """store with create_instace, then test that a load finds it""" @@ -204,3 +210,49 @@ class ModelTestCase(test.TrialTestCase): if x.identifier == 'testhost:nova-testdaemon': found = True self.assertTrue(found) + + @defer.inlineCallbacks + def test_create_session_token(self): + """create""" + d = yield self.create_session_token() + d = model.SessionToken(d.token) + self.assertFalse(d.is_new_record()) + + @defer.inlineCallbacks + def test_delete_session_token(self): + """create, then destroy, then make sure loads a new record""" + instance = yield self.create_session_token() + yield instance.destroy() + newinst = yield model.SessionToken(instance.token) + self.assertTrue(newinst.is_new_record()) + + @defer.inlineCallbacks + def test_session_token_added_to_set(self): + """create, then check that it is included in list""" + instance = yield self.create_session_token() + found = False + for x in model.SessionToken.all(): + if x.identifier == instance.token: + found = True + self.assert_(found) + + @defer.inlineCallbacks + def test_session_token_associates_user(self): + """create, then check that it is listed for the user""" + instance = yield self.create_session_token() + found = False + for x in model.SessionToken.associated_to('user', 'testuser'): + if x.identifier == instance.identifier: + found = True + self.assertTrue(found) + + @defer.inlineCallbacks + def test_session_token_generation(self): + instance = yield model.SessionToken.generate('username', 'TokenType') + self.assertFalse(instance.is_new_record()) + + @defer.inlineCallbacks + def test_find_generated_session_token(self): + instance = yield model.SessionToken.generate('username', 'TokenType') + found = yield model.SessionToken.lookup(instance.identifier) + self.assert_(found) -- cgit From 5c3d352cee5003395d078dcfe01e8f4743027074 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 26 Jul 2010 12:49:21 -0700 Subject: class based singleton for SharedPool --- nova/process.py | 13 +++++++------ nova/tests/process_unittest.py | 2 +- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/nova/process.py b/nova/process.py index 8ecef1584..2dc56372f 100644 --- a/nova/process.py +++ b/nova/process.py @@ -205,12 +205,13 @@ class ProcessPool(object): self._pool.release() return rv -_instance = None -def SharedPool(): - global _instance - if _instance is None: - _instance = ProcessPool() - return _instance +class SharedPool(object): + _instance = None + def __init__(self): + if SharedPool._instance is None: + self.__class__._instance = ProcessPool() + def __getattr__(self, key): + return getattr(self._instance, key) def simple_execute(cmd, **kwargs): return SharedPool().simple_execute(cmd, **kwargs) diff --git a/nova/tests/process_unittest.py b/nova/tests/process_unittest.py index c96bb5913..75187e1fc 100644 --- a/nova/tests/process_unittest.py +++ b/nova/tests/process_unittest.py @@ -118,7 +118,7 @@ class ProcessTestCase(test.TrialTestCase): def test_shared_pool_is_singleton(self): pool1 = process.SharedPool() pool2 = process.SharedPool() - 
self.assert_(id(pool1) == id(pool2)) + self.assertEqual(id(pool1._instance), id(pool2._instance)) def test_shared_pool_works_as_singleton(self): d1 = process.simple_execute('sleep 1') -- cgit From fd2d4e3f3dba426eedc22b326d2bb0cb6a19eb76 Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Mon, 26 Jul 2010 17:00:50 -0400 Subject: Expiry awareness for SessionToken. --- nova/compute/model.py | 33 ++++++++++++++++++++++++--------- nova/tests/model_unittest.py | 9 +++++++++ nova/utils.py | 9 ++++++--- 3 files changed, 39 insertions(+), 12 deletions(-) diff --git a/nova/compute/model.py b/nova/compute/model.py index 331b68349..3aa6fc841 100644 --- a/nova/compute/model.py +++ b/nova/compute/model.py @@ -40,6 +40,7 @@ True True """ +import datetime import logging import time import redis @@ -241,10 +242,24 @@ class SessionToken(datastore.BasicModel): return self.token def default_state(self): - return {'user': None, 'session_type': None, 'token': self.token} + now = datetime.datetime.utcnow() + diff = datetime.timedelta(hours=1) + expires = now + diff + return {'user': None, 'session_type': None, 'token': self.token, + 'expiry': expires.strftime(utils.TIME_FORMAT)} + + def save(self): + """Call into superclass to save object, then save associations""" + if not self['user']: + raise exception.Invalid("SessionToken requires a User association") + success = super(SessionToken, self).save() + if success: + self.associate_with("user", self['user']) + return True @classmethod def generate(cls, userid, session_type=None): + """make a new token for the given user""" token = str(uuid.uuid4()) while cls.lookup(token): token = str(uuid.uuid4()) @@ -254,14 +269,14 @@ class SessionToken(datastore.BasicModel): instance.save() return instance - def save(self): - """Call into superclass to save object, then save associations""" - if not self['user']: - raise exception.Invalid("SessionToken requires a User association") - success = super(SessionToken, self).save() - if success: - self.associate_with("user", self['user']) - return True + def update_expiry(self, **kwargs): + """updates the expirty attribute, but doesn't save""" + if not kwargs: + kwargs['hours'] = 1 + time = datetime.datetime.utcnow() + diff = datetime.timedelta(**kwargs) + expires = time + diff + self['expiry'] = expires.strftime(utils.TIME_FORMAT) if __name__ == "__main__": diff --git a/nova/tests/model_unittest.py b/nova/tests/model_unittest.py index 7823991b9..0755d8578 100644 --- a/nova/tests/model_unittest.py +++ b/nova/tests/model_unittest.py @@ -16,6 +16,7 @@ # License for the specific language governing permissions and limitations # under the License. +from datetime import datetime import logging import time from twisted.internet import defer @@ -256,3 +257,11 @@ class ModelTestCase(test.TrialTestCase): instance = yield model.SessionToken.generate('username', 'TokenType') found = yield model.SessionToken.lookup(instance.identifier) self.assert_(found) + + def test_update_expiry(self): + instance = model.SessionToken('tk12341234') + oldtime = datetime.utcnow() + instance['expiry'] = oldtime.strftime(utils.TIME_FORMAT) + instance.update_expiry() + expiry = utils.parse_isotime(instance['expiry']) + self.assert_(expiry > datetime.utcnow()) diff --git a/nova/utils.py b/nova/utils.py index 9ecceafe0..a1eb0a092 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -20,7 +20,7 @@ System-level utilities and helper functions. 
""" -from datetime import datetime +from datetime import datetime, timedelta import inspect import logging import os @@ -32,7 +32,7 @@ import sys from nova import flags FLAGS = flags.FLAGS - +TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ" def fetchfile(url, target): logging.debug("Fetching %s" % url) @@ -118,4 +118,7 @@ def get_my_ip(): def isotime(at=None): if not at: at = datetime.utcnow() - return at.strftime("%Y-%m-%dT%H:%M:%SZ") + return at.strftime(TIME_FORMAT) + +def parse_isotime(timestr): + return datetime.strptime(timestr, TIME_FORMAT) -- cgit From 6d3331e831ec147916d0d605958f61ca69eee9a8 Mon Sep 17 00:00:00 2001 From: andy Date: Mon, 26 Jul 2010 23:16:49 +0200 Subject: Move virtualenv installation out of the makefile. Also adds some tools for dealing with virtualenvs to the tools directory. --- .bzrignore | 1 + .gitignore | 1 + Makefile | 28 +++++++-------- tools/activate_venv.sh | 3 ++ tools/install_venv.py | 95 ++++++++++++++++++++++++++++++++++++++++++++++++++ tools/pip-requires | 3 +- tools/with_venv.sh | 4 +++ 7 files changed, 120 insertions(+), 15 deletions(-) create mode 100644 tools/activate_venv.sh create mode 100644 tools/install_venv.py create mode 100755 tools/with_venv.sh diff --git a/.bzrignore b/.bzrignore index 93fc868a3..c3a502a1a 100644 --- a/.bzrignore +++ b/.bzrignore @@ -1 +1,2 @@ run_tests.err.log +.nova-venv diff --git a/.gitignore b/.gitignore index 9db87ac29..2afc7a32c 100644 --- a/.gitignore +++ b/.gitignore @@ -9,3 +9,4 @@ keys build/* build-stamp nova.egg-info +.nova-venv diff --git a/Makefile b/Makefile index da69f2b72..5fb512610 100644 --- a/Makefile +++ b/Makefile @@ -1,27 +1,27 @@ -venv=.venv -with_venv=source $(venv)/bin/activate -installed=$(venv)/lib/python2.6/site-packages -twisted=$(installed)/twisted/__init__.py +venv=.nova-venv +with_venv=tools/with_venv.sh +build: + # Nothing to do -test: python-dependencies $(twisted) - $(with_venv) && python run_tests.py +test: $(venv) + $(with_venv) python run_tests.py + +test-system: + python run_tests.py clean: rm -rf _trial_temp rm -rf keys rm -rf instances rm -rf networks + rm run_tests.err.log clean-all: clean rm -rf $(venv) -python-dependencies: $(venv) - pip install -q -E $(venv) -r tools/pip-requires - $(venv): - pip install -q virtualenv - virtualenv -q --no-site-packages $(venv) - -$(twisted): - pip install -q -E $(venv) http://nova.openstack.org/Twisted-10.0.0Nova.tar.gz + @echo "You need to install the Nova virtualenv before you can run this." 
+ @echo "" + @echo "Please run tools/install_venv.py" + @exit 1 diff --git a/tools/activate_venv.sh b/tools/activate_venv.sh new file mode 100644 index 000000000..d12cb3491 --- /dev/null +++ b/tools/activate_venv.sh @@ -0,0 +1,3 @@ +_TOOLS=`dirname $0` +_VENV=$_TOOLS/../.nova-venv +source $_VENV/bin/activate diff --git a/tools/install_venv.py b/tools/install_venv.py new file mode 100644 index 000000000..720dcc0d9 --- /dev/null +++ b/tools/install_venv.py @@ -0,0 +1,95 @@ +""" +Installation script for Nova's development virtualenv +""" + +import os +import subprocess +import sys +import textwrap + + +ROOT = os.path.dirname(os.path.dirname(__file__)) +VENV = os.path.join(ROOT, '.nova-venv') +PIP_REQUIRES = os.path.join(ROOT, 'tools', 'pip-requires') +TWISTED_NOVA='http://nova.openstack.org/Twisted-10.0.0Nova.tar.gz' + + +def die(message, *args): + print >>sys.stderr, message % args + sys.exit(1) + + +def run_command(cmd, redirect_output=True, error_ok=False): + # Useful for debugging: + #print >>sys.stderr, ' '.join(cmd) + if redirect_output: + stdout = subprocess.PIPE + else: + stdout = None + + proc = subprocess.Popen(cmd, stdout=stdout) + output = proc.communicate()[0] + if not error_ok and proc.returncode != 0: + die('Command "%s" failed.\n%s', ' '.join(cmd), output) + return output + + +def check_dependencies(): + """Make sure pip and virtualenv are on the path.""" + print 'Checking for pip...', + if not run_command(['which', 'pip']).strip(): + die('ERROR: pip not found.\n\nNova development requires pip,' + ' please install it using your favorite package management tool') + print 'done.' + + print 'Checking for virtualenv...', + if not run_command(['which', 'virtualenv']).strip(): + die('ERROR: virtualenv not found.\n\nNova development requires virtualenv,' + ' please install it using your favorite package management tool') + print 'done.' + + +def create_virtualenv(venv=VENV): + print 'Creating venv...', + run_command(['virtualenv', '-q', '--no-site-packages', VENV]) + print 'done.' + + +def install_dependencies(venv=VENV): + print 'Installing dependencies with pip (this can take a while)...' + run_command(['pip', 'install', '-E', venv, '-r', PIP_REQUIRES], + redirect_output=False) + run_command(['pip', 'install', '-E', venv, TWISTED_NOVA], + redirect_output=False) + + +def print_help(): + help = """ + Nova development environment setup is complete. + + Nova development uses virtualenv to track and manage Python dependencies + while in development and testing. + + To activate the Nova virtualenv for the extent of your current shell session + you can run: + + $ source tools/activate_venv.sh + + Or, if you prefer, you can run commands in the virtualenv on a case by case + basis by running: + + $ tools/with_venv.sh + + Also, run_tests.sh will automatically use the virtualenv. 
+ """ + print help + + +def main(argv): + check_dependencies() + create_virtualenv() + install_dependencies() + print_help() + +if __name__ == '__main__': + main(sys.argv) diff --git a/tools/pip-requires b/tools/pip-requires index edb5fc01c..4eb47ca2b 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -5,10 +5,11 @@ anyjson==0.2.4 boto==2.0b1 carrot==0.10.5 lockfile==0.8 -mox==0.5.0 python-daemon==1.5.5 python-gflags==1.3 redis==2.0.0 tornado==1.0 wsgiref==0.1.2 zope.interface==3.6.1 +mox==0.5.0 +-f http://pymox.googlecode.com/files/mox-0.5.0.tar.gz diff --git a/tools/with_venv.sh b/tools/with_venv.sh new file mode 100755 index 000000000..51468a334 --- /dev/null +++ b/tools/with_venv.sh @@ -0,0 +1,4 @@ +#!/bin/sh +TOOLS=`dirname $0` +VENV=$TOOLS/../.nova-venv +source $VENV/bin/activate && $@ -- cgit From bed760dcc4dbdf1927c41cfff325cc56102ef962 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Mon, 26 Jul 2010 23:26:54 +0200 Subject: Make image downloads work again in S3 handler. Listing worked, but fetching the images failed because I wasn't clever enough to use twisted.web.static.File correctly. --- nova/objectstore/handler.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py index 6e76abb5a..4074f005d 100644 --- a/nova/objectstore/handler.py +++ b/nova/objectstore/handler.py @@ -121,7 +121,7 @@ class S3(Resource): if name == '': return self elif name == '_images': - return ImageResource() + return ImagesResource() else: return BucketResource(name) @@ -226,13 +226,21 @@ class ObjectResource(Resource): return '' class ImageResource(Resource): + isLeaf = True + + def __init__(self, name): + Resource.__init__(self) + self.img = image.Image(name) + + def render_GET(self, request): + return static.File(self.img.image_path, defaultType='application/octet-stream').render_GET(request) + +class ImagesResource(Resource): def getChild(self, name, request): if name == '': return self else: - request.setHeader("Content-Type", "application/octet-stream") - img = image.Image(name) - return static.File(img.image_path) + return ImageResource(name) def render_GET(self, request): """ returns a json listing of all images -- cgit From 69cbb1ac3559c44f6640939cd2e1db64e82073fe Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Mon, 26 Jul 2010 23:27:42 +0200 Subject: Make _fetch_s3_image pass proper AWS Authorization headers so that image downloads work again. --- nova/compute/node.py | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/nova/compute/node.py b/nova/compute/node.py index 7b86ca749..a44583c06 100644 --- a/nova/compute/node.py +++ b/nova/compute/node.py @@ -31,6 +31,7 @@ import logging import os import shutil import sys +import time from twisted.internet import defer from twisted.internet import task from twisted.application import service @@ -453,19 +454,25 @@ class Instance(object): def _fetch_s3_image(self, image, path): url = _image_url('%s/image' % image) + + # This should probably move somewhere else, like e.g. a download_as + # method on User objects and at the same time get rewritten to use + # twisted web client. 
headers = {} headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) user_id = self.datamodel['user_id'] user = UserManager.instance().get_user(user_id) - auth = signer.Signer(user.secret.encode()).s3_authorization(headers, 'GET', url) - headers['Authorization'] = auth + uri = '/' + url.partition('/')[2] + auth = signer.Signer(user.secret.encode()).s3_authorization(headers, 'GET', uri) + headers['Authorization'] = 'AWS %s:%s' % (user.access, auth) - headers_opt_string = ' '.join(['-H %s:%s' % (k,v) for (k,v) in headers.iteritems()]) - d = process.simple_execute('curl --silent %s ' - '%s -o "%s"' % (url, headers_opt_string, - path)) - return d + cmd = ['/usr/bin/curl', '--silent', url] + for (k,v) in headers.iteritems(): + cmd += ['-H', '%s: %s' % (k,v)] + + cmd += ['-o', path] + return process.SharedPool().execute(executable=cmd[0], args=cmd[1:]) def _fetch_local_image(self, image, path): source = _image_path('%s/image' % image) -- cgit From 0f5b95575afeb14fea80bead6720bf510f5358f5 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 26 Jul 2010 14:41:51 -0700 Subject: moved misnamed nova-dchp file --- debian/nova-dhcp.conf | 2 -- debian/nova-dhcpbridge.conf | 2 ++ 2 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 debian/nova-dhcp.conf create mode 100644 debian/nova-dhcpbridge.conf diff --git a/debian/nova-dhcp.conf b/debian/nova-dhcp.conf deleted file mode 100644 index 0aafe7549..000000000 --- a/debian/nova-dhcp.conf +++ /dev/null @@ -1,2 +0,0 @@ ---networks_path=/var/lib/nova/networks ---fake_users=1 diff --git a/debian/nova-dhcpbridge.conf b/debian/nova-dhcpbridge.conf new file mode 100644 index 000000000..0aafe7549 --- /dev/null +++ b/debian/nova-dhcpbridge.conf @@ -0,0 +1,2 @@ +--networks_path=/var/lib/nova/networks +--fake_users=1 -- cgit From ffe52b8660123335e425c52eb3bebba2e3d2e42f Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 26 Jul 2010 14:48:08 -0700 Subject: default flag file full path --- nova/compute/linux_net.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/compute/linux_net.py b/nova/compute/linux_net.py index 48e07da66..861ce779b 100644 --- a/nova/compute/linux_net.py +++ b/nova/compute/linux_net.py @@ -29,7 +29,7 @@ from nova import flags FLAGS=flags.FLAGS flags.DEFINE_string('dhcpbridge_flagfile', - '/etc/nova-dhcpbridge.conf', + '/etc/nova/nova-dhcpbridge.conf', 'location of flagfile for dhcpbridge') def execute(cmd, addl_env=None): -- cgit From 58b41fde4c8639577b738d0f57f10acda4c63c0e Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Mon, 26 Jul 2010 18:00:39 -0400 Subject: Lookup should only not return expired tokens. 
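
With this change SessionToken.lookup() treats a token whose 'expiry'
timestamp has passed as if it did not exist, so callers only ever see
live tokens. A minimal caller sketch of the intended behaviour (the
request_token_id variable and the NotAuthorized handling below are
illustrative, not part of this patch):

    token = model.SessionToken.lookup(request_token_id)
    if not token:
        # expired or unknown token -- treat the request as unauthenticated
        raise exception.NotAuthorized()
    user_id = token['user']
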
--- nova/compute/model.py | 9 +++++++++ nova/tests/model_unittest.py | 16 +++++++++++++++- 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/nova/compute/model.py b/nova/compute/model.py index 3aa6fc841..ab0bfeb83 100644 --- a/nova/compute/model.py +++ b/nova/compute/model.py @@ -257,6 +257,15 @@ class SessionToken(datastore.BasicModel): self.associate_with("user", self['user']) return True + @classmethod + def lookup(cls, key): + token = super(SessionToken, cls).lookup(key) + if token: + expires_at = utils.parse_isotime(token['expiry']) + if datetime.datetime.utcnow() >= expires_at: + return None + return token + @classmethod def generate(cls, userid, session_type=None): """make a new token for the given user""" diff --git a/nova/tests/model_unittest.py b/nova/tests/model_unittest.py index 0755d8578..10d3016f8 100644 --- a/nova/tests/model_unittest.py +++ b/nova/tests/model_unittest.py @@ -258,10 +258,24 @@ class ModelTestCase(test.TrialTestCase): found = yield model.SessionToken.lookup(instance.identifier) self.assert_(found) - def test_update_expiry(self): + def test_update_session_token_expiry(self): instance = model.SessionToken('tk12341234') oldtime = datetime.utcnow() instance['expiry'] = oldtime.strftime(utils.TIME_FORMAT) instance.update_expiry() expiry = utils.parse_isotime(instance['expiry']) self.assert_(expiry > datetime.utcnow()) + + @defer.inlineCallbacks + def test_session_token_lookup_when_expired(self): + instance = yield model.SessionToken.generate("testuser") + instance['expiry'] = datetime.utcnow().strftime(utils.TIME_FORMAT) + instance.save() + inst = model.SessionToken.lookup(instance.identifier) + self.assertFalse(inst) + + @defer.inlineCallbacks + def test_session_token_lookup_when_not_expired(self): + instance = yield model.SessionToken.generate("testuser") + inst = model.SessionToken.lookup(instance.identifier) + self.assert_(inst) -- cgit From 7588ae06e8d6a7d526b12e0f15f3e5be522f16d0 Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Mon, 26 Jul 2010 18:02:00 -0400 Subject: In fact, it should delete them. 
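
Beyond returning None, the failed lookup now also destroys the expired
record, so stale tokens do not pile up in the datastore. A rough sketch
of the expected behaviour, assuming a token whose expiry is forced to
the current time (the 'someuser' id is illustrative):

    token = model.SessionToken.generate('someuser')
    token['expiry'] = utils.isotime()   # already expired
    token.save()
    assert model.SessionToken.lookup(token.identifier) is None
    # the backing record was destroyed, so a reload starts fresh
    assert model.SessionToken(token.identifier).is_new_record()
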
--- nova/compute/model.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/compute/model.py b/nova/compute/model.py index ab0bfeb83..7335d2c79 100644 --- a/nova/compute/model.py +++ b/nova/compute/model.py @@ -263,6 +263,7 @@ class SessionToken(datastore.BasicModel): if token: expires_at = utils.parse_isotime(token['expiry']) if datetime.datetime.utcnow() >= expires_at: + token.destroy() return None return token -- cgit From be176f06fd03ddb6c25b40e4f2ee71981f47c724 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 26 Jul 2010 16:03:33 -0700 Subject: fix auth_driver flag to default to usable driver --- nova/auth/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 93330790b..bc373fd26 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -76,7 +76,7 @@ flags.DEFINE_string('credential_cert_subject', flags.DEFINE_string('vpn_ip', '127.0.0.1', 'Public IP for the cloudpipe VPN servers') -flags.DEFINE_string('auth_driver', 'nova.auth.ldapdriver.AuthDriver', +flags.DEFINE_string('auth_driver', 'nova.auth.ldapdriver.FakeLdapDriver', 'Driver that auth manager uses') class AuthBase(object): -- cgit From c42b214c61342089401b5191e70c0c6b09fb5a4a Mon Sep 17 00:00:00 2001 From: Devin Carlen Date: Tue, 27 Jul 2010 00:11:18 +0000 Subject: Began wiring up rbac admin api --- nova/adminclient.py | 102 +++++++++++++++++++++++++++++++++++++++++++++++++ nova/endpoint/admin.py | 71 ++++++++++++++++++++++++++++++++++ 2 files changed, 173 insertions(+) diff --git a/nova/adminclient.py b/nova/adminclient.py index db392feb1..9b9505ac1 100644 --- a/nova/adminclient.py +++ b/nova/adminclient.py @@ -56,6 +56,29 @@ class UserInfo(object): elif name == 'secretkey': self.secretkey = str(value) +class ProjectInfo(object): + """ + Information about a Nova project, as parsed through SAX + fields include: + projectname + description + member_ids + """ + + def __init__(self, connection=None, projectname=None, endpoint=None): + self.connection = connection + self.projectname = projectname + self.endpoint = endpoint + + def __repr__(self): + return 'ProjectInfo:%s' % self.projectname + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + setattr(self, name, str(value)) + class HostInfo(object): """ Information about a Nova Host, as parsed through SAX: @@ -137,6 +160,85 @@ class NovaAdminClient(object): """ deletes a user """ return self.apiconn.get_object('DeregisterUser', {'Name': username}, UserInfo) + def add_user_role(self, user, role, project=None): + """ + Add a role to a user either globally or for a specific project. + """ + return self.modify_user_role(user, role, project=project, + operation='add') + + def remove_user_role(self, user, role, project=None): + """ + Remove a role from a user either globally or for a specific project. + """ + return self.modify_user_role(user, role, project=project, + operation='remove') + + def modify_user_role(self, user, role, project=None, operation='add', + **kwargs): + """ + Add or remove a role for a user and project. + """ + params = { + 'User': user, + 'Role': role, + 'Project': project, + 'Operation': operation + + } + return self.apiconn.get_status('ModifyUserRole', params) + + def get_projects(self): + """ + Returns a list of all projects. + """ + return self.apiconn.get_list('DescribeProjects', {}, + [('item', ProjectInfo)]) + + def get_project(self, name): + """ + Returns a single project with the specified name. 
+ """ + project = self.apiconn.get_object('DescribeProject', + {'Name': name}, + ProjectInfo) + + if project.projectname != None: + return project + + def create_project(self, projectname, manager_user, description=None, + member_users=None): + """ + Creates a new project. + """ + params = { + 'Name': projectname, + 'ManagerUser': manager_user, + 'Description': description, + 'MemberUsers': member_users + } + return self.apiconn.get_object('RegisterProject', params, ProjectInfo) + + def delete_project(self, projectname): + """ + Permanently deletes the specified project. + """ + return self.apiconn.get_object('DeregisterProject', + {'Name': projectname}, + ProjectInfo) + + def modify_project_user(self, user, project, operation='add', + **kwargs): + """ + Adds or removes a user from a project. + """ + params = { + 'User': user, + 'Project': project, + 'Operation': operation + } + return self.apiconn.get_status('ModifyProjectUser', params) + def get_zip(self, username): """ returns the content of a zip file containing novarc and access credentials. """ return self.apiconn.get_object('GenerateX509ForUser', {'Name': username}, UserInfo).file diff --git a/nova/endpoint/admin.py b/nova/endpoint/admin.py index 55a8e4238..e3762e2af 100644 --- a/nova/endpoint/admin.py +++ b/nova/endpoint/admin.py @@ -37,6 +37,17 @@ def user_dict(user, base64_file=None): else: return {} +def project_dict(project): + """Convert the project object to a result dict""" + if project: + return { + 'projectname': project.id, + 'description': project.description, + 'member_ids': project.member_ids + } + else: + return {} + def host_dict(host): """Convert a host model object to a result dict""" if host: @@ -92,6 +103,22 @@ class AdminController(object): return True + @admin_only + def modify_user_role(self, context, user, role, project=None, + operation='add', **kwargs): + """ + Add or remove a role for a user and project. + """ + + if operation == 'add': + manager.AuthManager().add_role(user, role, project) + elif operation == 'remove': + manager.AuthManager().remove_role(user, role, project) + else: + raise exception.ApiError('operation must be add or remove') + + return True + @admin_only def generate_x509_for_user(self, _context, name, project=None, **kwargs): """Generates and returns an x509 certificate for a single user. @@ -104,6 +131,50 @@ class AdminController(object): user = manager.AuthManager().get_user(name) return user_dict(user, base64.b64encode(project.get_credentials(user))) + @admin_only + def describe_project(self, context, name, **kwargs): + """Returns project data, including member ids.""" + return project_dict(manager.AuthManager().get_project(name)) + + @admin_only + def describe_projects(self, context, **kwargs): + """Returns all projects - should be changed to deal with a list.""" + return {'projectSet': + [project_dict(u) for u in + manager.AuthManager().get_projects()]} + + @admin_only + def register_project(self, context, name, manager_user, description=None, + member_users=None, **kwargs): + """Creates a new project""" + return project_dict( + manager.AuthManager().create_project( + name, + manager_user, + description=None, + member_users=None + ) + ) + + @admin_only + def deregister_project(self, context, name): + """Permanently deletes a project.""" + manager.AuthManager().delete_project(name) + return True + + @admin_only + def modify_project_user(self, context, user, project, operation, **kwargs): + """ + Add or remove a user from a project. 
+ """ + + if operation =='add': + manager.AuthManager().add_to_project(user, project) + elif operation == 'remove': + manager.AuthManager().remove_from_project(user, project) + else: + raise exception.ApiError('operation must be add or remove') + @admin_only def describe_hosts(self, _context, **_kwargs): """Returns status info for all nodes. Includes: -- cgit From 754db8ef1ceb84fa9a1f44bfc6c5c6bbd99cd7e1 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 26 Jul 2010 17:14:28 -0700 Subject: renamed xxxservice to service --- bin/nova-compute | 4 +- bin/nova-network | 4 +- bin/nova-volume | 4 +- nova/compute/computeservice.py | 581 ----------------------------------------- nova/compute/service.py | 581 +++++++++++++++++++++++++++++++++++++++++ nova/endpoint/cloud.py | 12 +- nova/network/networkservice.py | 35 --- nova/network/service.py | 35 +++ nova/tests/cloud_unittest.py | 4 +- nova/tests/compute_unittest.py | 4 +- nova/tests/volume_unittest.py | 20 +- nova/volume/service.py | 304 +++++++++++++++++++++ nova/volume/volumeservice.py | 304 --------------------- 13 files changed, 946 insertions(+), 946 deletions(-) delete mode 100644 nova/compute/computeservice.py create mode 100644 nova/compute/service.py delete mode 100644 nova/network/networkservice.py create mode 100644 nova/network/service.py create mode 100644 nova/volume/service.py delete mode 100644 nova/volume/volumeservice.py diff --git a/bin/nova-compute b/bin/nova-compute index 7ef5d074a..e0c12354f 100755 --- a/bin/nova-compute +++ b/bin/nova-compute @@ -22,11 +22,11 @@ """ from nova import twistd -from nova.compute import computeservice +from nova.compute import service if __name__ == '__main__': twistd.serve(__file__) if __name__ == '__builtin__': - application = computeservice.ComputeService.create() + application = service.ComputeService.create() diff --git a/bin/nova-network b/bin/nova-network index 0d3aa0002..52d6cb70a 100755 --- a/bin/nova-network +++ b/bin/nova-network @@ -22,11 +22,11 @@ """ from nova import twistd -from nova.network import networkservice +from nova.network import service if __name__ == '__main__': twistd.serve(__file__) if __name__ == '__builtin__': - application = networkservice.NetworkService.create() + application = service.NetworkService.create() diff --git a/bin/nova-volume b/bin/nova-volume index c1c0163cf..f7a8fad37 100755 --- a/bin/nova-volume +++ b/bin/nova-volume @@ -22,11 +22,11 @@ """ from nova import twistd -from nova.volume import volumeservice +from nova.volume import service if __name__ == '__main__': twistd.serve(__file__) if __name__ == '__builtin__': - application = volumeservice.VolumeService.create() + application = service.VolumeService.create() diff --git a/nova/compute/computeservice.py b/nova/compute/computeservice.py deleted file mode 100644 index 5568e3888..000000000 --- a/nova/compute/computeservice.py +++ /dev/null @@ -1,581 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Compute Service: - - Runs on each compute host, managing the - hypervisor using libvirt. - -""" - -import base64 -import json -import logging -import os -import shutil -import sys -from twisted.internet import defer -from twisted.internet import task - - -try: - import libvirt -except Exception, err: - logging.warning('no libvirt found') - -from nova import exception -from nova import fakevirt -from nova import flags -from nova import process -from nova import service -from nova import utils -from nova.compute import disk -from nova.compute import model -from nova.compute import network -from nova.objectstore import image # for image_path flag -from nova.volume import volumeservice - - -FLAGS = flags.FLAGS -flags.DEFINE_string('libvirt_xml_template', - utils.abspath('compute/libvirt.xml.template'), - 'Libvirt XML Template') -flags.DEFINE_bool('use_s3', True, - 'whether to get images from s3 or use local copy') -flags.DEFINE_string('instances_path', utils.abspath('../instances'), - 'where instances are stored on disk') - -INSTANCE_TYPES = {} -INSTANCE_TYPES['m1.tiny'] = {'memory_mb': 512, 'vcpus': 1, 'local_gb': 0} -INSTANCE_TYPES['m1.small'] = {'memory_mb': 1024, 'vcpus': 1, 'local_gb': 10} -INSTANCE_TYPES['m1.medium'] = {'memory_mb': 2048, 'vcpus': 2, 'local_gb': 10} -INSTANCE_TYPES['m1.large'] = {'memory_mb': 4096, 'vcpus': 4, 'local_gb': 10} -INSTANCE_TYPES['m1.xlarge'] = {'memory_mb': 8192, 'vcpus': 4, 'local_gb': 10} -INSTANCE_TYPES['c1.medium'] = {'memory_mb': 2048, 'vcpus': 4, 'local_gb': 10} - - -def _image_path(path=''): - return os.path.join(FLAGS.images_path, path) - - -def _image_url(path): - return "%s:%s/_images/%s" % (FLAGS.s3_host, FLAGS.s3_port, path) - - -class ComputeService(service.Service): - """ - Manages the running instances. 
- """ - def __init__(self): - """ load configuration options for this node and connect to libvirt """ - super(ComputeService, self).__init__() - self._instances = {} - self._conn = self._get_connection() - self.instdir = model.InstanceDirectory() - # TODO(joshua): This needs to ensure system state, specifically: modprobe aoe - - def _get_connection(self): - """ returns a libvirt connection object """ - # TODO(termie): maybe lazy load after initial check for permissions - # TODO(termie): check whether we can be disconnected - if FLAGS.fake_libvirt: - conn = fakevirt.FakeVirtConnection.instance() - else: - auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], - 'root', - None] - conn = libvirt.openAuth('qemu:///system', auth, 0) - if conn == None: - logging.error('Failed to open connection to the hypervisor') - sys.exit(1) - return conn - - def noop(self): - """ simple test of an AMQP message call """ - return defer.succeed('PONG') - - def get_instance(self, instance_id): - # inst = self.instdir.get(instance_id) - # return inst - if self.instdir.exists(instance_id): - return Instance.fromName(self._conn, instance_id) - return None - - @exception.wrap_exception - def adopt_instances(self): - """ if there are instances already running, adopt them """ - return defer.succeed(0) - instance_names = [self._conn.lookupByID(x).name() - for x in self._conn.listDomainsID()] - for name in instance_names: - try: - new_inst = Instance.fromName(self._conn, name) - new_inst.update_state() - except: - pass - return defer.succeed(len(self._instances)) - - @exception.wrap_exception - def describe_instances(self): - retval = {} - for inst in self.instdir.by_node(FLAGS.node_name): - retval[inst['instance_id']] = ( - Instance.fromName(self._conn, inst['instance_id'])) - return retval - - @defer.inlineCallbacks - def report_state(self, nodename, daemon): - # TODO(termie): make this pattern be more elegant. -todd - try: - record = model.Daemon(nodename, daemon) - record.heartbeat() - if getattr(self, "model_disconnected", False): - self.model_disconnected = False - logging.error("Recovered model server connection!") - - except model.ConnectionError, ex: - if not getattr(self, "model_disconnected", False): - self.model_disconnected = True - logging.exception("model server went away") - yield - - # @exception.wrap_exception - def run_instance(self, instance_id, **_kwargs): - """ launch a new instance with specified options """ - logging.debug("Starting instance %s..." 
% (instance_id)) - inst = self.instdir.get(instance_id) - if not FLAGS.simple_network: - # TODO: Get the real security group of launch in here - security_group = "default" - net = network.BridgedNetwork.get_network_for_project(inst['user_id'], - inst['project_id'], - security_group).express() - inst['node_name'] = FLAGS.node_name - inst.save() - # TODO(vish) check to make sure the availability zone matches - new_inst = Instance(self._conn, name=instance_id, data=inst) - logging.info("Instances current state is %s", new_inst.state) - if new_inst.is_running(): - raise exception.Error("Instance is already running") - d = new_inst.spawn() - return d - - @exception.wrap_exception - def terminate_instance(self, instance_id): - """ terminate an instance on this machine """ - logging.debug("Got told to terminate instance %s" % instance_id) - instance = self.get_instance(instance_id) - # inst = self.instdir.get(instance_id) - if not instance: - raise exception.Error( - 'trying to terminate unknown instance: %s' % instance_id) - d = instance.destroy() - # d.addCallback(lambda x: inst.destroy()) - return d - - @exception.wrap_exception - def reboot_instance(self, instance_id): - """ reboot an instance on this server - KVM doesn't support reboot, so we terminate and restart """ - instance = self.get_instance(instance_id) - if not instance: - raise exception.Error( - 'trying to reboot unknown instance: %s' % instance_id) - return instance.reboot() - - @defer.inlineCallbacks - @exception.wrap_exception - def get_console_output(self, instance_id): - """ send the console output for an instance """ - logging.debug("Getting console output for %s" % (instance_id)) - inst = self.instdir.get(instance_id) - instance = self.get_instance(instance_id) - if not instance: - raise exception.Error( - 'trying to get console log for unknown: %s' % instance_id) - rv = yield instance.console_output() - # TODO(termie): this stuff belongs in the API layer, no need to - # munge the data we send to ourselves - output = {"InstanceId" : instance_id, - "Timestamp" : "2", - "output" : base64.b64encode(rv)} - defer.returnValue(output) - - @defer.inlineCallbacks - @exception.wrap_exception - def attach_volume(self, instance_id = None, - volume_id = None, mountpoint = None): - volume = volumeservice.get_volume(volume_id) - yield self._init_aoe() - yield process.simple_execute( - "sudo virsh attach-disk %s /dev/etherd/%s %s" % - (instance_id, - volume['aoe_device'], - mountpoint.rpartition('/dev/')[2])) - volume.finish_attach() - defer.returnValue(True) - - @defer.inlineCallbacks - def _init_aoe(self): - yield process.simple_execute("sudo aoe-discover") - yield process.simple_execute("sudo aoe-stat") - - @defer.inlineCallbacks - @exception.wrap_exception - def detach_volume(self, instance_id, volume_id): - """ detach a volume from an instance """ - # despite the documentation, virsh detach-disk just wants the device - # name without the leading /dev/ - volume = volumeservice.get_volume(volume_id) - target = volume['mountpoint'].rpartition('/dev/')[2] - yield process.simple_execute( - "sudo virsh detach-disk %s %s " % (instance_id, target)) - volume.finish_detach() - defer.returnValue(True) - - -class Group(object): - def __init__(self, group_id): - self.group_id = group_id - - -class ProductCode(object): - def __init__(self, product_code): - self.product_code = product_code - - -class Instance(object): - - NOSTATE = 0x00 - RUNNING = 0x01 - BLOCKED = 0x02 - PAUSED = 0x03 - SHUTDOWN = 0x04 - SHUTOFF = 0x05 - CRASHED = 0x06 - - def 
__init__(self, conn, name, data): - """ spawn an instance with a given name """ - self._conn = conn - # TODO(vish): this can be removed after data has been updated - # data doesn't seem to have a working iterator so in doesn't work - if data.get('owner_id', None) is not None: - data['user_id'] = data['owner_id'] - data['project_id'] = data['owner_id'] - self.datamodel = data - - size = data.get('instance_type', FLAGS.default_instance_type) - if size not in INSTANCE_TYPES: - raise exception.Error('invalid instance type: %s' % size) - - self.datamodel.update(INSTANCE_TYPES[size]) - - self.datamodel['name'] = name - self.datamodel['instance_id'] = name - self.datamodel['basepath'] = data.get( - 'basepath', os.path.abspath( - os.path.join(FLAGS.instances_path, self.name))) - self.datamodel['memory_kb'] = int(self.datamodel['memory_mb']) * 1024 - self.datamodel.setdefault('image_id', FLAGS.default_image) - self.datamodel.setdefault('kernel_id', FLAGS.default_kernel) - self.datamodel.setdefault('ramdisk_id', FLAGS.default_ramdisk) - self.datamodel.setdefault('project_id', self.datamodel['user_id']) - self.datamodel.setdefault('bridge_name', None) - #self.datamodel.setdefault('key_data', None) - #self.datamodel.setdefault('key_name', None) - #self.datamodel.setdefault('addressing_type', None) - - # TODO(joshua) - The ugly non-flat ones - self.datamodel['groups'] = data.get('security_group', 'default') - # TODO(joshua): Support product codes somehow - self.datamodel.setdefault('product_codes', None) - - self.datamodel.save() - logging.debug("Finished init of Instance with id of %s" % name) - - def toXml(self): - # TODO(termie): cache? - logging.debug("Starting the toXML method") - libvirt_xml = open(FLAGS.libvirt_xml_template).read() - xml_info = self.datamodel.copy() - # TODO(joshua): Make this xml express the attached disks as well - - # TODO(termie): lazy lazy hack because xml is annoying - xml_info['nova'] = json.dumps(self.datamodel.copy()) - libvirt_xml = libvirt_xml % xml_info - logging.debug("Finished the toXML method") - - return libvirt_xml - - @classmethod - def fromName(cls, conn, name): - """ use the saved data for reloading the instance """ - instdir = model.InstanceDirectory() - instance = instdir.get(name) - return cls(conn=conn, name=name, data=instance) - - def set_state(self, state_code, state_description=None): - self.datamodel['state'] = state_code - if not state_description: - state_description = STATE_NAMES[state_code] - self.datamodel['state_description'] = state_description - self.datamodel.save() - - @property - def state(self): - # it is a string in datamodel - return int(self.datamodel['state']) - - @property - def name(self): - return self.datamodel['name'] - - def is_pending(self): - return (self.state == Instance.NOSTATE or self.state == 'pending') - - def is_destroyed(self): - return self.state == Instance.SHUTOFF - - def is_running(self): - logging.debug("Instance state is: %s" % self.state) - return (self.state == Instance.RUNNING or self.state == 'running') - - def describe(self): - return self.datamodel - - def info(self): - logging.debug("Getting info for dom %s" % self.name) - virt_dom = self._conn.lookupByName(self.name) - (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info() - return {'state': state, - 'max_mem': max_mem, - 'mem': mem, - 'num_cpu': num_cpu, - 'cpu_time': cpu_time, - 'node_name': FLAGS.node_name} - - def basepath(self, path=''): - return os.path.abspath(os.path.join(self.datamodel['basepath'], path)) - - def update_state(self): - 
self.datamodel.update(self.info()) - self.set_state(self.state) - self.datamodel.save() # Extra, but harmless - - @exception.wrap_exception - def destroy(self): - if self.is_destroyed(): - self.datamodel.destroy() - raise exception.Error('trying to destroy already destroyed' - ' instance: %s' % self.name) - - self.set_state(Instance.NOSTATE, 'shutting_down') - try: - virt_dom = self._conn.lookupByName(self.name) - virt_dom.destroy() - except Exception, _err: - pass - # If the instance is already terminated, we're still happy - d = defer.Deferred() - d.addCallback(lambda x: self._cleanup()) - d.addCallback(lambda x: self.datamodel.destroy()) - # TODO(termie): short-circuit me for tests - # WE'LL save this for when we do shutdown, - # instead of destroy - but destroy returns immediately - timer = task.LoopingCall(f=None) - def _wait_for_shutdown(): - try: - self.update_state() - if self.state == Instance.SHUTDOWN: - timer.stop() - d.callback(None) - except Exception: - self.set_state(Instance.SHUTDOWN) - timer.stop() - d.callback(None) - timer.f = _wait_for_shutdown - timer.start(interval=0.5, now=True) - return d - - def _cleanup(self): - target = os.path.abspath(self.datamodel['basepath']) - logging.info("Deleting instance files at %s", target) - shutil.rmtree(target) - - @defer.inlineCallbacks - @exception.wrap_exception - def reboot(self): - if not self.is_running(): - raise exception.Error( - 'trying to reboot a non-running' - 'instance: %s (state: %s)' % (self.name, self.state)) - - logging.debug('rebooting instance %s' % self.name) - self.set_state(Instance.NOSTATE, 'rebooting') - yield self._conn.lookupByName(self.name).destroy() - self._conn.createXML(self.toXml(), 0) - - d = defer.Deferred() - timer = task.LoopingCall(f=None) - def _wait_for_reboot(): - try: - self.update_state() - if self.is_running(): - logging.debug('rebooted instance %s' % self.name) - timer.stop() - d.callback(None) - except Exception: - self.set_state(Instance.SHUTDOWN) - timer.stop() - d.callback(None) - timer.f = _wait_for_reboot - timer.start(interval=0.5, now=True) - yield d - - def _fetch_s3_image(self, image, path): - url = _image_url('%s/image' % image) - d = process.simple_execute( - 'curl --silent %s -o %s' % (url, path)) - return d - - def _fetch_local_image(self, image, path): - source = _image_path('%s/image' % image) - d = process.simple_execute('cp %s %s' % (source, path)) - return d - - @defer.inlineCallbacks - def _create_image(self, libvirt_xml): - # syntactic nicety - data = self.datamodel - basepath = self.basepath - - # ensure directories exist and are writable - yield process.simple_execute( - 'mkdir -p %s' % basepath()) - yield process.simple_execute( - 'chmod 0777 %s' % basepath()) - - - # TODO(termie): these are blocking calls, it would be great - # if they weren't. 
- logging.info('Creating image for: %s', data['instance_id']) - f = open(basepath('libvirt.xml'), 'w') - f.write(libvirt_xml) - f.close() - - if FLAGS.fake_libvirt: - logging.info('fake_libvirt, nothing to do for create_image') - raise defer.returnValue(None); - - if FLAGS.use_s3: - _fetch_file = self._fetch_s3_image - else: - _fetch_file = self._fetch_local_image - - if not os.path.exists(basepath('disk')): - yield _fetch_file(data['image_id'], basepath('disk-raw')) - if not os.path.exists(basepath('kernel')): - yield _fetch_file(data['kernel_id'], basepath('kernel')) - if not os.path.exists(basepath('ramdisk')): - yield _fetch_file(data['ramdisk_id'], basepath('ramdisk')) - - execute = lambda cmd, input=None: \ - process.simple_execute(cmd=cmd, - input=input, - error_ok=1) - - key = data['key_data'] - net = None - if FLAGS.simple_network: - with open(FLAGS.simple_network_template) as f: - net = f.read() % {'address': data['private_dns_name'], - 'network': FLAGS.simple_network_network, - 'netmask': FLAGS.simple_network_netmask, - 'gateway': FLAGS.simple_network_gateway, - 'broadcast': FLAGS.simple_network_broadcast, - 'dns': FLAGS.simple_network_dns} - if key or net: - logging.info('Injecting data into image %s', data['image_id']) - yield disk.inject_data(basepath('disk-raw'), key, net, execute=execute) - - if os.path.exists(basepath('disk')): - yield process.simple_execute( - 'rm -f %s' % basepath('disk')) - - bytes = (INSTANCE_TYPES[data['instance_type']]['local_gb'] - * 1024 * 1024 * 1024) - yield disk.partition( - basepath('disk-raw'), basepath('disk'), bytes, execute=execute) - - @defer.inlineCallbacks - @exception.wrap_exception - def spawn(self): - self.set_state(Instance.NOSTATE, 'spawning') - logging.debug("Starting spawn in Instance") - - xml = self.toXml() - self.set_state(Instance.NOSTATE, 'launching') - logging.info('self %s', self) - try: - yield self._create_image(xml) - self._conn.createXML(xml, 0) - # TODO(termie): this should actually register - # a callback to check for successful boot - logging.debug("Instance is running") - - local_d = defer.Deferred() - timer = task.LoopingCall(f=None) - def _wait_for_boot(): - try: - self.update_state() - if self.is_running(): - logging.debug('booted instance %s' % self.name) - timer.stop() - local_d.callback(None) - except Exception: - self.set_state(Instance.SHUTDOWN) - logging.error('Failed to boot instance %s' % self.name) - timer.stop() - local_d.callback(None) - timer.f = _wait_for_boot - timer.start(interval=0.5, now=True) - except Exception, ex: - logging.debug(ex) - self.set_state(Instance.SHUTDOWN) - - @exception.wrap_exception - def console_output(self): - if not FLAGS.fake_libvirt: - fname = os.path.abspath( - os.path.join(self.datamodel['basepath'], 'console.log')) - with open(fname, 'r') as f: - console = f.read() - else: - console = 'FAKE CONSOLE OUTPUT' - return defer.succeed(console) - -STATE_NAMES = { - Instance.NOSTATE : 'pending', - Instance.RUNNING : 'running', - Instance.BLOCKED : 'blocked', - Instance.PAUSED : 'paused', - Instance.SHUTDOWN : 'shutdown', - Instance.SHUTOFF : 'shutdown', - Instance.CRASHED : 'crashed', -} diff --git a/nova/compute/service.py b/nova/compute/service.py new file mode 100644 index 000000000..02e35baa2 --- /dev/null +++ b/nova/compute/service.py @@ -0,0 +1,581 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Compute Service: + + Runs on each compute host, managing the + hypervisor using libvirt. + +""" + +import base64 +import json +import logging +import os +import shutil +import sys +from twisted.internet import defer +from twisted.internet import task + + +try: + import libvirt +except Exception, err: + logging.warning('no libvirt found') + +from nova import exception +from nova import fakevirt +from nova import flags +from nova import process +from nova import service +from nova import utils +from nova.compute import disk +from nova.compute import model +from nova.compute import network +from nova.objectstore import image # for image_path flag +from nova.volume import service as volume_service + + +FLAGS = flags.FLAGS +flags.DEFINE_string('libvirt_xml_template', + utils.abspath('compute/libvirt.xml.template'), + 'Libvirt XML Template') +flags.DEFINE_bool('use_s3', True, + 'whether to get images from s3 or use local copy') +flags.DEFINE_string('instances_path', utils.abspath('../instances'), + 'where instances are stored on disk') + +INSTANCE_TYPES = {} +INSTANCE_TYPES['m1.tiny'] = {'memory_mb': 512, 'vcpus': 1, 'local_gb': 0} +INSTANCE_TYPES['m1.small'] = {'memory_mb': 1024, 'vcpus': 1, 'local_gb': 10} +INSTANCE_TYPES['m1.medium'] = {'memory_mb': 2048, 'vcpus': 2, 'local_gb': 10} +INSTANCE_TYPES['m1.large'] = {'memory_mb': 4096, 'vcpus': 4, 'local_gb': 10} +INSTANCE_TYPES['m1.xlarge'] = {'memory_mb': 8192, 'vcpus': 4, 'local_gb': 10} +INSTANCE_TYPES['c1.medium'] = {'memory_mb': 2048, 'vcpus': 4, 'local_gb': 10} + + +def _image_path(path=''): + return os.path.join(FLAGS.images_path, path) + + +def _image_url(path): + return "%s:%s/_images/%s" % (FLAGS.s3_host, FLAGS.s3_port, path) + + +class ComputeService(service.Service): + """ + Manages the running instances. 
+ """ + def __init__(self): + """ load configuration options for this node and connect to libvirt """ + super(ComputeService, self).__init__() + self._instances = {} + self._conn = self._get_connection() + self.instdir = model.InstanceDirectory() + # TODO(joshua): This needs to ensure system state, specifically: modprobe aoe + + def _get_connection(self): + """ returns a libvirt connection object """ + # TODO(termie): maybe lazy load after initial check for permissions + # TODO(termie): check whether we can be disconnected + if FLAGS.fake_libvirt: + conn = fakevirt.FakeVirtConnection.instance() + else: + auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], + 'root', + None] + conn = libvirt.openAuth('qemu:///system', auth, 0) + if conn == None: + logging.error('Failed to open connection to the hypervisor') + sys.exit(1) + return conn + + def noop(self): + """ simple test of an AMQP message call """ + return defer.succeed('PONG') + + def get_instance(self, instance_id): + # inst = self.instdir.get(instance_id) + # return inst + if self.instdir.exists(instance_id): + return Instance.fromName(self._conn, instance_id) + return None + + @exception.wrap_exception + def adopt_instances(self): + """ if there are instances already running, adopt them """ + return defer.succeed(0) + instance_names = [self._conn.lookupByID(x).name() + for x in self._conn.listDomainsID()] + for name in instance_names: + try: + new_inst = Instance.fromName(self._conn, name) + new_inst.update_state() + except: + pass + return defer.succeed(len(self._instances)) + + @exception.wrap_exception + def describe_instances(self): + retval = {} + for inst in self.instdir.by_node(FLAGS.node_name): + retval[inst['instance_id']] = ( + Instance.fromName(self._conn, inst['instance_id'])) + return retval + + @defer.inlineCallbacks + def report_state(self, nodename, daemon): + # TODO(termie): make this pattern be more elegant. -todd + try: + record = model.Daemon(nodename, daemon) + record.heartbeat() + if getattr(self, "model_disconnected", False): + self.model_disconnected = False + logging.error("Recovered model server connection!") + + except model.ConnectionError, ex: + if not getattr(self, "model_disconnected", False): + self.model_disconnected = True + logging.exception("model server went away") + yield + + # @exception.wrap_exception + def run_instance(self, instance_id, **_kwargs): + """ launch a new instance with specified options """ + logging.debug("Starting instance %s..." 
% (instance_id)) + inst = self.instdir.get(instance_id) + if not FLAGS.simple_network: + # TODO: Get the real security group of launch in here + security_group = "default" + net = network.BridgedNetwork.get_network_for_project(inst['user_id'], + inst['project_id'], + security_group).express() + inst['node_name'] = FLAGS.node_name + inst.save() + # TODO(vish) check to make sure the availability zone matches + new_inst = Instance(self._conn, name=instance_id, data=inst) + logging.info("Instances current state is %s", new_inst.state) + if new_inst.is_running(): + raise exception.Error("Instance is already running") + d = new_inst.spawn() + return d + + @exception.wrap_exception + def terminate_instance(self, instance_id): + """ terminate an instance on this machine """ + logging.debug("Got told to terminate instance %s" % instance_id) + instance = self.get_instance(instance_id) + # inst = self.instdir.get(instance_id) + if not instance: + raise exception.Error( + 'trying to terminate unknown instance: %s' % instance_id) + d = instance.destroy() + # d.addCallback(lambda x: inst.destroy()) + return d + + @exception.wrap_exception + def reboot_instance(self, instance_id): + """ reboot an instance on this server + KVM doesn't support reboot, so we terminate and restart """ + instance = self.get_instance(instance_id) + if not instance: + raise exception.Error( + 'trying to reboot unknown instance: %s' % instance_id) + return instance.reboot() + + @defer.inlineCallbacks + @exception.wrap_exception + def get_console_output(self, instance_id): + """ send the console output for an instance """ + logging.debug("Getting console output for %s" % (instance_id)) + inst = self.instdir.get(instance_id) + instance = self.get_instance(instance_id) + if not instance: + raise exception.Error( + 'trying to get console log for unknown: %s' % instance_id) + rv = yield instance.console_output() + # TODO(termie): this stuff belongs in the API layer, no need to + # munge the data we send to ourselves + output = {"InstanceId" : instance_id, + "Timestamp" : "2", + "output" : base64.b64encode(rv)} + defer.returnValue(output) + + @defer.inlineCallbacks + @exception.wrap_exception + def attach_volume(self, instance_id = None, + volume_id = None, mountpoint = None): + volume = volume_service.get_volume(volume_id) + yield self._init_aoe() + yield process.simple_execute( + "sudo virsh attach-disk %s /dev/etherd/%s %s" % + (instance_id, + volume['aoe_device'], + mountpoint.rpartition('/dev/')[2])) + volume.finish_attach() + defer.returnValue(True) + + @defer.inlineCallbacks + def _init_aoe(self): + yield process.simple_execute("sudo aoe-discover") + yield process.simple_execute("sudo aoe-stat") + + @defer.inlineCallbacks + @exception.wrap_exception + def detach_volume(self, instance_id, volume_id): + """ detach a volume from an instance """ + # despite the documentation, virsh detach-disk just wants the device + # name without the leading /dev/ + volume = volume_service.get_volume(volume_id) + target = volume['mountpoint'].rpartition('/dev/')[2] + yield process.simple_execute( + "sudo virsh detach-disk %s %s " % (instance_id, target)) + volume.finish_detach() + defer.returnValue(True) + + +class Group(object): + def __init__(self, group_id): + self.group_id = group_id + + +class ProductCode(object): + def __init__(self, product_code): + self.product_code = product_code + + +class Instance(object): + + NOSTATE = 0x00 + RUNNING = 0x01 + BLOCKED = 0x02 + PAUSED = 0x03 + SHUTDOWN = 0x04 + SHUTOFF = 0x05 + CRASHED = 0x06 + + def 
__init__(self, conn, name, data): + """ spawn an instance with a given name """ + self._conn = conn + # TODO(vish): this can be removed after data has been updated + # data doesn't seem to have a working iterator so in doesn't work + if data.get('owner_id', None) is not None: + data['user_id'] = data['owner_id'] + data['project_id'] = data['owner_id'] + self.datamodel = data + + size = data.get('instance_type', FLAGS.default_instance_type) + if size not in INSTANCE_TYPES: + raise exception.Error('invalid instance type: %s' % size) + + self.datamodel.update(INSTANCE_TYPES[size]) + + self.datamodel['name'] = name + self.datamodel['instance_id'] = name + self.datamodel['basepath'] = data.get( + 'basepath', os.path.abspath( + os.path.join(FLAGS.instances_path, self.name))) + self.datamodel['memory_kb'] = int(self.datamodel['memory_mb']) * 1024 + self.datamodel.setdefault('image_id', FLAGS.default_image) + self.datamodel.setdefault('kernel_id', FLAGS.default_kernel) + self.datamodel.setdefault('ramdisk_id', FLAGS.default_ramdisk) + self.datamodel.setdefault('project_id', self.datamodel['user_id']) + self.datamodel.setdefault('bridge_name', None) + #self.datamodel.setdefault('key_data', None) + #self.datamodel.setdefault('key_name', None) + #self.datamodel.setdefault('addressing_type', None) + + # TODO(joshua) - The ugly non-flat ones + self.datamodel['groups'] = data.get('security_group', 'default') + # TODO(joshua): Support product codes somehow + self.datamodel.setdefault('product_codes', None) + + self.datamodel.save() + logging.debug("Finished init of Instance with id of %s" % name) + + def toXml(self): + # TODO(termie): cache? + logging.debug("Starting the toXML method") + libvirt_xml = open(FLAGS.libvirt_xml_template).read() + xml_info = self.datamodel.copy() + # TODO(joshua): Make this xml express the attached disks as well + + # TODO(termie): lazy lazy hack because xml is annoying + xml_info['nova'] = json.dumps(self.datamodel.copy()) + libvirt_xml = libvirt_xml % xml_info + logging.debug("Finished the toXML method") + + return libvirt_xml + + @classmethod + def fromName(cls, conn, name): + """ use the saved data for reloading the instance """ + instdir = model.InstanceDirectory() + instance = instdir.get(name) + return cls(conn=conn, name=name, data=instance) + + def set_state(self, state_code, state_description=None): + self.datamodel['state'] = state_code + if not state_description: + state_description = STATE_NAMES[state_code] + self.datamodel['state_description'] = state_description + self.datamodel.save() + + @property + def state(self): + # it is a string in datamodel + return int(self.datamodel['state']) + + @property + def name(self): + return self.datamodel['name'] + + def is_pending(self): + return (self.state == Instance.NOSTATE or self.state == 'pending') + + def is_destroyed(self): + return self.state == Instance.SHUTOFF + + def is_running(self): + logging.debug("Instance state is: %s" % self.state) + return (self.state == Instance.RUNNING or self.state == 'running') + + def describe(self): + return self.datamodel + + def info(self): + logging.debug("Getting info for dom %s" % self.name) + virt_dom = self._conn.lookupByName(self.name) + (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info() + return {'state': state, + 'max_mem': max_mem, + 'mem': mem, + 'num_cpu': num_cpu, + 'cpu_time': cpu_time, + 'node_name': FLAGS.node_name} + + def basepath(self, path=''): + return os.path.abspath(os.path.join(self.datamodel['basepath'], path)) + + def update_state(self): + 
self.datamodel.update(self.info()) + self.set_state(self.state) + self.datamodel.save() # Extra, but harmless + + @exception.wrap_exception + def destroy(self): + if self.is_destroyed(): + self.datamodel.destroy() + raise exception.Error('trying to destroy already destroyed' + ' instance: %s' % self.name) + + self.set_state(Instance.NOSTATE, 'shutting_down') + try: + virt_dom = self._conn.lookupByName(self.name) + virt_dom.destroy() + except Exception, _err: + pass + # If the instance is already terminated, we're still happy + d = defer.Deferred() + d.addCallback(lambda x: self._cleanup()) + d.addCallback(lambda x: self.datamodel.destroy()) + # TODO(termie): short-circuit me for tests + # WE'LL save this for when we do shutdown, + # instead of destroy - but destroy returns immediately + timer = task.LoopingCall(f=None) + def _wait_for_shutdown(): + try: + self.update_state() + if self.state == Instance.SHUTDOWN: + timer.stop() + d.callback(None) + except Exception: + self.set_state(Instance.SHUTDOWN) + timer.stop() + d.callback(None) + timer.f = _wait_for_shutdown + timer.start(interval=0.5, now=True) + return d + + def _cleanup(self): + target = os.path.abspath(self.datamodel['basepath']) + logging.info("Deleting instance files at %s", target) + shutil.rmtree(target) + + @defer.inlineCallbacks + @exception.wrap_exception + def reboot(self): + if not self.is_running(): + raise exception.Error( + 'trying to reboot a non-running' + 'instance: %s (state: %s)' % (self.name, self.state)) + + logging.debug('rebooting instance %s' % self.name) + self.set_state(Instance.NOSTATE, 'rebooting') + yield self._conn.lookupByName(self.name).destroy() + self._conn.createXML(self.toXml(), 0) + + d = defer.Deferred() + timer = task.LoopingCall(f=None) + def _wait_for_reboot(): + try: + self.update_state() + if self.is_running(): + logging.debug('rebooted instance %s' % self.name) + timer.stop() + d.callback(None) + except Exception: + self.set_state(Instance.SHUTDOWN) + timer.stop() + d.callback(None) + timer.f = _wait_for_reboot + timer.start(interval=0.5, now=True) + yield d + + def _fetch_s3_image(self, image, path): + url = _image_url('%s/image' % image) + d = process.simple_execute( + 'curl --silent %s -o %s' % (url, path)) + return d + + def _fetch_local_image(self, image, path): + source = _image_path('%s/image' % image) + d = process.simple_execute('cp %s %s' % (source, path)) + return d + + @defer.inlineCallbacks + def _create_image(self, libvirt_xml): + # syntactic nicety + data = self.datamodel + basepath = self.basepath + + # ensure directories exist and are writable + yield process.simple_execute( + 'mkdir -p %s' % basepath()) + yield process.simple_execute( + 'chmod 0777 %s' % basepath()) + + + # TODO(termie): these are blocking calls, it would be great + # if they weren't. 
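# One way the TODO above could be addressed without restructuring this
# method is Twisted's thread pool -- a sketch only, not part of this patch:
#
#     from twisted.internet import threads
#     yield threads.deferToThread(write_file, basepath('libvirt.xml'),
#                                 libvirt_xml)
#
# where write_file is a small blocking helper doing the open/write/close
# that follows, and deferToThread keeps the reactor responsive while it
# runs.  The curl/cp image fetches in this method already sidestep the
# problem by going through process.simple_execute.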
+ logging.info('Creating image for: %s', data['instance_id']) + f = open(basepath('libvirt.xml'), 'w') + f.write(libvirt_xml) + f.close() + + if FLAGS.fake_libvirt: + logging.info('fake_libvirt, nothing to do for create_image') + raise defer.returnValue(None); + + if FLAGS.use_s3: + _fetch_file = self._fetch_s3_image + else: + _fetch_file = self._fetch_local_image + + if not os.path.exists(basepath('disk')): + yield _fetch_file(data['image_id'], basepath('disk-raw')) + if not os.path.exists(basepath('kernel')): + yield _fetch_file(data['kernel_id'], basepath('kernel')) + if not os.path.exists(basepath('ramdisk')): + yield _fetch_file(data['ramdisk_id'], basepath('ramdisk')) + + execute = lambda cmd, input=None: \ + process.simple_execute(cmd=cmd, + input=input, + error_ok=1) + + key = data['key_data'] + net = None + if FLAGS.simple_network: + with open(FLAGS.simple_network_template) as f: + net = f.read() % {'address': data['private_dns_name'], + 'network': FLAGS.simple_network_network, + 'netmask': FLAGS.simple_network_netmask, + 'gateway': FLAGS.simple_network_gateway, + 'broadcast': FLAGS.simple_network_broadcast, + 'dns': FLAGS.simple_network_dns} + if key or net: + logging.info('Injecting data into image %s', data['image_id']) + yield disk.inject_data(basepath('disk-raw'), key, net, execute=execute) + + if os.path.exists(basepath('disk')): + yield process.simple_execute( + 'rm -f %s' % basepath('disk')) + + bytes = (INSTANCE_TYPES[data['instance_type']]['local_gb'] + * 1024 * 1024 * 1024) + yield disk.partition( + basepath('disk-raw'), basepath('disk'), bytes, execute=execute) + + @defer.inlineCallbacks + @exception.wrap_exception + def spawn(self): + self.set_state(Instance.NOSTATE, 'spawning') + logging.debug("Starting spawn in Instance") + + xml = self.toXml() + self.set_state(Instance.NOSTATE, 'launching') + logging.info('self %s', self) + try: + yield self._create_image(xml) + self._conn.createXML(xml, 0) + # TODO(termie): this should actually register + # a callback to check for successful boot + logging.debug("Instance is running") + + local_d = defer.Deferred() + timer = task.LoopingCall(f=None) + def _wait_for_boot(): + try: + self.update_state() + if self.is_running(): + logging.debug('booted instance %s' % self.name) + timer.stop() + local_d.callback(None) + except Exception: + self.set_state(Instance.SHUTDOWN) + logging.error('Failed to boot instance %s' % self.name) + timer.stop() + local_d.callback(None) + timer.f = _wait_for_boot + timer.start(interval=0.5, now=True) + except Exception, ex: + logging.debug(ex) + self.set_state(Instance.SHUTDOWN) + + @exception.wrap_exception + def console_output(self): + if not FLAGS.fake_libvirt: + fname = os.path.abspath( + os.path.join(self.datamodel['basepath'], 'console.log')) + with open(fname, 'r') as f: + console = f.read() + else: + console = 'FAKE CONSOLE OUTPUT' + return defer.succeed(console) + +STATE_NAMES = { + Instance.NOSTATE : 'pending', + Instance.RUNNING : 'running', + Instance.BLOCKED : 'blocked', + Instance.PAUSED : 'paused', + Instance.SHUTDOWN : 'shutdown', + Instance.SHUTOFF : 'shutdown', + Instance.CRASHED : 'crashed', +} diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 56d474fd9..97a7b5a38 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -37,9 +37,9 @@ from nova.auth import rbac from nova.auth import users from nova.compute import model from nova.compute import network -from nova.compute import computeservice +from nova.compute import service as compute_service from 
nova.endpoint import images -from nova.volume import volumeservice +from nova.volume import service as volume_service FLAGS = flags.FLAGS @@ -75,7 +75,7 @@ class CloudController(object): def volumes(self): """ returns a list of all volumes """ for volume_id in datastore.Redis.instance().smembers("volumes"): - volume = volumeservice.get_volume(volume_id) + volume = volume_service.get_volume(volume_id) yield volume def __str__(self): @@ -102,7 +102,7 @@ class CloudController(object): result = {} for instance in self.instdir.all: if instance['project_id'] == project_id: - line = '%s slots=%d' % (instance['private_dns_name'], computeservice.INSTANCE_TYPES[instance['instance_type']]['vcpus']) + line = '%s slots=%d' % (instance['private_dns_name'], compute_service.INSTANCE_TYPES[instance['instance_type']]['vcpus']) if instance['key_name'] in result: result[instance['key_name']].append(line) else: @@ -295,7 +295,7 @@ class CloudController(object): @rbac.allow('projectmanager', 'sysadmin') def create_volume(self, context, size, **kwargs): - # TODO(vish): refactor this to create the volume object here and tell volumeservice to create it + # TODO(vish): refactor this to create the volume object here and tell service to create it res = rpc.call(FLAGS.volume_topic, {"method": "create_volume", "args" : {"size": size, "user_id": context.user.id, @@ -330,7 +330,7 @@ class CloudController(object): raise exception.NotFound('Instance %s could not be found' % instance_id) def _get_volume(self, context, volume_id): - volume = volumeservice.get_volume(volume_id) + volume = volume_service.get_volume(volume_id) if context.user.is_admin() or volume['project_id'] == context.project.id: return volume raise exception.NotFound('Volume %s could not be found' % volume_id) diff --git a/nova/network/networkservice.py b/nova/network/networkservice.py deleted file mode 100644 index 9d87e05e6..000000000 --- a/nova/network/networkservice.py +++ /dev/null @@ -1,35 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Network Nodes are responsible for allocating ips and setting up network -""" - -import logging - -from nova import flags -from nova import service - - -FLAGS = flags.FLAGS - -class NetworkService(service.Service): - """Allocates ips and sets up networks""" - - def __init__(self): - logging.debug("Network node working") diff --git a/nova/network/service.py b/nova/network/service.py new file mode 100644 index 000000000..9d87e05e6 --- /dev/null +++ b/nova/network/service.py @@ -0,0 +1,35 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Network Nodes are responsible for allocating ips and setting up network +""" + +import logging + +from nova import flags +from nova import service + + +FLAGS = flags.FLAGS + +class NetworkService(service.Service): + """Allocates ips and sets up networks""" + + def __init__(self): + logging.debug("Network node working") diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py index 38f4de8d9..128188b0d 100644 --- a/nova/tests/cloud_unittest.py +++ b/nova/tests/cloud_unittest.py @@ -28,7 +28,7 @@ from nova import flags from nova import rpc from nova import test from nova.auth import users -from nova.compute import computeservice +from nova.compute import service from nova.endpoint import api from nova.endpoint import cloud @@ -54,7 +54,7 @@ class CloudTestCase(test.BaseTestCase): self.injected.append(self.cloud_consumer.attach_to_tornado(self.ioloop)) # set up a service - self.compute = computeservice.ComputeService() + self.compute = service.ComputeService() self.compute_consumer = rpc.AdapterConsumer(connection=self.conn, topic=FLAGS.compute_topic, proxy=self.compute) diff --git a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py index db08308bb..b70260c25 100644 --- a/nova/tests/compute_unittest.py +++ b/nova/tests/compute_unittest.py @@ -26,7 +26,7 @@ from nova import flags from nova import test from nova import utils from nova.compute import model -from nova.compute import computeservice +from nova.compute import service FLAGS = flags.FLAGS @@ -60,7 +60,7 @@ class ComputeConnectionTestCase(test.TrialTestCase): self.flags(fake_libvirt=True, fake_storage=True, fake_users=True) - self.compute = computeservice.ComputeService() + self.compute = service.ComputeService() def create_instance(self): instdir = model.InstanceDirectory() diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py index 568b199a0..62144269c 100644 --- a/nova/tests/volume_unittest.py +++ b/nova/tests/volume_unittest.py @@ -18,11 +18,11 @@ import logging +from nova import compute from nova import exception from nova import flags from nova import test -from nova.compute import computeservice -from nova.volume import volumeservice +from nova.volume import service as volume_service FLAGS = flags.FLAGS @@ -32,11 +32,11 @@ class VolumeTestCase(test.TrialTestCase): def setUp(self): logging.getLogger().setLevel(logging.DEBUG) super(VolumeTestCase, self).setUp() - self.compute = computeservice.ComputeService() + self.compute = compute.service.ComputeService() self.volume = None self.flags(fake_libvirt=True, fake_storage=True) - self.volume = volumeservice.VolumeService() + self.volume = volume_service.VolumeService() def test_run_create_volume(self): vol_size = '0' @@ -45,11 +45,11 @@ class VolumeTestCase(test.TrialTestCase): volume_id = self.volume.create_volume(vol_size, user_id, project_id) # TODO(termie): get_volume returns differently than create_volume self.assertEqual(volume_id, - 
volumeservice.get_volume(volume_id)['volume_id']) + volume_service.get_volume(volume_id)['volume_id']) rv = self.volume.delete_volume(volume_id) self.assertRaises(exception.Error, - volumeservice.get_volume, + volume_service.get_volume, volume_id) def test_too_big_volume(self): @@ -70,7 +70,7 @@ class VolumeTestCase(test.TrialTestCase): for i in xrange(total_slots): vid = self.volume.create_volume(vol_size, user_id, project_id) vols.append(vid) - self.assertRaises(volumeservice.NoMoreVolumes, + self.assertRaises(volume_service.NoMoreVolumes, self.volume.create_volume, vol_size, user_id, project_id) for id in vols: @@ -85,7 +85,7 @@ class VolumeTestCase(test.TrialTestCase): mountpoint = "/dev/sdf" volume_id = self.volume.create_volume(vol_size, user_id, project_id) - volume_obj = volumeservice.get_volume(volume_id) + volume_obj = volume_service.get_volume(volume_id) volume_obj.start_attach(instance_id, mountpoint) rv = yield self.compute.attach_volume(volume_id, instance_id, @@ -100,12 +100,12 @@ class VolumeTestCase(test.TrialTestCase): volume_id) rv = yield self.volume.detach_volume(volume_id) - volume_obj = volumeservice.get_volume(volume_id) + volume_obj = volume_service.get_volume(volume_id) self.assertEqual(volume_obj['status'], "available") rv = self.volume.delete_volume(volume_id) self.assertRaises(exception.Error, - volumeservice.get_volume, + volume_service.get_volume, volume_id) def test_multi_node(self): diff --git a/nova/volume/service.py b/nova/volume/service.py new file mode 100644 index 000000000..87a47f40a --- /dev/null +++ b/nova/volume/service.py @@ -0,0 +1,304 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Nova Storage manages creating, attaching, detaching, and +destroying persistent storage volumes, ala EBS. +Currently uses Ata-over-Ethernet. 
+""" + +import glob +import logging +import os +import shutil +import socket +import tempfile + +from twisted.internet import defer + +from nova import datastore +from nova import exception +from nova import flags +from nova import process +from nova import service +from nova import utils +from nova import validate + + +FLAGS = flags.FLAGS +flags.DEFINE_string('storage_dev', '/dev/sdb', + 'Physical device to use for volumes') +flags.DEFINE_string('volume_group', 'nova-volumes', + 'Name for the VG that will contain exported volumes') +flags.DEFINE_string('aoe_eth_dev', 'eth0', + 'Which device to export the volumes on') +flags.DEFINE_string('storage_name', + socket.gethostname(), + 'name of this service') +flags.DEFINE_integer('first_shelf_id', + utils.last_octet(utils.get_my_ip()) * 10, + 'AoE starting shelf_id for this service') +flags.DEFINE_integer('last_shelf_id', + utils.last_octet(utils.get_my_ip()) * 10 + 9, + 'AoE starting shelf_id for this service') +flags.DEFINE_string('aoe_export_dir', + '/var/lib/vblade-persist/vblades', + 'AoE directory where exports are created') +flags.DEFINE_integer('slots_per_shelf', + 16, + 'Number of AoE slots per shelf') +flags.DEFINE_string('storage_availability_zone', + 'nova', + 'availability zone of this service') +flags.DEFINE_boolean('fake_storage', False, + 'Should we make real storage volumes to attach?') + + +class NoMoreVolumes(exception.Error): + pass + +def get_volume(volume_id): + """ Returns a redis-backed volume object """ + volume_class = Volume + if FLAGS.fake_storage: + volume_class = FakeVolume + if datastore.Redis.instance().sismember('volumes', volume_id): + return volume_class(volume_id=volume_id) + raise exception.Error("Volume does not exist") + +class VolumeService(service.Service): + """ + There is one VolumeNode running on each host. + However, each VolumeNode can report on the state of + *all* volumes in the cluster. + """ + def __init__(self): + super(VolumeService, self).__init__() + self.volume_class = Volume + if FLAGS.fake_storage: + FLAGS.aoe_export_dir = tempfile.mkdtemp() + self.volume_class = FakeVolume + self._init_volume_group() + + def __del__(self): + # TODO(josh): Get rid of this destructor, volumes destroy themselves + if FLAGS.fake_storage: + try: + shutil.rmtree(FLAGS.aoe_export_dir) + except Exception, err: + pass + + @validate.rangetest(size=(0, 1000)) + def create_volume(self, size, user_id, project_id): + """ + Creates an exported volume (fake or real), + restarts exports to make it available. + Volume at this point has size, owner, and zone. 
+ """ + logging.debug("Creating volume of size: %s" % (size)) + vol = self.volume_class.create(size, user_id, project_id) + datastore.Redis.instance().sadd('volumes', vol['volume_id']) + datastore.Redis.instance().sadd('volumes:%s' % (FLAGS.storage_name), vol['volume_id']) + self._restart_exports() + return vol['volume_id'] + + def by_node(self, node_id): + """ returns a list of volumes for a node """ + for volume_id in datastore.Redis.instance().smembers('volumes:%s' % (node_id)): + yield self.volume_class(volume_id=volume_id) + + @property + def all(self): + """ returns a list of all volumes """ + for volume_id in datastore.Redis.instance().smembers('volumes'): + yield self.volume_class(volume_id=volume_id) + + def delete_volume(self, volume_id): + logging.debug("Deleting volume with id of: %s" % (volume_id)) + vol = get_volume(volume_id) + if vol['status'] == "attached": + raise exception.Error("Volume is still attached") + if vol['node_name'] != FLAGS.storage_name: + raise exception.Error("Volume is not local to this node") + vol.destroy() + datastore.Redis.instance().srem('volumes', vol['volume_id']) + datastore.Redis.instance().srem('volumes:%s' % (FLAGS.storage_name), vol['volume_id']) + return True + + @defer.inlineCallbacks + def _restart_exports(self): + if FLAGS.fake_storage: + return + yield process.simple_execute( + "sudo vblade-persist auto all") + yield process.simple_execute( + "sudo vblade-persist start all") + + @defer.inlineCallbacks + def _init_volume_group(self): + if FLAGS.fake_storage: + return + yield process.simple_execute( + "sudo pvcreate %s" % (FLAGS.storage_dev)) + yield process.simple_execute( + "sudo vgcreate %s %s" % (FLAGS.volume_group, + FLAGS.storage_dev)) + +class Volume(datastore.BasicModel): + + def __init__(self, volume_id=None): + self.volume_id = volume_id + super(Volume, self).__init__() + + @property + def identifier(self): + return self.volume_id + + def default_state(self): + return {"volume_id": self.volume_id} + + @classmethod + def create(cls, size, user_id, project_id): + volume_id = utils.generate_uid('vol') + vol = cls(volume_id) + vol['node_name'] = FLAGS.storage_name + vol['size'] = size + vol['user_id'] = user_id + vol['project_id'] = project_id + vol['availability_zone'] = FLAGS.storage_availability_zone + vol["instance_id"] = 'none' + vol["mountpoint"] = 'none' + vol['attach_time'] = 'none' + vol['status'] = "creating" # creating | available | in-use + vol['attach_status'] = "detached" # attaching | attached | detaching | detached + vol['delete_on_termination'] = 'False' + vol.save() + vol.create_lv() + vol._setup_export() + # TODO(joshua) - We need to trigger a fanout message for aoe-discover on all the nodes + # TODO(joshua + vol['status'] = "available" + vol.save() + return vol + + def start_attach(self, instance_id, mountpoint): + """ """ + self['instance_id'] = instance_id + self['mountpoint'] = mountpoint + self['status'] = "in-use" + self['attach_status'] = "attaching" + self['attach_time'] = utils.isotime() + self['delete_on_termination'] = 'False' + self.save() + + def finish_attach(self): + """ """ + self['attach_status'] = "attached" + self.save() + + def start_detach(self): + """ """ + self['attach_status'] = "detaching" + self.save() + + def finish_detach(self): + self['instance_id'] = None + self['mountpoint'] = None + self['status'] = "available" + self['attach_status'] = "detached" + self.save() + + def destroy(self): + try: + self._remove_export() + except: + pass + self._delete_lv() + super(Volume, self).destroy() + 
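# How the AoE coordinates used below fit together: first_shelf_id and
# last_shelf_id (defined with the other flags above) default to the last
# octet of this host's IP times 10 and that value plus 9, so a volume host
# at 10.0.0.3 owns shelves 30-39.  get_next_aoe_numbers() at the bottom of
# this module hands out up to slots_per_shelf (16) blades per shelf, and
# _setup_export() records the pair as aoe_device "e<shelf>.<blade>", the
# device name that compute nodes later attach from /dev/etherd/.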
+ @defer.inlineCallbacks + def create_lv(self): + if str(self['size']) == '0': + sizestr = '100M' + else: + sizestr = '%sG' % self['size'] + yield process.simple_execute( + "sudo lvcreate -L %s -n %s %s" % (sizestr, + self['volume_id'], + FLAGS.volume_group)) + + @defer.inlineCallbacks + def _delete_lv(self): + yield process.simple_execute( + "sudo lvremove -f %s/%s" % (FLAGS.volume_group, + self['volume_id'])) + + def _setup_export(self): + (shelf_id, blade_id) = get_next_aoe_numbers() + self['aoe_device'] = "e%s.%s" % (shelf_id, blade_id) + self['shelf_id'] = shelf_id + self['blade_id'] = blade_id + self.save() + self._exec_export() + + @defer.inlineCallbacks + def _exec_export(self): + yield process.simple_execute( + "sudo vblade-persist setup %s %s %s /dev/%s/%s" % + (self['shelf_id'], + self['blade_id'], + FLAGS.aoe_eth_dev, + FLAGS.volume_group, + self['volume_id'])) + + @defer.inlineCallbacks + def _remove_export(self): + yield process.simple_execute( + "sudo vblade-persist stop %s %s" % (self['shelf_id'], + self['blade_id'])) + yield process.simple_execute( + "sudo vblade-persist destroy %s %s" % (self['shelf_id'], + self['blade_id'])) + + +class FakeVolume(Volume): + def create_lv(self): + pass + + def _exec_export(self): + fname = os.path.join(FLAGS.aoe_export_dir, self['aoe_device']) + f = file(fname, "w") + f.close() + + def _remove_export(self): + pass + + def _delete_lv(self): + pass + +def get_next_aoe_numbers(): + for shelf_id in xrange(FLAGS.first_shelf_id, FLAGS.last_shelf_id + 1): + aoes = glob.glob("%s/e%s.*" % (FLAGS.aoe_export_dir, shelf_id)) + if not aoes: + blade_id = 0 + else: + blade_id = int(max([int(a.rpartition('.')[2]) for a in aoes])) + 1 + if blade_id < FLAGS.slots_per_shelf: + logging.debug("Next shelf.blade is %s.%s", shelf_id, blade_id) + return (shelf_id, blade_id) + raise NoMoreVolumes() diff --git a/nova/volume/volumeservice.py b/nova/volume/volumeservice.py deleted file mode 100644 index 87a47f40a..000000000 --- a/nova/volume/volumeservice.py +++ /dev/null @@ -1,304 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Nova Storage manages creating, attaching, detaching, and -destroying persistent storage volumes, ala EBS. -Currently uses Ata-over-Ethernet. 
-""" - -import glob -import logging -import os -import shutil -import socket -import tempfile - -from twisted.internet import defer - -from nova import datastore -from nova import exception -from nova import flags -from nova import process -from nova import service -from nova import utils -from nova import validate - - -FLAGS = flags.FLAGS -flags.DEFINE_string('storage_dev', '/dev/sdb', - 'Physical device to use for volumes') -flags.DEFINE_string('volume_group', 'nova-volumes', - 'Name for the VG that will contain exported volumes') -flags.DEFINE_string('aoe_eth_dev', 'eth0', - 'Which device to export the volumes on') -flags.DEFINE_string('storage_name', - socket.gethostname(), - 'name of this service') -flags.DEFINE_integer('first_shelf_id', - utils.last_octet(utils.get_my_ip()) * 10, - 'AoE starting shelf_id for this service') -flags.DEFINE_integer('last_shelf_id', - utils.last_octet(utils.get_my_ip()) * 10 + 9, - 'AoE starting shelf_id for this service') -flags.DEFINE_string('aoe_export_dir', - '/var/lib/vblade-persist/vblades', - 'AoE directory where exports are created') -flags.DEFINE_integer('slots_per_shelf', - 16, - 'Number of AoE slots per shelf') -flags.DEFINE_string('storage_availability_zone', - 'nova', - 'availability zone of this service') -flags.DEFINE_boolean('fake_storage', False, - 'Should we make real storage volumes to attach?') - - -class NoMoreVolumes(exception.Error): - pass - -def get_volume(volume_id): - """ Returns a redis-backed volume object """ - volume_class = Volume - if FLAGS.fake_storage: - volume_class = FakeVolume - if datastore.Redis.instance().sismember('volumes', volume_id): - return volume_class(volume_id=volume_id) - raise exception.Error("Volume does not exist") - -class VolumeService(service.Service): - """ - There is one VolumeNode running on each host. - However, each VolumeNode can report on the state of - *all* volumes in the cluster. - """ - def __init__(self): - super(VolumeService, self).__init__() - self.volume_class = Volume - if FLAGS.fake_storage: - FLAGS.aoe_export_dir = tempfile.mkdtemp() - self.volume_class = FakeVolume - self._init_volume_group() - - def __del__(self): - # TODO(josh): Get rid of this destructor, volumes destroy themselves - if FLAGS.fake_storage: - try: - shutil.rmtree(FLAGS.aoe_export_dir) - except Exception, err: - pass - - @validate.rangetest(size=(0, 1000)) - def create_volume(self, size, user_id, project_id): - """ - Creates an exported volume (fake or real), - restarts exports to make it available. - Volume at this point has size, owner, and zone. 
- """ - logging.debug("Creating volume of size: %s" % (size)) - vol = self.volume_class.create(size, user_id, project_id) - datastore.Redis.instance().sadd('volumes', vol['volume_id']) - datastore.Redis.instance().sadd('volumes:%s' % (FLAGS.storage_name), vol['volume_id']) - self._restart_exports() - return vol['volume_id'] - - def by_node(self, node_id): - """ returns a list of volumes for a node """ - for volume_id in datastore.Redis.instance().smembers('volumes:%s' % (node_id)): - yield self.volume_class(volume_id=volume_id) - - @property - def all(self): - """ returns a list of all volumes """ - for volume_id in datastore.Redis.instance().smembers('volumes'): - yield self.volume_class(volume_id=volume_id) - - def delete_volume(self, volume_id): - logging.debug("Deleting volume with id of: %s" % (volume_id)) - vol = get_volume(volume_id) - if vol['status'] == "attached": - raise exception.Error("Volume is still attached") - if vol['node_name'] != FLAGS.storage_name: - raise exception.Error("Volume is not local to this node") - vol.destroy() - datastore.Redis.instance().srem('volumes', vol['volume_id']) - datastore.Redis.instance().srem('volumes:%s' % (FLAGS.storage_name), vol['volume_id']) - return True - - @defer.inlineCallbacks - def _restart_exports(self): - if FLAGS.fake_storage: - return - yield process.simple_execute( - "sudo vblade-persist auto all") - yield process.simple_execute( - "sudo vblade-persist start all") - - @defer.inlineCallbacks - def _init_volume_group(self): - if FLAGS.fake_storage: - return - yield process.simple_execute( - "sudo pvcreate %s" % (FLAGS.storage_dev)) - yield process.simple_execute( - "sudo vgcreate %s %s" % (FLAGS.volume_group, - FLAGS.storage_dev)) - -class Volume(datastore.BasicModel): - - def __init__(self, volume_id=None): - self.volume_id = volume_id - super(Volume, self).__init__() - - @property - def identifier(self): - return self.volume_id - - def default_state(self): - return {"volume_id": self.volume_id} - - @classmethod - def create(cls, size, user_id, project_id): - volume_id = utils.generate_uid('vol') - vol = cls(volume_id) - vol['node_name'] = FLAGS.storage_name - vol['size'] = size - vol['user_id'] = user_id - vol['project_id'] = project_id - vol['availability_zone'] = FLAGS.storage_availability_zone - vol["instance_id"] = 'none' - vol["mountpoint"] = 'none' - vol['attach_time'] = 'none' - vol['status'] = "creating" # creating | available | in-use - vol['attach_status'] = "detached" # attaching | attached | detaching | detached - vol['delete_on_termination'] = 'False' - vol.save() - vol.create_lv() - vol._setup_export() - # TODO(joshua) - We need to trigger a fanout message for aoe-discover on all the nodes - # TODO(joshua - vol['status'] = "available" - vol.save() - return vol - - def start_attach(self, instance_id, mountpoint): - """ """ - self['instance_id'] = instance_id - self['mountpoint'] = mountpoint - self['status'] = "in-use" - self['attach_status'] = "attaching" - self['attach_time'] = utils.isotime() - self['delete_on_termination'] = 'False' - self.save() - - def finish_attach(self): - """ """ - self['attach_status'] = "attached" - self.save() - - def start_detach(self): - """ """ - self['attach_status'] = "detaching" - self.save() - - def finish_detach(self): - self['instance_id'] = None - self['mountpoint'] = None - self['status'] = "available" - self['attach_status'] = "detached" - self.save() - - def destroy(self): - try: - self._remove_export() - except: - pass - self._delete_lv() - super(Volume, self).destroy() - 
- @defer.inlineCallbacks - def create_lv(self): - if str(self['size']) == '0': - sizestr = '100M' - else: - sizestr = '%sG' % self['size'] - yield process.simple_execute( - "sudo lvcreate -L %s -n %s %s" % (sizestr, - self['volume_id'], - FLAGS.volume_group)) - - @defer.inlineCallbacks - def _delete_lv(self): - yield process.simple_execute( - "sudo lvremove -f %s/%s" % (FLAGS.volume_group, - self['volume_id'])) - - def _setup_export(self): - (shelf_id, blade_id) = get_next_aoe_numbers() - self['aoe_device'] = "e%s.%s" % (shelf_id, blade_id) - self['shelf_id'] = shelf_id - self['blade_id'] = blade_id - self.save() - self._exec_export() - - @defer.inlineCallbacks - def _exec_export(self): - yield process.simple_execute( - "sudo vblade-persist setup %s %s %s /dev/%s/%s" % - (self['shelf_id'], - self['blade_id'], - FLAGS.aoe_eth_dev, - FLAGS.volume_group, - self['volume_id'])) - - @defer.inlineCallbacks - def _remove_export(self): - yield process.simple_execute( - "sudo vblade-persist stop %s %s" % (self['shelf_id'], - self['blade_id'])) - yield process.simple_execute( - "sudo vblade-persist destroy %s %s" % (self['shelf_id'], - self['blade_id'])) - - -class FakeVolume(Volume): - def create_lv(self): - pass - - def _exec_export(self): - fname = os.path.join(FLAGS.aoe_export_dir, self['aoe_device']) - f = file(fname, "w") - f.close() - - def _remove_export(self): - pass - - def _delete_lv(self): - pass - -def get_next_aoe_numbers(): - for shelf_id in xrange(FLAGS.first_shelf_id, FLAGS.last_shelf_id + 1): - aoes = glob.glob("%s/e%s.*" % (FLAGS.aoe_export_dir, shelf_id)) - if not aoes: - blade_id = 0 - else: - blade_id = int(max([int(a.rpartition('.')[2]) for a in aoes])) + 1 - if blade_id < FLAGS.slots_per_shelf: - logging.debug("Next shelf.blade is %s.%s", shelf_id, blade_id) - return (shelf_id, blade_id) - raise NoMoreVolumes() -- cgit From 4373ca830fe93ddff70cf02bc366303d6b001ae9 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 26 Jul 2010 18:57:24 -0700 Subject: Fixes to the virtualenv installer --- Makefile | 2 +- tools/activate_venv.sh | 1 + tools/install_venv.py | 3 +-- tools/with_venv.sh | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 5fb512610..fa11cf335 100644 --- a/Makefile +++ b/Makefile @@ -15,7 +15,7 @@ clean: rm -rf keys rm -rf instances rm -rf networks - rm run_tests.err.log + rm -f run_tests.err.log clean-all: clean rm -rf $(venv) diff --git a/tools/activate_venv.sh b/tools/activate_venv.sh index d12cb3491..376db5b96 100644 --- a/tools/activate_venv.sh +++ b/tools/activate_venv.sh @@ -1,3 +1,4 @@ +#!/bin/bash _TOOLS=`dirname $0` _VENV=$_TOOLS/../.nova-venv source $_VENV/bin/activate diff --git a/tools/install_venv.py b/tools/install_venv.py index 720dcc0d9..c35af434b 100644 --- a/tools/install_venv.py +++ b/tools/install_venv.py @@ -5,7 +5,6 @@ Installation script for Nova's development virtualenv import os import subprocess import sys -import textwrap ROOT = os.path.dirname(os.path.dirname(__file__)) @@ -80,7 +79,7 @@ def print_help(): $ tools/with_venv.sh - Also, run_tests.sh will automatically use the virtualenv. + Also, make test will automatically use the virtualenv. 
""" print help diff --git a/tools/with_venv.sh b/tools/with_venv.sh index 51468a334..99d1ac18f 100755 --- a/tools/with_venv.sh +++ b/tools/with_venv.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash TOOLS=`dirname $0` VENV=$TOOLS/../.nova-venv source $VENV/bin/activate && $@ -- cgit From a2cf8a6f6038062cf343322acdbde66456b73dfb Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> Date: Mon, 26 Jul 2010 23:28:59 -0400 Subject: Fixes bug#610140. Thanks to Vish and Muharem for the patch --- nova/tests/api_unittest.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/nova/tests/api_unittest.py b/nova/tests/api_unittest.py index e5e2afe26..45ae50b2e 100644 --- a/nova/tests/api_unittest.py +++ b/nova/tests/api_unittest.py @@ -43,7 +43,11 @@ def boto_to_tornado(method, path, headers, data, host, connection=None): connection should be a FakeTornadoHttpConnection instance """ - headers = httpserver.HTTPHeaders() + try: + headers = httpserver.HTTPHeaders() + except AttributeError: + from tornado import httputil + headers = httputil.HTTPHeaders() for k, v in headers.iteritems(): headers[k] = v -- cgit From 74ce3aef4dafca8b0fc6bf0404725afdefe335ec Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Mon, 26 Jul 2010 23:49:49 -0400 Subject: Give SessionToken an is_expired method --- nova/compute/model.py | 5 +++++ nova/tests/model_unittest.py | 11 +++++++++++ 2 files changed, 16 insertions(+) diff --git a/nova/compute/model.py b/nova/compute/model.py index 7335d2c79..bae93b6c1 100644 --- a/nova/compute/model.py +++ b/nova/compute/model.py @@ -288,6 +288,11 @@ class SessionToken(datastore.BasicModel): expires = time + diff self['expiry'] = expires.strftime(utils.TIME_FORMAT) + def is_expired(self): + now = datetime.datetime.utcnow() + expires = utils.parse_isotime(self['expiry']) + return expires <= now + if __name__ == "__main__": import doctest diff --git a/nova/tests/model_unittest.py b/nova/tests/model_unittest.py index 10d3016f8..88ba5e6e9 100644 --- a/nova/tests/model_unittest.py +++ b/nova/tests/model_unittest.py @@ -279,3 +279,14 @@ class ModelTestCase(test.TrialTestCase): instance = yield model.SessionToken.generate("testuser") inst = model.SessionToken.lookup(instance.identifier) self.assert_(inst) + + @defer.inlineCallbacks + def test_session_token_is_expired_when_expired(self): + instance = yield model.SessionToken.generate("testuser") + instance['expiry'] = datetime.utcnow().strftime(utils.TIME_FORMAT) + self.assert_(instance.is_expired()) + + @defer.inlineCallbacks + def test_session_token_is_expired_when_not_expired(self): + instance = yield model.SessionToken.generate("testuser") + self.assertFalse(instance.is_expired()) -- cgit From 4199dab2e761efc6cb2d9af353f5f09c943a2d2f Mon Sep 17 00:00:00 2001 From: Paul Voccio Date: Mon, 26 Jul 2010 23:23:44 -0500 Subject: resolving conflict w/ merge, cleaning up virtenv setups --- tools/activate_venv.sh | 4 ---- tools/install_venv.py | 2 +- 2 files changed, 1 insertion(+), 5 deletions(-) delete mode 100644 tools/activate_venv.sh diff --git a/tools/activate_venv.sh b/tools/activate_venv.sh deleted file mode 100644 index 376db5b96..000000000 --- a/tools/activate_venv.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash -_TOOLS=`dirname $0` -_VENV=$_TOOLS/../.nova-venv -source $_VENV/bin/activate diff --git a/tools/install_venv.py b/tools/install_venv.py index c35af434b..0b35fc8e9 100644 --- a/tools/install_venv.py +++ b/tools/install_venv.py @@ -72,7 +72,7 @@ def print_help(): To activate the Nova virtualenv for the extent of your current shell 
session you can run: - $ source tools/activate_venv.sh + $ source .nova-venv/bin/activate Or, if you prefer, you can run commands in the virtualenv on a case by case basis by running: -- cgit From ad7f099aefc17d04a2a04deb7fd3055adc8cd84a Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Tue, 27 Jul 2010 01:03:05 -0400 Subject: Flag for SessionToken ttl setting. --- nova/compute/model.py | 12 ++++++++++-- nova/flags.py | 2 ++ nova/tests/model_unittest.py | 11 ++++++++++- 3 files changed, 22 insertions(+), 3 deletions(-) diff --git a/nova/compute/model.py b/nova/compute/model.py index bae93b6c1..212830d3c 100644 --- a/nova/compute/model.py +++ b/nova/compute/model.py @@ -235,6 +235,7 @@ class SessionToken(datastore.BasicModel): def __init__(self, session_token): self.token = session_token + self.default_ttl = FLAGS.auth_token_ttl super(SessionToken, self).__init__() @property @@ -243,7 +244,7 @@ class SessionToken(datastore.BasicModel): def default_state(self): now = datetime.datetime.utcnow() - diff = datetime.timedelta(hours=1) + diff = datetime.timedelta(seconds=self.default_ttl) expires = now + diff return {'user': None, 'session_type': None, 'token': self.token, 'expiry': expires.strftime(utils.TIME_FORMAT)} @@ -282,7 +283,7 @@ class SessionToken(datastore.BasicModel): def update_expiry(self, **kwargs): """updates the expirty attribute, but doesn't save""" if not kwargs: - kwargs['hours'] = 1 + kwargs['seconds'] = self.default_ttl time = datetime.datetime.utcnow() diff = datetime.timedelta(**kwargs) expires = time + diff @@ -293,6 +294,13 @@ class SessionToken(datastore.BasicModel): expires = utils.parse_isotime(self['expiry']) return expires <= now + def ttl(self): + """number of seconds remaining before expiration""" + now = datetime.datetime.utcnow() + expires = utils.parse_isotime(self['expiry']) + delta = expires - now + return (delta.seconds + (delta.days * 24 * 3600)) + if __name__ == "__main__": import doctest diff --git a/nova/flags.py b/nova/flags.py index 06ea1e007..3c1a0acaf 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -75,6 +75,8 @@ DEFINE_string('vpn_key_suffix', '-key', 'Suffix to add to project name for vpn key') +DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger') + # UNUSED DEFINE_string('node_availability_zone', 'nova', diff --git a/nova/tests/model_unittest.py b/nova/tests/model_unittest.py index 88ba5e6e9..24c08a908 100644 --- a/nova/tests/model_unittest.py +++ b/nova/tests/model_unittest.py @@ -16,7 +16,7 @@ # License for the specific language governing permissions and limitations # under the License. 
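# A worked example of the expiry arithmetic added above: update_expiry()
# now defaults to FLAGS.auth_token_ttl (3600) seconds rather than a fixed
# hour, and ttl() computes delta.seconds + delta.days * 24 * 3600 on
# (expiry - utcnow).  A token expiring in one hour gives 0 days and 3600
# seconds -> 3600; a token that lapsed a minute ago gives -1 day plus 86340
# seconds -> -60, so expired tokens simply go negative.
# test_session_token_ttl below allows five seconds of slack around 3600 to
# absorb the instants that pass between setting the expiry and calling ttl().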
-from datetime import datetime +from datetime import datetime, timedelta import logging import time from twisted.internet import defer @@ -290,3 +290,12 @@ class ModelTestCase(test.TrialTestCase): def test_session_token_is_expired_when_not_expired(self): instance = yield model.SessionToken.generate("testuser") self.assertFalse(instance.is_expired()) + + @defer.inlineCallbacks + def test_session_token_ttl(self): + instance = yield model.SessionToken.generate("testuser") + now = datetime.utcnow() + delta = timedelta(hours=1) + instance['expiry'] = (now + delta).strftime(utils.TIME_FORMAT) + # give 5 seconds of fuzziness + self.assert_(abs(instance.ttl() - FLAGS.auth_token_ttl) < 5) -- cgit From cb874bbe6d73007bfad8be750f6b57555adac7b6 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 26 Jul 2010 22:48:57 -0700 Subject: removed old reference from nova-common.install and fixed spacing --- debian/nova-common.install | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/debian/nova-common.install b/debian/nova-common.install index 9b1bbf147..93251363a 100644 --- a/debian/nova-common.install +++ b/debian/nova-common.install @@ -1,10 +1,9 @@ -bin/nova-manage usr/bin -debian/nova-manage.conf etc/nova +bin/nova-manage usr/bin +debian/nova-manage.conf etc/nova nova/auth/novarc.template usr/share/nova nova/cloudpipe/client.ovpn.template usr/share/nova nova/compute/libvirt.xml.template usr/share/nova nova/compute/interfaces.template usr/share/nova -usr/lib/python*/*-packages/nova/* CA/openssl.cnf.tmpl var/lib/nova/CA CA/geninter.sh var/lib/nova/CA CA/genrootca.sh var/lib/nova/CA -- cgit From 9641dac5d0cbfd8d1f3026e1bae5749b9d8e00fc Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Tue, 27 Jul 2010 10:30:00 +0200 Subject: Ensure that boto's config has a "Boto" section before attempting to set a value in it. --- nova/tests/objectstore_unittest.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py index 0a2f54031..20053a258 100644 --- a/nova/tests/objectstore_unittest.py +++ b/nova/tests/objectstore_unittest.py @@ -189,6 +189,8 @@ class S3APITestCase(test.TrialTestCase): self.tcp_port = self.listening_port.getHost().port + if not boto.config.has_section('Boto'): + boto.config.add_section('Boto') boto.config.set('Boto', 'num_retries', '0') self.conn = S3Connection(aws_access_key_id='admin', aws_secret_access_key='admin', -- cgit From 3365987ee5bc8ec66676f838d7eeefce377a76b9 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Tue, 27 Jul 2010 12:26:53 +0200 Subject: Automatically choose the correct type of test (virtualenv or system). --- Makefile | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index fa11cf335..cd7e233e1 100644 --- a/Makefile +++ b/Makefile @@ -4,7 +4,11 @@ with_venv=tools/with_venv.sh build: # Nothing to do -test: $(venv) +default_test_type:= $(shell if [ -e $(venv) ]; then echo venv; else echo system; fi) + +test: test-$(default_test_type) + +test-venv: $(venv) $(with_venv) python run_tests.py test-system: -- cgit From 56da5a45f6b7e511d3adc2f7ce12e1c9a9dc3665 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Tue, 27 Jul 2010 22:38:09 +0200 Subject: Bump version to 0.9.0. Change author to "OpenStack". Change author_email to nova@lists.launchpad.net. Change url to http://www.openstack.org/. Change description to "cloud computing fabric controller". 
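A quick way to confirm the new metadata once this setup.py has been installed
(this assumes setuptools' pkg_resources and an installed nova package; it is a
verification snippet, not part of the patch):

    import pkg_resources

    dist = pkg_resources.get_distribution('nova')
    print(dist.project_name)  # nova
    print(dist.version)       # 0.9.0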
--- setup.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/setup.py b/setup.py index eb42283ea..f9a616335 100644 --- a/setup.py +++ b/setup.py @@ -25,11 +25,10 @@ from setuptools import setup, find_packages srcdir = os.path.join(os.path.dirname(sys.argv[0]), 'src') setup(name='nova', - version='0.3.0', - description='None Other, Vaguely Awesome', - author='nova-core', - author_email='nova-core@googlegroups.com', - url='http://novacc.org/', + version='0.9.0', + description='cloud computing fabric controller', + author='OpenStack', + author_email='nova@lists.launchpad.net', + url='http://www.openstack.org/', packages = find_packages(), - ) -- cgit From 253b0005670d80ec4d953330a7dbd74b8a33b148 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Tue, 27 Jul 2010 23:06:03 +0200 Subject: Remove debian/ from main branch. --- debian/changelog | 232 ------------------------------------ debian/compat | 1 - debian/control | 136 --------------------- debian/nova-api.conf | 5 - debian/nova-api.init | 69 ----------- debian/nova-api.install | 3 - debian/nova-common.dirs | 11 -- debian/nova-common.install | 9 -- debian/nova-compute.conf | 7 -- debian/nova-compute.init | 69 ----------- debian/nova-compute.install | 2 - debian/nova-dhcpbridge.conf | 1 - debian/nova-instancemonitor.init | 69 ----------- debian/nova-instancemonitor.install | 1 - debian/nova-manage.conf | 4 - debian/nova-objectstore.conf | 5 - debian/nova-objectstore.init | 69 ----------- debian/nova-objectstore.install | 2 - debian/nova-volume.conf | 4 - debian/nova-volume.init | 69 ----------- debian/nova-volume.install | 2 - debian/pycompat | 1 - debian/pyversions | 1 - debian/rules | 4 - 24 files changed, 776 deletions(-) delete mode 100644 debian/changelog delete mode 100644 debian/compat delete mode 100644 debian/control delete mode 100644 debian/nova-api.conf delete mode 100644 debian/nova-api.init delete mode 100644 debian/nova-api.install delete mode 100644 debian/nova-common.dirs delete mode 100644 debian/nova-common.install delete mode 100644 debian/nova-compute.conf delete mode 100644 debian/nova-compute.init delete mode 100644 debian/nova-compute.install delete mode 100644 debian/nova-dhcpbridge.conf delete mode 100644 debian/nova-instancemonitor.init delete mode 100644 debian/nova-instancemonitor.install delete mode 100644 debian/nova-manage.conf delete mode 100644 debian/nova-objectstore.conf delete mode 100644 debian/nova-objectstore.init delete mode 100644 debian/nova-objectstore.install delete mode 100644 debian/nova-volume.conf delete mode 100644 debian/nova-volume.init delete mode 100644 debian/nova-volume.install delete mode 100644 debian/pycompat delete mode 100644 debian/pyversions delete mode 100755 debian/rules diff --git a/debian/changelog b/debian/changelog deleted file mode 100644 index 31dd5e91e..000000000 --- a/debian/changelog +++ /dev/null @@ -1,232 +0,0 @@ -nova (0.2.3-1) UNRELEASED; urgency=low - - * Relax the Twisted dependency to python-twisted-core (rather than the - full stack). - * Move nova related configuration files into /etc/nova/. - * Add a dependency on nginx from nova-objectsstore and install a - suitable configuration file. - * Ship the CA directory in nova-common. - * Add a default flag file for nova-manage to help it find the CA. - * If set, pass KernelId and RamdiskId from RunInstances call to the - target compute node. - * Added --network_path setting to nova-compute's flagfile. - * Move templates from python directories to /usr/share/nova. 
- * Add debian/nova-common.dirs to create - var/lib/nova/{buckets,CA,images,instances,keys,networks} - * Don't pass --daemonize=1 to nova-compute. It's already daemonising - by default. - - -- Vishvananda Ishaya Mon, 14 Jul 2010 12:00:00 -0700 - -nova (0.2.2-10) UNRELEASED; urgency=low - - * Fixed extra space in vblade-persist - - -- Vishvananda Ishaya Mon, 13 Jul 2010 19:00:00 -0700 - -nova (0.2.2-9) UNRELEASED; urgency=low - - * Fixed invalid dn bug in ldap for adding roles - - -- Vishvananda Ishaya Mon, 12 Jul 2010 15:20:00 -0700 - -nova (0.2.2-8) UNRELEASED; urgency=low - - * Added a missing comma - - -- Vishvananda Ishaya Mon, 08 Jul 2010 10:05:00 -0700 - -nova (0.2.2-7) UNRELEASED; urgency=low - - * Missing files from twisted patch - * License upedates - * Reformatting/cleanup - * Users/ldap bugfixes - * Merge fixes - * Documentation updates - * Vpn key creation fix - * Multiple shelves for volumes - - -- Vishvananda Ishaya Wed, 07 Jul 2010 18:45:00 -0700 - -nova (0.2.2-6) UNRELEASED; urgency=low - - * Fix to make Key Injection work again - - -- Vishvananda Ishaya Mon, 14 Jun 2010 21:35:00 -0700 - -nova (0.2.2-5) UNRELEASED; urgency=low - - * Lowered message callback frequency to stop compute and volume - from eating tons of cpu - - -- Vishvananda Ishaya Mon, 14 Jun 2010 14:15:00 -0700 - -nova (0.2.2-4) UNRELEASED; urgency=low - - * Documentation fixes - * Uncaught exceptions now log properly - * Nova Manage zip exporting works again - * Twisted threads no longer interrupt system calls - - -- Vishvananda Ishaya Sun, 13 Jun 2010 01:40:00 -0700 - -nova (0.2.2-3) UNRELEASED; urgency=low - - * Fixes to api calls - * More accurate documentation - * Removal of buggy multiprocessing - * Asynchronus execution of shell commands - * Fix of messaging race condition - * Test redis database cleaned out on each run of tests - * Smoketest updates - - -- Vishvananda Ishaya Fri, 12 Jun 2010 20:10:00 -0700 - -nova (0.2.2-2) UNRELEASED; urgency=low - - * Bugfixes to volume code - * Instances no longer use keeper - * Sectors off by one fix - * State reported properly by instances - - -- Vishvananda Ishaya Wed, 03 Jun 2010 15:21:00 -0700 - -nova (0.2.2-1) UNRELEASED; urgency=low - - * First release based on nova/cc - * Major rewrites to volumes and instances - * Addition of cloudpipe and rbac - * Major bugfixes - - -- Vishvananda Ishaya Wed, 02 Jun 2010 17:42:00 -0700 - -nova (0.2.1-1) UNRELEASED; urgency=low - - * Support ephemeral (local) space for instances - * instance related fixes - * fix network & cloudpipe bugs - - -- Vishvananda Ishaya Mon, 25 May 2010 12:14:00 -0700 - -nova (0.2.0-20) UNRELEASED; urgency=low - - * template files are in proper folder - - -- Vishvananda Ishaya Mon, 25 May 2010 12:14:00 -0700 - -nova (0.2.0-19) UNRELEASED; urgency=low - - * removed mox dependency and added templates to install - - -- Vishvananda Ishaya Mon, 25 May 2010 11:53:00 -0700 - -nova (0.2.0-18) UNRELEASED; urgency=low - - * api server properly sends instance status code - - -- Vishvananda Ishaya Mon, 24 May 2010 17:18:00 -0700 - -nova (0.2.0-17) UNRELEASED; urgency=low - - * redis-backed datastore - - -- Vishvananda Ishaya Mon, 24 May 2010 16:28:00 -0700 - -nova (0.2.0-16) UNRELEASED; urgency=low - - * make sure twistd.pid is really overriden - - -- Manish Singh Sun, 23 May 2010 22:18:47 -0700 - -nova (0.2.0-15) UNRELEASED; urgency=low - - * rpc shouldn't require tornado unless you are using attach_to_tornado - - -- Jesse Andrews Sun, 23 May 2010 21:59:00 -0700 - -nova (0.2.0-14) UNRELEASED; urgency=low - 
- * quicky init scripts for the other services, based on nova-objectstore - - -- Manish Singh Sun, 23 May 2010 21:49:43 -0700 - -nova (0.2.0-13) UNRELEASED; urgency=low - - * init script for nova-objectstore - - -- Manish Singh Sun, 23 May 2010 21:33:25 -0700 - -nova (0.2.0-12) UNRELEASED; urgency=low - - * kvm, kpartx required for nova-compute - - -- Jesse Andrews Sun, 23 May 2010 21:32:00 -0700 - -nova (0.2.0-11) UNRELEASED; urgency=low - - * Need to include the python modules in nova-common.install as well. - - -- Manish Singh Sun, 23 May 2010 20:04:27 -0700 - -nova (0.2.0-10) UNRELEASED; urgency=low - - * add more requirements to bin packages - - -- Jesse Andrews Sun, 23 May 2010 19:54:00 -0700 - -nova (0.2.0-9) UNRELEASED; urgency=low - - * nova bin packages should depend on the same version of nova-common they - were built from. - - -- Manish Singh Sun, 23 May 2010 18:46:34 -0700 - -nova (0.2.0-8) UNRELEASED; urgency=low - - * Require libvirt 0.8.1 or newer for nova-compute - - -- Jesse Andrews Sun, 23 May 2010 18:33:00 -0700 - -nova (0.2.0-7) UNRELEASED; urgency=low - - * Split bins into separate packages - - -- Manish Singh Sun, 23 May 2010 18:46:34 -0700 - -nova (0.2.0-6) UNRELEASED; urgency=low - - * Add python-m2crypto to deps - - -- Jesse Andrews Sun, 23 May 2010 18:33:00 -0700 - -nova (0.2.0-5) UNRELEASED; urgency=low - - * Add python-gflags to deps - - -- Manish Singh Sun, 23 May 2010 18:28:50 -0700 - -nova (0.2.0-4) UNRELEASED; urgency=low - - * install scripts - - -- Manish Singh Sun, 23 May 2010 18:16:27 -0700 - -nova (0.2.0-3) UNRELEASED; urgency=low - - * debian build goop - - -- Manish Singh Sun, 23 May 2010 18:06:37 -0700 - -nova (0.2.0-2) UNRELEASED; urgency=low - - * improved requirements - - -- Jesse Andrews Sun, 23 May 2010 17:42:00 -0700 - -nova (0.2.0-1) UNRELEASED; urgency=low - - * initial release - - -- Jesse Andrews Fri, 21 May 2010 12:28:00 -0700 - diff --git a/debian/compat b/debian/compat deleted file mode 100644 index 7f8f011eb..000000000 --- a/debian/compat +++ /dev/null @@ -1 +0,0 @@ -7 diff --git a/debian/control b/debian/control deleted file mode 100644 index a6d12f36e..000000000 --- a/debian/control +++ /dev/null @@ -1,136 +0,0 @@ -Source: nova -Section: net -Priority: extra -Maintainer: Jesse Andrews -Build-Depends: debhelper (>= 7), redis-server (>=2:2.0.0~rc1), python-m2crypto -Build-Depends-Indep: python-support, python-setuptools -Standards-Version: 3.8.4 -XS-Python-Version: 2.6 - -Package: nova-common -Architecture: all -Depends: ${python:Depends}, aoetools, vlan, python-ipy, python-boto, python-m2crypto, python-pycurl, python-twisted-core, python-daemon, python-redis, python-carrot, python-lockfile, python-gflags, python-tornado, ${misc:Depends} -Provides: ${python:Provides} -Description: Nova Cloud Computing - common files - Nova is a cloud computing fabric controller (the main part of an IaaS - system) built to match the popular AWS EC2 and S3 APIs. It is written in - Python, using the Tornado and Twisted frameworks, and relies on the - standard AMQP messaging protocol, and the Redis distributed KVS. - . - Nova is intended to be easy to extend, and adapt. For example, it - currently uses an LDAP server for users and groups, but also includes a - fake LDAP server, that stores data in Redis. It has extensive test - coverage, and uses the Sphinx toolkit (the same as Python itself) for code - and user documentation. - . - While Nova is currently in Beta use within several organizations, the - codebase is very much under active development. - . 
- This package contains things that are needed by all parts of Nova. - -Package: nova-compute -Architecture: all -Depends: nova-common (= ${binary:Version}), kpartx, kvm, python-libvirt, libvirt-bin (>= 0.7.5), curl, ${python:Depends}, ${misc:Depends} -Description: Nova Cloud Computing - compute node - Nova is a cloud computing fabric controller (the main part of an IaaS - system) built to match the popular AWS EC2 and S3 APIs. It is written in - Python, using the Tornado and Twisted frameworks, and relies on the - standard AMQP messaging protocol, and the Redis distributed KVS. - . - Nova is intended to be easy to extend, and adapt. For example, it - currently uses an LDAP server for users and groups, but also includes a - fake LDAP server, that stores data in Redis. It has extensive test - coverage, and uses the Sphinx toolkit (the same as Python itself) for code - and user documentation. - . - While Nova is currently in Beta use within several organizations, the - codebase is very much under active development. - . - This is the package you will install on the nodes that will run your - virtual machines. - -Package: nova-volume -Architecture: all -Depends: nova-common (= ${binary:Version}), vblade, vblade-persist, ${python:Depends}, ${misc:Depends} -Description: Nova Cloud Computing - storage - Nova is a cloud computing fabric controller (the main part of an IaaS - system) built to match the popular AWS EC2 and S3 APIs. It is written in - Python, using the Tornado and Twisted frameworks, and relies on the - standard AMQP messaging protocol, and the Redis distributed KVS. - . - Nova is intended to be easy to extend, and adapt. For example, it - currently uses an LDAP server for users and groups, but also includes a - fake LDAP server, that stores data in Redis. It has extensive test - coverage, and uses the Sphinx toolkit (the same as Python itself) for code - and user documentation. - . - While Nova is currently in Beta use within several organizations, the - codebase is very much under active development. - . - This is the package you will install on your storage nodes. - -Package: nova-api -Architecture: all -Depends: nova-common (= ${binary:Version}), ${python:Depends}, ${misc:Depends} -Description: Nova Cloud Computing - API frontend - Nova is a cloud computing fabric controller (the main part of an IaaS - system) built to match the popular AWS EC2 and S3 APIs. It is written in - Python, using the Tornado and Twisted frameworks, and relies on the - standard AMQP messaging protocol, and the Redis distributed KVS. - . - Nova is intended to be easy to extend, and adapt. For example, it - currently uses an LDAP server for users and groups, but also includes a - fake LDAP server, that stores data in Redis. It has extensive test - coverage, and uses the Sphinx toolkit (the same as Python itself) for code - and user documentation. - . - While Nova is currently in Beta use within several organizations, the - codebase is very much under active development. - . - This package provides the API frontend. - -Package: nova-objectstore -Architecture: all -Depends: nova-common (= ${binary:Version}), ${python:Depends}, ${misc:Depends} -Description: Nova Cloud Computing - object store - Nova is a cloud computing fabric controller (the main part of an IaaS - system) built to match the popular AWS EC2 and S3 APIs. It is written in - Python, using the Tornado and Twisted frameworks, and relies on the - standard AMQP messaging protocol, and the Redis distributed KVS. - . 
- Nova is intended to be easy to extend, and adapt. For example, it - currently uses an LDAP server for users and groups, but also includes a - fake LDAP server, that stores data in Redis. It has extensive test - coverage, and uses the Sphinx toolkit (the same as Python itself) for code - and user documentation. - . - While Nova is currently in Beta use within several organizations, the - codebase is very much under active development. - . - This is the package you will install on the nodes that will contain your - object store. - -Package: nova-instancemonitor -Architecture: all -Depends: nova-common (= ${binary:Version}), ${python:Depends}, ${misc:Depends} -Description: Nova instance monitor - -Package: nova-tools -Architecture: all -Depends: python-boto, ${python:Depends}, ${misc:Depends} -Description: Nova Cloud Computing - management tools - Nova is a cloud computing fabric controller (the main part of an IaaS - system) built to match the popular AWS EC2 and S3 APIs. It is written in - Python, using the Tornado and Twisted frameworks, and relies on the - standard AMQP messaging protocol, and the Redis distributed KVS. - . - Nova is intended to be easy to extend, and adapt. For example, it - currently uses an LDAP server for users and groups, but also includes a - fake LDAP server, that stores data in Redis. It has extensive test - coverage, and uses the Sphinx toolkit (the same as Python itself) for code - and user documentation. - . - While Nova is currently in Beta use within several organizations, the - codebase is very much under active development. - . - This package contains admin tools for Nova. diff --git a/debian/nova-api.conf b/debian/nova-api.conf deleted file mode 100644 index 3e6c056ad..000000000 --- a/debian/nova-api.conf +++ /dev/null @@ -1,5 +0,0 @@ ---daemonize=1 ---ca_path=/var/lib/nova/CA ---keys_path=/var/lib/nova/keys ---networks_path=/var/lib/nova/networks ---dhcpbridge_flagfile=/etc/nova/nova-dhcpbridge.conf diff --git a/debian/nova-api.init b/debian/nova-api.init deleted file mode 100644 index 597fbef95..000000000 --- a/debian/nova-api.init +++ /dev/null @@ -1,69 +0,0 @@ -#! /bin/sh -### BEGIN INIT INFO -# Provides: nova-api -# Required-Start: $remote_fs $syslog -# Required-Stop: $remote_fs $syslog -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Short-Description: nova-api -# Description: nova-api -### END INIT INFO - - -set -e - -DAEMON=/usr/bin/nova-api -DAEMON_ARGS="--flagfile=/etc/nova/nova-api.conf" -PIDFILE=/var/run/nova-api.pid - -ENABLED=true - -if test -f /etc/default/nova-api; then - . /etc/default/nova-api -fi - -. /lib/lsb/init-functions - -export PATH="${PATH:+$PATH:}/usr/sbin:/sbin" - -case "$1" in - start) - test "$ENABLED" = "true" || exit 0 - log_daemon_msg "Starting nova api" "nova-api" - cd /var/run - if $DAEMON $DAEMON_ARGS start; then - log_end_msg 0 - else - log_end_msg 1 - fi - ;; - stop) - test "$ENABLED" = "true" || exit 0 - log_daemon_msg "Stopping nova api" "nova-api" - cd /var/run - if $DAEMON $DAEMON_ARGS stop; then - log_end_msg 0 - else - log_end_msg 1 - fi - ;; - restart|force-reload) - test "$ENABLED" = "true" || exit 1 - cd /var/run - if $DAEMON $DAEMON_ARGS restart; then - log_end_msg 0 - else - log_end_msg 1 - fi - ;; - status) - test "$ENABLED" = "true" || exit 0 - status_of_proc -p $PIDFILE $DAEMON nova-api && exit 0 || exit $? 
- ;; - *) - log_action_msg "Usage: /etc/init.d/nova-api {start|stop|restart|force-reload|status}" - exit 1 - ;; -esac - -exit 0 diff --git a/debian/nova-api.install b/debian/nova-api.install deleted file mode 100644 index 89615d302..000000000 --- a/debian/nova-api.install +++ /dev/null @@ -1,3 +0,0 @@ -bin/nova-api usr/bin -debian/nova-api.conf etc/nova -debian/nova-dhcpbridge.conf etc/nova diff --git a/debian/nova-common.dirs b/debian/nova-common.dirs deleted file mode 100644 index b58fe8b7f..000000000 --- a/debian/nova-common.dirs +++ /dev/null @@ -1,11 +0,0 @@ -etc/nova -var/lib/nova/buckets -var/lib/nova/CA -var/lib/nova/CA/INTER -var/lib/nova/CA/newcerts -var/lib/nova/CA/private -var/lib/nova/CA/reqs -var/lib/nova/images -var/lib/nova/instances -var/lib/nova/keys -var/lib/nova/networks diff --git a/debian/nova-common.install b/debian/nova-common.install deleted file mode 100644 index 93251363a..000000000 --- a/debian/nova-common.install +++ /dev/null @@ -1,9 +0,0 @@ -bin/nova-manage usr/bin -debian/nova-manage.conf etc/nova -nova/auth/novarc.template usr/share/nova -nova/cloudpipe/client.ovpn.template usr/share/nova -nova/compute/libvirt.xml.template usr/share/nova -nova/compute/interfaces.template usr/share/nova -CA/openssl.cnf.tmpl var/lib/nova/CA -CA/geninter.sh var/lib/nova/CA -CA/genrootca.sh var/lib/nova/CA diff --git a/debian/nova-compute.conf b/debian/nova-compute.conf deleted file mode 100644 index 11de13ff6..000000000 --- a/debian/nova-compute.conf +++ /dev/null @@ -1,7 +0,0 @@ ---ca_path=/var/lib/nova/CA ---keys_path=/var/lib/nova/keys ---instances_path=/var/lib/nova/instances ---simple_network_template=/usr/share/nova/interfaces.template ---libvirt_xml_template=/usr/share/nova/libvirt.xml.template ---vpn_client_template=/usr/share/nova/client.ovpn.template ---credentials_template=/usr/share/nova/novarc.template diff --git a/debian/nova-compute.init b/debian/nova-compute.init deleted file mode 100644 index d0f093a7a..000000000 --- a/debian/nova-compute.init +++ /dev/null @@ -1,69 +0,0 @@ -#! /bin/sh -### BEGIN INIT INFO -# Provides: nova-compute -# Required-Start: $remote_fs $syslog -# Required-Stop: $remote_fs $syslog -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Short-Description: nova-compute -# Description: nova-compute -### END INIT INFO - - -set -e - -DAEMON=/usr/bin/nova-compute -DAEMON_ARGS="--flagfile=/etc/nova/nova-compute.conf" -PIDFILE=/var/run/nova-compute.pid - -ENABLED=true - -if test -f /etc/default/nova-compute; then - . /etc/default/nova-compute -fi - -. /lib/lsb/init-functions - -export PATH="${PATH:+$PATH:}/usr/sbin:/sbin" - -case "$1" in - start) - test "$ENABLED" = "true" || exit 0 - log_daemon_msg "Starting nova compute" "nova-compute" - cd /var/run - if $DAEMON $DAEMON_ARGS start; then - log_end_msg 0 - else - log_end_msg 1 - fi - ;; - stop) - test "$ENABLED" = "true" || exit 0 - log_daemon_msg "Stopping nova compute" "nova-compute" - cd /var/run - if $DAEMON $DAEMON_ARGS stop; then - log_end_msg 0 - else - log_end_msg 1 - fi - ;; - restart|force-reload) - test "$ENABLED" = "true" || exit 1 - cd /var/run - if $DAEMON $DAEMON_ARGS restart; then - log_end_msg 0 - else - log_end_msg 1 - fi - ;; - status) - test "$ENABLED" = "true" || exit 0 - status_of_proc -p $PIDFILE $DAEMON nova-compute && exit 0 || exit $? 
- ;; - *) - log_action_msg "Usage: /etc/init.d/nova-compute {start|stop|restart|force-reload|status}" - exit 1 - ;; -esac - -exit 0 diff --git a/debian/nova-compute.install b/debian/nova-compute.install deleted file mode 100644 index 5f9df46a8..000000000 --- a/debian/nova-compute.install +++ /dev/null @@ -1,2 +0,0 @@ -bin/nova-compute usr/bin -debian/nova-compute.conf etc/nova diff --git a/debian/nova-dhcpbridge.conf b/debian/nova-dhcpbridge.conf deleted file mode 100644 index 68cb8903e..000000000 --- a/debian/nova-dhcpbridge.conf +++ /dev/null @@ -1 +0,0 @@ ---networks_path=/var/lib/nova/networks diff --git a/debian/nova-instancemonitor.init b/debian/nova-instancemonitor.init deleted file mode 100644 index 2865fc334..000000000 --- a/debian/nova-instancemonitor.init +++ /dev/null @@ -1,69 +0,0 @@ -#! /bin/sh -### BEGIN INIT INFO -# Provides: nova-instancemonitor -# Required-Start: $remote_fs $syslog -# Required-Stop: $remote_fs $syslog -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Short-Description: nova-instancemonitor -# Description: nova-instancemonitor -### END INIT INFO - - -set -e - -DAEMON=/usr/bin/nova-instancemonitor -DAEMON_ARGS="--flagfile=/etc/nova.conf" -PIDFILE=/var/run/nova-instancemonitor.pid - -ENABLED=false - -if test -f /etc/default/nova-instancemonitor; then - . /etc/default/nova-instancemonitor -fi - -. /lib/lsb/init-functions - -export PATH="${PATH:+$PATH:}/usr/sbin:/sbin" - -case "$1" in - start) - test "$ENABLED" = "true" || exit 0 - log_daemon_msg "Starting nova compute" "nova-instancemonitor" - cd /var/run - if $DAEMON $DAEMON_ARGS start; then - log_end_msg 0 - else - log_end_msg 1 - fi - ;; - stop) - test "$ENABLED" = "true" || exit 0 - log_daemon_msg "Stopping nova compute" "nova-instancemonitor" - cd /var/run - if $DAEMON $DAEMON_ARGS stop; then - log_end_msg 0 - else - log_end_msg 1 - fi - ;; - restart|force-reload) - test "$ENABLED" = "true" || exit 1 - cd /var/run - if $DAEMON $DAEMON_ARGS restart; then - log_end_msg 0 - else - log_end_msg 1 - fi - ;; - status) - test "$ENABLED" = "true" || exit 0 - status_of_proc -p $PIDFILE $DAEMON nova-instancemonitor && exit 0 || exit $? - ;; - *) - log_action_msg "Usage: /etc/init.d/nova-instancemonitor {start|stop|restart|force-reload|status}" - exit 1 - ;; -esac - -exit 0 diff --git a/debian/nova-instancemonitor.install b/debian/nova-instancemonitor.install deleted file mode 100644 index 48e7884b4..000000000 --- a/debian/nova-instancemonitor.install +++ /dev/null @@ -1 +0,0 @@ -bin/nova-instancemonitor usr/bin diff --git a/debian/nova-manage.conf b/debian/nova-manage.conf deleted file mode 100644 index 5ccda7ecf..000000000 --- a/debian/nova-manage.conf +++ /dev/null @@ -1,4 +0,0 @@ ---ca_path=/var/lib/nova/CA ---credentials_template=/usr/share/nova/novarc.template ---keys_path=/var/lib/nova/keys ---vpn_client_template=/usr/share/nova/client.ovpn.template diff --git a/debian/nova-objectstore.conf b/debian/nova-objectstore.conf deleted file mode 100644 index 8eca39715..000000000 --- a/debian/nova-objectstore.conf +++ /dev/null @@ -1,5 +0,0 @@ ---daemonize=1 ---ca_path=/var/lib/nova/CA ---keys_path=/var/lib/nova/keys ---images_path=/var/lib/nova/images ---buckets_path=/var/lib/nova/buckets diff --git a/debian/nova-objectstore.init b/debian/nova-objectstore.init deleted file mode 100644 index 9676345ad..000000000 --- a/debian/nova-objectstore.init +++ /dev/null @@ -1,69 +0,0 @@ -#! 
/bin/sh -### BEGIN INIT INFO -# Provides: nova-objectstore -# Required-Start: $remote_fs $syslog -# Required-Stop: $remote_fs $syslog -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Short-Description: nova-objectstore -# Description: nova-objectstore -### END INIT INFO - - -set -e - -DAEMON=/usr/bin/nova-objectstore -DAEMON_ARGS="--flagfile=/etc/nova/nova-objectstore.conf" -PIDFILE=/var/run/nova-objectstore.pid - -ENABLED=true - -if test -f /etc/default/nova-objectstore; then - . /etc/default/nova-objectstore -fi - -. /lib/lsb/init-functions - -export PATH="${PATH:+$PATH:}/usr/sbin:/sbin" - -case "$1" in - start) - test "$ENABLED" = "true" || exit 0 - log_daemon_msg "Starting nova objectstore" "nova-objectstore" - cd /var/run - if $DAEMON $DAEMON_ARGS start; then - log_end_msg 0 - else - log_end_msg 1 - fi - ;; - stop) - test "$ENABLED" = "true" || exit 0 - log_daemon_msg "Stopping nova objectstore" "nova-objectstore" - cd /var/run - if $DAEMON $DAEMON_ARGS stop; then - log_end_msg 0 - else - log_end_msg 1 - fi - ;; - restart|force-reload) - test "$ENABLED" = "true" || exit 1 - cd /var/run - if $DAEMON $DAEMON_ARGS restart; then - log_end_msg 0 - else - log_end_msg 1 - fi - ;; - status) - test "$ENABLED" = "true" || exit 0 - status_of_proc -p $PIDFILE $DAEMON nova-objectstore && exit 0 || exit $? - ;; - *) - log_action_msg "Usage: /etc/init.d/nova-objectstore {start|stop|restart|force-reload|status}" - exit 1 - ;; -esac - -exit 0 diff --git a/debian/nova-objectstore.install b/debian/nova-objectstore.install deleted file mode 100644 index c5b3d997a..000000000 --- a/debian/nova-objectstore.install +++ /dev/null @@ -1,2 +0,0 @@ -bin/nova-objectstore usr/bin -debian/nova-objectstore.conf etc/nova diff --git a/debian/nova-volume.conf b/debian/nova-volume.conf deleted file mode 100644 index 57e3411a0..000000000 --- a/debian/nova-volume.conf +++ /dev/null @@ -1,4 +0,0 @@ ---ca_path=/var/lib/nova/CA ---keys_path=/var/lib/nova/keys ---images_path=/var/lib/nova/images ---buckets_path=/var/lib/nova/buckets diff --git a/debian/nova-volume.init b/debian/nova-volume.init deleted file mode 100644 index d5c2dddf8..000000000 --- a/debian/nova-volume.init +++ /dev/null @@ -1,69 +0,0 @@ -#! /bin/sh -### BEGIN INIT INFO -# Provides: nova-volume -# Required-Start: $remote_fs $syslog -# Required-Stop: $remote_fs $syslog -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Short-Description: nova-volume -# Description: nova-volume -### END INIT INFO - - -set -e - -DAEMON=/usr/bin/nova-volume -DAEMON_ARGS="--flagfile=/etc/nova/nova-volume.conf" -PIDFILE=/var/run/nova-volume.pid - -ENABLED=true - -if test -f /etc/default/nova-volume; then - . /etc/default/nova-volume -fi - -. /lib/lsb/init-functions - -export PATH="${PATH:+$PATH:}/usr/sbin:/sbin" - -case "$1" in - start) - test "$ENABLED" = "true" || exit 0 - log_daemon_msg "Starting nova volume" "nova-volume" - cd /var/run - if $DAEMON $DAEMON_ARGS start; then - log_end_msg 0 - else - log_end_msg 1 - fi - ;; - stop) - test "$ENABLED" = "true" || exit 0 - log_daemon_msg "Stopping nova volume" "nova-volume" - cd /var/run - if $DAEMON $DAEMON_ARGS stop; then - log_end_msg 0 - else - log_end_msg 1 - fi - ;; - restart|force-reload) - test "$ENABLED" = "true" || exit 1 - cd /var/run - if $DAEMON $DAEMON_ARGS restart; then - log_end_msg 0 - else - log_end_msg 1 - fi - ;; - status) - test "$ENABLED" = "true" || exit 0 - status_of_proc -p $PIDFILE $DAEMON nova-volume && exit 0 || exit $? 
- ;; - *) - log_action_msg "Usage: /etc/init.d/nova-volume {start|stop|restart|force-reload|status}" - exit 1 - ;; -esac - -exit 0 diff --git a/debian/nova-volume.install b/debian/nova-volume.install deleted file mode 100644 index 9a840c78e..000000000 --- a/debian/nova-volume.install +++ /dev/null @@ -1,2 +0,0 @@ -bin/nova-volume usr/bin -debian/nova-volume.conf etc/nova diff --git a/debian/pycompat b/debian/pycompat deleted file mode 100644 index 0cfbf0888..000000000 --- a/debian/pycompat +++ /dev/null @@ -1 +0,0 @@ -2 diff --git a/debian/pyversions b/debian/pyversions deleted file mode 100644 index 0c043f18c..000000000 --- a/debian/pyversions +++ /dev/null @@ -1 +0,0 @@ -2.6- diff --git a/debian/rules b/debian/rules deleted file mode 100755 index 2d33f6ac8..000000000 --- a/debian/rules +++ /dev/null @@ -1,4 +0,0 @@ -#!/usr/bin/make -f - -%: - dh $@ -- cgit From 9718f5216ce9423c002c47f86e05b2b2eb08e551 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 27 Jul 2010 14:16:49 -0700 Subject: properly delete old vlans assigned to deleted projects --- nova/compute/network.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/compute/network.py b/nova/compute/network.py index b5b3c3b5d..62d892e58 100644 --- a/nova/compute/network.py +++ b/nova/compute/network.py @@ -144,7 +144,7 @@ class Vlan(datastore.BasicModel): @datastore.absorb_connection_error def destroy(self): set_name = self._redis_set_name(self.__class__.__name__) - datastore.Redis.instance().hdel(set_name, self.project) + datastore.Redis.instance().hdel(set_name, self.project_id) def subnet(self): vlan = int(self.vlan_id) @@ -529,6 +529,7 @@ def get_vlan_for_project(project_id): # don't orphan any VLANs. It is basically # garbage collection for after projects abandoned # their reference. + vlan.destroy() vlan.project_id = project_id vlan.save() return vlan -- cgit From 0cdc13f0f0bcdcd085d58a78b7aa7dbb856cdc79 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Tue, 27 Jul 2010 23:56:24 +0200 Subject: Add a 'sdist' make target. It first generates a MANIFEST.in based on what's in bzr, then calls python setup.py sdist. --- Makefile | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/Makefile b/Makefile index cd7e233e1..847da779f 100644 --- a/Makefile +++ b/Makefile @@ -24,8 +24,17 @@ clean: clean-all: clean rm -rf $(venv) +MANIFEST.in: + [ -d .bzr ] || (echo "Must be a bzr checkout" ; exit 1) + bzr ls --kind=file -VR | while read f; do echo include "$$f"; done > $@ + +sdist: MANIFEST.in + python setup.py sdist + $(venv): @echo "You need to install the Nova virtualenv before you can run this." @echo "" @echo "Please run tools/install_venv.py" @exit 1 + +.PHONY: MANIFEST.in -- cgit From 40dfe6316fae4b14f9fa694653341349a86d55ab Mon Sep 17 00:00:00 2001 From: Devin Carlen Date: Wed, 28 Jul 2010 00:28:56 +0000 Subject: Wired up user:project auth calls --- nova/adminclient.py | 5 +++-- nova/auth/manager.py | 1 + nova/endpoint/admin.py | 13 ++++--------- 3 files changed, 8 insertions(+), 11 deletions(-) diff --git a/nova/adminclient.py b/nova/adminclient.py index 9b9505ac1..e81e0470f 100644 --- a/nova/adminclient.py +++ b/nova/adminclient.py @@ -122,13 +122,14 @@ class NovaAdminClient(object): **kwargs) self.apiconn.APIVersion = 'nova' - def connection_for(self, username, **kwargs): + def connection_for(self, username, project, **kwargs): """ Returns a boto ec2 connection for the given username. 
""" user = self.get_user(username) + access_key = '%s:%s' % (user.accesskey, project) return boto.connect_ec2( - aws_access_key_id=user.accesskey, + aws_access_key_id=access_key, aws_secret_access_key=user.secretkey, is_secure=False, region=RegionInfo(None, self.region, self.clc_ip), diff --git a/nova/auth/manager.py b/nova/auth/manager.py index bc373fd26..8c8c7377c 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -532,6 +532,7 @@ class AuthManager(object): def get_projects(self): """Retrieves list of all projects""" + # TODO(devcamcar): Implement filter by user. with self.driver() as drv: project_list = drv.get_projects() if not project_list: diff --git a/nova/endpoint/admin.py b/nova/endpoint/admin.py index e3762e2af..e1e06e944 100644 --- a/nova/endpoint/admin.py +++ b/nova/endpoint/admin.py @@ -106,10 +106,7 @@ class AdminController(object): @admin_only def modify_user_role(self, context, user, role, project=None, operation='add', **kwargs): - """ - Add or remove a role for a user and project. - """ - + """Add or remove a role for a user and project.""" if operation == 'add': manager.AuthManager().add_role(user, role, project) elif operation == 'remove': @@ -137,8 +134,9 @@ class AdminController(object): return project_dict(manager.AuthManager().get_project(name)) @admin_only - def describe_projects(self, context, **kwargs): + def describe_projects(self, context, user=None, **kwargs): """Returns all projects - should be changed to deal with a list.""" + # TODO(devcamcar): Implement filter by user. return {'projectSet': [project_dict(u) for u in manager.AuthManager().get_projects()]} @@ -164,10 +162,7 @@ class AdminController(object): @admin_only def modify_project_user(self, context, user, project, operation, **kwargs): - """ - Add or remove a user from a project. - """ - + """Add or remove a user from a project.""" if operation =='add': manager.AuthManager().add_to_project(user, project) elif operation == 'remove': -- cgit From c13b2fedb3cb6260fe132677a012a913c7249458 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 27 Jul 2010 19:51:07 -0700 Subject: fixed typo from auth refactor --- nova/endpoint/cloud.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 76ca35320..0940c5d8a 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -49,8 +49,8 @@ flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') def _gen_key(user_id, key_name): """ Tuck this into AuthManager """ try: - manager = manager.AuthManager() - private_key, fingerprint = manager.generate_key_pair(user_id, key_name) + mgr = manager.AuthManager() + private_key, fingerprint = mgr.generate_key_pair(user_id, key_name) except Exception as ex: return {'exception': ex} return {'private_key': private_key, 'fingerprint': fingerprint} -- cgit From 849a4062cdf8af50b8c3d44611f10857fedf6813 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Tue, 27 Jul 2010 21:35:55 -0700 Subject: Changed Makefile to shell script. The Makefile approach completely broke debhelper's ability to figure out that this was a python package. 
--- Makefile | 40 ---------------------------------------- run_tests.sh | 13 +++++++++++++ 2 files changed, 13 insertions(+), 40 deletions(-) delete mode 100644 Makefile create mode 100644 run_tests.sh diff --git a/Makefile b/Makefile deleted file mode 100644 index 847da779f..000000000 --- a/Makefile +++ /dev/null @@ -1,40 +0,0 @@ -venv=.nova-venv -with_venv=tools/with_venv.sh - -build: - # Nothing to do - -default_test_type:= $(shell if [ -e $(venv) ]; then echo venv; else echo system; fi) - -test: test-$(default_test_type) - -test-venv: $(venv) - $(with_venv) python run_tests.py - -test-system: - python run_tests.py - -clean: - rm -rf _trial_temp - rm -rf keys - rm -rf instances - rm -rf networks - rm -f run_tests.err.log - -clean-all: clean - rm -rf $(venv) - -MANIFEST.in: - [ -d .bzr ] || (echo "Must be a bzr checkout" ; exit 1) - bzr ls --kind=file -VR | while read f; do echo include "$$f"; done > $@ - -sdist: MANIFEST.in - python setup.py sdist - -$(venv): - @echo "You need to install the Nova virtualenv before you can run this." - @echo "" - @echo "Please run tools/install_venv.py" - @exit 1 - -.PHONY: MANIFEST.in diff --git a/run_tests.sh b/run_tests.sh new file mode 100644 index 000000000..1bf3d1a79 --- /dev/null +++ b/run_tests.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +venv=.nova-venv +with_venv=tools/with_venv.sh + +if [ -e ${venv} ]; then + ${with_venv} python run_tests.py +else + echo "You need to install the Nova virtualenv before you can run this." + echo "" + echo "Please run tools/install_venv.py" + exit 1 +fi -- cgit From 26113f6b1379aa81941169b858aee37493bad63a Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Tue, 27 Jul 2010 21:39:23 -0700 Subject: Put in a single MANIFEST.in file that takes care of things. --- MANIFEST.in | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 MANIFEST.in diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 000000000..6482bd7ea --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,5 @@ +include HACKING LICENSE run_tests.sh run_test.py README builddeb.sh exercise_rsapi.py +graft CA +graft doc +graft smoketests +graft tools -- cgit From 464311c787d3d3176a89ec44791a03034ccb2851 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Tue, 27 Jul 2010 21:39:58 -0700 Subject: Made run_tests.sh executable. --- run_tests.sh | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 run_tests.sh diff --git a/run_tests.sh b/run_tests.sh old mode 100644 new mode 100755 -- cgit From 9587bd8ce817d71a8581ac16d0820714fbb10d02 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Tue, 27 Jul 2010 21:40:06 -0700 Subject: Removed gitignore files. 
--- .gitignore | 12 ------------ CA/.gitignore | 11 ----------- CA/INTER/.gitignore | 1 - CA/reqs/.gitignore | 1 - 4 files changed, 25 deletions(-) delete mode 100644 .gitignore delete mode 100644 CA/.gitignore delete mode 100644 CA/INTER/.gitignore delete mode 100644 CA/reqs/.gitignore diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 2afc7a32c..000000000 --- a/.gitignore +++ /dev/null @@ -1,12 +0,0 @@ -*.pyc -*.DS_Store -local_settings.py -CA/index.txt -CA/serial -keeper -instances -keys -build/* -build-stamp -nova.egg-info -.nova-venv diff --git a/CA/.gitignore b/CA/.gitignore deleted file mode 100644 index fae0922bf..000000000 --- a/CA/.gitignore +++ /dev/null @@ -1,11 +0,0 @@ -index.txt -index.txt.old -index.txt.attr -index.txt.attr.old -cacert.pem -serial -serial.old -openssl.cnf -private/* -newcerts/* - diff --git a/CA/INTER/.gitignore b/CA/INTER/.gitignore deleted file mode 100644 index 72e8ffc0d..000000000 --- a/CA/INTER/.gitignore +++ /dev/null @@ -1 +0,0 @@ -* diff --git a/CA/reqs/.gitignore b/CA/reqs/.gitignore deleted file mode 100644 index 72e8ffc0d..000000000 --- a/CA/reqs/.gitignore +++ /dev/null @@ -1 +0,0 @@ -* -- cgit From ea89153b930eed70c7586eae56636b648e4e7252 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Tue, 27 Jul 2010 21:41:07 -0700 Subject: Updated setup.py file to install stuff on a python setup.py install command. --- setup.cfg | 10 ++++++++-- setup.py | 19 ++++++++++++------- 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/setup.cfg b/setup.cfg index 839472544..14dcb5c8e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,4 +1,10 @@ [build_sphinx] +all_files = 1 +build-dir = doc/build source-dir = doc/source -build-dir = doc/build -all_files = 1 + +[egg_info] +tag_build = +tag_date = 0 +tag_svn_revision = 0 + diff --git a/setup.py b/setup.py index f9a616335..127d014b1 100644 --- a/setup.py +++ b/setup.py @@ -16,19 +16,24 @@ # License for the specific language governing permissions and limitations # under the License. -import glob -import os -import sys - from setuptools import setup, find_packages -srcdir = os.path.join(os.path.dirname(sys.argv[0]), 'src') - setup(name='nova', version='0.9.0', description='cloud computing fabric controller', author='OpenStack', author_email='nova@lists.launchpad.net', url='http://www.openstack.org/', - packages = find_packages(), + packages = find_packages(exclude=['bin','smoketests']), + scripts=['bin/nova-api', + 'bin/nova-compute', + 'bin/nova-dhcpbridge', + 'bin/nova-import-canonical-imagestore', + 'bin/nova-instancemonitor', + 'bin/nova-manage', + 'bin/nova-network', + 'bin/nova-objectstore', + 'bin/nova-rsapi', + 'bin/nova-volume', + ] ) -- cgit From 67711b8aa4ed0ec80d407fecea5b4bf5ffc22322 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Tue, 27 Jul 2010 21:52:01 -0700 Subject: Added a few more missing files to MANIFEST.in and added some placeholder files so that setup.py would carry the empty dir. 
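
A note on the placeholder trick in the diff below: setuptools' sdist only ships files it can list through MANIFEST.in, so a directory with nothing in it simply disappears from the tarball, and dropping a zero-byte .placeholder file into each one keeps it alive. The following sketch only illustrates what the commit does by hand (the directory list is taken from the diff; nothing else here comes from the patch):

    import os

    # Zero-byte markers so otherwise-empty directories survive "setup.py sdist".
    for d in ('CA/INTER', 'CA/reqs', 'doc/build',
              'doc/source/_static', 'doc/source/_templates'):
        if not os.path.isdir(d):
            os.makedirs(d)
        open(os.path.join(d, '.placeholder'), 'w').close()
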
--- CA/INTER/.placeholder | 0 CA/reqs/.placeholder | 0 MANIFEST.in | 19 ++++++++++++++++++- doc/build/.placeholder | 0 doc/source/_static/.placeholder | 0 doc/source/_templates/.placeholder | 0 6 files changed, 18 insertions(+), 1 deletion(-) create mode 100644 CA/INTER/.placeholder create mode 100644 CA/reqs/.placeholder create mode 100644 doc/build/.placeholder create mode 100644 doc/source/_static/.placeholder create mode 100644 doc/source/_templates/.placeholder diff --git a/CA/INTER/.placeholder b/CA/INTER/.placeholder new file mode 100644 index 000000000..e69de29bb diff --git a/CA/reqs/.placeholder b/CA/reqs/.placeholder new file mode 100644 index 000000000..e69de29bb diff --git a/MANIFEST.in b/MANIFEST.in index 6482bd7ea..36482be88 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,5 +1,22 @@ -include HACKING LICENSE run_tests.sh run_test.py README builddeb.sh exercise_rsapi.py +include HACKING LICENSE run_tests.py run_tests.sh +include README builddeb.sh exercise_rsapi.py graft CA graft doc graft smoketests graft tools +include nova/auth/novarc.template +include nova/auth/slap.sh +include nova/cloudpipe/bootscript.sh +include nova/cloudpipe/client.ovpn.template +include nova/compute/fakevirtinstance.xml +include nova/compute/interfaces.template +include nova/compute/libvirt.xml.template +include nova/tests/CA/ +include nova/tests/CA/cacert.pem +include nova/tests/CA/private/ +include nova/tests/CA/private/cakey.pem +include nova/tests/bundle/ +include nova/tests/bundle/1mb.manifest.xml +include nova/tests/bundle/1mb.part.0 +include nova/tests/bundle/1mb.part.1 +include diff --git a/doc/build/.placeholder b/doc/build/.placeholder new file mode 100644 index 000000000..e69de29bb diff --git a/doc/source/_static/.placeholder b/doc/source/_static/.placeholder new file mode 100644 index 000000000..e69de29bb diff --git a/doc/source/_templates/.placeholder b/doc/source/_templates/.placeholder new file mode 100644 index 000000000..e69de29bb -- cgit From 25de868554bbf1a9c6e5f9ed295bef6c37194352 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Tue, 27 Jul 2010 23:18:27 -0700 Subject: Added the gitignore files back in for the folks who are still on the git. --- .gitignore | 12 ++++++++++++ CA/.gitignore | 11 +++++++++++ CA/INTER/.gitignore | 1 + CA/reqs/.gitignore | 1 + 4 files changed, 25 insertions(+) create mode 100644 .gitignore create mode 100644 CA/.gitignore create mode 100644 CA/INTER/.gitignore create mode 100644 CA/reqs/.gitignore diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..2afc7a32c --- /dev/null +++ b/.gitignore @@ -0,0 +1,12 @@ +*.pyc +*.DS_Store +local_settings.py +CA/index.txt +CA/serial +keeper +instances +keys +build/* +build-stamp +nova.egg-info +.nova-venv diff --git a/CA/.gitignore b/CA/.gitignore new file mode 100644 index 000000000..fae0922bf --- /dev/null +++ b/CA/.gitignore @@ -0,0 +1,11 @@ +index.txt +index.txt.old +index.txt.attr +index.txt.attr.old +cacert.pem +serial +serial.old +openssl.cnf +private/* +newcerts/* + diff --git a/CA/INTER/.gitignore b/CA/INTER/.gitignore new file mode 100644 index 000000000..72e8ffc0d --- /dev/null +++ b/CA/INTER/.gitignore @@ -0,0 +1 @@ +* diff --git a/CA/reqs/.gitignore b/CA/reqs/.gitignore new file mode 100644 index 000000000..72e8ffc0d --- /dev/null +++ b/CA/reqs/.gitignore @@ -0,0 +1 @@ +* -- cgit From 90ffbc240ffc68154816d2237dc04ea33f5066cb Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Tue, 27 Jul 2010 23:23:23 -0700 Subject: Removed extra include. 
--- MANIFEST.in | 1 - 1 file changed, 1 deletion(-) diff --git a/MANIFEST.in b/MANIFEST.in index 36482be88..e917077c5 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -19,4 +19,3 @@ include nova/tests/bundle/ include nova/tests/bundle/1mb.manifest.xml include nova/tests/bundle/1mb.part.0 include nova/tests/bundle/1mb.part.1 -include -- cgit From 0465cd87fd767fbf421e77bdabb023c980242adb Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 28 Jul 2010 00:18:20 -0700 Subject: import ldapdriver for flags --- nova/auth/manager.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nova/auth/manager.py b/nova/auth/manager.py index b3b5d14ca..66027f6c2 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -35,7 +35,9 @@ from nova import exception from nova import flags from nova import objectstore # for flags from nova import utils +from nova.auth import ldapdriver # for flags from nova.auth import signer + FLAGS = flags.FLAGS # NOTE(vish): a user with one of these roles will be a superuser and -- cgit From a46e9aecb9a0de4d06dc00979795b33f788959fa Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 28 Jul 2010 01:12:14 -0700 Subject: use default kernel and ramdisk and check for legal access --- nova/endpoint/cloud.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 76ca35320..e7c4542f8 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -515,8 +515,12 @@ class CloudController(object): # get defaults from imagestore image_id = image['imageId'] - kernel_id = image.get('kernelId', None) - ramdisk_id = image.get('ramdiskId', None) + kernel_id = image.get('kernelId', FLAGS.default_kernel) + ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk) + + # make sure we have access to kernel and ramdisk + self._get_image(context, kernel_id) + self._get_image(context, ramdisk_id) # API parameters overrides of defaults kernel_id = kwargs.get('kernel_id', kernel_id) -- cgit From 2c6bf00a35c45cb63a0c8688bbf2504dd8610bb1 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 28 Jul 2010 02:21:11 -0700 Subject: Fix dnsmasq commands to pass in environment properly --- nova/compute/linux_net.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/nova/compute/linux_net.py b/nova/compute/linux_net.py index 861ce779b..4a4b4c8a8 100644 --- a/nova/compute/linux_net.py +++ b/nova/compute/linux_net.py @@ -94,7 +94,7 @@ def bridge_create(net): execute("sudo ifconfig %s up" % net['bridge_name']) def dnsmasq_cmd(net): - cmd = ['sudo dnsmasq', + cmd = ['sudo -E dnsmasq', ' --strict-order', ' --bind-interfaces', ' --conf-file=', @@ -143,8 +143,9 @@ def start_dnsmasq(network): if os.path.exists(lease_file): os.unlink(lease_file) - # FLAGFILE in env - env = {'FLAGFILE' : FLAGS.dhcpbridge_flagfile} + # FLAGFILE and DNSMASQ_INTERFACE in env + env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile, + 'DNSMASQ_INTERFACE': network['bridge_name']} execute(dnsmasq_cmd(network), addl_env=env) def stop_dnsmasq(network): -- cgit From 8901564edfaca072d6b9b2cf3f146aac1821017b Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Wed, 28 Jul 2010 10:09:42 -0400 Subject: Silence logs when associated models aren't found. Also document methods used ofr associating things. And get rid of some duplicated code. 
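
For readers skimming the datastore diff below, the association scheme it documents comes down to a pair of Redis sets per object. Here is a minimal sketch using the plain redis-py client; the instance/volume pairing and the ids are invented for illustration, the key names follow the foreign_type:foreign_id:this_types pattern described in the new docstrings, and nova itself goes through datastore.Redis.instance() rather than a bare client:

    import redis

    r = redis.Redis()
    # BasicModel.add_to_index(): every volume id is tracked in one global set.
    r.sadd('volumes', 'vol-0000001')
    # BasicModel.associate_with('instance', 'i-0000001'): the id also lands in
    # a per-owner set named foreign_type:foreign_id:this_types.
    r.sadd('instance:i-0000001:volumes', 'vol-0000001')
    # unassociate_with() later removes it from that same set.
    r.srem('instance:i-0000001:volumes', 'vol-0000001')
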
--- nova/datastore.py | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/nova/datastore.py b/nova/datastore.py index e57177e04..660ad9d90 100644 --- a/nova/datastore.py +++ b/nova/datastore.py @@ -184,21 +184,19 @@ class BasicModel(object): @absorb_connection_error def add_to_index(self): + """Each insance of Foo has its id tracked int the set named Foos""" set_name = self.__class__._redis_set_name(self.__class__.__name__) Redis.instance().sadd(set_name, self.identifier) @absorb_connection_error def remove_from_index(self): - set_name = self.__class__._redis_set_name(self.__class__.__name__) - Redis.instance().srem(set_name, self.identifier) - - @absorb_connection_error - def remove_from_index(self): + """Remove id of this instance from the set tracking ids of this type""" set_name = self.__class__._redis_set_name(self.__class__.__name__) Redis.instance().srem(set_name, self.identifier) @absorb_connection_error def associate_with(self, foreign_type, foreign_id): + """Add this class id into the set foreign_type:foreign_id:this_types""" # note the extra 's' on the end is for plurality # to match the old data without requiring a migration of any sort self.add_associated_model_to_its_set(foreign_type, foreign_id) @@ -208,21 +206,24 @@ class BasicModel(object): @absorb_connection_error def unassociate_with(self, foreign_type, foreign_id): + """Delete from foreign_type:foreign_id:this_types set""" redis_set = self.__class__._redis_association_name(foreign_type, foreign_id) Redis.instance().srem(redis_set, self.identifier) - def add_associated_model_to_its_set(self, my_type, my_id): + def add_associated_model_to_its_set(self, model_type, model_id): + """ + When associating an X to a Y, save Y for newer timestamp, etc, and to + make sure to save it if Y is a new record. + If the model_type isn't found as a usable class, ignore it, this can + happen when associating to things stored in LDAP (user, project, ...). + """ table = globals() - klsname = my_type.capitalize() + klsname = model_type.capitalize() if table.has_key(klsname): - my_class = table[klsname] - my_inst = my_class(my_id) - my_inst.save() - else: - logging.warning("no model class for %s when building" - " association from %s", - klsname, self) + model_class = table[klsname] + model_inst = model_class(model_id) + model_inst.save() @absorb_connection_error def save(self): -- cgit From 10f32554c93020dc409a326b4a4f1d4ea5f672c7 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Wed, 28 Jul 2010 08:21:57 -0700 Subject: Tagged 0.9.0 and bumped the version to 0.9.1 --- doc/source/conf.py | 4 ++-- setup.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 1c1ae7f48..349d23af2 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -47,9 +47,9 @@ copyright = u'2010, United States Government as represented by the Administrator # built documents. # # The short X.Y version. -version = '0.42' +version = '0.9' # The full version, including alpha/beta/rc tags. -release = '0.42' +release = '0.9.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/setup.py b/setup.py index 127d014b1..50d5f2a3d 100644 --- a/setup.py +++ b/setup.py @@ -19,7 +19,7 @@ from setuptools import setup, find_packages setup(name='nova', - version='0.9.0', + version='0.9.1', description='cloud computing fabric controller', author='OpenStack', author_email='nova@lists.launchpad.net', -- cgit From 2ecdf8cdd81df36ef34fc7522aae3f5f1b217d81 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 28 Jul 2010 10:47:53 -0700 Subject: Specify a filter by user for get projects --- nova/auth/ldapdriver.py | 7 +++++-- nova/auth/manager.py | 6 +++--- nova/tests/auth_unittest.py | 6 ++++++ 3 files changed, 14 insertions(+), 5 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 1591c88e9..055e8332b 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -120,10 +120,13 @@ class LdapDriver(object): '(objectclass=novaKeyPair)') return [self.__to_key_pair(uid, attr) for attr in attrs] - def get_projects(self): + def get_projects(self, uid=None): """Retrieve list of projects""" + filter = '(objectclass=novaProject)' + if uid: + filter = "(&%s(member=%s))" % (filter, self.__uid_to_dn(uid)) attrs = self.__find_objects(FLAGS.ldap_project_subtree, - '(objectclass=novaProject)') + filter) return [self.__to_project(attr) for attr in attrs] def create_user(self, name, access_key, secret_key, is_admin): diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 66027f6c2..7307f673b 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -547,10 +547,10 @@ class AuthManager(object): if project_dict: return Project(**project_dict) - def get_projects(self): - """Retrieves list of all projects""" + def get_projects(self, user=None): + """Retrieves list of projects, optionally filtered by user""" with self.driver() as drv: - project_list = drv.get_projects() + project_list = drv.get_projects(User.safe_id(user)) if not project_list: return [] return [Project(**project_dict) for project_dict in project_list] diff --git a/nova/tests/auth_unittest.py b/nova/tests/auth_unittest.py index 0cd377b70..6f35bab4e 100644 --- a/nova/tests/auth_unittest.py +++ b/nova/tests/auth_unittest.py @@ -193,9 +193,15 @@ class AuthTestCase(test.BaseTestCase): for vpn in vpns: vpn.destroy() + def test_214_can_retrieve_project_by_user(self): + project = self.manager.create_project('testproj2', 'test2', 'Another test project', ['test2']) + self.assert_(len(self.manager.get_projects()) > 1) + self.assertEqual(len(self.manager.get_projects('test2')), 1) + def test_299_can_delete_project(self): self.manager.delete_project('testproj') self.assertFalse(filter(lambda p: p.name == 'testproj', self.manager.get_projects())) + self.manager.delete_project('testproj2') def test_999_can_delete_users(self): self.manager.delete_user('test1') -- cgit From e53caccb7a242bdabd4ea6aed914ab77cb1fca32 Mon Sep 17 00:00:00 2001 From: Devin Carlen Date: Wed, 28 Jul 2010 18:16:09 +0000 Subject: Fixed project api --- nova/adminclient.py | 6 +++++- nova/endpoint/admin.py | 1 + nova/endpoint/cloud.py | 3 +-- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/nova/adminclient.py b/nova/adminclient.py index e81e0470f..7ef3497bc 100644 --- a/nova/adminclient.py +++ b/nova/adminclient.py @@ -69,6 +69,7 @@ class ProjectInfo(object): self.connection = connection self.projectname = projectname self.endpoint = endpoint + self.memberIds = [] def __repr__(self): return 'ProjectInfo:%s' % self.projectname @@ -77,7 +78,10 @@ class ProjectInfo(object): return None def 
endElement(self, name, value, connection): - setattr(self, name, str(value)) + if name == 'item': + self.memberIds.append(value) + elif name != 'memberIds': + setattr(self, name, str(value)) class HostInfo(object): """ diff --git a/nova/endpoint/admin.py b/nova/endpoint/admin.py index e1e06e944..120396b4a 100644 --- a/nova/endpoint/admin.py +++ b/nova/endpoint/admin.py @@ -42,6 +42,7 @@ def project_dict(project): if project: return { 'projectname': project.id, + 'project_manager_id': project.project_manager_id, 'description': project.description, 'member_ids': project.member_ids } diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 76ca35320..754b0780c 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -49,8 +49,7 @@ flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') def _gen_key(user_id, key_name): """ Tuck this into AuthManager """ try: - manager = manager.AuthManager() - private_key, fingerprint = manager.generate_key_pair(user_id, key_name) + private_key, fingerprint = manager.AuthManager().generate_key_pair(user_id, key_name) except Exception as ex: return {'exception': ex} return {'private_key': private_key, 'fingerprint': fingerprint} -- cgit From 47408dc8c901a431e358cad0420b756439d93a80 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 28 Jul 2010 11:33:11 -0700 Subject: make redis name default to lower case --- nova/datastore.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/datastore.py b/nova/datastore.py index e57177e04..45a710aaf 100644 --- a/nova/datastore.py +++ b/nova/datastore.py @@ -103,7 +103,7 @@ class BasicModel(object): @classmethod def _redis_name(cls): - return cls.override_type or cls.__name__ + return cls.override_type or cls.__name__.lower() @classmethod def lookup(cls, identifier): -- cgit From a7e5d47d8e49f8fc10900ede3376ddc515799811 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 28 Jul 2010 12:53:27 -0700 Subject: allow driver to be passed in to auth manager instead of depending solely on flag --- nova/auth/manager.py | 24 ++++++++++-------------- nova/utils.py | 10 ++++++++++ 2 files changed, 20 insertions(+), 14 deletions(-) diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 66027f6c2..7c0ee9cb4 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -323,25 +323,21 @@ class AuthManager(object): """ _instance=None def __new__(cls, *args, **kwargs): - """Returns the AuthManager singleton with driver set - - __init__ is run every time AuthManager() is called, so we need to do - any constructor related stuff here. The driver that is specified - in the flagfile is loaded here. - """ + """Returns the AuthManager singleton""" if not cls._instance: cls._instance = super(AuthManager, cls).__new__( cls, *args, **kwargs) - mod_str, sep, driver_str = FLAGS.auth_driver.rpartition('.') - try: - __import__(mod_str) - cls._instance.driver = getattr(sys.modules[mod_str], - driver_str) - except (ImportError, AttributeError): - raise exception.Error('Auth driver %s cannot be found' - % FLAGS.auth_driver) return cls._instance + def __init__(self, driver=None, *args, **kwargs): + """Inits the driver from parameter or flag + + __init__ is run every time AuthManager() is called, so we only + reset the driver if it is not set or a new driver is specified. 
+ """ + if driver or not getattr(self, 'driver', None): + self.driver = utils.import_class(driver or FLAGS.auth_driver) + def authenticate(self, access, signature, params, verb='GET', server_string='127.0.0.1:8773', path='/', check_type='ec2', headers=None): diff --git a/nova/utils.py b/nova/utils.py index a1eb0a092..0016b656e 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -29,11 +29,21 @@ import subprocess import socket import sys +from nova import exception from nova import flags FLAGS = flags.FLAGS TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ" +def import_class(import_str): + """Returns a class from a string including module and class""" + mod_str, _sep, class_str = import_str.rpartition('.') + try: + __import__(mod_str) + return getattr(sys.modules[mod_str], class_str) + except (ImportError, AttributeError): + raise exception.NotFound('Class %s cannot be found' % class_str) + def fetchfile(url, target): logging.debug("Fetching %s" % url) # c = pycurl.Curl() -- cgit From 93236b6b5f4f18fabf0852c6cbb9540578935961 Mon Sep 17 00:00:00 2001 From: Devin Carlen Date: Wed, 28 Jul 2010 21:04:05 +0000 Subject: Wired up get/add/remove project members --- nova/adminclient.py | 79 ++++++++++++++++++++++++++++++++++++++++++-------- nova/endpoint/admin.py | 15 +++++++--- 2 files changed, 78 insertions(+), 16 deletions(-) diff --git a/nova/adminclient.py b/nova/adminclient.py index 7ef3497bc..3a2ce2f01 100644 --- a/nova/adminclient.py +++ b/nova/adminclient.py @@ -59,16 +59,18 @@ class UserInfo(object): class ProjectInfo(object): """ Information about a Nova project, as parsed through SAX - fields include: + Fields include: projectname description - member_ids + projectManagerId + memberIds """ - def __init__(self, connection=None, projectname=None, endpoint=None): + def __init__(self, connection=None): self.connection = connection - self.projectname = projectname - self.endpoint = endpoint + self.projectname = None + self.description = None + self.projectManagerId = None self.memberIds = [] def __repr__(self): @@ -78,11 +80,40 @@ class ProjectInfo(object): return None def endElement(self, name, value, connection): - if name == 'item': + if name == 'projectname': + self.projectname = value + elif name == 'description': + self.description = value + elif name == 'projectManagerId': + self.projectManagerId = value + elif name == 'memberId': self.memberIds.append(value) - elif name != 'memberIds': + else: setattr(self, name, str(value)) +class ProjectMember(object): + """ + Information about a Nova project member, as parsed through SAX. + Fields include: + memberId + """ + def __init__(self, connection=None): + self.connection = connection + self.memberId = None + + def __repr__(self): + return 'ProjectMember:%s' % self.memberId + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'member': + self.memberId = value + else: + setattr(self, name, str(value)) + + class HostInfo(object): """ Information about a Nova Host, as parsed through SAX: @@ -193,11 +224,16 @@ class NovaAdminClient(object): } return self.apiconn.get_status('ModifyUserRole', params) - def get_projects(self): + def get_projects(self, user=None): """ Returns a list of all projects. 
""" - return self.apiconn.get_list('DescribeProjects', {}, + if user: + params = {'User': user} + else: + params = {} + return self.apiconn.get_list('DescribeProjects', + params, [('item', ProjectInfo)]) def get_project(self, name): @@ -232,8 +268,27 @@ class NovaAdminClient(object): {'Name': projectname}, ProjectInfo) - def modify_project_user(self, user, project, operation='add', - **kwargs): + def get_project_members(self, name): + """ + Returns a list of members of a project. + """ + return self.apiconn.get_list('DescribeProjectMembers', + {'Name': name}, + [('item', ProjectMember)]) + + def add_project_member(self, user, project): + """ + Adds a user to a project. + """ + return self.modify_project_member(user, project, operation='add') + + def remove_project_member(self, user, project): + """ + Removes a user from a project. + """ + return self.modify_project_member(user, project, operation='remove') + + def modify_project_member(self, user, project, operation='add'): """ Adds or removes a user from a project. """ @@ -242,7 +297,7 @@ class NovaAdminClient(object): 'Project': project, 'Operation': operation } - return self.apiconn.get_status('ModifyProjectUser', params) + return self.apiconn.get_status('ModifyProjectMember', params) def get_zip(self, username): """ returns the content of a zip file containing novarc and access credentials. """ diff --git a/nova/endpoint/admin.py b/nova/endpoint/admin.py index 120396b4a..db1d319f9 100644 --- a/nova/endpoint/admin.py +++ b/nova/endpoint/admin.py @@ -44,7 +44,6 @@ def project_dict(project): 'projectname': project.id, 'project_manager_id': project.project_manager_id, 'description': project.description, - 'member_ids': project.member_ids } else: return {} @@ -137,10 +136,9 @@ class AdminController(object): @admin_only def describe_projects(self, context, user=None, **kwargs): """Returns all projects - should be changed to deal with a list.""" - # TODO(devcamcar): Implement filter by user. return {'projectSet': [project_dict(u) for u in - manager.AuthManager().get_projects()]} + manager.AuthManager().get_projects(user=user)]} @admin_only def register_project(self, context, name, manager_user, description=None, @@ -162,7 +160,15 @@ class AdminController(object): return True @admin_only - def modify_project_user(self, context, user, project, operation, **kwargs): + def describe_project_members(self, context, name, **kwargs): + project = manager.AuthManager().get_project(name) + result = { + 'members': [{'member': m} for m in project.member_ids] + } + return result + + @admin_only + def modify_project_member(self, context, user, project, operation, **kwargs): """Add or remove a user from a project.""" if operation =='add': manager.AuthManager().add_to_project(user, project) @@ -170,6 +176,7 @@ class AdminController(object): manager.AuthManager().remove_from_project(user, project) else: raise exception.ApiError('operation must be add or remove') + return True @admin_only def describe_hosts(self, _context, **_kwargs): -- cgit From 7c39435df527126411ccaf5f95c1f9d0828af107 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Wed, 28 Jul 2010 14:37:58 -0700 Subject: Added ChangeLog generation. 
--- .bzrignore | 1 + MANIFEST.in | 1 + setup.py | 18 ++++++++++++++++++ 3 files changed, 20 insertions(+) diff --git a/.bzrignore b/.bzrignore index c3a502a1a..ab099d3e3 100644 --- a/.bzrignore +++ b/.bzrignore @@ -1,2 +1,3 @@ run_tests.err.log .nova-venv +ChangeLog diff --git a/MANIFEST.in b/MANIFEST.in index e917077c5..4eb28bde6 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,5 +1,6 @@ include HACKING LICENSE run_tests.py run_tests.sh include README builddeb.sh exercise_rsapi.py +include ChangeLog graft CA graft doc graft smoketests diff --git a/setup.py b/setup.py index 127d014b1..2940b518a 100644 --- a/setup.py +++ b/setup.py @@ -17,6 +17,23 @@ # under the License. from setuptools import setup, find_packages +from setuptools.command.sdist import sdist + +import os +import subprocess + +class local_sdist(sdist): + """Customized sdist hook - builds the ChangeLog file from VC first""" + + def run(self): + if os.path.isdir('.bzr'): + # We're in a bzr branch + log_cmd = subprocess.Popen(["bzr","log","--gnu"], + stdout = subprocess.PIPE) + changelog = log_cmd.communicate()[0] + with open("ChangeLog", "w") as changelog_file: + changelog_file.write(changelog) + sdist.run(self) setup(name='nova', version='0.9.0', @@ -24,6 +41,7 @@ setup(name='nova', author='OpenStack', author_email='nova@lists.launchpad.net', url='http://www.openstack.org/', + cmdclass = {'sdist': local_sdist}, packages = find_packages(exclude=['bin','smoketests']), scripts=['bin/nova-api', 'bin/nova-compute', -- cgit From 49dd0c52c143d86dcdd562ffd764a9f7e3ee8ce0 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Wed, 28 Jul 2010 16:05:17 -0700 Subject: Fixed pep8 issues in setup.py - thanks redbo. --- setup.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/setup.py b/setup.py index 2940b518a..098e62370 100644 --- a/setup.py +++ b/setup.py @@ -22,14 +22,15 @@ from setuptools.command.sdist import sdist import os import subprocess + class local_sdist(sdist): """Customized sdist hook - builds the ChangeLog file from VC first""" def run(self): if os.path.isdir('.bzr'): # We're in a bzr branch - log_cmd = subprocess.Popen(["bzr","log","--gnu"], - stdout = subprocess.PIPE) + log_cmd = subprocess.Popen(["bzr", "log", "--gnu"], + stdout=subprocess.PIPE) changelog = log_cmd.communicate()[0] with open("ChangeLog", "w") as changelog_file: changelog_file.write(changelog) @@ -41,8 +42,8 @@ setup(name='nova', author='OpenStack', author_email='nova@lists.launchpad.net', url='http://www.openstack.org/', - cmdclass = {'sdist': local_sdist}, - packages = find_packages(exclude=['bin','smoketests']), + cmdclass={'sdist': local_sdist}, + packages=find_packages(exclude=['bin', 'smoketests']), scripts=['bin/nova-api', 'bin/nova-compute', 'bin/nova-dhcpbridge', @@ -52,6 +53,4 @@ setup(name='nova', 'bin/nova-network', 'bin/nova-objectstore', 'bin/nova-rsapi', - 'bin/nova-volume', - ] - ) + 'bin/nova-volume']) -- cgit From bb375bbeffb1249c653d3589acb521a25f8824c7 Mon Sep 17 00:00:00 2001 From: Devin Carlen Date: Wed, 28 Jul 2010 23:41:43 +0000 Subject: pep8 --- nova/adminclient.py | 37 +++++++++++++++---------------------- nova/endpoint/admin.py | 14 +++++--------- 2 files changed, 20 insertions(+), 31 deletions(-) diff --git a/nova/adminclient.py b/nova/adminclient.py index 3a2ce2f01..fceeac274 100644 --- a/nova/adminclient.py +++ b/nova/adminclient.py @@ -23,6 +23,7 @@ import base64 import boto from boto.ec2.regioninfo import RegionInfo + class UserInfo(object): """ Information about a Nova user, as parsed through SAX 
@@ -170,8 +171,7 @@ class NovaAdminClient(object): region=RegionInfo(None, self.region, self.clc_ip), port=8773, path='/services/Cloud', - **kwargs - ) + **kwargs) def get_users(self): """ grabs the list of all users """ @@ -215,13 +215,10 @@ class NovaAdminClient(object): """ Add or remove a role for a user and project. """ - params = { - 'User': user, - 'Role': role, - 'Project': project, - 'Operation': operation - - } + params = {'User': user, + 'Role': role, + 'Project': project, + 'Operation': operation} return self.apiconn.get_status('ModifyUserRole', params) def get_projects(self, user=None): @@ -241,8 +238,8 @@ class NovaAdminClient(object): Returns a single project with the specified name. """ project = self.apiconn.get_object('DescribeProject', - {'Name': name}, - ProjectInfo) + {'Name': name}, + ProjectInfo) if project.projectname != None: return project @@ -252,12 +249,10 @@ class NovaAdminClient(object): """ Creates a new project. """ - params = { - 'Name': projectname, - 'ManagerUser': manager_user, - 'Description': description, - 'MemberUsers': member_users - } + params = {'Name': projectname, + 'ManagerUser': manager_user, + 'Description': description, + 'MemberUsers': member_users} return self.apiconn.get_object('RegisterProject', params, ProjectInfo) def delete_project(self, projectname): @@ -292,11 +287,9 @@ class NovaAdminClient(object): """ Adds or removes a user from a project. """ - params = { - 'User': user, - 'Project': project, - 'Operation': operation - } + params = {'User': user, + 'Project': project, + 'Operation': operation} return self.apiconn.get_status('ModifyProjectMember', params) def get_zip(self, username): diff --git a/nova/endpoint/admin.py b/nova/endpoint/admin.py index db1d319f9..c4b8c05ca 100644 --- a/nova/endpoint/admin.py +++ b/nova/endpoint/admin.py @@ -25,6 +25,7 @@ import base64 from nova.auth import manager from nova.compute import model + def user_dict(user, base64_file=None): """Convert the user object to a result dict""" if user: @@ -32,8 +33,7 @@ def user_dict(user, base64_file=None): 'username': user.id, 'accesskey': user.access, 'secretkey': user.secret, - 'file': base64_file, - } + 'file': base64_file} else: return {} @@ -43,8 +43,7 @@ def project_dict(project): return { 'projectname': project.id, 'project_manager_id': project.project_manager_id, - 'description': project.description, - } + 'description': project.description} else: return {} @@ -149,9 +148,7 @@ class AdminController(object): name, manager_user, description=None, - member_users=None - ) - ) + member_users=None)) @admin_only def deregister_project(self, context, name): @@ -163,8 +160,7 @@ class AdminController(object): def describe_project_members(self, context, name, **kwargs): project = manager.AuthManager().get_project(name) result = { - 'members': [{'member': m} for m in project.member_ids] - } + 'members': [{'member': m} for m in project.member_ids]} return result @admin_only -- cgit From 04a6a0267e7dc0f4e587e43f23b4acf0dcef52fc Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Thu, 29 Jul 2010 00:58:33 +0100 Subject: More merges from trunk. Not everything came over the first time. 
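
One functional detail buried in the merge below is the dnsmasq change: the daemon is now launched through "sudo -E" with FLAGFILE and DNSMASQ_INTERFACE exported, so those variables survive into dnsmasq's environment, where the nova-dhcpbridge hook can later read them. Roughly what that amounts to, as a standalone sketch; the flag-file path and bridge name are placeholders rather than values taken from the patch:

    import os
    import subprocess

    env = dict(os.environ)
    env['FLAGFILE'] = '/etc/nova/nova-dhcpbridge.conf'  # placeholder path
    env['DNSMASQ_INTERFACE'] = 'br100'                  # placeholder bridge name
    # "sudo -E" preserves the caller's environment instead of scrubbing it,
    # which is the point of the fix.
    subprocess.call('sudo -E dnsmasq --strict-order --bind-interfaces',
                    shell=True, env=env)
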
--- nova/auth/ldapdriver.py | 7 +++++-- nova/auth/manager.py | 6 +++--- nova/compute/linux_net.py | 7 ++++--- nova/datastore.py | 2 +- nova/endpoint/api.py | 2 +- nova/endpoint/cloud.py | 8 ++++++-- nova/objectstore/handler.py | 10 +++++----- nova/process.py | 13 +++++++------ nova/tests/auth_unittest.py | 6 ++++++ nova/tests/process_unittest.py | 7 ------- setup.py | 2 +- 11 files changed, 39 insertions(+), 31 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 1591c88e9..055e8332b 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -120,10 +120,13 @@ class LdapDriver(object): '(objectclass=novaKeyPair)') return [self.__to_key_pair(uid, attr) for attr in attrs] - def get_projects(self): + def get_projects(self, uid=None): """Retrieve list of projects""" + filter = '(objectclass=novaProject)' + if uid: + filter = "(&%s(member=%s))" % (filter, self.__uid_to_dn(uid)) attrs = self.__find_objects(FLAGS.ldap_project_subtree, - '(objectclass=novaProject)') + filter) return [self.__to_project(attr) for attr in attrs] def create_user(self, name, access_key, secret_key, is_admin): diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 66027f6c2..7307f673b 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -547,10 +547,10 @@ class AuthManager(object): if project_dict: return Project(**project_dict) - def get_projects(self): - """Retrieves list of all projects""" + def get_projects(self, user=None): + """Retrieves list of projects, optionally filtered by user""" with self.driver() as drv: - project_list = drv.get_projects() + project_list = drv.get_projects(User.safe_id(user)) if not project_list: return [] return [Project(**project_dict) for project_dict in project_list] diff --git a/nova/compute/linux_net.py b/nova/compute/linux_net.py index 861ce779b..4a4b4c8a8 100644 --- a/nova/compute/linux_net.py +++ b/nova/compute/linux_net.py @@ -94,7 +94,7 @@ def bridge_create(net): execute("sudo ifconfig %s up" % net['bridge_name']) def dnsmasq_cmd(net): - cmd = ['sudo dnsmasq', + cmd = ['sudo -E dnsmasq', ' --strict-order', ' --bind-interfaces', ' --conf-file=', @@ -143,8 +143,9 @@ def start_dnsmasq(network): if os.path.exists(lease_file): os.unlink(lease_file) - # FLAGFILE in env - env = {'FLAGFILE' : FLAGS.dhcpbridge_flagfile} + # FLAGFILE and DNSMASQ_INTERFACE in env + env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile, + 'DNSMASQ_INTERFACE': network['bridge_name']} execute(dnsmasq_cmd(network), addl_env=env) def stop_dnsmasq(network): diff --git a/nova/datastore.py b/nova/datastore.py index 660ad9d90..9c2592334 100644 --- a/nova/datastore.py +++ b/nova/datastore.py @@ -103,7 +103,7 @@ class BasicModel(object): @classmethod def _redis_name(cls): - return cls.override_type or cls.__name__ + return cls.override_type or cls.__name__.lower() @classmethod def lookup(cls, identifier): diff --git a/nova/endpoint/api.py b/nova/endpoint/api.py index 8915e4742..78a18b9ea 100755 --- a/nova/endpoint/api.py +++ b/nova/endpoint/api.py @@ -266,7 +266,7 @@ class APIRequestHandler(tornado.web.RequestHandler): # Authenticate the request. 
         try:
-            (user, project) = users.UserManager.instance().authenticate(
+            (user, project) = manager.AuthManager().authenticate(
                 access,
                 signature,
                 auth_params,
diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py
index 21581ffd2..8a4edbc0b 100644
--- a/nova/endpoint/cloud.py
+++ b/nova/endpoint/cloud.py
@@ -516,8 +516,12 @@ class CloudController(object):
 
         # get defaults from imagestore
         image_id = image['imageId']
-        kernel_id = image.get('kernelId', None)
-        ramdisk_id = image.get('ramdiskId', None)
+        kernel_id = image.get('kernelId', FLAGS.default_kernel)
+        ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk)
+
+        # make sure we have access to kernel and ramdisk
+        self._get_image(context, kernel_id)
+        self._get_image(context, ramdisk_id)
 
         # API parameters overrides of defaults
         kernel_id = kwargs.get('kernel_id', kernel_id)
diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py
index 344d75f6b..b4d7e6179 100644
--- a/nova/objectstore/handler.py
+++ b/nova/objectstore/handler.py
@@ -297,12 +297,12 @@ class ImagesResource(Resource):
     def render_POST(self, request):
         """ update image attributes: public/private """
 
-        image_id = self.get_argument('image_id', u'')
-        operation = self.get_argument('operation', u'')
+        image_id = get_argument(request, 'image_id', u'')
+        operation = get_argument(request, 'operation', u'')
 
         image_object = image.Image(image_id)
 
-        if not image.is_authorized(request.context):
+        if not image_object.is_authorized(request.context):
             raise exception.NotAuthorized
 
         image_object.set_public(operation=='add')
@@ -311,10 +311,10 @@ class ImagesResource(Resource):
     def render_DELETE(self, request):
         """ delete a registered image """
 
-        image_id = self.get_argument("image_id", u"")
+        image_id = get_argument(request, "image_id", u"")
         image_object = image.Image(image_id)
 
-        if not image.is_authorized(request.context):
+        if not image_object.is_authorized(request.context):
             raise exception.NotAuthorized
 
         image_object.delete()
diff --git a/nova/process.py b/nova/process.py
index 8ecef1584..d3558ed2e 100644
--- a/nova/process.py
+++ b/nova/process.py
@@ -205,12 +205,13 @@ class ProcessPool(object):
             self._pool.release()
         return rv
 
-_instance = None
-def SharedPool():
-    global _instance
-    if _instance is None:
-        _instance = ProcessPool()
-    return _instance
+class SharedPool(ProcessPool):
+    _instance = None
+    def __new__(cls, *args, **kwargs):
+        if not cls._instance:
+            cls._instance = super(SharedPool, cls).__new__(
+                cls, *args, **kwargs)
+        return cls._instance
 
 def simple_execute(cmd, **kwargs):
     return SharedPool().simple_execute(cmd, **kwargs)
diff --git a/nova/tests/auth_unittest.py b/nova/tests/auth_unittest.py
index 073ff71d2..2167c2385 100644
--- a/nova/tests/auth_unittest.py
+++ b/nova/tests/auth_unittest.py
@@ -193,9 +193,15 @@ class AuthTestCase(test.BaseTestCase):
         for vpn in vpns:
             vpn.destroy()
 
+    def test_214_can_retrieve_project_by_user(self):
+        project = self.manager.create_project('testproj2', 'test2', 'Another test project', ['test2'])
+        self.assert_(len(self.manager.get_projects()) > 1)
+        self.assertEqual(len(self.manager.get_projects('test2')), 1)
+
     def test_299_can_delete_project(self):
         self.manager.delete_project('testproj')
         self.assertFalse(filter(lambda p: p.name == 'testproj', self.manager.get_projects()))
+        self.manager.delete_project('testproj2')
 
     def test_999_can_delete_users(self):
         self.manager.delete_user('test1')
diff --git a/nova/tests/process_unittest.py b/nova/tests/process_unittest.py
index c96bb5913..1c15b69a0 100644
--- a/nova/tests/process_unittest.py
+++ b/nova/tests/process_unittest.py
@@ -120,10 +120,3 @@ class ProcessTestCase(test.TrialTestCase):
         pool2 = process.SharedPool()
         self.assert_(id(pool1) == id(pool2))
 
-    def test_shared_pool_works_as_singleton(self):
-        d1 = process.simple_execute('sleep 1')
-        d2 = process.simple_execute('sleep 0.005')
-        # lp609749: would have failed with
-        # exceptions.AssertionError: Someone released me too many times:
-        # too many tokens!
-        return d1
diff --git a/setup.py b/setup.py
index 127d014b1..50d5f2a3d 100644
--- a/setup.py
+++ b/setup.py
@@ -19,7 +19,7 @@
 from setuptools import setup, find_packages
 
 setup(name='nova',
-      version='0.9.0',
+      version='0.9.1',
       description='cloud computing fabric controller',
       author='OpenStack',
       author_email='nova@lists.launchpad.net',

-- cgit

From b01c531ba5cb408a630e992beda769032b135f6a Mon Sep 17 00:00:00 2001
From: Ewan Mellor
Date: Thu, 29 Jul 2010 01:21:50 +0100
Subject: Move the reading of API parameters above the call to _get_image, so that they have a chance to take effect.

---
 nova/endpoint/cloud.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py
index acba50b95..7e51cc83b 100644
--- a/nova/endpoint/cloud.py
+++ b/nova/endpoint/cloud.py
@@ -518,14 +518,14 @@ class CloudController(object):
         kernel_id = image.get('kernelId', FLAGS.default_kernel)
         ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk)
 
-        # make sure we have access to kernel and ramdisk
-        self._get_image(context, kernel_id)
-        self._get_image(context, ramdisk_id)
-
         # API parameters overrides of defaults
         kernel_id = kwargs.get('kernel_id', kernel_id)
         ramdisk_id = kwargs.get('ramdisk_id', ramdisk_id)
 
+        # make sure we have access to kernel and ramdisk
+        self._get_image(context, kernel_id)
+        self._get_image(context, ramdisk_id)
+
         logging.debug("Going to run instances...")
         reservation_id = utils.generate_uid('r')
         launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())

-- cgit

From bc96d6daa425f7c38d67ad43b8cf46f3ba5989c3 Mon Sep 17 00:00:00 2001
From: Ewan Mellor
Date: Thu, 29 Jul 2010 09:08:31 +0100
Subject: Replace the second singleton unit test, lost during a merge.

---
 nova/tests/process_unittest.py | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/nova/tests/process_unittest.py b/nova/tests/process_unittest.py
index f5f304aa2..75187e1fc 100644
--- a/nova/tests/process_unittest.py
+++ b/nova/tests/process_unittest.py
@@ -120,3 +120,10 @@ class ProcessTestCase(test.TrialTestCase):
         pool2 = process.SharedPool()
         self.assertEqual(id(pool1._instance), id(pool2._instance))
 
+    def test_shared_pool_works_as_singleton(self):
+        d1 = process.simple_execute('sleep 1')
+        d2 = process.simple_execute('sleep 0.005')
+        # lp609749: would have failed with
+        # exceptions.AssertionError: Someone released me too many times:
+        # too many tokens!
+        return d1

-- cgit
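
A note on the pattern exercised by the two singleton tests above: the SharedPool hunk in
nova/process.py replaces a module-level factory function with a class that caches its only
instance in __new__, so every instantiation returns the same pool. The sketch below is a
minimal, self-contained illustration of that pattern only; DummyPool, SharedDummyPool and the
module-level simple_execute here are hypothetical stand-ins, and the real Nova classes
additionally manage a Twisted process pool, a semaphore of worker slots, and return deferreds.

    class DummyPool(object):
        """Stand-in for ProcessPool: pretends to run a shell command."""

        def simple_execute(self, cmd):
            return "ran: %s" % cmd


    class SharedDummyPool(DummyPool):
        """Singleton via __new__: every instantiation yields one shared object."""

        _instance = None

        def __new__(cls):
            if cls._instance is None:
                cls._instance = super(SharedDummyPool, cls).__new__(cls)
            return cls._instance


    def simple_execute(cmd):
        """Module-level helper mirroring the shape of nova.process.simple_execute."""
        return SharedDummyPool().simple_execute(cmd)


    pool1 = SharedDummyPool()
    pool2 = SharedDummyPool()
    assert pool1 is pool2                          # both names refer to the same pool
    assert simple_execute('echo hi') == 'ran: echo hi'

Because every call site resolves to the same cached instance, concurrent callers share one
pool of worker slots rather than each creating a fresh pool, which is the behaviour the
re-added lp609749 regression test relies on.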
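
Separately, a side note on the LdapDriver.get_projects() change earlier in the series ("More
merges from trunk"): the composed LDAP search filter is easier to see with a concrete value.
The sketch below is illustrative only; the uid-to-DN mapping and the example DN layout are
assumptions for the example, not taken from the patch or from Nova's configuration.

    # Hypothetical helper standing in for LdapDriver.__uid_to_dn(); the DN layout
    # below is an assumed example.
    def uid_to_dn(uid):
        return 'uid=%s,ou=Users,dc=example,dc=com' % uid


    def project_filter(uid=None):
        # The base filter matches every project; with a uid it is AND-ed with a
        # membership test, mirroring the "(&%s(member=%s))" composition in the patch.
        search_filter = '(objectclass=novaProject)'
        if uid:
            search_filter = "(&%s(member=%s))" % (search_filter, uid_to_dn(uid))
        return search_filter


    assert project_filter() == '(objectclass=novaProject)'
    assert project_filter('test2') == (
        '(&(objectclass=novaProject)'
        '(member=uid=test2,ou=Users,dc=example,dc=com))')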