From 40b2bbcfe6274aca9fd4361c56b2b042ba22e3c2 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Mon, 2 Aug 2010 08:31:19 +0100 Subject: Turn the private _image_url(path) into a public image_url(image). This will be used by virt.xenapi to instruct xapi as to which images to download. As part of this, the value returned became a complete URL, with http:// on the front. This caused the URL parsing to be adjusted. --- nova/virt/images.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/nova/virt/images.py b/nova/virt/images.py index 92210e242..698536324 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -23,6 +23,7 @@ Handling of VM disk images. import os.path import time +import urlparse from nova import flags from nova import process @@ -42,7 +43,7 @@ def fetch(image, path, user): return f(image, path, user) def _fetch_s3_image(image, path, user): - url = _image_url('%s/image' % image) + url = image_url(image) # This should probably move somewhere else, like e.g. a download_as # method on User objects and at the same time get rewritten to use @@ -50,8 +51,8 @@ def _fetch_s3_image(image, path, user): headers = {} headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) - uri = '/' + url.partition('/')[2] - auth = signer.Signer(user.secret.encode()).s3_authorization(headers, 'GET', uri) + (_, _, url_path, _, _, _) = urlparse.urlparse(url) + auth = signer.Signer(user.secret.encode()).s3_authorization(headers, 'GET', url_path) headers['Authorization'] = 'AWS %s:%s' % (user.access, auth) cmd = ['/usr/bin/curl', '--silent', url] @@ -68,5 +69,6 @@ def _fetch_local_image(image, path, _): def _image_path(path): return os.path.join(FLAGS.images_path, path) -def _image_url(path): - return "%s:%s/_images/%s" % (FLAGS.s3_host, FLAGS.s3_port, path) +def image_url(image): + return "http://%s:%s/_images/%s/image" % (FLAGS.s3_host, FLAGS.s3_port, + image) -- cgit From 4c8ae5e0a5b30039075a87ba39aec6da64fdd138 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Tue, 3 Aug 2010 00:52:06 +0100 Subject: Added a xapi plugin that can pull images from nova-objectstore, and use that to get a disk, kernel, and ramdisk for the VM. The VM actually boots! --- nova/virt/xenapi.py | 105 ++++++++++++- xenapi/README | 2 + xenapi/etc/xapi.d/plugins/objectstore | 231 ++++++++++++++++++++++++++++ xenapi/etc/xapi.d/plugins/pluginlib_nova.py | 216 ++++++++++++++++++++++++++ 4 files changed, 547 insertions(+), 7 deletions(-) create mode 100644 xenapi/README create mode 100644 xenapi/etc/xapi.d/plugins/objectstore create mode 100755 xenapi/etc/xapi.d/plugins/pluginlib_nova.py diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index dc372e3e3..b84e55138 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -19,6 +19,7 @@ A connection to XenServer or Xen Cloud Platform. 
""" import logging +import xmlrpclib from twisted.internet import defer from twisted.internet import task @@ -26,7 +27,9 @@ from twisted.internet import task from nova import exception from nova import flags from nova import process +from nova.auth.manager import AuthManager from nova.compute import power_state +from nova.virt import images XenAPI = None @@ -71,10 +74,26 @@ class XenAPIConnection(object): @defer.inlineCallbacks @exception.wrap_exception def spawn(self, instance): - vm = self.lookup(instance.name) + vm = yield self.lookup(instance.name) if vm is not None: raise Exception('Attempted to create non-unique name %s' % instance.name) + + user = AuthManager().get_user(instance.datamodel['user_id']) + vdi_uuid = yield self.fetch_image( + instance.datamodel['image_id'], user, True) + kernel = yield self.fetch_image( + instance.datamodel['kernel_id'], user, False) + ramdisk = yield self.fetch_image( + instance.datamodel['ramdisk_id'], user, False) + vdi_ref = yield self._conn.xenapi.VDI.get_by_uuid(vdi_uuid) + + vm_ref = yield self.create_vm(instance, kernel, ramdisk) + yield self.create_vbd(vm_ref, vdi_ref, 0, True) + yield self._conn.xenapi.VM.start(vm_ref, False, False) + + + def create_vm(self, instance, kernel, ramdisk): mem = str(long(instance.datamodel['memory_kb']) * 1024) vcpus = str(instance.datamodel['vcpus']) rec = { @@ -92,9 +111,9 @@ class XenAPIConnection(object): 'actions_after_reboot': 'restart', 'actions_after_crash': 'destroy', 'PV_bootloader': '', - 'PV_kernel': instance.datamodel['kernel_id'], - 'PV_ramdisk': instance.datamodel['ramdisk_id'], - 'PV_args': '', + 'PV_kernel': kernel, + 'PV_ramdisk': ramdisk, + 'PV_args': 'root=/dev/xvda1', 'PV_bootloader_args': '', 'PV_legacy_args': '', 'HVM_boot_policy': '', @@ -106,8 +125,48 @@ class XenAPIConnection(object): 'user_version': '0', 'other_config': {}, } - vm = yield self._conn.xenapi.VM.create(rec) - #yield self._conn.xenapi.VM.start(vm, False, False) + logging.debug('Created VM %s...', instance.name) + vm_ref = self._conn.xenapi.VM.create(rec) + logging.debug('Created VM %s as %s.', instance.name, vm_ref) + return vm_ref + + + def create_vbd(self, vm_ref, vdi_ref, userdevice, bootable): + vbd_rec = {} + vbd_rec['VM'] = vm_ref + vbd_rec['VDI'] = vdi_ref + vbd_rec['userdevice'] = str(userdevice) + vbd_rec['bootable'] = bootable + vbd_rec['mode'] = 'RW' + vbd_rec['type'] = 'disk' + vbd_rec['unpluggable'] = True + vbd_rec['empty'] = False + vbd_rec['other_config'] = {} + vbd_rec['qos_algorithm_type'] = '' + vbd_rec['qos_algorithm_params'] = {} + vbd_rec['qos_supported_algorithms'] = [] + logging.debug('Creating VBD for VM %s, VDI %s ... ', vm_ref, vdi_ref) + vbd_ref = self._conn.xenapi.VBD.create(vbd_rec) + logging.debug('Created VBD %s for VM %s, VDI %s.', vbd_ref, vm_ref, + vdi_ref) + return vbd_ref + + + def fetch_image(self, image, user, use_sr): + """use_sr: True to put the image as a VDI in an SR, False to place + it on dom0's filesystem. 
The former is for VM disks, the latter for + its kernel and ramdisk (if external kernels are being used).""" + + url = images.image_url(image) + logging.debug("Asking xapi to fetch %s as %s" % (url, user.access)) + fn = use_sr and 'get_vdi' or 'get_kernel' + args = {} + args['src_url'] = url + args['username'] = user.access + args['password'] = user.secret + if use_sr: + args['add_partition'] = 'true' + return self._call_plugin('objectstore', fn, args) def reboot(self, instance): @@ -143,10 +202,42 @@ class XenAPIConnection(object): else: return vms[0] + + def _call_plugin(self, plugin, fn, args): + return _unwrap_plugin_exceptions( + self._conn.xenapi.host.call_plugin, + self._get_xenapi_host(), plugin, fn, args) + + + def _get_xenapi_host(self): + return self._conn.xenapi.session.get_this_host(self._conn.handle) + + power_state_from_xenapi = { - 'Halted' : power_state.RUNNING, #FIXME + 'Halted' : power_state.SHUTDOWN, 'Running' : power_state.RUNNING, 'Paused' : power_state.PAUSED, 'Suspended': power_state.SHUTDOWN, # FIXME 'Crashed' : power_state.CRASHED } + + +def _unwrap_plugin_exceptions(func, *args, **kwargs): + try: + return func(*args, **kwargs) + except XenAPI.Failure, exn: + logging.debug("Got exception: %s", exn) + if (len(exn.details) == 4 and + exn.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and + exn.details[2] == 'Failure'): + params = None + try: + params = eval(exn.details[3]) + except: + raise exn + raise XenAPI.Failure(params) + else: + raise + except xmlrpclib.ProtocolError, exn: + logging.debug("Got exception: %s", exn) + raise diff --git a/xenapi/README b/xenapi/README new file mode 100644 index 000000000..1fc67aa7a --- /dev/null +++ b/xenapi/README @@ -0,0 +1,2 @@ +This directory contains files that are required for the XenAPI support. They +should be installed in the XenServer / Xen Cloud Platform domain 0. diff --git a/xenapi/etc/xapi.d/plugins/objectstore b/xenapi/etc/xapi.d/plugins/objectstore new file mode 100644 index 000000000..271e7337f --- /dev/null +++ b/xenapi/etc/xapi.d/plugins/objectstore @@ -0,0 +1,231 @@ +#!/usr/bin/env python + +# Copyright (c) 2010 Citrix Systems, Inc. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# +# XenAPI plugin for fetching images from nova-objectstore. 
+# + +import base64 +import errno +import hmac +import os +import os.path +import sha +import time +import urlparse + +import XenAPIPlugin + +from pluginlib_nova import * +configure_logging('objectstore') + + +KERNEL_DIR = '/boot/guest' + +DOWNLOAD_CHUNK_SIZE = 2 * 1024 * 1024 +SECTOR_SIZE = 512 +MBR_SIZE_SECTORS = 63 +MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE + + +def get_vdi(session, args): + src_url = exists(args, 'src_url') + username = exists(args, 'username') + password = exists(args, 'password') + add_partition = validate_bool(args, 'add_partition', 'false') + + (proto, netloc, url_path, _, _, _) = urlparse.urlparse(src_url) + + sr = find_sr(session) + if sr is None: + raise Exception('Cannot find SR to write VDI to') + + virtual_size = \ + get_content_length(proto, netloc, url_path, username, password) + if virtual_size < 0: + raise Exception('Cannot get VDI size') + + vdi_size = virtual_size + if add_partition: + # Make room for MBR. + vdi_size += MBR_SIZE_BYTES + + vdi = create_vdi(session, sr, src_url, vdi_size, False) + with_vdi_in_dom0(session, vdi, False, + lambda dev: get_vdi_(proto, netloc, url_path, + username, password, add_partition, + virtual_size, '/dev/%s' % dev)) + return session.xenapi.VDI.get_uuid(vdi) + + +def get_vdi_(proto, netloc, url_path, username, password, add_partition, + virtual_size, dest): + + if add_partition: + write_partition(virtual_size, dest) + + offset = add_partition and MBR_SIZE_BYTES or 0 + get(proto, netloc, url_path, username, password, dest, offset) + + +def write_partition(virtual_size, dest): + mbr_last = MBR_SIZE_SECTORS - 1 + primary_first = MBR_SIZE_SECTORS + primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1 + + logging.debug('Writing partition table %d %d to %s...', + primary_first, primary_last, dest) + + result = os.system('parted --script %s mklabel msdos' % dest) + if result != 0: + raise Exception('Failed to mklabel') + result = os.system('parted --script %s mkpart primary %ds %ds' % + (dest, primary_first, primary_last)) + if result != 0: + raise Exception('Failed to mkpart') + + logging.debug('Writing partition table %s done.', dest) + + +def find_sr(session): + host = get_this_host(session) + srs = session.xenapi.SR.get_all() + for sr in srs: + sr_rec = session.xenapi.SR.get_record(sr) + if not ('i18n-key' in sr_rec['other_config'] and + sr_rec['other_config']['i18n-key'] == 'local-storage'): + continue + for pbd in sr_rec['PBDs']: + pbd_rec = session.xenapi.PBD.get_record(pbd) + if pbd_rec['host'] == host: + return sr + return None + + +def get_kernel(session, args): + src_url = exists(args, 'src_url') + username = exists(args, 'username') + password = exists(args, 'password') + + (proto, netloc, url_path, _, _, _) = urlparse.urlparse(src_url) + + dest = os.path.join(KERNEL_DIR, url_path[1:]) + + # Paranoid check against people using ../ to do rude things. 
+ if os.path.commonprefix([KERNEL_DIR, dest]) != KERNEL_DIR: + raise Exception('Illegal destination %s %s', (url_path, dest)) + + dirname = os.path.dirname(dest) + try: + os.makedirs(dirname) + except os.error, e: + if e.errno != errno.EEXIST: + raise + if not os.path.isdir(dirname): + raise Exception('Cannot make directory %s', dirname) + + try: + os.remove(dest) + except: + pass + + get(proto, netloc, url_path, username, password, dest, 0) + + return dest + + +def get_content_length(proto, netloc, url_path, username, password): + headers = make_headers('HEAD', url_path, username, password) + return with_http_connection( + proto, netloc, + lambda conn: get_content_length_(url_path, headers, conn)) + + +def get_content_length_(url_path, headers, conn): + conn.request('HEAD', url_path, None, headers) + response = conn.getresponse() + if response.status != 200: + raise Exception('%d %s' % (response.status, response.reason)) + + return long(response.getheader('Content-Length', -1)) + + +def get(proto, netloc, url_path, username, password, dest, offset): + headers = make_headers('GET', url_path, username, password) + download(proto, netloc, url_path, headers, dest, offset) + + +def make_headers(verb, url_path, username, password): + headers = {} + headers['Date'] = \ + time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) + headers['Authorization'] = \ + 'AWS %s:%s' % (username, + s3_authorization(verb, url_path, password, headers)) + return headers + + +def s3_authorization(verb, path, password, headers): + sha1 = hmac.new(password, digestmod=sha) + sha1.update(plaintext(verb, path, headers)) + return base64.encodestring(sha1.digest()).strip() + + +def plaintext(verb, path, headers): + return '%s\n\n\n%s\n%s' % (verb, + "\n".join([headers[h] for h in headers]), + path) + + +def download(proto, netloc, url_path, headers, dest, offset): + with_http_connection( + proto, netloc, + lambda conn: download_(url_path, dest, offset, headers, conn)) + + +def download_(url_path, dest, offset, headers, conn): + conn.request('GET', url_path, None, headers) + response = conn.getresponse() + if response.status != 200: + raise Exception('%d %s' % (response.status, response.reason)) + + length = response.getheader('Content-Length', -1) + + with_file( + dest, 'a', + lambda dest_file: download_all(response, length, dest_file, offset)) + + +def download_all(response, length, dest_file, offset): + dest_file.seek(offset) + i = 0 + while True: + buf = response.read(DOWNLOAD_CHUNK_SIZE) + if buf: + dest_file.write(buf) + else: + return + i += len(buf) + if length != -1 and i >= length: + return + + +if __name__ == '__main__': + XenAPIPlugin.dispatch({'get_vdi': get_vdi, + 'get_kernel': get_kernel}) diff --git a/xenapi/etc/xapi.d/plugins/pluginlib_nova.py b/xenapi/etc/xapi.d/plugins/pluginlib_nova.py new file mode 100755 index 000000000..2d323a016 --- /dev/null +++ b/xenapi/etc/xapi.d/plugins/pluginlib_nova.py @@ -0,0 +1,216 @@ +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +# +# Helper functions for the Nova xapi plugins. In time, this will merge +# with the pluginlib.py shipped with xapi, but for now, that file is not +# very stable, so it's easiest just to have a copy of all the functions +# that we need. +# + +import httplib +import logging +import logging.handlers +import re +import time + + +##### Logging setup + +def configure_logging(name): + log = logging.getLogger() + log.setLevel(logging.DEBUG) + sysh = logging.handlers.SysLogHandler('/dev/log') + sysh.setLevel(logging.DEBUG) + formatter = logging.Formatter('%s: %%(levelname)-8s %%(message)s' % name) + sysh.setFormatter(formatter) + log.addHandler(sysh) + + +##### Exceptions + +class PluginError(Exception): + """Base Exception class for all plugin errors.""" + def __init__(self, *args): + Exception.__init__(self, *args) + +class ArgumentError(PluginError): + """Raised when required arguments are missing, argument values are invalid, + or incompatible arguments are given. + """ + def __init__(self, *args): + PluginError.__init__(self, *args) + + +##### Helpers + +def ignore_failure(func, *args, **kwargs): + try: + return func(*args, **kwargs) + except XenAPI.Failure, e: + logging.error('Ignoring XenAPI.Failure %s', e) + return None + + +##### Argument validation + +ARGUMENT_PATTERN = re.compile(r'^[a-zA-Z0-9_:\.\-,]+$') + +def validate_exists(args, key, default=None): + """Validates that a string argument to a RPC method call is given, and + matches the shell-safe regex, with an optional default value in case it + does not exist. + + Returns the string. + """ + if key in args: + if len(args[key]) == 0: + raise ArgumentError('Argument %r value %r is too short.' % (key, args[key])) + if not ARGUMENT_PATTERN.match(args[key]): + raise ArgumentError('Argument %r value %r contains invalid characters.' % (key, args[key])) + if args[key][0] == '-': + raise ArgumentError('Argument %r value %r starts with a hyphen.' % (key, args[key])) + return args[key] + elif default is not None: + return default + else: + raise ArgumentError('Argument %s is required.' % key) + +def validate_bool(args, key, default=None): + """Validates that a string argument to a RPC method call is a boolean string, + with an optional default value in case it does not exist. + + Returns the python boolean value. + """ + value = validate_exists(args, key, default) + if value.lower() == 'true': + return True + elif value.lower() == 'false': + return False + else: + raise ArgumentError("Argument %s may not take value %r. Valid values are ['true', 'false']." % (key, value)) + +def exists(args, key): + """Validates that a freeform string argument to a RPC method call is given. + Returns the string. + """ + if key in args: + return args[key] + else: + raise ArgumentError('Argument %s is required.' 
% key) + +def optional(args, key): + """If the given key is in args, return the corresponding value, otherwise + return None""" + return key in args and args[key] or None + + +def get_this_host(session): + return session.xenapi.session.get_this_host(session.handle) + + +def get_domain_0(session): + this_host_ref = get_this_host(session) + expr = 'field "is_control_domain" = "true" and field "resident_on" = "%s"' % this_host_ref + return session.xenapi.VM.get_all_records_where(expr).keys()[0] + + +def create_vdi(session, sr_ref, name_label, virtual_size, read_only): + vdi_ref = session.xenapi.VDI.create( + { 'name_label': name_label, + 'name_description': '', + 'SR': sr_ref, + 'virtual_size': str(virtual_size), + 'type': 'User', + 'sharable': False, + 'read_only': read_only, + 'xenstore_data': {}, + 'other_config': {}, + 'sm_config': {}, + 'tags': [] }) + logging.debug('Created VDI %s (%s, %s, %s) on %s.', vdi_ref, name_label, + virtual_size, read_only, sr_ref) + return vdi_ref + + +def with_vdi_in_dom0(session, vdi, read_only, f): + dom0 = get_domain_0(session) + vbd_rec = {} + vbd_rec['VM'] = dom0 + vbd_rec['VDI'] = vdi + vbd_rec['userdevice'] = 'autodetect' + vbd_rec['bootable'] = False + vbd_rec['mode'] = read_only and 'RO' or 'RW' + vbd_rec['type'] = 'disk' + vbd_rec['unpluggable'] = True + vbd_rec['empty'] = False + vbd_rec['other_config'] = {} + vbd_rec['qos_algorithm_type'] = '' + vbd_rec['qos_algorithm_params'] = {} + vbd_rec['qos_supported_algorithms'] = [] + logging.debug('Creating VBD for VDI %s ... ', vdi) + vbd = session.xenapi.VBD.create(vbd_rec) + logging.debug('Creating VBD for VDI %s done.', vdi) + try: + logging.debug('Plugging VBD %s ... ', vbd) + session.xenapi.VBD.plug(vbd) + logging.debug('Plugging VBD %s done.', vbd) + return f(session.xenapi.VBD.get_device(vbd)) + finally: + logging.debug('Destroying VBD for VDI %s ... ', vdi) + vbd_unplug_with_retry(session, vbd) + ignore_failure(session.xenapi.VBD.destroy, vbd) + logging.debug('Destroying VBD for VDI %s done.', vdi) + + +def vbd_unplug_with_retry(session, vbd): + """Call VBD.unplug on the given VBD, with a retry if we get + DEVICE_DETACH_REJECTED. For reasons which I don't understand, we're + seeing the device still in use, even when all processes using the device + should be dead.""" + while True: + try: + session.xenapi.VBD.unplug(vbd) + logging.debug('VBD.unplug successful first time.') + return + except XenAPI.Failure, e: + if (len(e.details) > 0 and + e.details[0] == 'DEVICE_DETACH_REJECTED'): + logging.debug('VBD.unplug rejected: retrying...') + time.sleep(1) + elif (len(e.details) > 0 and + e.details[0] == 'DEVICE_ALREADY_DETACHED'): + logging.debug('VBD.unplug successful eventually.') + return + else: + logging.error('Ignoring XenAPI.Failure in VBD.unplug: %s', e) + return + + +def with_http_connection(proto, netloc, f): + conn = (proto == 'https' and + httplib.HTTPSConnection(netloc) or + httplib.HTTPConnection(netloc)) + try: + return f(conn) + finally: + conn.close() + + +def with_file(dest_path, mode, f): + dest = open(dest_path, mode) + try: + return f(dest) + finally: + dest.close() -- cgit From b31d4f795dbd94bae2c3d8f01aea3b15ed9684b2 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Tue, 3 Aug 2010 15:37:31 +0100 Subject: Define __contains__ on BasicModel, so that we can use "x in datamodel". 
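As a minimal sketch (standalone names assumed, not part of this patch): with
__contains__ delegating to the backing state dict, as the diff below does,
Python's "in" operator works directly against the model:

    class BasicModel(object):
        def __init__(self, state):
            # The real class loads its state from Redis; a plain dict is
            # assumed here purely for illustration.
            self.state = state

        def __contains__(self, item):
            # "item in model" resolves to this method.
            return item in self.state

    datamodel = BasicModel({'image_id': 'ami-tiny', 'user_id': 'fake'})
    print 'image_id' in datamodel   # True, via __contains__
    print 'kernel_id' in datamodel  # False
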
--- nova/datastore.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nova/datastore.py b/nova/datastore.py index 9c2592334..f6c11d2c9 100644 --- a/nova/datastore.py +++ b/nova/datastore.py @@ -168,6 +168,9 @@ class BasicModel(object): def setdefault(self, item, default): return self.state.setdefault(item, default) + def __contains__(self, item): + return item in self.state + def __getitem__(self, item): return self.state[item] -- cgit From 89e057cf2f008ebb7ec1c99605ff99f5849d9b40 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Tue, 3 Aug 2010 15:41:35 +0100 Subject: Implement VIF creation. --- nova/virt/xenapi.py | 57 ++++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 50 insertions(+), 7 deletions(-) diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index b84e55138..b4768cffa 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -79,6 +79,18 @@ class XenAPIConnection(object): raise Exception('Attempted to create non-unique name %s' % instance.name) + if 'bridge_name' in instance.datamodel: + network_ref = \ + yield self._find_network_with_bridge( + instance.datamodel['bridge_name']) + else: + network_ref = None + + if 'mac_address' in instance.datamodel: + mac_address = instance.datamodel['mac_address'] + else: + mac_address = '' + user = AuthManager().get_user(instance.datamodel['user_id']) vdi_uuid = yield self.fetch_image( instance.datamodel['image_id'], user, True) @@ -90,6 +102,8 @@ class XenAPIConnection(object): vm_ref = yield self.create_vm(instance, kernel, ramdisk) yield self.create_vbd(vm_ref, vdi_ref, 0, True) + if network_ref: + yield self._create_vif(vm_ref, network_ref, mac_address) yield self._conn.xenapi.VM.start(vm_ref, False, False) @@ -152,6 +166,35 @@ class XenAPIConnection(object): return vbd_ref + def _create_vif(self, vm_ref, network_ref, mac_address): + vif_rec = {} + vif_rec['device'] = '0' + vif_rec['network']= network_ref + vif_rec['VM'] = vm_ref + vif_rec['MAC'] = mac_address + vif_rec['MTU'] = '1500' + vif_rec['other_config'] = {} + vif_rec['qos_algorithm_type'] = '' + vif_rec['qos_algorithm_params'] = {} + logging.debug('Creating VIF for VM %s, network %s ... ', vm_ref, + network_ref) + vif_ref = self._conn.xenapi.VIF.create(vif_rec) + logging.debug('Created VIF %s for VM %s, network %s.', vif_ref, + vm_ref, network_ref) + return vif_ref + + + def _find_network_with_bridge(self, bridge): + expr = 'field "bridge" = "%s"' % bridge + networks = self._conn.xenapi.network.get_all_records_where(expr) + if len(networks) == 1: + return networks.keys()[0] + elif len(networks) > 1: + raise Exception('Found non-unique network for bridge %s' % bridge) + else: + raise Exception('Found no network for bridge %s' % bridge) + + def fetch_image(self, image, user, use_sr): """use_sr: True to put the image as a VDI in an SR, False to place it on dom0's filesystem. 
The former is for VM disks, the latter for @@ -213,13 +256,13 @@ class XenAPIConnection(object): return self._conn.xenapi.session.get_this_host(self._conn.handle) - power_state_from_xenapi = { - 'Halted' : power_state.SHUTDOWN, - 'Running' : power_state.RUNNING, - 'Paused' : power_state.PAUSED, - 'Suspended': power_state.SHUTDOWN, # FIXME - 'Crashed' : power_state.CRASHED - } +power_state_from_xenapi = { + 'Halted' : power_state.SHUTDOWN, + 'Running' : power_state.RUNNING, + 'Paused' : power_state.PAUSED, + 'Suspended': power_state.SHUTDOWN, # FIXME + 'Crashed' : power_state.CRASHED +} def _unwrap_plugin_exceptions(func, *args, **kwargs): -- cgit From 035f93aa7dc19656bf22de9b7ccfe12b28cde61b Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Tue, 3 Aug 2010 15:42:17 +0100 Subject: Fix exception in get_info. --- nova/virt/xenapi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index b4768cffa..c3e84c2b9 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -227,7 +227,7 @@ class XenAPIConnection(object): def get_info(self, instance_id): vm = self.lookup(instance_id) if vm is None: - raise Exception('instance not present %s' % instance.name) + raise Exception('instance not present %s' % instance_id) rec = self._conn.xenapi.VM.get_record(vm) return {'state': power_state_from_xenapi[rec['power_state']], 'max_mem': long(rec['memory_static_max']) >> 10, -- cgit From 6187529119ab51a6df7e30ef5190757ee0feca5e Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 3 Aug 2010 15:04:38 -0700 Subject: vblade commands randomly toss stuff into stderr, ignore it --- nova/volume/service.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/nova/volume/service.py b/nova/volume/service.py index e12f675a7..9dd63e88f 100644 --- a/nova/volume/service.py +++ b/nova/volume/service.py @@ -227,11 +227,7 @@ class Volume(datastore.BasicModel): @defer.inlineCallbacks def destroy(self): - try: - yield self._remove_export() - except Exception as ex: - logging.debug("Ingnoring failure to remove export %s" % ex) - pass + yield self._remove_export() yield self._delete_lv() super(Volume, self).destroy() @@ -250,7 +246,7 @@ class Volume(datastore.BasicModel): def _delete_lv(self): yield process.simple_execute( "sudo lvremove -f %s/%s" % (FLAGS.volume_group, - self['volume_id'])) + self['volume_id']), error_ok=1) @defer.inlineCallbacks def _setup_export(self): @@ -275,10 +271,10 @@ class Volume(datastore.BasicModel): def _remove_export(self): yield process.simple_execute( "sudo vblade-persist stop %s %s" % (self['shelf_id'], - self['blade_id'])) + self['blade_id']), error_ok=1) yield process.simple_execute( "sudo vblade-persist destroy %s %s" % (self['shelf_id'], - self['blade_id'])) + self['blade_id']), error_ok=1) class FakeVolume(Volume): -- cgit From d79fd0df0bf9c59483b30c0d8c3a811580a1ee39 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 5 Aug 2010 04:31:21 -0700 Subject: Changed volumes to use a pool instead of globbing filesystem for concurrency reasons. Fixed broken tests. --- nova/tests/volume_unittest.py | 77 +++++++++++++++++++++++-------- nova/volume/service.py | 102 +++++++++++++++++++++++------------------- 2 files changed, 114 insertions(+), 65 deletions(-) diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py index 0f4f0e34d..2a07afe69 100644 --- a/nova/tests/volume_unittest.py +++ b/nova/tests/volume_unittest.py @@ -17,6 +17,10 @@ # under the License. 
import logging +import shutil +import tempfile + +from twisted.internet import defer from nova import compute from nova import exception @@ -34,10 +38,16 @@ class VolumeTestCase(test.TrialTestCase): super(VolumeTestCase, self).setUp() self.compute = compute.service.ComputeService() self.volume = None + self.tempdir = tempfile.mkdtemp() self.flags(connection_type='fake', - fake_storage=True) + fake_storage=True, + aoe_export_dir=self.tempdir) self.volume = volume_service.VolumeService() + def tearDown(self): + shutil.rmtree(self.tempdir) + + @defer.inlineCallbacks def test_run_create_volume(self): vol_size = '0' user_id = 'fake' @@ -48,34 +58,40 @@ class VolumeTestCase(test.TrialTestCase): volume_service.get_volume(volume_id)['volume_id']) rv = self.volume.delete_volume(volume_id) - self.assertFailure(volume_service.get_volume(volume_id), - exception.Error) + self.assertRaises(exception.Error, volume_service.get_volume, volume_id) + @defer.inlineCallbacks def test_too_big_volume(self): vol_size = '1001' user_id = 'fake' project_id = 'fake' - self.assertRaises(TypeError, - self.volume.create_volume, - vol_size, user_id, project_id) + try: + yield self.volume.create_volume(vol_size, user_id, project_id) + self.fail("Should have thrown TypeError") + except TypeError: + pass + @defer.inlineCallbacks def test_too_many_volumes(self): vol_size = '1' user_id = 'fake' project_id = 'fake' num_shelves = FLAGS.last_shelf_id - FLAGS.first_shelf_id + 1 - total_slots = FLAGS.slots_per_shelf * num_shelves + total_slots = FLAGS.blades_per_shelf * num_shelves vols = [] + from nova import datastore + redis = datastore.Redis.instance() for i in xrange(total_slots): vid = yield self.volume.create_volume(vol_size, user_id, project_id) vols.append(vid) self.assertFailure(self.volume.create_volume(vol_size, user_id, project_id), - volume_service.NoMoreVolumes) + volume_service.NoMoreBlades) for id in vols: yield self.volume.delete_volume(id) + @defer.inlineCallbacks def test_run_attach_detach_volume(self): # Create one volume and one compute to test with instance_id = "storage-test" @@ -84,22 +100,26 @@ class VolumeTestCase(test.TrialTestCase): project_id = 'fake' mountpoint = "/dev/sdf" volume_id = yield self.volume.create_volume(vol_size, user_id, project_id) - volume_obj = volume_service.get_volume(volume_id) volume_obj.start_attach(instance_id, mountpoint) - rv = yield self.compute.attach_volume(volume_id, - instance_id, - mountpoint) + if FLAGS.fake_tests: + volume_obj.finish_attach() + else: + rv = yield self.compute.attach_volume(instance_id, + volume_id, + mountpoint) self.assertEqual(volume_obj['status'], "in-use") - self.assertEqual(volume_obj['attachStatus'], "attached") + self.assertEqual(volume_obj['attach_status'], "attached") self.assertEqual(volume_obj['instance_id'], instance_id) self.assertEqual(volume_obj['mountpoint'], mountpoint) - self.assertRaises(exception.Error, - self.volume.delete_volume, - volume_id) - - rv = yield self.volume.detach_volume(volume_id) + self.assertFailure(self.volume.delete_volume(volume_id), exception.Error) + volume_obj.start_detach() + if FLAGS.fake_tests: + volume_obj.finish_detach() + else: + rv = yield self.volume.detach_volume(instance_id, + volume_id) volume_obj = volume_service.get_volume(volume_id) self.assertEqual(volume_obj['status'], "available") @@ -108,6 +128,27 @@ class VolumeTestCase(test.TrialTestCase): volume_service.get_volume, volume_id) + @defer.inlineCallbacks + def test_multiple_volume_race_condition(self): + vol_size = "5" + user_id = "fake" + 
project_id = 'fake' + shelf_blades = [] + def _check(volume_id): + vol = volume_service.get_volume(volume_id) + shelf_blade = '%s.%s' % (vol['shelf_id'], vol['blade_id']) + self.assert_(shelf_blade not in shelf_blades) + shelf_blades.append(shelf_blade) + logging.debug("got %s" % shelf_blade) + vol.destroy() + deferreds = [] + for i in range(5): + d = self.volume.create_volume(vol_size, user_id, project_id) + d.addCallback(_check) + d.addErrback(self.fail) + deferreds.append(d) + yield defer.DeferredList(deferreds) + def test_multi_node(self): # TODO(termie): Figure out how to test with two nodes, # each of them having a different FLAG for storage_node diff --git a/nova/volume/service.py b/nova/volume/service.py index 9dd63e88f..9c52ee469 100644 --- a/nova/volume/service.py +++ b/nova/volume/service.py @@ -22,12 +22,8 @@ destroying persistent storage volumes, ala EBS. Currently uses Ata-over-Ethernet. """ -import glob import logging import os -import shutil -import socket -import tempfile from twisted.internet import defer @@ -47,9 +43,6 @@ flags.DEFINE_string('volume_group', 'nova-volumes', 'Name for the VG that will contain exported volumes') flags.DEFINE_string('aoe_eth_dev', 'eth0', 'Which device to export the volumes on') -flags.DEFINE_string('storage_name', - socket.gethostname(), - 'name of this service') flags.DEFINE_integer('first_shelf_id', utils.last_octet(utils.get_my_ip()) * 10, 'AoE starting shelf_id for this service') @@ -59,9 +52,9 @@ flags.DEFINE_integer('last_shelf_id', flags.DEFINE_string('aoe_export_dir', '/var/lib/vblade-persist/vblades', 'AoE directory where exports are created') -flags.DEFINE_integer('slots_per_shelf', +flags.DEFINE_integer('blades_per_shelf', 16, - 'Number of AoE slots per shelf') + 'Number of AoE blades per shelf') flags.DEFINE_string('storage_availability_zone', 'nova', 'availability zone of this service') @@ -69,7 +62,7 @@ flags.DEFINE_boolean('fake_storage', False, 'Should we make real storage volumes to attach?') -class NoMoreVolumes(exception.Error): +class NoMoreBlades(exception.Error): pass def get_volume(volume_id): @@ -77,8 +70,9 @@ def get_volume(volume_id): volume_class = Volume if FLAGS.fake_storage: volume_class = FakeVolume - if datastore.Redis.instance().sismember('volumes', volume_id): - return volume_class(volume_id=volume_id) + vol = volume_class.lookup(volume_id) + if vol: + return vol raise exception.Error("Volume does not exist") class VolumeService(service.Service): @@ -91,18 +85,9 @@ class VolumeService(service.Service): super(VolumeService, self).__init__() self.volume_class = Volume if FLAGS.fake_storage: - FLAGS.aoe_export_dir = tempfile.mkdtemp() self.volume_class = FakeVolume self._init_volume_group() - def __del__(self): - # TODO(josh): Get rid of this destructor, volumes destroy themselves - if FLAGS.fake_storage: - try: - shutil.rmtree(FLAGS.aoe_export_dir) - except Exception, err: - pass - @defer.inlineCallbacks @validate.rangetest(size=(0, 1000)) def create_volume(self, size, user_id, project_id): @@ -113,8 +98,6 @@ class VolumeService(service.Service): """ logging.debug("Creating volume of size: %s" % (size)) vol = yield self.volume_class.create(size, user_id, project_id) - datastore.Redis.instance().sadd('volumes', vol['volume_id']) - datastore.Redis.instance().sadd('volumes:%s' % (FLAGS.storage_name), vol['volume_id']) logging.debug("restarting exports") yield self._restart_exports() defer.returnValue(vol['volume_id']) @@ -134,13 +117,11 @@ class VolumeService(service.Service): def delete_volume(self, volume_id): 
logging.debug("Deleting volume with id of: %s" % (volume_id)) vol = get_volume(volume_id) - if vol['status'] == "attached": + if vol['attach_status'] == "attached": raise exception.Error("Volume is still attached") - if vol['node_name'] != FLAGS.storage_name: + if vol['node_name'] != FLAGS.node_name: raise exception.Error("Volume is not local to this node") yield vol.destroy() - datastore.Redis.instance().srem('volumes', vol['volume_id']) - datastore.Redis.instance().srem('volumes:%s' % (FLAGS.storage_name), vol['volume_id']) defer.returnValue(True) @defer.inlineCallbacks @@ -172,14 +153,15 @@ class Volume(datastore.BasicModel): return self.volume_id def default_state(self): - return {"volume_id": self.volume_id} + return {"volume_id": self.volume_id, + "node_name": "unassigned"} @classmethod @defer.inlineCallbacks def create(cls, size, user_id, project_id): volume_id = utils.generate_uid('vol') vol = cls(volume_id) - vol['node_name'] = FLAGS.storage_name + vol['node_name'] = FLAGS.node_name vol['size'] = size vol['user_id'] = user_id vol['project_id'] = project_id @@ -225,10 +207,31 @@ class Volume(datastore.BasicModel): self['attach_status'] = "detached" self.save() + def save(self): + is_new = self.is_new_record() + super(Volume, self).save() + if is_new: + redis = datastore.Redis.instance() + key = self.__devices_key + # TODO(vish): these should be added by admin commands + more = redis.scard(self._redis_association_name("node", + self['node_name'])) + if (not redis.exists(key) and not more): + for shelf_id in range(FLAGS.first_shelf_id, + FLAGS.last_shelf_id + 1): + for blade_id in range(FLAGS.blades_per_shelf): + redis.sadd(key, "%s.%s" % (shelf_id, blade_id)) + self.associate_with("node", self['node_name']) + @defer.inlineCallbacks def destroy(self): yield self._remove_export() yield self._delete_lv() + self.unassociate_with("node", self['node_name']) + if self.get('shelf_id', None) and self.get('blade_id', None): + redis = datastore.Redis.instance() + key = self.__devices_key + redis.sadd(key, "%s.%s" % (self['shelf_id'], self['blade_id'])) super(Volume, self).destroy() @defer.inlineCallbacks @@ -248,17 +251,26 @@ class Volume(datastore.BasicModel): "sudo lvremove -f %s/%s" % (FLAGS.volume_group, self['volume_id']), error_ok=1) + @property + def __devices_key(self): + return 'volume_devices:%s' % FLAGS.node_name + @defer.inlineCallbacks def _setup_export(self): - (shelf_id, blade_id) = get_next_aoe_numbers() + redis = datastore.Redis.instance() + key = self.__devices_key + device = redis.spop(key) + if not device: + raise NoMoreBlades() + (shelf_id, blade_id) = device.split('.') self['aoe_device'] = "e%s.%s" % (shelf_id, blade_id) self['shelf_id'] = shelf_id self['blade_id'] = blade_id self.save() - yield self._exec_export() + yield self._exec_setup_export() @defer.inlineCallbacks - def _exec_export(self): + def _exec_setup_export(self): yield process.simple_execute( "sudo vblade-persist setup %s %s %s /dev/%s/%s" % (self['shelf_id'], @@ -269,6 +281,13 @@ class Volume(datastore.BasicModel): @defer.inlineCallbacks def _remove_export(self): + if not self.get('shelf_id', None) or not self.get('blade_id', None): + defer.returnValue(False) + yield self._exec_remove_export() + defer.returnValue(True) + + @defer.inlineCallbacks + def _exec_remove_export(self): yield process.simple_execute( "sudo vblade-persist stop %s %s" % (self['shelf_id'], self['blade_id']), error_ok=1) @@ -277,29 +296,18 @@ class Volume(datastore.BasicModel): self['blade_id']), error_ok=1) + class FakeVolume(Volume): 
def _create_lv(self): pass - def _exec_export(self): + def _exec_setup_export(self): fname = os.path.join(FLAGS.aoe_export_dir, self['aoe_device']) f = file(fname, "w") f.close() - def _remove_export(self): - pass + def _exec_remove_export(self): + os.unlink(os.path.join(FLAGS.aoe_export_dir, self['aoe_device'])) def _delete_lv(self): pass - -def get_next_aoe_numbers(): - for shelf_id in xrange(FLAGS.first_shelf_id, FLAGS.last_shelf_id + 1): - aoes = glob.glob("%s/e%s.*" % (FLAGS.aoe_export_dir, shelf_id)) - if not aoes: - blade_id = 0 - else: - blade_id = int(max([int(a.rpartition('.')[2]) for a in aoes])) + 1 - if blade_id < FLAGS.slots_per_shelf: - logging.debug("Next shelf.blade is %s.%s", shelf_id, blade_id) - return (shelf_id, blade_id) - raise NoMoreVolumes() -- cgit From a33dce2da8dc8e25d0943732adfa6b14b1e48c7b Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 6 Aug 2010 15:48:46 -0700 Subject: a few more commands were putting output on stderr. In general, exceptions on stderr output seems like a bad idea --- nova/volume/service.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/nova/volume/service.py b/nova/volume/service.py index 9c52ee469..66163a812 100644 --- a/nova/volume/service.py +++ b/nova/volume/service.py @@ -128,8 +128,8 @@ class VolumeService(service.Service): def _restart_exports(self): if FLAGS.fake_storage: return - yield process.simple_execute("sudo vblade-persist auto all") - # NOTE(vish): this command sometimes sends output to stderr for warnings + # NOTE(vish): these commands sometimes sends output to stderr for warnings + yield process.simple_execute("sudo vblade-persist auto all", error_ok=1) yield process.simple_execute("sudo vblade-persist start all", error_ok=1) @defer.inlineCallbacks @@ -243,7 +243,8 @@ class Volume(datastore.BasicModel): yield process.simple_execute( "sudo lvcreate -L %s -n %s %s" % (sizestr, self['volume_id'], - FLAGS.volume_group)) + FLAGS.volume_group), + error_ok=1) @defer.inlineCallbacks def _delete_lv(self): @@ -277,7 +278,7 @@ class Volume(datastore.BasicModel): self['blade_id'], FLAGS.aoe_eth_dev, FLAGS.volume_group, - self['volume_id'])) + self['volume_id']), error_ok=1) @defer.inlineCallbacks def _remove_export(self): -- cgit From 86a7e62f0b72763088b0a34516ffc30f22ca937e Mon Sep 17 00:00:00 2001 From: Joe Heck Date: Sun, 8 Aug 2010 09:49:47 -0700 Subject: adding pep8 and pylint for regular cleanup tasks --- tools/pip-requires | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/pip-requires b/tools/pip-requires index 4eb47ca2b..24aefb25e 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -1,3 +1,5 @@ +pep8==0.5.0 +pylint==0.21.1 IPy==0.70 M2Crypto==0.20.2 amqplib==0.6.1 -- cgit From abd9bed8f7f88617c0a402faef47da13963ccea7 Mon Sep 17 00:00:00 2001 From: Joe Heck Date: Sun, 8 Aug 2010 09:50:22 -0700 Subject: attempting some cleanup work --- nova/endpoint/cloud.py | 77 ++++++++++++++++++++++++++------------------------ 1 file changed, 40 insertions(+), 37 deletions(-) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 878d54a15..ee22863a9 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -47,6 +47,7 @@ FLAGS = flags.FLAGS flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') + def _gen_key(user_id, key_name): """ Tuck this into AuthManager """ try: @@ -102,15 +103,16 @@ class CloudController(object): result = {} for instance in self.instdir.all: if instance['project_id'] == project_id: - line = '%s slots=%d' % 
(instance['private_dns_name'], INSTANCE_TYPES[instance['instance_type']]['vcpus']) + line = '%s slots=%d' % (instance['private_dns_name'], + INSTANCE_TYPES[instance['instance_type']]['vcpus']) if instance['key_name'] in result: result[instance['key_name']].append(line) else: result[instance['key_name']] = [line] return result - def get_metadata(self, ip): - i = self.get_instance_by_ip(ip) + def get_metadata(self, ipaddress): + i = self.get_instance_by_ip(ipaddress) if i is None: return None mpi = self._get_mpi_data(i['project_id']) @@ -147,7 +149,7 @@ class CloudController(object): }, 'public-hostname': i.get('dns_name', ''), 'public-ipv4': i.get('dns_name', ''), # TODO: switch to IP - 'public-keys' : keys, + 'public-keys': keys, 'ramdisk-id': i.get('ramdisk_id', ''), 'reservation-id': i['reservation_id'], 'security-groups': i.get('groups', ''), @@ -203,7 +205,7 @@ class CloudController(object): 'keyFingerprint': key_pair.fingerprint, }) - return { 'keypairsSet': result } + return {'keypairsSet': result} @rbac.allow('all') def create_key_pair(self, context, key_name, **kwargs): @@ -232,7 +234,7 @@ class CloudController(object): @rbac.allow('all') def describe_security_groups(self, context, group_names, **kwargs): - groups = { 'securityGroupSet': [] } + groups = {'securityGroupSet': []} # Stubbed for now to unblock other things. return groups @@ -251,7 +253,7 @@ class CloudController(object): instance = self._get_instance(context, instance_id[0]) return rpc.call('%s.%s' % (FLAGS.compute_topic, instance['node_name']), {"method": "get_console_output", - "args" : {"instance_id": instance_id[0]}}) + "args": {"instance_id": instance_id[0]}}) def _get_user_id(self, context): if context and context.user: @@ -285,10 +287,10 @@ class CloudController(object): if volume['attach_status'] == 'attached': v['attachmentSet'] = [{'attachTime': volume['attach_time'], 'deleteOnTermination': volume['delete_on_termination'], - 'device' : volume['mountpoint'], - 'instanceId' : volume['instance_id'], - 'status' : 'attached', - 'volume_id' : volume['volume_id']}] + 'device': volume['mountpoint'], + 'instanceId': volume['instance_id'], + 'status': 'attached', + 'volume_id': volume['volume_id']}] else: v['attachmentSet'] = [{}] return v @@ -298,7 +300,7 @@ class CloudController(object): def create_volume(self, context, size, **kwargs): # TODO(vish): refactor this to create the volume object here and tell service to create it result = yield rpc.call(FLAGS.volume_topic, {"method": "create_volume", - "args" : {"size": size, + "args": {"size": size, "user_id": context.user.id, "project_id": context.project.id}}) # NOTE(vish): rpc returned value is in the result key in the dictionary @@ -348,15 +350,15 @@ class CloudController(object): compute_node = instance['node_name'] rpc.cast('%s.%s' % (FLAGS.compute_topic, compute_node), {"method": "attach_volume", - "args" : {"volume_id": volume_id, - "instance_id" : instance_id, - "mountpoint" : device}}) - return defer.succeed({'attachTime' : volume['attach_time'], - 'device' : volume['mountpoint'], - 'instanceId' : instance_id, - 'requestId' : context.request_id, - 'status' : volume['attach_status'], - 'volumeId' : volume_id}) + "args": {"volume_id": volume_id, + "instance_id": instance_id, + "mountpoint": device}}) + return defer.succeed({'attachTime': volume['attach_time'], + 'device': volume['mountpoint'], + 'instanceId': instance_id, + 'requestId': context.request_id, + 'status': volume['attach_status'], + 'volumeId': volume_id}) @rbac.allow('projectmanager', 'sysadmin') @@ 
-372,18 +374,18 @@ class CloudController(object): instance = self._get_instance(context, instance_id) rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), {"method": "detach_volume", - "args" : {"instance_id": instance_id, + "args": {"instance_id": instance_id, "volume_id": volume_id}}) except exception.NotFound: # If the instance doesn't exist anymore, # then we need to call detach blind volume.finish_detach() - return defer.succeed({'attachTime' : volume['attach_time'], - 'device' : volume['mountpoint'], - 'instanceId' : instance_id, - 'requestId' : context.request_id, - 'status' : volume['attach_status'], - 'volumeId' : volume_id}) + return defer.succeed({'attachTime': volume['attach_time'], + 'device': volume['mountpoint'], + 'instanceId': instance_id, + 'requestId': context.request_id, + 'status': volume['attach_status'], + 'volumeId': volume_id}) def _convert_to_set(self, lst, label): if lst == None or lst == []: @@ -425,7 +427,8 @@ class CloudController(object): i['key_name'] = instance.get('key_name', None) if context.user.is_admin(): i['key_name'] = '%s (%s, %s)' % (i['key_name'], - instance.get('project_id', None), instance.get('node_name','')) + instance.get('project_id', None), + instance.get('node_name', '')) i['product_codes_set'] = self._convert_to_set( instance.get('product_codes', None), 'product_code') i['instance_type'] = instance.get('instance_type', None) @@ -442,7 +445,7 @@ class CloudController(object): reservations[res_id] = r reservations[res_id]['instances_set'].append(i) - instance_response = {'reservationSet' : list(reservations.values()) } + instance_response = {'reservationSet': list(reservations.values())} return instance_response @rbac.allow('all') @@ -457,7 +460,7 @@ class CloudController(object): address['project_id'] == context.project.id): address_rv = { 'public_ip': address['address'], - 'instance_id' : address.get('instance_id', 'free') + 'instance_id': address.get('instance_id', 'free') } if context.user.is_admin(): address_rv['instance_id'] = "%s (%s, %s)" % ( @@ -477,7 +480,7 @@ class CloudController(object): "args": {"user_id": context.user.id, "project_id": context.project.id}}) public_ip = alloc_result['result'] - defer.returnValue({'addressSet': [{'publicIp' : public_ip}]}) + defer.returnValue({'addressSet': [{'publicIp': public_ip}]}) @rbac.allow('netadmin') @defer.inlineCallbacks @@ -591,7 +594,7 @@ class CloudController(object): inst.save() rpc.cast(FLAGS.compute_topic, {"method": "run_instance", - "args": {"instance_id" : inst.instance_id}}) + "args": {"instance_id": inst.instance_id}}) logging.debug("Casting to node for %s's instance with IP of %s" % (context.user.name, inst['private_dns_name'])) # TODO: Make Network figure out the network name from ip. 
@@ -646,7 +649,7 @@ class CloudController(object): instance = self._get_instance(context, i) rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), {"method": "reboot_instance", - "args" : {"instance_id": i}}) + "args": {"instance_id": i}}) return defer.succeed(True) @rbac.allow('projectmanager', 'sysadmin') @@ -656,7 +659,7 @@ class CloudController(object): volume_node = volume['node_name'] rpc.cast('%s.%s' % (FLAGS.volume_topic, volume_node), {"method": "delete_volume", - "args" : {"volume_id": volume_id}}) + "args": {"volume_id": volume_id}}) return defer.succeed(True) @rbac.allow('all') @@ -689,9 +692,9 @@ class CloudController(object): image = images.list(context, image_id)[0] except IndexError: raise exception.ApiError('invalid id: %s' % image_id) - result = { 'image_id': image_id, 'launchPermission': [] } + result = {'image_id': image_id, 'launchPermission': []} if image['isPublic']: - result['launchPermission'].append({ 'group': 'all' }) + result['launchPermission'].append({'group': 'all'}) return defer.succeed(result) @rbac.allow('projectmanager', 'sysadmin') -- cgit From e59b769cf1ad12f63788d2e90fd3a4412f9db6f4 Mon Sep 17 00:00:00 2001 From: Joe Heck Date: Sun, 8 Aug 2010 11:39:14 -0700 Subject: variable name cleanup --- nova/endpoint/cloud.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index ee22863a9..8b937306e 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -210,18 +210,18 @@ class CloudController(object): @rbac.allow('all') def create_key_pair(self, context, key_name, **kwargs): try: - d = defer.Deferred() - p = context.handler.application.settings.get('pool') + dcall = defer.Deferred() + pool = context.handler.application.settings.get('pool') def _complete(kwargs): if 'exception' in kwargs: - d.errback(kwargs['exception']) + dcall.errback(kwargs['exception']) return - d.callback({'keyName': key_name, + dcall.callback({'keyName': key_name, 'keyFingerprint': kwargs['fingerprint'], 'keyMaterial': kwargs['private_key']}) - p.apply_async(_gen_key, [context.user.id, key_name], + pool.apply_async(_gen_key, [context.user.id, key_name], callback=_complete) - return d + return dcall except manager.UserError as e: raise -- cgit From 3fe167e1e398b3d602699b8219dcbfc8fec86859 Mon Sep 17 00:00:00 2001 From: Joe Heck Date: Sun, 8 Aug 2010 11:40:03 -0700 Subject: removing what appears to be an unused try/except statement - nova.auth.manager.UserError doesn't exist in this codebase. Leftover? Something intended to be there but never added? 
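For context, a hedged sketch (assumed names, not the actual handler) of the
pattern the surviving code follows: a worker-pool callback is bridged into a
Twisted Deferred, and failures are reported in-band through errback, so a
try/except wrapped around the setup call itself had nothing to catch:

    from twisted.internet import defer

    def deferred_from_pool(pool, func, args):
        # Sketch only: `pool` is assumed to be a multiprocessing.Pool,
        # as the handler pulls from the application settings.
        d = defer.Deferred()

        def _complete(result):
            # The worker signals failure in-band; route it to errback.
            if 'exception' in result:
                d.errback(result['exception'])
            else:
                d.callback(result)

        pool.apply_async(func, args, callback=_complete)
        return d
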
--- nova/endpoint/cloud.py | 28 ++++++++++++---------------- 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 8b937306e..ad9188ff3 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -209,22 +209,18 @@ class CloudController(object): @rbac.allow('all') def create_key_pair(self, context, key_name, **kwargs): - try: - dcall = defer.Deferred() - pool = context.handler.application.settings.get('pool') - def _complete(kwargs): - if 'exception' in kwargs: - dcall.errback(kwargs['exception']) - return - dcall.callback({'keyName': key_name, - 'keyFingerprint': kwargs['fingerprint'], - 'keyMaterial': kwargs['private_key']}) - pool.apply_async(_gen_key, [context.user.id, key_name], - callback=_complete) - return dcall - - except manager.UserError as e: - raise + dcall = defer.Deferred() + pool = context.handler.application.settings.get('pool') + def _complete(kwargs): + if 'exception' in kwargs: + dcall.errback(kwargs['exception']) + return + dcall.callback({'keyName': key_name, + 'keyFingerprint': kwargs['fingerprint'], + 'keyMaterial': kwargs['private_key']}) + pool.apply_async(_gen_key, [context.user.id, key_name], + callback=_complete) + return dcall @rbac.allow('all') def delete_key_pair(self, context, key_name, **kwargs): -- cgit From 86150042191005a9bf04ef243396667cb9dad1b0 Mon Sep 17 00:00:00 2001 From: Joe Heck Date: Sun, 8 Aug 2010 13:20:50 -0700 Subject: convention and variable naming cleanup for pylint/pep8 --- nova/network/model.py | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/nova/network/model.py b/nova/network/model.py index daac035e4..eada776c7 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -97,11 +97,11 @@ class Vlan(datastore.BasicModel): def dict_by_vlan(cls): """a hash of vlan:project""" set_name = cls._redis_set_name(cls.__name__) - rv = {} - h = datastore.Redis.instance().hgetall(set_name) - for v in h.keys(): - rv[h[v]] = v - return rv + retvals = {} + hashset = datastore.Redis.instance().hgetall(set_name) + for val in hashset.keys(): + retvals[hashset[val]] = val + return retvals @classmethod @datastore.absorb_connection_error @@ -136,7 +136,8 @@ class Vlan(datastore.BasicModel): # CLEANUP: # TODO(ja): Save the IPs at the top of each subnet for cloudpipe vpn clients -# TODO(ja): does vlanpool "keeper" need to know the min/max - shouldn't FLAGS always win? +# TODO(ja): does vlanpool "keeper" need to know the min/max - +# shouldn't FLAGS always win? 
# TODO(joshua): Save the IPs at the top of each subnet for cloudpipe vpn clients class BaseNetwork(datastore.BasicModel): @@ -217,7 +218,9 @@ class BaseNetwork(datastore.BasicModel): def available(self): # the .2 address is always CloudPipe # and the top are for vpn clients - for idx in range(self.num_static_ips, len(self.network)-(1 + FLAGS.cnt_vpn_clients)): + num_ips = self.num_static_ips + num_clients = FLAGS.cnt_vpn_clients + for idx in range(num_ips, len(self.network)-(1 + num_clients)): address = str(self.network[idx]) if not address in self.hosts.keys(): yield address @@ -338,8 +341,9 @@ class DHCPNetwork(BridgedNetwork): private_ip = str(self.network[2]) linux_net.confirm_rule("FORWARD -d %s -p udp --dport 1194 -j ACCEPT" % (private_ip, )) - linux_net.confirm_rule("PREROUTING -t nat -d %s -p udp --dport %s -j DNAT --to %s:1194" - % (self.project.vpn_ip, self.project.vpn_port, private_ip)) + linux_net.confirm_rule( + "PREROUTING -t nat -d %s -p udp --dport %s -j DNAT --to %s:1194" + % (self.project.vpn_ip, self.project.vpn_port, private_ip)) def deexpress(self, address=None): # if this is the last address, stop dns @@ -374,13 +378,14 @@ class PublicAddress(datastore.BasicModel): return addr -DEFAULT_PORTS = [("tcp",80), ("tcp",22), ("udp",1194), ("tcp",443)] +DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)] class PublicNetworkController(BaseNetwork): override_type = 'network' def __init__(self, *args, **kwargs): network_id = "public:default" - super(PublicNetworkController, self).__init__(network_id, FLAGS.public_range) + super(PublicNetworkController, self).__init__(network_id, + FLAGS.public_range) self['user_id'] = "public" self['project_id'] = "public" self["create_time"] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) @@ -415,7 +420,7 @@ class PublicNetworkController(BaseNetwork): def deallocate_ip(self, ip_str): # NOTE(vish): cleanup is now done on release by the parent class - self.release_ip(ip_str) + self.release_ip(ip_str) def associate_address(self, public_ip, private_ip, instance_id): if not public_ip in self.assigned: @@ -461,8 +466,9 @@ class PublicNetworkController(BaseNetwork): linux_net.confirm_rule("FORWARD -d %s -p icmp -j ACCEPT" % (private_ip)) for (protocol, port) in DEFAULT_PORTS: - linux_net.confirm_rule("FORWARD -d %s -p %s --dport %s -j ACCEPT" - % (private_ip, protocol, port)) + linux_net.confirm_rule( + "FORWARD -d %s -p %s --dport %s -j ACCEPT" + % (private_ip, protocol, port)) def deexpress(self, address=None): addr = self.get_host(address) -- cgit From 21c1d379199c528024c5e85571609e77e53c6ee7 Mon Sep 17 00:00:00 2001 From: Joe Heck Date: Sun, 8 Aug 2010 13:31:40 -0700 Subject: light cleanup - convention stuff mostly --- nova/auth/manager.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/nova/auth/manager.py b/nova/auth/manager.py index d44ed52b2..e5efbca24 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -29,15 +29,17 @@ import uuid import zipfile from nova import crypto -from nova import datastore from nova import exception from nova import flags -from nova import objectstore # for flags from nova import utils -from nova.auth import ldapdriver # for flags from nova.auth import signer from nova.network import vpn +#unused imports +#from nova import datastore +#from nova.auth import ldapdriver # for flags +#from nova import objectstore # for flags + FLAGS = flags.FLAGS # NOTE(vish): a user with one of these roles will be a superuser and @@ -99,6 +101,7 @@ class 
AuthBase(object): class User(AuthBase): """Object representing a user""" def __init__(self, id, name, access, secret, admin): + AuthBase.__init__(self) self.id = id self.name = name self.access = access @@ -159,6 +162,7 @@ class KeyPair(AuthBase): fingerprint is stored. The user's private key is not saved. """ def __init__(self, id, name, owner_id, public_key, fingerprint): + AuthBase.__init__(self) self.id = id self.name = name self.owner_id = owner_id @@ -176,6 +180,7 @@ class KeyPair(AuthBase): class Project(AuthBase): """Represents a Project returned from the datastore""" def __init__(self, id, name, project_manager_id, description, member_ids): + AuthBase.__init__(self) self.id = id self.name = name self.project_manager_id = project_manager_id @@ -234,7 +239,7 @@ class AuthManager(object): AuthManager also manages associated data related to Auth objects that need to be more accessible, such as vpn ips and ports. """ - _instance=None + _instance = None def __new__(cls, *args, **kwargs): """Returns the AuthManager singleton""" if not cls._instance: @@ -248,7 +253,7 @@ class AuthManager(object): reset the driver if it is not set or a new driver is specified. """ if driver or not getattr(self, 'driver', None): - self.driver = utils.import_class(driver or FLAGS.auth_driver) + self.driver = utils.import_class(driver or FLAGS.auth_driver) def authenticate(self, access, signature, params, verb='GET', server_string='127.0.0.1:8773', path='/', -- cgit From d1977a820db3dad7e907e976c5502ffd37e1b719 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Mon, 9 Aug 2010 13:23:19 +0100 Subject: Move the xenapi top level directory under plugins, as suggested by Jay Pipes. --- plugins/xenapi/README | 2 + plugins/xenapi/etc/xapi.d/plugins/objectstore | 231 +++++++++++++++++++++ .../xenapi/etc/xapi.d/plugins/pluginlib_nova.py | 216 +++++++++++++++++++ xenapi/README | 2 - xenapi/etc/xapi.d/plugins/objectstore | 231 --------------------- xenapi/etc/xapi.d/plugins/pluginlib_nova.py | 216 ------------------- 6 files changed, 449 insertions(+), 449 deletions(-) create mode 100644 plugins/xenapi/README create mode 100644 plugins/xenapi/etc/xapi.d/plugins/objectstore create mode 100755 plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py delete mode 100644 xenapi/README delete mode 100644 xenapi/etc/xapi.d/plugins/objectstore delete mode 100755 xenapi/etc/xapi.d/plugins/pluginlib_nova.py diff --git a/plugins/xenapi/README b/plugins/xenapi/README new file mode 100644 index 000000000..1fc67aa7a --- /dev/null +++ b/plugins/xenapi/README @@ -0,0 +1,2 @@ +This directory contains files that are required for the XenAPI support. They +should be installed in the XenServer / Xen Cloud Platform domain 0. diff --git a/plugins/xenapi/etc/xapi.d/plugins/objectstore b/plugins/xenapi/etc/xapi.d/plugins/objectstore new file mode 100644 index 000000000..271e7337f --- /dev/null +++ b/plugins/xenapi/etc/xapi.d/plugins/objectstore @@ -0,0 +1,231 @@ +#!/usr/bin/env python + +# Copyright (c) 2010 Citrix Systems, Inc. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# +# XenAPI plugin for fetching images from nova-objectstore. +# + +import base64 +import errno +import hmac +import os +import os.path +import sha +import time +import urlparse + +import XenAPIPlugin + +from pluginlib_nova import * +configure_logging('objectstore') + + +KERNEL_DIR = '/boot/guest' + +DOWNLOAD_CHUNK_SIZE = 2 * 1024 * 1024 +SECTOR_SIZE = 512 +MBR_SIZE_SECTORS = 63 +MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE + + +def get_vdi(session, args): + src_url = exists(args, 'src_url') + username = exists(args, 'username') + password = exists(args, 'password') + add_partition = validate_bool(args, 'add_partition', 'false') + + (proto, netloc, url_path, _, _, _) = urlparse.urlparse(src_url) + + sr = find_sr(session) + if sr is None: + raise Exception('Cannot find SR to write VDI to') + + virtual_size = \ + get_content_length(proto, netloc, url_path, username, password) + if virtual_size < 0: + raise Exception('Cannot get VDI size') + + vdi_size = virtual_size + if add_partition: + # Make room for MBR. + vdi_size += MBR_SIZE_BYTES + + vdi = create_vdi(session, sr, src_url, vdi_size, False) + with_vdi_in_dom0(session, vdi, False, + lambda dev: get_vdi_(proto, netloc, url_path, + username, password, add_partition, + virtual_size, '/dev/%s' % dev)) + return session.xenapi.VDI.get_uuid(vdi) + + +def get_vdi_(proto, netloc, url_path, username, password, add_partition, + virtual_size, dest): + + if add_partition: + write_partition(virtual_size, dest) + + offset = add_partition and MBR_SIZE_BYTES or 0 + get(proto, netloc, url_path, username, password, dest, offset) + + +def write_partition(virtual_size, dest): + mbr_last = MBR_SIZE_SECTORS - 1 + primary_first = MBR_SIZE_SECTORS + primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1 + + logging.debug('Writing partition table %d %d to %s...', + primary_first, primary_last, dest) + + result = os.system('parted --script %s mklabel msdos' % dest) + if result != 0: + raise Exception('Failed to mklabel') + result = os.system('parted --script %s mkpart primary %ds %ds' % + (dest, primary_first, primary_last)) + if result != 0: + raise Exception('Failed to mkpart') + + logging.debug('Writing partition table %s done.', dest) + + +def find_sr(session): + host = get_this_host(session) + srs = session.xenapi.SR.get_all() + for sr in srs: + sr_rec = session.xenapi.SR.get_record(sr) + if not ('i18n-key' in sr_rec['other_config'] and + sr_rec['other_config']['i18n-key'] == 'local-storage'): + continue + for pbd in sr_rec['PBDs']: + pbd_rec = session.xenapi.PBD.get_record(pbd) + if pbd_rec['host'] == host: + return sr + return None + + +def get_kernel(session, args): + src_url = exists(args, 'src_url') + username = exists(args, 'username') + password = exists(args, 'password') + + (proto, netloc, url_path, _, _, _) = urlparse.urlparse(src_url) + + dest = os.path.join(KERNEL_DIR, url_path[1:]) + + # Paranoid check against people using ../ to do rude things. 
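+    # (Note: os.path.commonprefix is a purely lexical prefix check; it
+    # verifies that dest still begins with the KERNEL_DIR prefix after
+    # url_path is joined onto it above.)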
+ if os.path.commonprefix([KERNEL_DIR, dest]) != KERNEL_DIR: + raise Exception('Illegal destination %s %s', (url_path, dest)) + + dirname = os.path.dirname(dest) + try: + os.makedirs(dirname) + except os.error, e: + if e.errno != errno.EEXIST: + raise + if not os.path.isdir(dirname): + raise Exception('Cannot make directory %s', dirname) + + try: + os.remove(dest) + except: + pass + + get(proto, netloc, url_path, username, password, dest, 0) + + return dest + + +def get_content_length(proto, netloc, url_path, username, password): + headers = make_headers('HEAD', url_path, username, password) + return with_http_connection( + proto, netloc, + lambda conn: get_content_length_(url_path, headers, conn)) + + +def get_content_length_(url_path, headers, conn): + conn.request('HEAD', url_path, None, headers) + response = conn.getresponse() + if response.status != 200: + raise Exception('%d %s' % (response.status, response.reason)) + + return long(response.getheader('Content-Length', -1)) + + +def get(proto, netloc, url_path, username, password, dest, offset): + headers = make_headers('GET', url_path, username, password) + download(proto, netloc, url_path, headers, dest, offset) + + +def make_headers(verb, url_path, username, password): + headers = {} + headers['Date'] = \ + time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) + headers['Authorization'] = \ + 'AWS %s:%s' % (username, + s3_authorization(verb, url_path, password, headers)) + return headers + + +def s3_authorization(verb, path, password, headers): + sha1 = hmac.new(password, digestmod=sha) + sha1.update(plaintext(verb, path, headers)) + return base64.encodestring(sha1.digest()).strip() + + +def plaintext(verb, path, headers): + return '%s\n\n\n%s\n%s' % (verb, + "\n".join([headers[h] for h in headers]), + path) + + +def download(proto, netloc, url_path, headers, dest, offset): + with_http_connection( + proto, netloc, + lambda conn: download_(url_path, dest, offset, headers, conn)) + + +def download_(url_path, dest, offset, headers, conn): + conn.request('GET', url_path, None, headers) + response = conn.getresponse() + if response.status != 200: + raise Exception('%d %s' % (response.status, response.reason)) + + length = response.getheader('Content-Length', -1) + + with_file( + dest, 'a', + lambda dest_file: download_all(response, length, dest_file, offset)) + + +def download_all(response, length, dest_file, offset): + dest_file.seek(offset) + i = 0 + while True: + buf = response.read(DOWNLOAD_CHUNK_SIZE) + if buf: + dest_file.write(buf) + else: + return + i += len(buf) + if length != -1 and i >= length: + return + + +if __name__ == '__main__': + XenAPIPlugin.dispatch({'get_vdi': get_vdi, + 'get_kernel': get_kernel}) diff --git a/plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py b/plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py new file mode 100755 index 000000000..2d323a016 --- /dev/null +++ b/plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py @@ -0,0 +1,216 @@ +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +# +# Helper functions for the Nova xapi plugins. In time, this will merge +# with the pluginlib.py shipped with xapi, but for now, that file is not +# very stable, so it's easiest just to have a copy of all the functions +# that we need. +# + +import httplib +import logging +import logging.handlers +import re +import time + + +##### Logging setup + +def configure_logging(name): + log = logging.getLogger() + log.setLevel(logging.DEBUG) + sysh = logging.handlers.SysLogHandler('/dev/log') + sysh.setLevel(logging.DEBUG) + formatter = logging.Formatter('%s: %%(levelname)-8s %%(message)s' % name) + sysh.setFormatter(formatter) + log.addHandler(sysh) + + +##### Exceptions + +class PluginError(Exception): + """Base Exception class for all plugin errors.""" + def __init__(self, *args): + Exception.__init__(self, *args) + +class ArgumentError(PluginError): + """Raised when required arguments are missing, argument values are invalid, + or incompatible arguments are given. + """ + def __init__(self, *args): + PluginError.__init__(self, *args) + + +##### Helpers + +def ignore_failure(func, *args, **kwargs): + try: + return func(*args, **kwargs) + except XenAPI.Failure, e: + logging.error('Ignoring XenAPI.Failure %s', e) + return None + + +##### Argument validation + +ARGUMENT_PATTERN = re.compile(r'^[a-zA-Z0-9_:\.\-,]+$') + +def validate_exists(args, key, default=None): + """Validates that a string argument to a RPC method call is given, and + matches the shell-safe regex, with an optional default value in case it + does not exist. + + Returns the string. + """ + if key in args: + if len(args[key]) == 0: + raise ArgumentError('Argument %r value %r is too short.' % (key, args[key])) + if not ARGUMENT_PATTERN.match(args[key]): + raise ArgumentError('Argument %r value %r contains invalid characters.' % (key, args[key])) + if args[key][0] == '-': + raise ArgumentError('Argument %r value %r starts with a hyphen.' % (key, args[key])) + return args[key] + elif default is not None: + return default + else: + raise ArgumentError('Argument %s is required.' % key) + +def validate_bool(args, key, default=None): + """Validates that a string argument to a RPC method call is a boolean string, + with an optional default value in case it does not exist. + + Returns the python boolean value. + """ + value = validate_exists(args, key, default) + if value.lower() == 'true': + return True + elif value.lower() == 'false': + return False + else: + raise ArgumentError("Argument %s may not take value %r. Valid values are ['true', 'false']." % (key, value)) + +def exists(args, key): + """Validates that a freeform string argument to a RPC method call is given. + Returns the string. + """ + if key in args: + return args[key] + else: + raise ArgumentError('Argument %s is required.' 
% key) + +def optional(args, key): + """If the given key is in args, return the corresponding value, otherwise + return None""" + return key in args and args[key] or None + + +def get_this_host(session): + return session.xenapi.session.get_this_host(session.handle) + + +def get_domain_0(session): + this_host_ref = get_this_host(session) + expr = 'field "is_control_domain" = "true" and field "resident_on" = "%s"' % this_host_ref + return session.xenapi.VM.get_all_records_where(expr).keys()[0] + + +def create_vdi(session, sr_ref, name_label, virtual_size, read_only): + vdi_ref = session.xenapi.VDI.create( + { 'name_label': name_label, + 'name_description': '', + 'SR': sr_ref, + 'virtual_size': str(virtual_size), + 'type': 'User', + 'sharable': False, + 'read_only': read_only, + 'xenstore_data': {}, + 'other_config': {}, + 'sm_config': {}, + 'tags': [] }) + logging.debug('Created VDI %s (%s, %s, %s) on %s.', vdi_ref, name_label, + virtual_size, read_only, sr_ref) + return vdi_ref + + +def with_vdi_in_dom0(session, vdi, read_only, f): + dom0 = get_domain_0(session) + vbd_rec = {} + vbd_rec['VM'] = dom0 + vbd_rec['VDI'] = vdi + vbd_rec['userdevice'] = 'autodetect' + vbd_rec['bootable'] = False + vbd_rec['mode'] = read_only and 'RO' or 'RW' + vbd_rec['type'] = 'disk' + vbd_rec['unpluggable'] = True + vbd_rec['empty'] = False + vbd_rec['other_config'] = {} + vbd_rec['qos_algorithm_type'] = '' + vbd_rec['qos_algorithm_params'] = {} + vbd_rec['qos_supported_algorithms'] = [] + logging.debug('Creating VBD for VDI %s ... ', vdi) + vbd = session.xenapi.VBD.create(vbd_rec) + logging.debug('Creating VBD for VDI %s done.', vdi) + try: + logging.debug('Plugging VBD %s ... ', vbd) + session.xenapi.VBD.plug(vbd) + logging.debug('Plugging VBD %s done.', vbd) + return f(session.xenapi.VBD.get_device(vbd)) + finally: + logging.debug('Destroying VBD for VDI %s ... ', vdi) + vbd_unplug_with_retry(session, vbd) + ignore_failure(session.xenapi.VBD.destroy, vbd) + logging.debug('Destroying VBD for VDI %s done.', vdi) + + +def vbd_unplug_with_retry(session, vbd): + """Call VBD.unplug on the given VBD, with a retry if we get + DEVICE_DETACH_REJECTED. For reasons which I don't understand, we're + seeing the device still in use, even when all processes using the device + should be dead.""" + while True: + try: + session.xenapi.VBD.unplug(vbd) + logging.debug('VBD.unplug successful first time.') + return + except XenAPI.Failure, e: + if (len(e.details) > 0 and + e.details[0] == 'DEVICE_DETACH_REJECTED'): + logging.debug('VBD.unplug rejected: retrying...') + time.sleep(1) + elif (len(e.details) > 0 and + e.details[0] == 'DEVICE_ALREADY_DETACHED'): + logging.debug('VBD.unplug successful eventually.') + return + else: + logging.error('Ignoring XenAPI.Failure in VBD.unplug: %s', e) + return + + +def with_http_connection(proto, netloc, f): + conn = (proto == 'https' and + httplib.HTTPSConnection(netloc) or + httplib.HTTPConnection(netloc)) + try: + return f(conn) + finally: + conn.close() + + +def with_file(dest_path, mode, f): + dest = open(dest_path, mode) + try: + return f(dest) + finally: + dest.close() diff --git a/xenapi/README b/xenapi/README deleted file mode 100644 index 1fc67aa7a..000000000 --- a/xenapi/README +++ /dev/null @@ -1,2 +0,0 @@ -This directory contains files that are required for the XenAPI support. They -should be installed in the XenServer / Xen Cloud Platform domain 0. 
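(For reference, the request signing that the objectstore plugin performs --
make_headers / s3_authorization / plaintext above -- condenses to the
following Python 2 sketch, assuming only the Date header is set:

    import base64
    import hmac
    import sha
    import time

    def sign_request(verb, path, secret):
        # AWS-style signature over "VERB\n\n\nDate\npath", as built by
        # plaintext() in the plugin.
        date = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
        message = '%s\n\n\n%s\n%s' % (verb, date, path)
        mac = hmac.new(secret, message, digestmod=sha)
        return base64.encodestring(mac.digest()).strip()

    # The plugin then sends the header:
    #   Authorization: AWS <username>:<signature>
)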
diff --git a/xenapi/etc/xapi.d/plugins/objectstore b/xenapi/etc/xapi.d/plugins/objectstore deleted file mode 100644 index 271e7337f..000000000 --- a/xenapi/etc/xapi.d/plugins/objectstore +++ /dev/null @@ -1,231 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2010 Citrix Systems, Inc. -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# -# XenAPI plugin for fetching images from nova-objectstore. -# - -import base64 -import errno -import hmac -import os -import os.path -import sha -import time -import urlparse - -import XenAPIPlugin - -from pluginlib_nova import * -configure_logging('objectstore') - - -KERNEL_DIR = '/boot/guest' - -DOWNLOAD_CHUNK_SIZE = 2 * 1024 * 1024 -SECTOR_SIZE = 512 -MBR_SIZE_SECTORS = 63 -MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE - - -def get_vdi(session, args): - src_url = exists(args, 'src_url') - username = exists(args, 'username') - password = exists(args, 'password') - add_partition = validate_bool(args, 'add_partition', 'false') - - (proto, netloc, url_path, _, _, _) = urlparse.urlparse(src_url) - - sr = find_sr(session) - if sr is None: - raise Exception('Cannot find SR to write VDI to') - - virtual_size = \ - get_content_length(proto, netloc, url_path, username, password) - if virtual_size < 0: - raise Exception('Cannot get VDI size') - - vdi_size = virtual_size - if add_partition: - # Make room for MBR. 
- vdi_size += MBR_SIZE_BYTES - - vdi = create_vdi(session, sr, src_url, vdi_size, False) - with_vdi_in_dom0(session, vdi, False, - lambda dev: get_vdi_(proto, netloc, url_path, - username, password, add_partition, - virtual_size, '/dev/%s' % dev)) - return session.xenapi.VDI.get_uuid(vdi) - - -def get_vdi_(proto, netloc, url_path, username, password, add_partition, - virtual_size, dest): - - if add_partition: - write_partition(virtual_size, dest) - - offset = add_partition and MBR_SIZE_BYTES or 0 - get(proto, netloc, url_path, username, password, dest, offset) - - -def write_partition(virtual_size, dest): - mbr_last = MBR_SIZE_SECTORS - 1 - primary_first = MBR_SIZE_SECTORS - primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1 - - logging.debug('Writing partition table %d %d to %s...', - primary_first, primary_last, dest) - - result = os.system('parted --script %s mklabel msdos' % dest) - if result != 0: - raise Exception('Failed to mklabel') - result = os.system('parted --script %s mkpart primary %ds %ds' % - (dest, primary_first, primary_last)) - if result != 0: - raise Exception('Failed to mkpart') - - logging.debug('Writing partition table %s done.', dest) - - -def find_sr(session): - host = get_this_host(session) - srs = session.xenapi.SR.get_all() - for sr in srs: - sr_rec = session.xenapi.SR.get_record(sr) - if not ('i18n-key' in sr_rec['other_config'] and - sr_rec['other_config']['i18n-key'] == 'local-storage'): - continue - for pbd in sr_rec['PBDs']: - pbd_rec = session.xenapi.PBD.get_record(pbd) - if pbd_rec['host'] == host: - return sr - return None - - -def get_kernel(session, args): - src_url = exists(args, 'src_url') - username = exists(args, 'username') - password = exists(args, 'password') - - (proto, netloc, url_path, _, _, _) = urlparse.urlparse(src_url) - - dest = os.path.join(KERNEL_DIR, url_path[1:]) - - # Paranoid check against people using ../ to do rude things. 
- if os.path.commonprefix([KERNEL_DIR, dest]) != KERNEL_DIR: - raise Exception('Illegal destination %s %s', (url_path, dest)) - - dirname = os.path.dirname(dest) - try: - os.makedirs(dirname) - except os.error, e: - if e.errno != errno.EEXIST: - raise - if not os.path.isdir(dirname): - raise Exception('Cannot make directory %s', dirname) - - try: - os.remove(dest) - except: - pass - - get(proto, netloc, url_path, username, password, dest, 0) - - return dest - - -def get_content_length(proto, netloc, url_path, username, password): - headers = make_headers('HEAD', url_path, username, password) - return with_http_connection( - proto, netloc, - lambda conn: get_content_length_(url_path, headers, conn)) - - -def get_content_length_(url_path, headers, conn): - conn.request('HEAD', url_path, None, headers) - response = conn.getresponse() - if response.status != 200: - raise Exception('%d %s' % (response.status, response.reason)) - - return long(response.getheader('Content-Length', -1)) - - -def get(proto, netloc, url_path, username, password, dest, offset): - headers = make_headers('GET', url_path, username, password) - download(proto, netloc, url_path, headers, dest, offset) - - -def make_headers(verb, url_path, username, password): - headers = {} - headers['Date'] = \ - time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) - headers['Authorization'] = \ - 'AWS %s:%s' % (username, - s3_authorization(verb, url_path, password, headers)) - return headers - - -def s3_authorization(verb, path, password, headers): - sha1 = hmac.new(password, digestmod=sha) - sha1.update(plaintext(verb, path, headers)) - return base64.encodestring(sha1.digest()).strip() - - -def plaintext(verb, path, headers): - return '%s\n\n\n%s\n%s' % (verb, - "\n".join([headers[h] for h in headers]), - path) - - -def download(proto, netloc, url_path, headers, dest, offset): - with_http_connection( - proto, netloc, - lambda conn: download_(url_path, dest, offset, headers, conn)) - - -def download_(url_path, dest, offset, headers, conn): - conn.request('GET', url_path, None, headers) - response = conn.getresponse() - if response.status != 200: - raise Exception('%d %s' % (response.status, response.reason)) - - length = response.getheader('Content-Length', -1) - - with_file( - dest, 'a', - lambda dest_file: download_all(response, length, dest_file, offset)) - - -def download_all(response, length, dest_file, offset): - dest_file.seek(offset) - i = 0 - while True: - buf = response.read(DOWNLOAD_CHUNK_SIZE) - if buf: - dest_file.write(buf) - else: - return - i += len(buf) - if length != -1 and i >= length: - return - - -if __name__ == '__main__': - XenAPIPlugin.dispatch({'get_vdi': get_vdi, - 'get_kernel': get_kernel}) diff --git a/xenapi/etc/xapi.d/plugins/pluginlib_nova.py b/xenapi/etc/xapi.d/plugins/pluginlib_nova.py deleted file mode 100755 index 2d323a016..000000000 --- a/xenapi/etc/xapi.d/plugins/pluginlib_nova.py +++ /dev/null @@ -1,216 +0,0 @@ -# Copyright (c) 2010 Citrix Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -# -# Helper functions for the Nova xapi plugins. In time, this will merge -# with the pluginlib.py shipped with xapi, but for now, that file is not -# very stable, so it's easiest just to have a copy of all the functions -# that we need. -# - -import httplib -import logging -import logging.handlers -import re -import time - - -##### Logging setup - -def configure_logging(name): - log = logging.getLogger() - log.setLevel(logging.DEBUG) - sysh = logging.handlers.SysLogHandler('/dev/log') - sysh.setLevel(logging.DEBUG) - formatter = logging.Formatter('%s: %%(levelname)-8s %%(message)s' % name) - sysh.setFormatter(formatter) - log.addHandler(sysh) - - -##### Exceptions - -class PluginError(Exception): - """Base Exception class for all plugin errors.""" - def __init__(self, *args): - Exception.__init__(self, *args) - -class ArgumentError(PluginError): - """Raised when required arguments are missing, argument values are invalid, - or incompatible arguments are given. - """ - def __init__(self, *args): - PluginError.__init__(self, *args) - - -##### Helpers - -def ignore_failure(func, *args, **kwargs): - try: - return func(*args, **kwargs) - except XenAPI.Failure, e: - logging.error('Ignoring XenAPI.Failure %s', e) - return None - - -##### Argument validation - -ARGUMENT_PATTERN = re.compile(r'^[a-zA-Z0-9_:\.\-,]+$') - -def validate_exists(args, key, default=None): - """Validates that a string argument to a RPC method call is given, and - matches the shell-safe regex, with an optional default value in case it - does not exist. - - Returns the string. - """ - if key in args: - if len(args[key]) == 0: - raise ArgumentError('Argument %r value %r is too short.' % (key, args[key])) - if not ARGUMENT_PATTERN.match(args[key]): - raise ArgumentError('Argument %r value %r contains invalid characters.' % (key, args[key])) - if args[key][0] == '-': - raise ArgumentError('Argument %r value %r starts with a hyphen.' % (key, args[key])) - return args[key] - elif default is not None: - return default - else: - raise ArgumentError('Argument %s is required.' % key) - -def validate_bool(args, key, default=None): - """Validates that a string argument to a RPC method call is a boolean string, - with an optional default value in case it does not exist. - - Returns the python boolean value. - """ - value = validate_exists(args, key, default) - if value.lower() == 'true': - return True - elif value.lower() == 'false': - return False - else: - raise ArgumentError("Argument %s may not take value %r. Valid values are ['true', 'false']." % (key, value)) - -def exists(args, key): - """Validates that a freeform string argument to a RPC method call is given. - Returns the string. - """ - if key in args: - return args[key] - else: - raise ArgumentError('Argument %s is required.' 
% key) - -def optional(args, key): - """If the given key is in args, return the corresponding value, otherwise - return None""" - return key in args and args[key] or None - - -def get_this_host(session): - return session.xenapi.session.get_this_host(session.handle) - - -def get_domain_0(session): - this_host_ref = get_this_host(session) - expr = 'field "is_control_domain" = "true" and field "resident_on" = "%s"' % this_host_ref - return session.xenapi.VM.get_all_records_where(expr).keys()[0] - - -def create_vdi(session, sr_ref, name_label, virtual_size, read_only): - vdi_ref = session.xenapi.VDI.create( - { 'name_label': name_label, - 'name_description': '', - 'SR': sr_ref, - 'virtual_size': str(virtual_size), - 'type': 'User', - 'sharable': False, - 'read_only': read_only, - 'xenstore_data': {}, - 'other_config': {}, - 'sm_config': {}, - 'tags': [] }) - logging.debug('Created VDI %s (%s, %s, %s) on %s.', vdi_ref, name_label, - virtual_size, read_only, sr_ref) - return vdi_ref - - -def with_vdi_in_dom0(session, vdi, read_only, f): - dom0 = get_domain_0(session) - vbd_rec = {} - vbd_rec['VM'] = dom0 - vbd_rec['VDI'] = vdi - vbd_rec['userdevice'] = 'autodetect' - vbd_rec['bootable'] = False - vbd_rec['mode'] = read_only and 'RO' or 'RW' - vbd_rec['type'] = 'disk' - vbd_rec['unpluggable'] = True - vbd_rec['empty'] = False - vbd_rec['other_config'] = {} - vbd_rec['qos_algorithm_type'] = '' - vbd_rec['qos_algorithm_params'] = {} - vbd_rec['qos_supported_algorithms'] = [] - logging.debug('Creating VBD for VDI %s ... ', vdi) - vbd = session.xenapi.VBD.create(vbd_rec) - logging.debug('Creating VBD for VDI %s done.', vdi) - try: - logging.debug('Plugging VBD %s ... ', vbd) - session.xenapi.VBD.plug(vbd) - logging.debug('Plugging VBD %s done.', vbd) - return f(session.xenapi.VBD.get_device(vbd)) - finally: - logging.debug('Destroying VBD for VDI %s ... ', vdi) - vbd_unplug_with_retry(session, vbd) - ignore_failure(session.xenapi.VBD.destroy, vbd) - logging.debug('Destroying VBD for VDI %s done.', vdi) - - -def vbd_unplug_with_retry(session, vbd): - """Call VBD.unplug on the given VBD, with a retry if we get - DEVICE_DETACH_REJECTED. For reasons which I don't understand, we're - seeing the device still in use, even when all processes using the device - should be dead.""" - while True: - try: - session.xenapi.VBD.unplug(vbd) - logging.debug('VBD.unplug successful first time.') - return - except XenAPI.Failure, e: - if (len(e.details) > 0 and - e.details[0] == 'DEVICE_DETACH_REJECTED'): - logging.debug('VBD.unplug rejected: retrying...') - time.sleep(1) - elif (len(e.details) > 0 and - e.details[0] == 'DEVICE_ALREADY_DETACHED'): - logging.debug('VBD.unplug successful eventually.') - return - else: - logging.error('Ignoring XenAPI.Failure in VBD.unplug: %s', e) - return - - -def with_http_connection(proto, netloc, f): - conn = (proto == 'https' and - httplib.HTTPSConnection(netloc) or - httplib.HTTPConnection(netloc)) - try: - return f(conn) - finally: - conn.close() - - -def with_file(dest_path, mode, f): - dest = open(dest_path, mode) - try: - return f(dest) - finally: - dest.close() -- cgit From e3f8aa57873b7de69980c57cd05e3f1bdf6f7d08 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Mon, 9 Aug 2010 23:22:59 +0100 Subject: Implement the same fix as lp:~vishvananda/nova/fix-curl-project, but for virt.xenapi. 
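In brief, excerpting from the diff below: instead of signing objectstore
requests with the user's bare access key, xapi is now handed a
project-scoped key:

    access = AuthManager().get_access_key(user, project)
    args['username'] = access
    args['password'] = user.secret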
--- nova/virt/xenapi.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index c3e84c2b9..9fe15644f 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -92,12 +92,13 @@ class XenAPIConnection(object): mac_address = '' user = AuthManager().get_user(instance.datamodel['user_id']) + project = AuthManager().get_project(instance.datamodel['project_id']) vdi_uuid = yield self.fetch_image( - instance.datamodel['image_id'], user, True) + instance.datamodel['image_id'], user, project, True) kernel = yield self.fetch_image( - instance.datamodel['kernel_id'], user, False) + instance.datamodel['kernel_id'], user, project, False) ramdisk = yield self.fetch_image( - instance.datamodel['ramdisk_id'], user, False) + instance.datamodel['ramdisk_id'], user, project, False) vdi_ref = yield self._conn.xenapi.VDI.get_by_uuid(vdi_uuid) vm_ref = yield self.create_vm(instance, kernel, ramdisk) @@ -195,17 +196,18 @@ class XenAPIConnection(object): raise Exception('Found no network for bridge %s' % bridge) - def fetch_image(self, image, user, use_sr): + def fetch_image(self, image, user, project, use_sr): """use_sr: True to put the image as a VDI in an SR, False to place it on dom0's filesystem. The former is for VM disks, the latter for its kernel and ramdisk (if external kernels are being used).""" url = images.image_url(image) - logging.debug("Asking xapi to fetch %s as %s" % (url, user.access)) + access = AuthManager().get_access_key(user, project) + logging.debug("Asking xapi to fetch %s as %s" % (url, access)) fn = use_sr and 'get_vdi' or 'get_kernel' args = {} args['src_url'] = url - args['username'] = user.access + args['username'] = access args['password'] = user.secret if use_sr: args['add_partition'] = 'true' -- cgit From 8990a62b0e654dcacac06246733a17fa0502bcc7 Mon Sep 17 00:00:00 2001 From: Joe Heck Date: Mon, 9 Aug 2010 17:53:10 -0700 Subject: fixing - removing unused imports per Eric & Jay review --- nova/auth/manager.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/nova/auth/manager.py b/nova/auth/manager.py index e5efbca24..6d71a7ad6 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -35,10 +35,6 @@ from nova import utils from nova.auth import signer from nova.network import vpn -#unused imports -#from nova import datastore -#from nova.auth import ldapdriver # for flags -#from nova import objectstore # for flags FLAGS = flags.FLAGS -- cgit From 8c7558ed5ae7dd0b78a91a385dbd9b044ec7c8db Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> Date: Tue, 10 Aug 2010 12:44:38 -0400 Subject: Changes the run_tests.sh and /tools/install_venv.py scripts to be more user-friendly and not depend on PIP while not in the virtual environment. Running run_tests.sh should not just work out of the box on all systems supporting easy_install... --- run_tests.sh | 7 +++--- tools/install_venv.py | 59 ++++++++++++++++++++++++++++++++++++++------------- 2 files changed, 47 insertions(+), 19 deletions(-) diff --git a/run_tests.sh b/run_tests.sh index 9b2de7aea..85d7c8834 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -6,8 +6,7 @@ with_venv=tools/with_venv.sh if [ -e ${venv} ]; then ${with_venv} python run_tests.py $@ else - echo "You need to install the Nova virtualenv before you can run this." 
- echo "" - echo "Please run tools/install_venv.py" - exit 1 + echo "No virtual environment found...creating one" + python tools/install_venv.py + ${with_venv} python run_tests.py $@ fi diff --git a/tools/install_venv.py b/tools/install_venv.py index 0b35fc8e9..adf24b365 100644 --- a/tools/install_venv.py +++ b/tools/install_venv.py @@ -1,3 +1,23 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Copyright 2010 OpenStack, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + """ Installation script for Nova's development virtualenv """ @@ -12,15 +32,14 @@ VENV = os.path.join(ROOT, '.nova-venv') PIP_REQUIRES = os.path.join(ROOT, 'tools', 'pip-requires') TWISTED_NOVA='http://nova.openstack.org/Twisted-10.0.0Nova.tar.gz' - def die(message, *args): print >>sys.stderr, message % args sys.exit(1) - def run_command(cmd, redirect_output=True, error_ok=False): - # Useful for debugging: - #print >>sys.stderr, ' '.join(cmd) + """Runs a command in an out-of-process shell, returning the + output of that command + """ if redirect_output: stdout = subprocess.PIPE else: @@ -32,33 +51,43 @@ def run_command(cmd, redirect_output=True, error_ok=False): die('Command "%s" failed.\n%s', ' '.join(cmd), output) return output +HAS_EASY_INSTALL = bool(run_command(['which', 'easy_install']).strip()) +HAS_VIRTUALENV = bool(run_command(['which', 'virtualenv']).strip()) def check_dependencies(): - """Make sure pip and virtualenv are on the path.""" - print 'Checking for pip...', - if not run_command(['which', 'pip']).strip(): - die('ERROR: pip not found.\n\nNova development requires pip,' - ' please install it using your favorite package management tool') - print 'done.' + """Make sure virtualenv is in the path.""" print 'Checking for virtualenv...', - if not run_command(['which', 'virtualenv']).strip(): - die('ERROR: virtualenv not found.\n\nNova development requires virtualenv,' - ' please install it using your favorite package management tool') + if not HAS_VIRTUALENV: + print 'not found.' + # Try installing it via easy_install... + if HAS_EASY_INSTALL: + if not run_command(['which', 'easy_install']): + print 'Installing virtualenv via easy_install...', + die('ERROR: virtualenv not found.\n\nNova development requires virtualenv,' + ' please install it using your favorite package management tool') + print 'done.' print 'done.' def create_virtualenv(venv=VENV): + """Creates the virtual environment and installs PIP only into the + virtual environment + """ print 'Creating venv...', run_command(['virtualenv', '-q', '--no-site-packages', VENV]) print 'done.' + print 'Installing pip in virtualenv...', + if not run_command(['tools/with_venv.sh', 'easy_install', 'pip']).strip(): + die("Failed to install pip.") + print 'done.' def install_dependencies(venv=VENV): print 'Installing dependencies with pip (this can take a while)...' 
- run_command(['pip', 'install', '-E', venv, '-r', PIP_REQUIRES], + run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, '-r', PIP_REQUIRES], redirect_output=False) - run_command(['pip', 'install', '-E', venv, TWISTED_NOVA], + run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, TWISTED_NOVA], redirect_output=False) -- cgit From f5695429db27110d8a95df3b66e4045c59d88c6a Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> Date: Tue, 10 Aug 2010 12:51:03 -0400 Subject: Quick fix on location of printouts when trying to install virtualenv. --- tools/install_venv.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/install_venv.py b/tools/install_venv.py index adf24b365..494535b5e 100644 --- a/tools/install_venv.py +++ b/tools/install_venv.py @@ -36,6 +36,7 @@ def die(message, *args): print >>sys.stderr, message % args sys.exit(1) + def run_command(cmd, redirect_output=True, error_ok=False): """Runs a command in an out-of-process shell, returning the output of that command @@ -51,9 +52,11 @@ def run_command(cmd, redirect_output=True, error_ok=False): die('Command "%s" failed.\n%s', ' '.join(cmd), output) return output + HAS_EASY_INSTALL = bool(run_command(['which', 'easy_install']).strip()) HAS_VIRTUALENV = bool(run_command(['which', 'virtualenv']).strip()) + def check_dependencies(): """Make sure virtualenv is in the path.""" @@ -62,8 +65,8 @@ def check_dependencies(): print 'not found.' # Try installing it via easy_install... if HAS_EASY_INSTALL: + print 'Installing virtualenv via easy_install...', if not run_command(['which', 'easy_install']): - print 'Installing virtualenv via easy_install...', die('ERROR: virtualenv not found.\n\nNova development requires virtualenv,' ' please install it using your favorite package management tool') print 'done.' -- cgit From 7a1709561f1fed6e46a1c31aaa8e3ac54b9eebd3 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 10 Aug 2010 10:25:52 -0700 Subject: rename create_zip to zipfile so lazy match works --- bin/nova-manage | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/nova-manage b/bin/nova-manage index 2dd569df0..6af092922 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -203,7 +203,7 @@ class ProjectCommands(object): arguments: project user""" self.manager.remove_from_project(user, project) - def create_zip(self, project_id, user_id, filename='nova.zip'): + def zipfile(self, project_id, user_id, filename='nova.zip'): """Exports credentials for project to a zip file arguments: project_id user_id [filename='nova.zip]""" zip_file = self.manager.get_credentials(project_id, user_id) -- cgit From 538fe868a8c89f892bffbfc0001b64e3bf1c9cf5 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Tue, 10 Aug 2010 15:28:35 -0400 Subject: Oops, we need eventlet as well. 
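(eventlet backs the green-thread WSGI server in nova/wsgi.py, so it belongs
in pip-requires. A minimal illustration of what the dependency is used for,
assuming eventlet 0.9.x -- this mirrors the run_server() helper:

    import eventlet
    import eventlet.wsgi

    def app(environ, start_response):
        # Trivial WSGI app served on eventlet's green-thread pool.
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['hello\n']

    eventlet.wsgi.server(eventlet.listen(('0.0.0.0', 8080)), app)
)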
--- tools/pip-requires | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/pip-requires b/tools/pip-requires index 4eb47ca2b..e3591e92d 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -4,6 +4,7 @@ amqplib==0.6.1 anyjson==0.2.4 boto==2.0b1 carrot==0.10.5 +eventlet==0.9.10 lockfile==0.8 python-daemon==1.5.5 python-gflags==1.3 -- cgit From 049b89babe10068d3976f3f3a99b7dce120e2962 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Tue, 10 Aug 2010 18:17:44 -0400 Subject: work on a router that works with wsgi and non-wsgi routing --- nova/endpoint/rackspace.py | 27 ++++++++-------- nova/wsgi.py | 76 ++++++++++++++++++++++++++++++++++++++++++++++ tools/pip-requires | 3 ++ 3 files changed, 94 insertions(+), 12 deletions(-) diff --git a/nova/endpoint/rackspace.py b/nova/endpoint/rackspace.py index 75b828e91..b4e6cd823 100644 --- a/nova/endpoint/rackspace.py +++ b/nova/endpoint/rackspace.py @@ -45,18 +45,20 @@ class API(wsgi.Middleware): def __init__(self): super(API, self).__init__(Router(webob.exc.HTTPNotFound())) - def __call__(self, environ, start_response): + @webob.dec.wsgify + def __call__(self, req): + return self.application context = {} - if "HTTP_X_AUTH_TOKEN" in environ: + if "HTTP_X_AUTH_TOKEN" in req.environ: context['user'] = manager.AuthManager().get_user_from_access_key( - environ['HTTP_X_AUTH_TOKEN']) + req.environ['HTTP_X_AUTH_TOKEN']) if context['user']: context['project'] = manager.AuthManager().get_project( context['user'].name) if "user" not in context: - return webob.exc.HTTPForbidden()(environ, start_response) + return webob.exc.HTTPForbidden() environ['nova.context'] = context - return self.application(environ, start_response) + return self.application class Router(wsgi.Router): @@ -64,13 +66,14 @@ class Router(wsgi.Router): def _build_map(self): """Build routing map for authentication and cloud.""" - self._connect("/v1.0", controller=AuthenticationAPI()) - cloud = CloudServerAPI() - self._connect("/servers", controller=cloud.launch_server, - conditions={"method": ["POST"]}) - self._connect("/servers/{server_id}", controller=cloud.delete_server, - conditions={'method': ["DELETE"]}) - self._connect("/servers", controller=cloud) + self.map.resource("server", "servers", controller=CloudServerAPI()) + #self._connect("/v1.0", controller=AuthenticationAPI()) + #cloud = CloudServerAPI() + #self._connect("/servers", controller=cloud.launch_server, + # conditions={"method": ["POST"]}) + #self._connect("/servers/{server_id}", controller=cloud.delete_server, + # conditions={'method': ["DELETE"]}) + #self._connect("/servers", controller=cloud) class AuthenticationAPI(wsgi.Application): diff --git a/nova/wsgi.py b/nova/wsgi.py index 4fd6e59e3..271648105 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -40,6 +40,7 @@ def run_server(application, port): eventlet.wsgi.server(sock, application) +# TODO(gundlach): I think we should toss this class, now that it has no purpose. class Application(object): """Base WSGI application wrapper. Subclasses need to implement __call__.""" @@ -140,6 +141,81 @@ class ParsedRoutes(Middleware): app = environ['wsgiorg.routing_args'][1]['controller'] return app(environ, start_response) +class MichaelRouter(object): + """ + My attempt at a routing class. Just override __init__ to call + super, then set up routes in self.map. 
+ """ + + def __init__(self): + self.map = routes.Mapper() + self._router = routes.middleware.RoutesMiddleware(self._proceed, self.map) + + @webob.dec.wsgify + def __call__(self, req): + """ + Route the incoming request to a controller based on self.map. + If no match, return a 404. + """ + return self._router + + @webob.dec.wsgify + def _proceed(self, req): + """ + Called by self._router after matching the incoming request to a route + and putting the information into req.environ. + """ + if req.environ['routes.route'] is None: + return webob.exc.HTTPNotFound() + match = environ['wsgiorg.routing_args'][1] + if match.get('_is_wsgi', False): + wsgiapp = match['controller'] + return req.get_response(wsgiapp) + else: + # TODO(gundlach): doubt this is the right way -- and it really + # feels like this code should exist somewhere already on the + # internet + controller, action = match['controller'], match['action'] + delete match['controller'] + delete match['action'] + return _as_response(getattr(controller, action)(**match)) + + controller = environ['wsgiorg.routing_args'][1]['controller'] + self._dispatch(controller) + + def _as_response(self, result): + """ + When routing to a non-wsgi controller+action, its result will + be passed here before returning up the WSGI chain to be converted + into a webob.Response + + + + + +class ApiVersionRouter(MichaelRouter): + + def __init__(self): + super(ApiVersionRouter, self).__init__(self) + + self.map.connect(None, "/v1.0/{path_info:.*}", controller=RsApiRouter()) + self.map.connect(None, "/ec2/{path_info:.*}", controller=Ec2ApiRouter()) + +class RsApiRouter(MichaelRouter): + def __init__(self): + super(RsApiRouter, self).__init__(self) + + self.map.resource("server", "servers", controller=CloudServersServerApi()) + self.map.resource("image", "images", controller=CloudServersImageApi()) + self.map.resource("flavor", "flavors", controller=CloudServersFlavorApi()) + self.map.resource("sharedipgroup", "sharedipgroups", + controller=CloudServersSharedIpGroupApi()) + +class Ec2ApiRouter(object): + def __getattr__(self, key): + return lambda *x: {'dummy response': 'i am a dummy response'} +CloudServersServerApi = CloudServersImageApi = CloudServersFlavorApi = \ + CloudServersSharedIpGroupApi = Ec2ApiRouter class Router(Middleware): # pylint: disable-msg=R0921 """Wrapper to help setup routes.middleware.RoutesMiddleware.""" diff --git a/tools/pip-requires b/tools/pip-requires index 4eb47ca2b..2317907d1 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -4,11 +4,14 @@ amqplib==0.6.1 anyjson==0.2.4 boto==2.0b1 carrot==0.10.5 +eventlet==0.9.10 lockfile==0.8 python-daemon==1.5.5 python-gflags==1.3 redis==2.0.0 +routes==1.12.3 tornado==1.0 +webob==0.9.8 wsgiref==0.1.2 zope.interface==3.6.1 mox==0.5.0 -- cgit From 14c7bca9cb8451e2ec8224fb5699c6f2ad480dac Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 10 Aug 2010 17:34:20 -0700 Subject: Adds get_roles commands to manager and driver classes --- nova/auth/ldapdriver.py | 34 +++++++++++++++++++++++++++------- nova/auth/manager.py | 18 ++++++++++++++++++ nova/tests/auth_unittest.py | 18 +++++++++++++++++- 3 files changed, 62 insertions(+), 8 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index ec739e134..aaaf8553c 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -181,7 +181,7 @@ class LdapDriver(object): if member_uids != None: for member_uid in member_uids: if not self.__user_exists(member_uid): - raise exception.NotFound("Project can't be 
created " + raise exception.NotFound("Project can't be created " "because user %s doesn't exist" % member_uid) members.append(self.__uid_to_dn(member_uid)) # always add the manager as a member because members is required @@ -236,6 +236,26 @@ class LdapDriver(object): role_dn = self.__role_to_dn(role, project_id) return self.__remove_from_group(uid, role_dn) + def get_user_roles(self, uid, project_id=None): + """Retrieve list of roles for user (or user and project)""" + if project_id is None: + # NOTE(vish): This is unneccesarily slow, but since we can't + # guarantee that the global roles are located + # together in the ldap tree, we're doing this version. + roles = [] + for role in FLAGS.allowed_roles: + role_dn = self.__role_to_dn(role) + if self.__is_in_group(uid, role_dn): + roles.append(role) + return roles + else: + project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree) + roles = self.__find_objects(project_dn, + '(&(&(objectclass=groupOfNames)' + '(!(objectclass=novaProject)))' + '(member=%s))' % self.__uid_to_dn(uid)) + return [role['cn'][0] for role in roles] + def delete_user(self, uid): """Delete a user""" if not self.__user_exists(uid): @@ -253,24 +273,24 @@ class LdapDriver(object): self.conn.delete_s('cn=%s,uid=%s,%s' % (key_name, uid, FLAGS.ldap_user_subtree)) - def delete_project(self, name): + def delete_project(self, project_id): """Delete a project""" - project_dn = 'cn=%s,%s' % (name, FLAGS.ldap_project_subtree) + project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree) self.__delete_roles(project_dn) self.__delete_group(project_dn) - def __user_exists(self, name): + def __user_exists(self, uid): """Check if user exists""" - return self.get_user(name) != None + return self.get_user(uid) != None def __key_pair_exists(self, uid, key_name): """Check if key pair exists""" return self.get_user(uid) != None return self.get_key_pair(uid, key_name) != None - def __project_exists(self, name): + def __project_exists(self, project_id): """Check if project exists""" - return self.get_project(name) != None + return self.get_project(project_id) != None def __find_object(self, dn, query=None, scope=None): """Find an object by dn and query""" diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 6d71a7ad6..8195182fc 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -38,6 +38,10 @@ from nova.network import vpn FLAGS = flags.FLAGS +flags.DEFINE_list('allowed_roles', + ['cloudadmin', 'itsec', 'sysadmin', 'netadmin', 'developer'], + 'Allowed roles for project') + # NOTE(vish): a user with one of these roles will be a superuser and # have access to all api commands flags.DEFINE_list('superuser_roles', ['cloudadmin'], @@ -455,6 +459,20 @@ class AuthManager(object): with self.driver() as drv: drv.remove_role(User.safe_id(user), role, Project.safe_id(project)) + def get_roles(self): + """Get list of allowed roles""" + return FLAGS.allowed_roles + + def get_user_roles(self, user, project=None): + """Get user global or per-project roles""" + roles = [] + with self.driver() as drv: + roles = drv.get_user_roles(User.safe_id(user), + Project.safe_id(project)) + if project is not None and self.is_project_manager(user, project): + roles.append('projectmanager') + return roles + def get_project(self, pid): """Get project object by id""" with self.driver() as drv: diff --git a/nova/tests/auth_unittest.py b/nova/tests/auth_unittest.py index f7e0625a3..2d99c8e36 100644 --- a/nova/tests/auth_unittest.py +++ b/nova/tests/auth_unittest.py @@ -179,7 +179,23 @@ 
class AuthTestCase(test.BaseTestCase): project.add_role('test1', 'sysadmin') self.assertTrue(project.has_role('test1', 'sysadmin')) - def test_211_can_remove_project_role(self): + def test_211_can_list_project_roles(self): + project = self.manager.get_project('testproj') + user = self.manager.get_user('test1') + self.manager.add_role(user, 'netadmin', project) + roles = self.manager.get_user_roles(user) + self.assertTrue('sysadmin' in roles) + self.assertFalse('netadmin' in roles) + self.assertFalse('projectmanager' in roles) + project_roles = self.manager.get_user_roles(user, project) + self.assertTrue('sysadmin' in project_roles) + self.assertTrue('netadmin' in project_roles) + self.assertTrue('projectmanager' in project_roles) + # has role should be false because global role is missing + self.assertFalse(self.manager.has_role(user, 'netadmin', project)) + + + def test_212_can_remove_project_role(self): project = self.manager.get_project('testproj') self.assertTrue(project.has_role('test1', 'sysadmin')) project.remove_role('test1', 'sysadmin') -- cgit From 19b9164c4eaae0c2c9144f9e839fbafcac7c3ed3 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 10 Aug 2010 17:42:58 -0700 Subject: Throw exceptions for illegal roles on role add --- nova/auth/manager.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 8195182fc..e338dfc83 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -436,6 +436,10 @@ class AuthManager(object): @type project: Project or project_id @param project: Project in which to add local role. """ + if role not in FLAGS.allowed_roles: + raise exception.NotFound("The %s role can not be found" % role) + if project is not None and role in FLAGS.global_roles: + raise exception.NotFound("The %s role is global only" % role) with self.driver() as drv: drv.add_role(User.safe_id(user), role, Project.safe_id(project)) -- cgit From cff3cccc342c7d09cd2ec6c95431e1b373eba620 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 10 Aug 2010 18:04:23 -0700 Subject: change get_roles to have a flag for project_roles or not. 
Don't show 'projectmanager' in list of roles --- nova/auth/manager.py | 15 +++++++-------- nova/tests/auth_unittest.py | 2 -- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/nova/auth/manager.py b/nova/auth/manager.py index e338dfc83..064fd78bc 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -463,19 +463,18 @@ class AuthManager(object): with self.driver() as drv: drv.remove_role(User.safe_id(user), role, Project.safe_id(project)) - def get_roles(self): + def get_roles(self, project_roles=True): """Get list of allowed roles""" - return FLAGS.allowed_roles + if project_roles: + return list(set(FLAGS.allowed_roles) - set(FLAGS.global_roles)) + else: + return FLAGS.allowed_roles def get_user_roles(self, user, project=None): """Get user global or per-project roles""" - roles = [] with self.driver() as drv: - roles = drv.get_user_roles(User.safe_id(user), - Project.safe_id(project)) - if project is not None and self.is_project_manager(user, project): - roles.append('projectmanager') - return roles + return drv.get_user_roles(User.safe_id(user), + Project.safe_id(project)) def get_project(self, pid): """Get project object by id""" diff --git a/nova/tests/auth_unittest.py b/nova/tests/auth_unittest.py index 2d99c8e36..0b404bfdc 100644 --- a/nova/tests/auth_unittest.py +++ b/nova/tests/auth_unittest.py @@ -186,11 +186,9 @@ class AuthTestCase(test.BaseTestCase): roles = self.manager.get_user_roles(user) self.assertTrue('sysadmin' in roles) self.assertFalse('netadmin' in roles) - self.assertFalse('projectmanager' in roles) project_roles = self.manager.get_user_roles(user, project) self.assertTrue('sysadmin' in project_roles) self.assertTrue('netadmin' in project_roles) - self.assertTrue('projectmanager' in project_roles) # has role should be false because global role is missing self.assertFalse(self.manager.has_role(user, 'netadmin', project)) -- cgit From 2955018b58a731f48dcdee64d889b4be104250f1 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 10 Aug 2010 19:00:35 -0700 Subject: fix spacing issue in ldapdriver --- nova/auth/ldapdriver.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index aaaf8553c..453fa196c 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -181,8 +181,9 @@ class LdapDriver(object): if member_uids != None: for member_uid in member_uids: if not self.__user_exists(member_uid): - raise exception.NotFound("Project can't be created " - "because user %s doesn't exist" % member_uid) + raise exception.NotFound("Project can't be created " + "because user %s doesn't exist" + % member_uid) members.append(self.__uid_to_dn(member_uid)) # always add the manager as a member because members is required if not manager_dn in members: -- cgit From 1637c33927672a6edc9ad7a994787669ea47f602 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 11 Aug 2010 09:46:08 -0400 Subject: Serializing in middleware after all... by tying to the router. maybe a good idea? --- nova/wsgi.py | 113 +++++++++++++++++++++++++++++++++++++---------------------- 1 file changed, 72 insertions(+), 41 deletions(-) diff --git a/nova/wsgi.py b/nova/wsgi.py index 271648105..c511a3f06 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -141,15 +141,24 @@ class ParsedRoutes(Middleware): app = environ['wsgiorg.routing_args'][1]['controller'] return app(environ, start_response) -class MichaelRouter(object): +class MichaelRouterMiddleware(object): """ - My attempt at a routing class. 
Just override __init__ to call - super, then set up routes in self.map. + Router that maps incoming requests to WSGI apps or to standard + controllers+actions. The response will be a WSGI response; standard + controllers+actions will by default have their results serialized + to the requested Content Type, or you can subclass and override + _to_webob_response to customize this. """ - def __init__(self): - self.map = routes.Mapper() - self._router = routes.middleware.RoutesMiddleware(self._proceed, self.map) + def __init__(self, map): + """ + Create a router for the given routes.Mapper. It may contain standard + routes (i.e. specifying controllers and actions), or may route to a + WSGI app by instead specifying a wsgi_app=SomeApp() parameter in + map.connect(). + """ + self.map = map + self._router = routes.middleware.RoutesMiddleware(self.__proceed, self.map) @webob.dec.wsgify def __call__(self, req): @@ -160,62 +169,84 @@ class MichaelRouter(object): return self._router @webob.dec.wsgify - def _proceed(self, req): - """ - Called by self._router after matching the incoming request to a route - and putting the information into req.environ. - """ + @staticmethod + def __proceed(req): + # Called by self._router after matching the incoming request to a route + # and putting the information into req.environ. Either returns 404, the + # routed WSGI app, or _to_webob_response(the action result). + if req.environ['routes.route'] is None: return webob.exc.HTTPNotFound() match = environ['wsgiorg.routing_args'][1] - if match.get('_is_wsgi', False): - wsgiapp = match['controller'] - return req.get_response(wsgiapp) + if 'wsgi_app' in match: + return match['wsgi_app'] else: - # TODO(gundlach): doubt this is the right way -- and it really - # feels like this code should exist somewhere already on the - # internet + kwargs = match.copy() controller, action = match['controller'], match['action'] - delete match['controller'] - delete match['action'] - return _as_response(getattr(controller, action)(**match)) + delete kwargs['controller'] + delete kwargs['action'] + return _to_webob_response(req, getattr(controller, action)(**kwargs)) - controller = environ['wsgiorg.routing_args'][1]['controller'] - self._dispatch(controller) - - def _as_response(self, result): + def _to_webob_response(self, req, result): + """ + When routing to a non-WSGI controller+action, the webob.Request and the + action's result will be passed here to be converted into a + webob.Response before returning up the WSGI chain. By default it + serializes to the requested Content Type. """ - When routing to a non-wsgi controller+action, its result will - be passed here before returning up the WSGI chain to be converted - into a webob.Response + return Serializer(req).serialize(result) +class Serializer(object): + """ + Serializes a dictionary to a Content Type specified by a WSGI environment. 
+ """ + def __init__(self, environ): + """Create a serializer based on the given WSGI environment.""" + self.environ = environ + def serialize(self, data): + req = webob.Request(environ) + # TODO(gundlach): temp + if 'applicatio/json' in req.accept): + import json + return json.dumps(result) + else: + return '' + repr(data) + '' -class ApiVersionRouter(MichaelRouter): +class ApiVersionRouter(MichaelRouterMiddleware): def __init__(self): - super(ApiVersionRouter, self).__init__(self) + map = routes.Mapper() - self.map.connect(None, "/v1.0/{path_info:.*}", controller=RsApiRouter()) - self.map.connect(None, "/ec2/{path_info:.*}", controller=Ec2ApiRouter()) + map.connect(None, "/v1.0/{path_info:.*}", wsgi_app=RsApiRouter()) + map.connect(None, "/ec2/{path_info:.*}", wsgi_app=Ec2ApiRouter()) -class RsApiRouter(MichaelRouter): + super(ApiVersionRouter, self).__init__(self, map) + +class RsApiRouter(MichaelRouterMiddleware): def __init__(self): - super(RsApiRouter, self).__init__(self) + map = routes.Mapper() + + map.resource("server", "servers", controller=ServerController()) + map.resource("image", "images", controller=ImageController()) + map.resource("flavor", "flavors", controller=FlavorController()) + map.resource("sharedipgroup", "sharedipgroups", + controller=SharedIpGroupController()) - self.map.resource("server", "servers", controller=CloudServersServerApi()) - self.map.resource("image", "images", controller=CloudServersImageApi()) - self.map.resource("flavor", "flavors", controller=CloudServersFlavorApi()) - self.map.resource("sharedipgroup", "sharedipgroups", - controller=CloudServersSharedIpGroupApi()) + super(RsApiRouter, self).__init__(self, map) class Ec2ApiRouter(object): + @webob.dec.wsgify + def __call__(self, req): + return 'dummy response' + +class ServerController(object): def __getattr__(self, key): - return lambda *x: {'dummy response': 'i am a dummy response'} -CloudServersServerApi = CloudServersImageApi = CloudServersFlavorApi = \ - CloudServersSharedIpGroupApi = Ec2ApiRouter + return {'dummy': 'dummy response'} +ImageController = FlavorController = SharedIpGroupController = ServerController + class Router(Middleware): # pylint: disable-msg=R0921 """Wrapper to help setup routes.middleware.RoutesMiddleware.""" -- cgit From a0fb0fdf1e899488f0717bea6ee2cad58120070b Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 11 Aug 2010 14:46:43 -0400 Subject: Working router that can target WSGI middleware or a standard controller+action --- nova/wsgi.py | 205 ++++++++++++++++++++++++++++------------------------------- 1 file changed, 98 insertions(+), 107 deletions(-) diff --git a/nova/wsgi.py b/nova/wsgi.py index c511a3f06..81890499e 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -29,6 +29,8 @@ import eventlet.wsgi eventlet.patcher.monkey_patch(all=False, socket=True) import routes import routes.middleware +import webob.dec +import webob.exc logging.getLogger("routes.middleware").addHandler(logging.StreamHandler()) @@ -89,75 +91,80 @@ class Middleware(Application): # pylint: disable-msg=W0223 class Debug(Middleware): - """Helper class that can be insertd into any WSGI application chain + """Helper class that can be inserted into any WSGI application chain to get information about the request and response.""" - def __call__(self, environ, start_response): - for key, value in environ.items(): + @webob.dec.wsgify + def __call__(self, req): + print ("*" * 40) + " REQUEST ENVIRON" + for key, value in req.environ.items(): print key, "=", value print - wrapper = 
debug_start_response(start_response) - return debug_print_body(self.application(environ, wrapper)) - - -def debug_start_response(start_response): - """Wrap the start_response to capture when called.""" + resp = req.get_response(self.application) - def wrapper(status, headers, exc_info=None): - """Print out all headers when start_response is called.""" - print status - for (key, value) in headers: + print ("*" * 40) + " RESPONSE HEADERS" + for (key, value) in resp.headers: print key, "=", value print - start_response(status, headers, exc_info) - return wrapper + resp.app_iter = self.print_generator(resp.app_iter) + return resp -def debug_print_body(body): - """Print the body of the response as it is sent back.""" + @staticmethod + def print_generator(app_iter): + """ + Iterator that prints the contents of a wrapper string iterator + when iterated. + """ + print ("*" * 40) + "BODY" + for part in app_iter: + sys.stdout.write(part) + sys.stdout.flush() + yield part + print - class Wrapper(object): - """Iterate through all the body parts and print before returning.""" - def __iter__(self): - for part in body: - sys.stdout.write(part) - sys.stdout.flush() - yield part - print +class Router(object): + """ + WSGI middleware that maps incoming requests to targets. + + Non-WSGI-app targets have their results converted to a WSGI response + automatically -- by default, they are serialized according to the Content + Type from the request. This behavior can be changed by overriding + _to_webob_response(). + """ + + def __init__(self, map, targets): + """ + Create a router for the given routes.Mapper `map`. - return Wrapper() + Each route in `map` must contain either + - a 'wsgi_app' string or + - a 'controller' string and an 'action' string. + 'wsgi_app' is a key into the `targets` dictionary whose value + is a WSGI app. 'controller' is a key into `targets` whose value is + a class instance containing the method specified by 'action'. -class ParsedRoutes(Middleware): - """Processed parsed routes from routes.middleware.RoutesMiddleware - and call either the controller if found or the default application - otherwise.""" + Examples: + map = routes.Mapper() + targets = { "servers": ServerController(), "blog": BlogWsgiApp() } - def __call__(self, environ, start_response): - if environ['routes.route'] is None: - return self.application(environ, start_response) - app = environ['wsgiorg.routing_args'][1]['controller'] - return app(environ, start_response) + # Explicit mapping of one route to a controller+action + map.connect(None, "/serverlist", controller="servers", action="list") -class MichaelRouterMiddleware(object): - """ - Router that maps incoming requests to WSGI apps or to standard - controllers+actions. The response will be a WSGI response; standard - controllers+actions will by default have their results serialized - to the requested Content Type, or you can subclass and override - _to_webob_response to customize this. - """ - - def __init__(self, map): - """ - Create a router for the given routes.Mapper. It may contain standard - routes (i.e. specifying controllers and actions), or may route to a - WSGI app by instead specifying a wsgi_app=SomeApp() parameter in - map.connect(). + # Controller string is implicitly equal to 2nd param here, and + # actions are all implicitly defined + map.resource("server", "servers") + + # Pointing to a WSGI app. You'll need to specify the {path_info:.*} + # parameter so the target app can work with just this section of the + # URL.
+ map.connect(None, "/v1.0/{path_info:.*}", wsgi_app="blog") """ self.map = map + self.targets = targets self._router = routes.middleware.RoutesMiddleware(self.__proceed, self.map) @webob.dec.wsgify @@ -169,23 +176,28 @@ class MichaelRouterMiddleware(object): return self._router @webob.dec.wsgify - @staticmethod - def __proceed(req): + def __proceed(self, req): # Called by self._router after matching the incoming request to a route # and putting the information into req.environ. Either returns 404, the # routed WSGI app, or _to_webob_response(the action result). if req.environ['routes.route'] is None: return webob.exc.HTTPNotFound() - match = environ['wsgiorg.routing_args'][1] + match = req.environ['wsgiorg.routing_args'][1] if 'wsgi_app' in match: - return match['wsgi_app'] + app_name = match['wsgi_app'] + app = self.targets[app_name] + return app else: kwargs = match.copy() - controller, action = match['controller'], match['action'] - delete kwargs['controller'] - delete kwargs['action'] - return _to_webob_response(req, getattr(controller, action)(**kwargs)) + controller_name, action = match['controller'], match['action'] + del kwargs['controller'] + del kwargs['action'] + + controller = self.targets[controller_name] + method = getattr(controller, action) + result = method(**kwargs) + return self._to_webob_response(req, result) def _to_webob_response(self, req, result): """ @@ -194,7 +206,8 @@ class MichaelRouterMiddleware(object): webob.Response before returning up the WSGI chain. By default it serializes to the requested Content Type. """ - return Serializer(req).serialize(result) + return Serializer(req.environ).serialize(result) + class Serializer(object): """ @@ -206,75 +219,53 @@ class Serializer(object): self.environ = environ def serialize(self, data): - req = webob.Request(environ) + req = webob.Request(self.environ) # TODO(gundlach): temp - if 'applicatio/json' in req.accept): + if req.accept and 'application/json' in req.accept: import json - return json.dumps(result) + return json.dumps(data) else: return '' + repr(data) + '' -class ApiVersionRouter(MichaelRouterMiddleware): +class ApiVersionRouter(Router): def __init__(self): map = routes.Mapper() - map.connect(None, "/v1.0/{path_info:.*}", wsgi_app=RsApiRouter()) - map.connect(None, "/ec2/{path_info:.*}", wsgi_app=Ec2ApiRouter()) + map.connect(None, "/v1.0/{path_info:.*}", wsgi_app="rs") + map.connect(None, "/ec2/{path_info:.*}", wsgi_app="ec2") + + targets = { "rs": RsApiRouter(), "ec2": Ec2ApiRouter() } - super(ApiVersionRouter, self).__init__(self, map) + super(ApiVersionRouter, self).__init__(map, targets) -class RsApiRouter(MichaelRouterMiddleware): +class RsApiRouter(Router): def __init__(self): map = routes.Mapper() - map.resource("server", "servers", controller=ServerController()) - map.resource("image", "images", controller=ImageController()) - map.resource("flavor", "flavors", controller=FlavorController()) - map.resource("sharedipgroup", "sharedipgroups", - controller=SharedIpGroupController()) + map.resource("server", "servers") + map.resource("image", "images") + map.resource("flavor", "flavors") + map.resource("sharedipgroup", "sharedipgroups") - super(RsApiRouter, self).__init__(self, map) + targets = { + 'servers': ServerController(), + 'images': ImageController(), + 'flavors': FlavorController(), + 'sharedipgroups': SharedIpGroupController() + } + super(RsApiRouter, self).__init__(map, targets) + +# TODO(gundlach): temp class Ec2ApiRouter(object): @webob.dec.wsgify def __call__(self, req): return 'dummy 
response' - +# TODO(gundlach): temp class ServerController(object): def __getattr__(self, key): - return {'dummy': 'dummy response'} + return lambda **args: {key: 'dummy response for %s' % repr(args)} +# TODO(gundlach): temp ImageController = FlavorController = SharedIpGroupController = ServerController - - -class Router(Middleware): # pylint: disable-msg=R0921 - """Wrapper to help setup routes.middleware.RoutesMiddleware.""" - - def __init__(self, application): - self.map = routes.Mapper() - self._build_map() - application = ParsedRoutes(application) - application = routes.middleware.RoutesMiddleware(application, self.map) - super(Router, self).__init__(application) - - def __call__(self, environ, start_response): - return self.application(environ, start_response) - - def _build_map(self): - """Method to create new connections for the routing map.""" - raise NotImplementedError("You must implement _build_map") - - def _connect(self, *args, **kwargs): - """Wrapper for the map.connect method.""" - self.map.connect(*args, **kwargs) - - -def route_args(application): - """Decorator to make grabbing routing args more convenient.""" - - def wrapper(self, req): - """Call application with req and parsed routing args from.""" - return application(self, req, req.environ['wsgiorg.routing_args'][1]) - - return wrapper -- cgit From 2e753b033dae6270674c0397be8e01bd2ff47980 Mon Sep 17 00:00:00 2001 From: Matthew Dietz Date: Wed, 11 Aug 2010 15:27:27 -0500 Subject: Prototype implementation of Servers controller --- nova/endpoint/aws/cloud.py | 729 +++++++++++++++++++++ nova/endpoint/aws/images.py | 95 +++ nova/endpoint/cloud.py | 729 --------------------- nova/endpoint/images.py | 95 --- nova/endpoint/rackspace.py | 186 ------ nova/endpoint/rackspace/controllers/base.py | 9 + nova/endpoint/rackspace/controllers/flavors.py | 0 nova/endpoint/rackspace/controllers/images.py | 0 nova/endpoint/rackspace/controllers/servers.py | 72 ++ .../rackspace/controllers/shared_ip_groups.py | 0 nova/endpoint/rackspace/rackspace.py | 183 ++++++ 11 files changed, 1088 insertions(+), 1010 deletions(-) create mode 100644 nova/endpoint/aws/cloud.py create mode 100644 nova/endpoint/aws/images.py delete mode 100644 nova/endpoint/cloud.py delete mode 100644 nova/endpoint/images.py delete mode 100644 nova/endpoint/rackspace.py create mode 100644 nova/endpoint/rackspace/controllers/base.py create mode 100644 nova/endpoint/rackspace/controllers/flavors.py create mode 100644 nova/endpoint/rackspace/controllers/images.py create mode 100644 nova/endpoint/rackspace/controllers/servers.py create mode 100644 nova/endpoint/rackspace/controllers/shared_ip_groups.py create mode 100644 nova/endpoint/rackspace/rackspace.py diff --git a/nova/endpoint/aws/cloud.py b/nova/endpoint/aws/cloud.py new file mode 100644 index 000000000..878d54a15 --- /dev/null +++ b/nova/endpoint/aws/cloud.py @@ -0,0 +1,729 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Cloud Controller: Implementation of EC2 REST API calls, which are +dispatched to other nodes via AMQP RPC. State is via distributed +datastore. +""" + +import base64 +import logging +import os +import time +from twisted.internet import defer + +from nova import datastore +from nova import exception +from nova import flags +from nova import rpc +from nova import utils +from nova.auth import rbac +from nova.auth import manager +from nova.compute import model +from nova.compute.instance_types import INSTANCE_TYPES +from nova.endpoint import images +from nova.network import service as network_service +from nova.network import model as network_model +from nova.volume import service + + +FLAGS = flags.FLAGS + +flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') + +def _gen_key(user_id, key_name): + """ Tuck this into AuthManager """ + try: + mgr = manager.AuthManager() + private_key, fingerprint = mgr.generate_key_pair(user_id, key_name) + except Exception as ex: + return {'exception': ex} + return {'private_key': private_key, 'fingerprint': fingerprint} + + +class CloudController(object): + """ CloudController provides the critical dispatch between + inbound API calls through the endpoint and messages + sent to the other nodes. +""" + def __init__(self): + self.instdir = model.InstanceDirectory() + self.setup() + + @property + def instances(self): + """ All instances in the system, as dicts """ + return self.instdir.all + + @property + def volumes(self): + """ returns a list of all volumes """ + for volume_id in datastore.Redis.instance().smembers("volumes"): + volume = service.get_volume(volume_id) + yield volume + + def __str__(self): + return 'CloudController' + + def setup(self): + """ Ensure the keychains and folders exist. 
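A convention in _gen_key above deserves a note: exceptions cannot propagate back across the worker pool that create_key_pair uses, so failures are returned inside the result dict and turned back into errors (an errback) on the calling side. A toy, runnable sketch of that round trip, with made-up names:

    def worker(arg):
        # Runs in the pool: never raises, always returns a dict.
        try:
            if arg is None:
                raise ValueError('no arg given')
            return {'value': arg * 2}
        except Exception as ex:
            return {'exception': ex}

    def on_result(result):
        # Runs on the calling side; create_key_pair calls d.errback() here.
        if 'exception' in result:
            raise result['exception']
        return result['value']

    print on_result(worker(21))  # prints 42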
""" + # Create keys folder, if it doesn't exist + if not os.path.exists(FLAGS.keys_path): + os.makedirs(os.path.abspath(FLAGS.keys_path)) + # Gen root CA, if we don't have one + root_ca_path = os.path.join(FLAGS.ca_path, FLAGS.ca_file) + if not os.path.exists(root_ca_path): + start = os.getcwd() + os.chdir(FLAGS.ca_path) + utils.runthis("Generating root CA: %s", "sh genrootca.sh") + os.chdir(start) + # TODO: Do this with M2Crypto instead + + def get_instance_by_ip(self, ip): + return self.instdir.by_ip(ip) + + def _get_mpi_data(self, project_id): + result = {} + for instance in self.instdir.all: + if instance['project_id'] == project_id: + line = '%s slots=%d' % (instance['private_dns_name'], INSTANCE_TYPES[instance['instance_type']]['vcpus']) + if instance['key_name'] in result: + result[instance['key_name']].append(line) + else: + result[instance['key_name']] = [line] + return result + + def get_metadata(self, ip): + i = self.get_instance_by_ip(ip) + if i is None: + return None + mpi = self._get_mpi_data(i['project_id']) + if i['key_name']: + keys = { + '0': { + '_name': i['key_name'], + 'openssh-key': i['key_data'] + } + } + else: + keys = '' + data = { + 'user-data': base64.b64decode(i['user_data']), + 'meta-data': { + 'ami-id': i['image_id'], + 'ami-launch-index': i['ami_launch_index'], + 'ami-manifest-path': 'FIXME', # image property + 'block-device-mapping': { # TODO: replace with real data + 'ami': 'sda1', + 'ephemeral0': 'sda2', + 'root': '/dev/sda1', + 'swap': 'sda3' + }, + 'hostname': i['private_dns_name'], # is this public sometimes? + 'instance-action': 'none', + 'instance-id': i['instance_id'], + 'instance-type': i.get('instance_type', ''), + 'local-hostname': i['private_dns_name'], + 'local-ipv4': i['private_dns_name'], # TODO: switch to IP + 'kernel-id': i.get('kernel_id', ''), + 'placement': { + 'availaibility-zone': i.get('availability_zone', 'nova'), + }, + 'public-hostname': i.get('dns_name', ''), + 'public-ipv4': i.get('dns_name', ''), # TODO: switch to IP + 'public-keys' : keys, + 'ramdisk-id': i.get('ramdisk_id', ''), + 'reservation-id': i['reservation_id'], + 'security-groups': i.get('groups', ''), + 'mpi': mpi + } + } + if False: # TODO: store ancestor ids + data['ancestor-ami-ids'] = [] + if i.get('product_codes', None): + data['product-codes'] = i['product_codes'] + return data + + @rbac.allow('all') + def describe_availability_zones(self, context, **kwargs): + return {'availabilityZoneInfo': [{'zoneName': 'nova', + 'zoneState': 'available'}]} + + @rbac.allow('all') + def describe_regions(self, context, region_name=None, **kwargs): + # TODO(vish): region_name is an array. 
Support filtering + return {'regionInfo': [{'regionName': 'nova', + 'regionUrl': FLAGS.ec2_url}]} + + @rbac.allow('all') + def describe_snapshots(self, + context, + snapshot_id=None, + owner=None, + restorable_by=None, + **kwargs): + return {'snapshotSet': [{'snapshotId': 'fixme', + 'volumeId': 'fixme', + 'status': 'fixme', + 'startTime': 'fixme', + 'progress': 'fixme', + 'ownerId': 'fixme', + 'volumeSize': 0, + 'description': 'fixme'}]} + + @rbac.allow('all') + def describe_key_pairs(self, context, key_name=None, **kwargs): + key_pairs = context.user.get_key_pairs() + if not key_name is None: + key_pairs = [x for x in key_pairs if x.name in key_name] + + result = [] + for key_pair in key_pairs: + # filter out the vpn keys + suffix = FLAGS.vpn_key_suffix + if context.user.is_admin() or not key_pair.name.endswith(suffix): + result.append({ + 'keyName': key_pair.name, + 'keyFingerprint': key_pair.fingerprint, + }) + + return { 'keypairsSet': result } + + @rbac.allow('all') + def create_key_pair(self, context, key_name, **kwargs): + try: + d = defer.Deferred() + p = context.handler.application.settings.get('pool') + def _complete(kwargs): + if 'exception' in kwargs: + d.errback(kwargs['exception']) + return + d.callback({'keyName': key_name, + 'keyFingerprint': kwargs['fingerprint'], + 'keyMaterial': kwargs['private_key']}) + p.apply_async(_gen_key, [context.user.id, key_name], + callback=_complete) + return d + + except manager.UserError as e: + raise + + @rbac.allow('all') + def delete_key_pair(self, context, key_name, **kwargs): + context.user.delete_key_pair(key_name) + # aws returns true even if the key doens't exist + return True + + @rbac.allow('all') + def describe_security_groups(self, context, group_names, **kwargs): + groups = { 'securityGroupSet': [] } + + # Stubbed for now to unblock other things. 
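The @rbac.allow decorators throughout this controller gate each call on the caller's roles. The sketch below shows only the general shape of such a decorator, with invented context and controller classes; nova's actual rbac module may implement the check differently:

    def allow(*roles):
        def decorator(func):
            def wrapper(self, context, *args, **kwargs):
                held = set(getattr(context, 'roles', []))
                if 'all' not in roles and not held & set(roles):
                    raise Exception('%s requires one of %s'
                                    % (func.__name__, list(roles)))
                return func(self, context, *args, **kwargs)
            return wrapper
        return decorator

    class ToyContext(object):
        roles = ['netadmin']

    class ToyController(object):
        @allow('netadmin')
        def create_security_group(self, context, group_name, **kwargs):
            return True

    print ToyController().create_security_group(ToyContext(), 'default')
    # prints True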
+ return groups + + @rbac.allow('netadmin') + def create_security_group(self, context, group_name, **kwargs): + return True + + @rbac.allow('netadmin') + def delete_security_group(self, context, group_name, **kwargs): + return True + + @rbac.allow('projectmanager', 'sysadmin') + def get_console_output(self, context, instance_id, **kwargs): + # instance_id is passed in as a list of instances + instance = self._get_instance(context, instance_id[0]) + return rpc.call('%s.%s' % (FLAGS.compute_topic, instance['node_name']), + {"method": "get_console_output", + "args" : {"instance_id": instance_id[0]}}) + + def _get_user_id(self, context): + if context and context.user: + return context.user.id + else: + return None + + @rbac.allow('projectmanager', 'sysadmin') + def describe_volumes(self, context, **kwargs): + volumes = [] + for volume in self.volumes: + if context.user.is_admin() or volume['project_id'] == context.project.id: + v = self.format_volume(context, volume) + volumes.append(v) + return defer.succeed({'volumeSet': volumes}) + + def format_volume(self, context, volume): + v = {} + v['volumeId'] = volume['volume_id'] + v['status'] = volume['status'] + v['size'] = volume['size'] + v['availabilityZone'] = volume['availability_zone'] + v['createTime'] = volume['create_time'] + if context.user.is_admin(): + v['status'] = '%s (%s, %s, %s, %s)' % ( + volume.get('status', None), + volume.get('user_id', None), + volume.get('node_name', None), + volume.get('instance_id', ''), + volume.get('mountpoint', '')) + if volume['attach_status'] == 'attached': + v['attachmentSet'] = [{'attachTime': volume['attach_time'], + 'deleteOnTermination': volume['delete_on_termination'], + 'device' : volume['mountpoint'], + 'instanceId' : volume['instance_id'], + 'status' : 'attached', + 'volume_id' : volume['volume_id']}] + else: + v['attachmentSet'] = [{}] + return v + + @rbac.allow('projectmanager', 'sysadmin') + @defer.inlineCallbacks + def create_volume(self, context, size, **kwargs): + # TODO(vish): refactor this to create the volume object here and tell service to create it + result = yield rpc.call(FLAGS.volume_topic, {"method": "create_volume", + "args" : {"size": size, + "user_id": context.user.id, + "project_id": context.project.id}}) + # NOTE(vish): rpc returned value is in the result key in the dictionary + volume = self._get_volume(context, result['result']) + defer.returnValue({'volumeSet': [self.format_volume(context, volume)]}) + + def _get_address(self, context, public_ip): + # FIXME(vish) this should move into network.py + address = network_model.PublicAddress.lookup(public_ip) + if address and (context.user.is_admin() or address['project_id'] == context.project.id): + return address + raise exception.NotFound("Address at ip %s not found" % public_ip) + + def _get_image(self, context, image_id): + """passes in context because + objectstore does its own authorization""" + result = images.list(context, [image_id]) + if not result: + raise exception.NotFound('Image %s could not be found' % image_id) + image = result[0] + return image + + def _get_instance(self, context, instance_id): + for instance in self.instdir.all: + if instance['instance_id'] == instance_id: + if context.user.is_admin() or instance['project_id'] == context.project.id: + return instance + raise exception.NotFound('Instance %s could not be found' % instance_id) + + def _get_volume(self, context, volume_id): + volume = service.get_volume(volume_id) + if context.user.is_admin() or volume['project_id'] == context.project.id: + 
return volume + raise exception.NotFound('Volume %s could not be found' % volume_id) + + @rbac.allow('projectmanager', 'sysadmin') + def attach_volume(self, context, volume_id, instance_id, device, **kwargs): + volume = self._get_volume(context, volume_id) + if volume['status'] == "attached": + raise exception.ApiError("Volume is already attached") + # TODO(vish): looping through all volumes is slow. We should probably maintain an index + for vol in self.volumes: + if vol['instance_id'] == instance_id and vol['mountpoint'] == device: + raise exception.ApiError("Volume %s is already attached to %s" % (vol['volume_id'], vol['mountpoint'])) + volume.start_attach(instance_id, device) + instance = self._get_instance(context, instance_id) + compute_node = instance['node_name'] + rpc.cast('%s.%s' % (FLAGS.compute_topic, compute_node), + {"method": "attach_volume", + "args" : {"volume_id": volume_id, + "instance_id" : instance_id, + "mountpoint" : device}}) + return defer.succeed({'attachTime' : volume['attach_time'], + 'device' : volume['mountpoint'], + 'instanceId' : instance_id, + 'requestId' : context.request_id, + 'status' : volume['attach_status'], + 'volumeId' : volume_id}) + + + @rbac.allow('projectmanager', 'sysadmin') + def detach_volume(self, context, volume_id, **kwargs): + volume = self._get_volume(context, volume_id) + instance_id = volume.get('instance_id', None) + if not instance_id: + raise exception.Error("Volume isn't attached to anything!") + if volume['status'] == "available": + raise exception.Error("Volume is already detached") + try: + volume.start_detach() + instance = self._get_instance(context, instance_id) + rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), + {"method": "detach_volume", + "args" : {"instance_id": instance_id, + "volume_id": volume_id}}) + except exception.NotFound: + # If the instance doesn't exist anymore, + # then we need to call detach blind + volume.finish_detach() + return defer.succeed({'attachTime' : volume['attach_time'], + 'device' : volume['mountpoint'], + 'instanceId' : instance_id, + 'requestId' : context.request_id, + 'status' : volume['attach_status'], + 'volumeId' : volume_id}) + + def _convert_to_set(self, lst, label): + if lst == None or lst == []: + return None + if not isinstance(lst, list): + lst = [lst] + return [{label: x} for x in lst] + + @rbac.allow('all') + def describe_instances(self, context, **kwargs): + return defer.succeed(self._format_instances(context)) + + def _format_instances(self, context, reservation_id = None): + reservations = {} + if context.user.is_admin(): + instgenerator = self.instdir.all + else: + instgenerator = self.instdir.by_project(context.project.id) + for instance in instgenerator: + res_id = instance.get('reservation_id', 'Unknown') + if reservation_id != None and reservation_id != res_id: + continue + if not context.user.is_admin(): + if instance['image_id'] == FLAGS.vpn_image_id: + continue + i = {} + i['instance_id'] = instance.get('instance_id', None) + i['image_id'] = instance.get('image_id', None) + i['instance_state'] = { + 'code': instance.get('state', 0), + 'name': instance.get('state_description', 'pending') + } + i['public_dns_name'] = network_model.get_public_ip_for_instance( + i['instance_id']) + i['private_dns_name'] = instance.get('private_dns_name', None) + if not i['public_dns_name']: + i['public_dns_name'] = i['private_dns_name'] + i['dns_name'] = instance.get('dns_name', None) + i['key_name'] = instance.get('key_name', None) + if context.user.is_admin(): + 
i['key_name'] = '%s (%s, %s)' % (i['key_name'], + instance.get('project_id', None), instance.get('node_name','')) + i['product_codes_set'] = self._convert_to_set( + instance.get('product_codes', None), 'product_code') + i['instance_type'] = instance.get('instance_type', None) + i['launch_time'] = instance.get('launch_time', None) + i['ami_launch_index'] = instance.get('ami_launch_index', + None) + if not reservations.has_key(res_id): + r = {} + r['reservation_id'] = res_id + r['owner_id'] = instance.get('project_id', None) + r['group_set'] = self._convert_to_set( + instance.get('groups', None), 'group_id') + r['instances_set'] = [] + reservations[res_id] = r + reservations[res_id]['instances_set'].append(i) + + instance_response = {'reservationSet' : list(reservations.values()) } + return instance_response + + @rbac.allow('all') + def describe_addresses(self, context, **kwargs): + return self.format_addresses(context) + + def format_addresses(self, context): + addresses = [] + for address in network_model.PublicAddress.all(): + # TODO(vish): implement a by_project iterator for addresses + if (context.user.is_admin() or + address['project_id'] == context.project.id): + address_rv = { + 'public_ip': address['address'], + 'instance_id' : address.get('instance_id', 'free') + } + if context.user.is_admin(): + address_rv['instance_id'] = "%s (%s, %s)" % ( + address['instance_id'], + address['user_id'], + address['project_id'], + ) + addresses.append(address_rv) + return {'addressesSet': addresses} + + @rbac.allow('netadmin') + @defer.inlineCallbacks + def allocate_address(self, context, **kwargs): + network_topic = yield self._get_network_topic(context) + alloc_result = yield rpc.call(network_topic, + {"method": "allocate_elastic_ip", + "args": {"user_id": context.user.id, + "project_id": context.project.id}}) + public_ip = alloc_result['result'] + defer.returnValue({'addressSet': [{'publicIp' : public_ip}]}) + + @rbac.allow('netadmin') + @defer.inlineCallbacks + def release_address(self, context, public_ip, **kwargs): + # NOTE(vish): Should we make sure this works? 
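The NOTE above hinges on the difference between rpc.call and rpc.cast as used in this file: call blocks until the remote worker replies with a {'result': ...} dict, while cast is fire-and-forget, so a deallocation that fails remotely goes unreported here. Toy stand-ins (not nova's rpc module) to make the contrast concrete:

    def toy_call(topic, message):
        # Waits for the remote worker and returns its reply.
        return {'result': '%s handled on %s' % (message['method'], topic)}

    def toy_cast(topic, message):
        # Queues the message; nothing comes back to the caller.
        pass

    print toy_call('network', {'method': 'allocate_elastic_ip'})
    toy_cast('network', {'method': 'deallocate_elastic_ip'})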
+ network_topic = yield self._get_network_topic(context) + rpc.cast(network_topic, + {"method": "deallocate_elastic_ip", + "args": {"elastic_ip": public_ip}}) + defer.returnValue({'releaseResponse': ["Address released."]}) + + @rbac.allow('netadmin') + @defer.inlineCallbacks + def associate_address(self, context, instance_id, public_ip, **kwargs): + instance = self._get_instance(context, instance_id) + address = self._get_address(context, public_ip) + network_topic = yield self._get_network_topic(context) + rpc.cast(network_topic, + {"method": "associate_elastic_ip", + "args": {"elastic_ip": address['address'], + "fixed_ip": instance['private_dns_name'], + "instance_id": instance['instance_id']}}) + defer.returnValue({'associateResponse': ["Address associated."]}) + + @rbac.allow('netadmin') + @defer.inlineCallbacks + def disassociate_address(self, context, public_ip, **kwargs): + address = self._get_address(context, public_ip) + network_topic = yield self._get_network_topic(context) + rpc.cast(network_topic, + {"method": "disassociate_elastic_ip", + "args": {"elastic_ip": address['address']}}) + defer.returnValue({'disassociateResponse': ["Address disassociated."]}) + + @defer.inlineCallbacks + def _get_network_topic(self, context): + """Retrieves the network host for a project""" + host = network_service.get_host_for_project(context.project.id) + if not host: + result = yield rpc.call(FLAGS.network_topic, + {"method": "set_network_host", + "args": {"user_id": context.user.id, + "project_id": context.project.id}}) + host = result['result'] + defer.returnValue('%s.%s' %(FLAGS.network_topic, host)) + + @rbac.allow('projectmanager', 'sysadmin') + @defer.inlineCallbacks + def run_instances(self, context, **kwargs): + # make sure user can access the image + # vpn image is private so it doesn't show up on lists + if kwargs['image_id'] != FLAGS.vpn_image_id: + image = self._get_image(context, kwargs['image_id']) + + # FIXME(ja): if image is cloudpipe, this breaks + + # get defaults from imagestore + image_id = image['imageId'] + kernel_id = image.get('kernelId', FLAGS.default_kernel) + ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk) + + # API parameters overrides of defaults + kernel_id = kwargs.get('kernel_id', kernel_id) + ramdisk_id = kwargs.get('ramdisk_id', ramdisk_id) + + # make sure we have access to kernel and ramdisk + self._get_image(context, kernel_id) + self._get_image(context, ramdisk_id) + + logging.debug("Going to run instances...") + reservation_id = utils.generate_uid('r') + launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) + key_data = None + if kwargs.has_key('key_name'): + key_pair = context.user.get_key_pair(kwargs['key_name']) + if not key_pair: + raise exception.ApiError('Key Pair %s not found' % + kwargs['key_name']) + key_data = key_pair.public_key + network_topic = yield self._get_network_topic(context) + # TODO: Get the real security group of launch in here + security_group = "default" + for num in range(int(kwargs['max_count'])): + vpn = False + if image_id == FLAGS.vpn_image_id: + vpn = True + allocate_result = yield rpc.call(network_topic, + {"method": "allocate_fixed_ip", + "args": {"user_id": context.user.id, + "project_id": context.project.id, + "security_group": security_group, + "vpn": vpn}}) + allocate_data = allocate_result['result'] + inst = self.instdir.new() + inst['image_id'] = image_id + inst['kernel_id'] = kernel_id + inst['ramdisk_id'] = ramdisk_id + inst['user_data'] = kwargs.get('user_data', '') + inst['instance_type'] = 
kwargs.get('instance_type', 'm1.small') + inst['reservation_id'] = reservation_id + inst['launch_time'] = launch_time + inst['key_data'] = key_data or '' + inst['key_name'] = kwargs.get('key_name', '') + inst['user_id'] = context.user.id + inst['project_id'] = context.project.id + inst['ami_launch_index'] = num + inst['security_group'] = security_group + for (key, value) in allocate_data.iteritems(): + inst[key] = value + + inst.save() + rpc.cast(FLAGS.compute_topic, + {"method": "run_instance", + "args": {"instance_id" : inst.instance_id}}) + logging.debug("Casting to node for %s's instance with IP of %s" % + (context.user.name, inst['private_dns_name'])) + # TODO: Make Network figure out the network name from ip. + defer.returnValue(self._format_instances(context, reservation_id)) + + @rbac.allow('projectmanager', 'sysadmin') + @defer.inlineCallbacks + def terminate_instances(self, context, instance_id, **kwargs): + logging.debug("Going to start terminating instances") + network_topic = yield self._get_network_topic(context) + for i in instance_id: + logging.debug("Going to try and terminate %s" % i) + try: + instance = self._get_instance(context, i) + except exception.NotFound: + logging.warning("Instance %s was not found during terminate" + % i) + continue + elastic_ip = network_model.get_public_ip_for_instance(i) + if elastic_ip: + logging.debug("Disassociating address %s" % elastic_ip) + # NOTE(vish): Right now we don't really care if the ip is + # disassociated. We may need to worry about + # checking this later. Perhaps in the scheduler? + rpc.cast(network_topic, + {"method": "disassociate_elastic_ip", + "args": {"elastic_ip": elastic_ip}}) + + fixed_ip = instance.get('private_dns_name', None) + if fixed_ip: + logging.debug("Deallocating address %s" % fixed_ip) + # NOTE(vish): Right now we don't really care if the ip is + # actually removed. We may need to worry about + # checking this later. Perhaps in the scheduler? + rpc.cast(network_topic, + {"method": "deallocate_fixed_ip", + "args": {"fixed_ip": fixed_ip}}) + + if instance.get('node_name', 'unassigned') != 'unassigned': + # NOTE(joshua?): It's also internal default + rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), + {"method": "terminate_instance", + "args": {"instance_id": i}}) + else: + instance.destroy() + defer.returnValue(True) + + @rbac.allow('projectmanager', 'sysadmin') + def reboot_instances(self, context, instance_id, **kwargs): + """instance_id is a list of instance ids""" + for i in instance_id: + instance = self._get_instance(context, i) + rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), + {"method": "reboot_instance", + "args" : {"instance_id": i}}) + return defer.succeed(True) + + @rbac.allow('projectmanager', 'sysadmin') + def delete_volume(self, context, volume_id, **kwargs): + # TODO: return error if not authorized + volume = self._get_volume(context, volume_id) + volume_node = volume['node_name'] + rpc.cast('%s.%s' % (FLAGS.volume_topic, volume_node), + {"method": "delete_volume", + "args" : {"volume_id": volume_id}}) + return defer.succeed(True) + + @rbac.allow('all') + def describe_images(self, context, image_id=None, **kwargs): + # The objectstore does its own authorization for describe + imageSet = images.list(context, image_id) + return defer.succeed({'imagesSet': imageSet}) + + @rbac.allow('projectmanager', 'sysadmin') + def deregister_image(self, context, image_id, **kwargs): + # FIXME: should the objectstore be doing these authorization checks? 
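The '%s.%s' % (FLAGS.compute_topic, instance['node_name']) pattern used for terminate, reboot, and (below) volume deletion addresses the queue of one specific worker rather than the shared topic. A hypothetical one-line helper, shown only to make the convention explicit:

    def node_topic(base_topic, node_name):
        """Per-node queue name, e.g. 'compute.node1'."""
        return '%s.%s' % (base_topic, node_name)

    assert node_topic('compute', 'node1') == 'compute.node1'
    assert node_topic('volume', 'storage-3') == 'volume.storage-3'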
+ images.deregister(context, image_id) + return defer.succeed({'imageId': image_id}) + + @rbac.allow('projectmanager', 'sysadmin') + def register_image(self, context, image_location=None, **kwargs): + # FIXME: should the objectstore be doing these authorization checks? + if image_location is None and kwargs.has_key('name'): + image_location = kwargs['name'] + image_id = images.register(context, image_location) + logging.debug("Registered %s as %s" % (image_location, image_id)) + + return defer.succeed({'imageId': image_id}) + + @rbac.allow('all') + def describe_image_attribute(self, context, image_id, attribute, **kwargs): + if attribute != 'launchPermission': + raise exception.ApiError('attribute not supported: %s' % attribute) + try: + image = images.list(context, image_id)[0] + except IndexError: + raise exception.ApiError('invalid id: %s' % image_id) + result = { 'image_id': image_id, 'launchPermission': [] } + if image['isPublic']: + result['launchPermission'].append({ 'group': 'all' }) + return defer.succeed(result) + + @rbac.allow('projectmanager', 'sysadmin') + def modify_image_attribute(self, context, image_id, attribute, operation_type, **kwargs): + # TODO(devcamcar): Support users and groups other than 'all'. + if attribute != 'launchPermission': + raise exception.ApiError('attribute not supported: %s' % attribute) + if not 'user_group' in kwargs: + raise exception.ApiError('user or group not specified') + if len(kwargs['user_group']) != 1 and kwargs['user_group'][0] != 'all': + raise exception.ApiError('only group "all" is supported') + if not operation_type in ['add', 'remove']: + raise exception.ApiError('operation_type must be add or remove') + result = images.modify(context, image_id, operation_type) + return defer.succeed(result) + + def update_state(self, topic, value): + """ accepts status reports from the queue and consolidates them """ + # TODO(jmc): if an instance has disappeared from + # the node, call instance_death + if topic == "instances": + return defer.succeed(True) + aggregate_state = getattr(self, topic) + node_name = value.keys()[0] + items = value[node_name] + + logging.debug("Updating %s state for %s" % (topic, node_name)) + + for item_id in items.keys(): + if (aggregate_state.has_key('pending') and + aggregate_state['pending'].has_key(item_id)): + del aggregate_state['pending'][item_id] + aggregate_state[node_name] = items + + return defer.succeed(True) diff --git a/nova/endpoint/aws/images.py b/nova/endpoint/aws/images.py new file mode 100644 index 000000000..fe7cb5d11 --- /dev/null +++ b/nova/endpoint/aws/images.py @@ -0,0 +1,95 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Proxy AMI-related calls from the cloud controller, to the running +objectstore daemon. 
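The images module that follows drives the objectstore over plain HTTP, passing parameters as a query string built by its qs() helper (defined at the bottom of the file). Reproduced standalone here so the request format is visible before the calls that use it:

    import urllib

    def qs(params):
        pairs = []
        for key in params.keys():
            pairs.append(key + '=' + urllib.quote(params[key]))
        return '&'.join(pairs)

    print qs({'image_id': 'ami-42', 'operation': 'add'})
    # e.g. image_id=ami-42&operation=add (pair order follows dict ordering)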
+""" + +import boto.s3.connection +import json +import urllib + +from nova import flags +from nova import utils +from nova.auth import manager + + +FLAGS = flags.FLAGS + +def modify(context, image_id, operation): + conn(context).make_request( + method='POST', + bucket='_images', + query_args=qs({'image_id': image_id, 'operation': operation})) + + return True + + +def register(context, image_location): + """ rpc call to register a new image based from a manifest """ + + image_id = utils.generate_uid('ami') + conn(context).make_request( + method='PUT', + bucket='_images', + query_args=qs({'image_location': image_location, + 'image_id': image_id})) + + return image_id + +def list(context, filter_list=[]): + """ return a list of all images that a user can see + + optionally filtered by a list of image_id """ + + # FIXME: send along the list of only_images to check for + response = conn(context).make_request( + method='GET', + bucket='_images') + + result = json.loads(response.read()) + if not filter_list is None: + return [i for i in result if i['imageId'] in filter_list] + return result + +def deregister(context, image_id): + """ unregister an image """ + conn(context).make_request( + method='DELETE', + bucket='_images', + query_args=qs({'image_id': image_id})) + +def conn(context): + access = manager.AuthManager().get_access_key(context.user, + context.project) + secret = str(context.user.secret) + calling = boto.s3.connection.OrdinaryCallingFormat() + return boto.s3.connection.S3Connection(aws_access_key_id=access, + aws_secret_access_key=secret, + is_secure=False, + calling_format=calling, + port=FLAGS.s3_port, + host=FLAGS.s3_host) + + +def qs(params): + pairs = [] + for key in params.keys(): + pairs.append(key + '=' + urllib.quote(params[key])) + return '&'.join(pairs) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py deleted file mode 100644 index 878d54a15..000000000 --- a/nova/endpoint/cloud.py +++ /dev/null @@ -1,729 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Cloud Controller: Implementation of EC2 REST API calls, which are -dispatched to other nodes via AMQP RPC. State is via distributed -datastore. 
-""" - -import base64 -import logging -import os -import time -from twisted.internet import defer - -from nova import datastore -from nova import exception -from nova import flags -from nova import rpc -from nova import utils -from nova.auth import rbac -from nova.auth import manager -from nova.compute import model -from nova.compute.instance_types import INSTANCE_TYPES -from nova.endpoint import images -from nova.network import service as network_service -from nova.network import model as network_model -from nova.volume import service - - -FLAGS = flags.FLAGS - -flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') - -def _gen_key(user_id, key_name): - """ Tuck this into AuthManager """ - try: - mgr = manager.AuthManager() - private_key, fingerprint = mgr.generate_key_pair(user_id, key_name) - except Exception as ex: - return {'exception': ex} - return {'private_key': private_key, 'fingerprint': fingerprint} - - -class CloudController(object): - """ CloudController provides the critical dispatch between - inbound API calls through the endpoint and messages - sent to the other nodes. -""" - def __init__(self): - self.instdir = model.InstanceDirectory() - self.setup() - - @property - def instances(self): - """ All instances in the system, as dicts """ - return self.instdir.all - - @property - def volumes(self): - """ returns a list of all volumes """ - for volume_id in datastore.Redis.instance().smembers("volumes"): - volume = service.get_volume(volume_id) - yield volume - - def __str__(self): - return 'CloudController' - - def setup(self): - """ Ensure the keychains and folders exist. """ - # Create keys folder, if it doesn't exist - if not os.path.exists(FLAGS.keys_path): - os.makedirs(os.path.abspath(FLAGS.keys_path)) - # Gen root CA, if we don't have one - root_ca_path = os.path.join(FLAGS.ca_path, FLAGS.ca_file) - if not os.path.exists(root_ca_path): - start = os.getcwd() - os.chdir(FLAGS.ca_path) - utils.runthis("Generating root CA: %s", "sh genrootca.sh") - os.chdir(start) - # TODO: Do this with M2Crypto instead - - def get_instance_by_ip(self, ip): - return self.instdir.by_ip(ip) - - def _get_mpi_data(self, project_id): - result = {} - for instance in self.instdir.all: - if instance['project_id'] == project_id: - line = '%s slots=%d' % (instance['private_dns_name'], INSTANCE_TYPES[instance['instance_type']]['vcpus']) - if instance['key_name'] in result: - result[instance['key_name']].append(line) - else: - result[instance['key_name']] = [line] - return result - - def get_metadata(self, ip): - i = self.get_instance_by_ip(ip) - if i is None: - return None - mpi = self._get_mpi_data(i['project_id']) - if i['key_name']: - keys = { - '0': { - '_name': i['key_name'], - 'openssh-key': i['key_data'] - } - } - else: - keys = '' - data = { - 'user-data': base64.b64decode(i['user_data']), - 'meta-data': { - 'ami-id': i['image_id'], - 'ami-launch-index': i['ami_launch_index'], - 'ami-manifest-path': 'FIXME', # image property - 'block-device-mapping': { # TODO: replace with real data - 'ami': 'sda1', - 'ephemeral0': 'sda2', - 'root': '/dev/sda1', - 'swap': 'sda3' - }, - 'hostname': i['private_dns_name'], # is this public sometimes? 
- 'instance-action': 'none', - 'instance-id': i['instance_id'], - 'instance-type': i.get('instance_type', ''), - 'local-hostname': i['private_dns_name'], - 'local-ipv4': i['private_dns_name'], # TODO: switch to IP - 'kernel-id': i.get('kernel_id', ''), - 'placement': { - 'availaibility-zone': i.get('availability_zone', 'nova'), - }, - 'public-hostname': i.get('dns_name', ''), - 'public-ipv4': i.get('dns_name', ''), # TODO: switch to IP - 'public-keys' : keys, - 'ramdisk-id': i.get('ramdisk_id', ''), - 'reservation-id': i['reservation_id'], - 'security-groups': i.get('groups', ''), - 'mpi': mpi - } - } - if False: # TODO: store ancestor ids - data['ancestor-ami-ids'] = [] - if i.get('product_codes', None): - data['product-codes'] = i['product_codes'] - return data - - @rbac.allow('all') - def describe_availability_zones(self, context, **kwargs): - return {'availabilityZoneInfo': [{'zoneName': 'nova', - 'zoneState': 'available'}]} - - @rbac.allow('all') - def describe_regions(self, context, region_name=None, **kwargs): - # TODO(vish): region_name is an array. Support filtering - return {'regionInfo': [{'regionName': 'nova', - 'regionUrl': FLAGS.ec2_url}]} - - @rbac.allow('all') - def describe_snapshots(self, - context, - snapshot_id=None, - owner=None, - restorable_by=None, - **kwargs): - return {'snapshotSet': [{'snapshotId': 'fixme', - 'volumeId': 'fixme', - 'status': 'fixme', - 'startTime': 'fixme', - 'progress': 'fixme', - 'ownerId': 'fixme', - 'volumeSize': 0, - 'description': 'fixme'}]} - - @rbac.allow('all') - def describe_key_pairs(self, context, key_name=None, **kwargs): - key_pairs = context.user.get_key_pairs() - if not key_name is None: - key_pairs = [x for x in key_pairs if x.name in key_name] - - result = [] - for key_pair in key_pairs: - # filter out the vpn keys - suffix = FLAGS.vpn_key_suffix - if context.user.is_admin() or not key_pair.name.endswith(suffix): - result.append({ - 'keyName': key_pair.name, - 'keyFingerprint': key_pair.fingerprint, - }) - - return { 'keypairsSet': result } - - @rbac.allow('all') - def create_key_pair(self, context, key_name, **kwargs): - try: - d = defer.Deferred() - p = context.handler.application.settings.get('pool') - def _complete(kwargs): - if 'exception' in kwargs: - d.errback(kwargs['exception']) - return - d.callback({'keyName': key_name, - 'keyFingerprint': kwargs['fingerprint'], - 'keyMaterial': kwargs['private_key']}) - p.apply_async(_gen_key, [context.user.id, key_name], - callback=_complete) - return d - - except manager.UserError as e: - raise - - @rbac.allow('all') - def delete_key_pair(self, context, key_name, **kwargs): - context.user.delete_key_pair(key_name) - # aws returns true even if the key doens't exist - return True - - @rbac.allow('all') - def describe_security_groups(self, context, group_names, **kwargs): - groups = { 'securityGroupSet': [] } - - # Stubbed for now to unblock other things. 
- return groups - - @rbac.allow('netadmin') - def create_security_group(self, context, group_name, **kwargs): - return True - - @rbac.allow('netadmin') - def delete_security_group(self, context, group_name, **kwargs): - return True - - @rbac.allow('projectmanager', 'sysadmin') - def get_console_output(self, context, instance_id, **kwargs): - # instance_id is passed in as a list of instances - instance = self._get_instance(context, instance_id[0]) - return rpc.call('%s.%s' % (FLAGS.compute_topic, instance['node_name']), - {"method": "get_console_output", - "args" : {"instance_id": instance_id[0]}}) - - def _get_user_id(self, context): - if context and context.user: - return context.user.id - else: - return None - - @rbac.allow('projectmanager', 'sysadmin') - def describe_volumes(self, context, **kwargs): - volumes = [] - for volume in self.volumes: - if context.user.is_admin() or volume['project_id'] == context.project.id: - v = self.format_volume(context, volume) - volumes.append(v) - return defer.succeed({'volumeSet': volumes}) - - def format_volume(self, context, volume): - v = {} - v['volumeId'] = volume['volume_id'] - v['status'] = volume['status'] - v['size'] = volume['size'] - v['availabilityZone'] = volume['availability_zone'] - v['createTime'] = volume['create_time'] - if context.user.is_admin(): - v['status'] = '%s (%s, %s, %s, %s)' % ( - volume.get('status', None), - volume.get('user_id', None), - volume.get('node_name', None), - volume.get('instance_id', ''), - volume.get('mountpoint', '')) - if volume['attach_status'] == 'attached': - v['attachmentSet'] = [{'attachTime': volume['attach_time'], - 'deleteOnTermination': volume['delete_on_termination'], - 'device' : volume['mountpoint'], - 'instanceId' : volume['instance_id'], - 'status' : 'attached', - 'volume_id' : volume['volume_id']}] - else: - v['attachmentSet'] = [{}] - return v - - @rbac.allow('projectmanager', 'sysadmin') - @defer.inlineCallbacks - def create_volume(self, context, size, **kwargs): - # TODO(vish): refactor this to create the volume object here and tell service to create it - result = yield rpc.call(FLAGS.volume_topic, {"method": "create_volume", - "args" : {"size": size, - "user_id": context.user.id, - "project_id": context.project.id}}) - # NOTE(vish): rpc returned value is in the result key in the dictionary - volume = self._get_volume(context, result['result']) - defer.returnValue({'volumeSet': [self.format_volume(context, volume)]}) - - def _get_address(self, context, public_ip): - # FIXME(vish) this should move into network.py - address = network_model.PublicAddress.lookup(public_ip) - if address and (context.user.is_admin() or address['project_id'] == context.project.id): - return address - raise exception.NotFound("Address at ip %s not found" % public_ip) - - def _get_image(self, context, image_id): - """passes in context because - objectstore does its own authorization""" - result = images.list(context, [image_id]) - if not result: - raise exception.NotFound('Image %s could not be found' % image_id) - image = result[0] - return image - - def _get_instance(self, context, instance_id): - for instance in self.instdir.all: - if instance['instance_id'] == instance_id: - if context.user.is_admin() or instance['project_id'] == context.project.id: - return instance - raise exception.NotFound('Instance %s could not be found' % instance_id) - - def _get_volume(self, context, volume_id): - volume = service.get_volume(volume_id) - if context.user.is_admin() or volume['project_id'] == context.project.id: - 
return volume - raise exception.NotFound('Volume %s could not be found' % volume_id) - - @rbac.allow('projectmanager', 'sysadmin') - def attach_volume(self, context, volume_id, instance_id, device, **kwargs): - volume = self._get_volume(context, volume_id) - if volume['status'] == "attached": - raise exception.ApiError("Volume is already attached") - # TODO(vish): looping through all volumes is slow. We should probably maintain an index - for vol in self.volumes: - if vol['instance_id'] == instance_id and vol['mountpoint'] == device: - raise exception.ApiError("Volume %s is already attached to %s" % (vol['volume_id'], vol['mountpoint'])) - volume.start_attach(instance_id, device) - instance = self._get_instance(context, instance_id) - compute_node = instance['node_name'] - rpc.cast('%s.%s' % (FLAGS.compute_topic, compute_node), - {"method": "attach_volume", - "args" : {"volume_id": volume_id, - "instance_id" : instance_id, - "mountpoint" : device}}) - return defer.succeed({'attachTime' : volume['attach_time'], - 'device' : volume['mountpoint'], - 'instanceId' : instance_id, - 'requestId' : context.request_id, - 'status' : volume['attach_status'], - 'volumeId' : volume_id}) - - - @rbac.allow('projectmanager', 'sysadmin') - def detach_volume(self, context, volume_id, **kwargs): - volume = self._get_volume(context, volume_id) - instance_id = volume.get('instance_id', None) - if not instance_id: - raise exception.Error("Volume isn't attached to anything!") - if volume['status'] == "available": - raise exception.Error("Volume is already detached") - try: - volume.start_detach() - instance = self._get_instance(context, instance_id) - rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), - {"method": "detach_volume", - "args" : {"instance_id": instance_id, - "volume_id": volume_id}}) - except exception.NotFound: - # If the instance doesn't exist anymore, - # then we need to call detach blind - volume.finish_detach() - return defer.succeed({'attachTime' : volume['attach_time'], - 'device' : volume['mountpoint'], - 'instanceId' : instance_id, - 'requestId' : context.request_id, - 'status' : volume['attach_status'], - 'volumeId' : volume_id}) - - def _convert_to_set(self, lst, label): - if lst == None or lst == []: - return None - if not isinstance(lst, list): - lst = [lst] - return [{label: x} for x in lst] - - @rbac.allow('all') - def describe_instances(self, context, **kwargs): - return defer.succeed(self._format_instances(context)) - - def _format_instances(self, context, reservation_id = None): - reservations = {} - if context.user.is_admin(): - instgenerator = self.instdir.all - else: - instgenerator = self.instdir.by_project(context.project.id) - for instance in instgenerator: - res_id = instance.get('reservation_id', 'Unknown') - if reservation_id != None and reservation_id != res_id: - continue - if not context.user.is_admin(): - if instance['image_id'] == FLAGS.vpn_image_id: - continue - i = {} - i['instance_id'] = instance.get('instance_id', None) - i['image_id'] = instance.get('image_id', None) - i['instance_state'] = { - 'code': instance.get('state', 0), - 'name': instance.get('state_description', 'pending') - } - i['public_dns_name'] = network_model.get_public_ip_for_instance( - i['instance_id']) - i['private_dns_name'] = instance.get('private_dns_name', None) - if not i['public_dns_name']: - i['public_dns_name'] = i['private_dns_name'] - i['dns_name'] = instance.get('dns_name', None) - i['key_name'] = instance.get('key_name', None) - if context.user.is_admin(): - 
i['key_name'] = '%s (%s, %s)' % (i['key_name'], - instance.get('project_id', None), instance.get('node_name','')) - i['product_codes_set'] = self._convert_to_set( - instance.get('product_codes', None), 'product_code') - i['instance_type'] = instance.get('instance_type', None) - i['launch_time'] = instance.get('launch_time', None) - i['ami_launch_index'] = instance.get('ami_launch_index', - None) - if not reservations.has_key(res_id): - r = {} - r['reservation_id'] = res_id - r['owner_id'] = instance.get('project_id', None) - r['group_set'] = self._convert_to_set( - instance.get('groups', None), 'group_id') - r['instances_set'] = [] - reservations[res_id] = r - reservations[res_id]['instances_set'].append(i) - - instance_response = {'reservationSet' : list(reservations.values()) } - return instance_response - - @rbac.allow('all') - def describe_addresses(self, context, **kwargs): - return self.format_addresses(context) - - def format_addresses(self, context): - addresses = [] - for address in network_model.PublicAddress.all(): - # TODO(vish): implement a by_project iterator for addresses - if (context.user.is_admin() or - address['project_id'] == context.project.id): - address_rv = { - 'public_ip': address['address'], - 'instance_id' : address.get('instance_id', 'free') - } - if context.user.is_admin(): - address_rv['instance_id'] = "%s (%s, %s)" % ( - address['instance_id'], - address['user_id'], - address['project_id'], - ) - addresses.append(address_rv) - return {'addressesSet': addresses} - - @rbac.allow('netadmin') - @defer.inlineCallbacks - def allocate_address(self, context, **kwargs): - network_topic = yield self._get_network_topic(context) - alloc_result = yield rpc.call(network_topic, - {"method": "allocate_elastic_ip", - "args": {"user_id": context.user.id, - "project_id": context.project.id}}) - public_ip = alloc_result['result'] - defer.returnValue({'addressSet': [{'publicIp' : public_ip}]}) - - @rbac.allow('netadmin') - @defer.inlineCallbacks - def release_address(self, context, public_ip, **kwargs): - # NOTE(vish): Should we make sure this works? 
- network_topic = yield self._get_network_topic(context) - rpc.cast(network_topic, - {"method": "deallocate_elastic_ip", - "args": {"elastic_ip": public_ip}}) - defer.returnValue({'releaseResponse': ["Address released."]}) - - @rbac.allow('netadmin') - @defer.inlineCallbacks - def associate_address(self, context, instance_id, public_ip, **kwargs): - instance = self._get_instance(context, instance_id) - address = self._get_address(context, public_ip) - network_topic = yield self._get_network_topic(context) - rpc.cast(network_topic, - {"method": "associate_elastic_ip", - "args": {"elastic_ip": address['address'], - "fixed_ip": instance['private_dns_name'], - "instance_id": instance['instance_id']}}) - defer.returnValue({'associateResponse': ["Address associated."]}) - - @rbac.allow('netadmin') - @defer.inlineCallbacks - def disassociate_address(self, context, public_ip, **kwargs): - address = self._get_address(context, public_ip) - network_topic = yield self._get_network_topic(context) - rpc.cast(network_topic, - {"method": "disassociate_elastic_ip", - "args": {"elastic_ip": address['address']}}) - defer.returnValue({'disassociateResponse': ["Address disassociated."]}) - - @defer.inlineCallbacks - def _get_network_topic(self, context): - """Retrieves the network host for a project""" - host = network_service.get_host_for_project(context.project.id) - if not host: - result = yield rpc.call(FLAGS.network_topic, - {"method": "set_network_host", - "args": {"user_id": context.user.id, - "project_id": context.project.id}}) - host = result['result'] - defer.returnValue('%s.%s' %(FLAGS.network_topic, host)) - - @rbac.allow('projectmanager', 'sysadmin') - @defer.inlineCallbacks - def run_instances(self, context, **kwargs): - # make sure user can access the image - # vpn image is private so it doesn't show up on lists - if kwargs['image_id'] != FLAGS.vpn_image_id: - image = self._get_image(context, kwargs['image_id']) - - # FIXME(ja): if image is cloudpipe, this breaks - - # get defaults from imagestore - image_id = image['imageId'] - kernel_id = image.get('kernelId', FLAGS.default_kernel) - ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk) - - # API parameters overrides of defaults - kernel_id = kwargs.get('kernel_id', kernel_id) - ramdisk_id = kwargs.get('ramdisk_id', ramdisk_id) - - # make sure we have access to kernel and ramdisk - self._get_image(context, kernel_id) - self._get_image(context, ramdisk_id) - - logging.debug("Going to run instances...") - reservation_id = utils.generate_uid('r') - launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) - key_data = None - if kwargs.has_key('key_name'): - key_pair = context.user.get_key_pair(kwargs['key_name']) - if not key_pair: - raise exception.ApiError('Key Pair %s not found' % - kwargs['key_name']) - key_data = key_pair.public_key - network_topic = yield self._get_network_topic(context) - # TODO: Get the real security group of launch in here - security_group = "default" - for num in range(int(kwargs['max_count'])): - vpn = False - if image_id == FLAGS.vpn_image_id: - vpn = True - allocate_result = yield rpc.call(network_topic, - {"method": "allocate_fixed_ip", - "args": {"user_id": context.user.id, - "project_id": context.project.id, - "security_group": security_group, - "vpn": vpn}}) - allocate_data = allocate_result['result'] - inst = self.instdir.new() - inst['image_id'] = image_id - inst['kernel_id'] = kernel_id - inst['ramdisk_id'] = ramdisk_id - inst['user_data'] = kwargs.get('user_data', '') - inst['instance_type'] = 
kwargs.get('instance_type', 'm1.small') - inst['reservation_id'] = reservation_id - inst['launch_time'] = launch_time - inst['key_data'] = key_data or '' - inst['key_name'] = kwargs.get('key_name', '') - inst['user_id'] = context.user.id - inst['project_id'] = context.project.id - inst['ami_launch_index'] = num - inst['security_group'] = security_group - for (key, value) in allocate_data.iteritems(): - inst[key] = value - - inst.save() - rpc.cast(FLAGS.compute_topic, - {"method": "run_instance", - "args": {"instance_id" : inst.instance_id}}) - logging.debug("Casting to node for %s's instance with IP of %s" % - (context.user.name, inst['private_dns_name'])) - # TODO: Make Network figure out the network name from ip. - defer.returnValue(self._format_instances(context, reservation_id)) - - @rbac.allow('projectmanager', 'sysadmin') - @defer.inlineCallbacks - def terminate_instances(self, context, instance_id, **kwargs): - logging.debug("Going to start terminating instances") - network_topic = yield self._get_network_topic(context) - for i in instance_id: - logging.debug("Going to try and terminate %s" % i) - try: - instance = self._get_instance(context, i) - except exception.NotFound: - logging.warning("Instance %s was not found during terminate" - % i) - continue - elastic_ip = network_model.get_public_ip_for_instance(i) - if elastic_ip: - logging.debug("Disassociating address %s" % elastic_ip) - # NOTE(vish): Right now we don't really care if the ip is - # disassociated. We may need to worry about - # checking this later. Perhaps in the scheduler? - rpc.cast(network_topic, - {"method": "disassociate_elastic_ip", - "args": {"elastic_ip": elastic_ip}}) - - fixed_ip = instance.get('private_dns_name', None) - if fixed_ip: - logging.debug("Deallocating address %s" % fixed_ip) - # NOTE(vish): Right now we don't really care if the ip is - # actually removed. We may need to worry about - # checking this later. Perhaps in the scheduler? - rpc.cast(network_topic, - {"method": "deallocate_fixed_ip", - "args": {"fixed_ip": fixed_ip}}) - - if instance.get('node_name', 'unassigned') != 'unassigned': - # NOTE(joshua?): It's also internal default - rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), - {"method": "terminate_instance", - "args": {"instance_id": i}}) - else: - instance.destroy() - defer.returnValue(True) - - @rbac.allow('projectmanager', 'sysadmin') - def reboot_instances(self, context, instance_id, **kwargs): - """instance_id is a list of instance ids""" - for i in instance_id: - instance = self._get_instance(context, i) - rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), - {"method": "reboot_instance", - "args" : {"instance_id": i}}) - return defer.succeed(True) - - @rbac.allow('projectmanager', 'sysadmin') - def delete_volume(self, context, volume_id, **kwargs): - # TODO: return error if not authorized - volume = self._get_volume(context, volume_id) - volume_node = volume['node_name'] - rpc.cast('%s.%s' % (FLAGS.volume_topic, volume_node), - {"method": "delete_volume", - "args" : {"volume_id": volume_id}}) - return defer.succeed(True) - - @rbac.allow('all') - def describe_images(self, context, image_id=None, **kwargs): - # The objectstore does its own authorization for describe - imageSet = images.list(context, image_id) - return defer.succeed({'imagesSet': imageSet}) - - @rbac.allow('projectmanager', 'sysadmin') - def deregister_image(self, context, image_id, **kwargs): - # FIXME: should the objectstore be doing these authorization checks? 
- images.deregister(context, image_id) - return defer.succeed({'imageId': image_id}) - - @rbac.allow('projectmanager', 'sysadmin') - def register_image(self, context, image_location=None, **kwargs): - # FIXME: should the objectstore be doing these authorization checks? - if image_location is None and kwargs.has_key('name'): - image_location = kwargs['name'] - image_id = images.register(context, image_location) - logging.debug("Registered %s as %s" % (image_location, image_id)) - - return defer.succeed({'imageId': image_id}) - - @rbac.allow('all') - def describe_image_attribute(self, context, image_id, attribute, **kwargs): - if attribute != 'launchPermission': - raise exception.ApiError('attribute not supported: %s' % attribute) - try: - image = images.list(context, image_id)[0] - except IndexError: - raise exception.ApiError('invalid id: %s' % image_id) - result = { 'image_id': image_id, 'launchPermission': [] } - if image['isPublic']: - result['launchPermission'].append({ 'group': 'all' }) - return defer.succeed(result) - - @rbac.allow('projectmanager', 'sysadmin') - def modify_image_attribute(self, context, image_id, attribute, operation_type, **kwargs): - # TODO(devcamcar): Support users and groups other than 'all'. - if attribute != 'launchPermission': - raise exception.ApiError('attribute not supported: %s' % attribute) - if not 'user_group' in kwargs: - raise exception.ApiError('user or group not specified') - if len(kwargs['user_group']) != 1 and kwargs['user_group'][0] != 'all': - raise exception.ApiError('only group "all" is supported') - if not operation_type in ['add', 'remove']: - raise exception.ApiError('operation_type must be add or remove') - result = images.modify(context, image_id, operation_type) - return defer.succeed(result) - - def update_state(self, topic, value): - """ accepts status reports from the queue and consolidates them """ - # TODO(jmc): if an instance has disappeared from - # the node, call instance_death - if topic == "instances": - return defer.succeed(True) - aggregate_state = getattr(self, topic) - node_name = value.keys()[0] - items = value[node_name] - - logging.debug("Updating %s state for %s" % (topic, node_name)) - - for item_id in items.keys(): - if (aggregate_state.has_key('pending') and - aggregate_state['pending'].has_key(item_id)): - del aggregate_state['pending'][item_id] - aggregate_state[node_name] = items - - return defer.succeed(True) diff --git a/nova/endpoint/images.py b/nova/endpoint/images.py deleted file mode 100644 index fe7cb5d11..000000000 --- a/nova/endpoint/images.py +++ /dev/null @@ -1,95 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Proxy AMI-related calls from the cloud controller, to the running -objectstore daemon. 
-""" - -import boto.s3.connection -import json -import urllib - -from nova import flags -from nova import utils -from nova.auth import manager - - -FLAGS = flags.FLAGS - -def modify(context, image_id, operation): - conn(context).make_request( - method='POST', - bucket='_images', - query_args=qs({'image_id': image_id, 'operation': operation})) - - return True - - -def register(context, image_location): - """ rpc call to register a new image based from a manifest """ - - image_id = utils.generate_uid('ami') - conn(context).make_request( - method='PUT', - bucket='_images', - query_args=qs({'image_location': image_location, - 'image_id': image_id})) - - return image_id - -def list(context, filter_list=[]): - """ return a list of all images that a user can see - - optionally filtered by a list of image_id """ - - # FIXME: send along the list of only_images to check for - response = conn(context).make_request( - method='GET', - bucket='_images') - - result = json.loads(response.read()) - if not filter_list is None: - return [i for i in result if i['imageId'] in filter_list] - return result - -def deregister(context, image_id): - """ unregister an image """ - conn(context).make_request( - method='DELETE', - bucket='_images', - query_args=qs({'image_id': image_id})) - -def conn(context): - access = manager.AuthManager().get_access_key(context.user, - context.project) - secret = str(context.user.secret) - calling = boto.s3.connection.OrdinaryCallingFormat() - return boto.s3.connection.S3Connection(aws_access_key_id=access, - aws_secret_access_key=secret, - is_secure=False, - calling_format=calling, - port=FLAGS.s3_port, - host=FLAGS.s3_host) - - -def qs(params): - pairs = [] - for key in params.keys(): - pairs.append(key + '=' + urllib.quote(params[key])) - return '&'.join(pairs) diff --git a/nova/endpoint/rackspace.py b/nova/endpoint/rackspace.py deleted file mode 100644 index b4e6cd823..000000000 --- a/nova/endpoint/rackspace.py +++ /dev/null @@ -1,186 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Rackspace API Endpoint -""" - -import json -import time - -import webob.dec -import webob.exc - -from nova import flags -from nova import rpc -from nova import utils -from nova import wsgi -from nova.auth import manager -from nova.compute import model as compute -from nova.network import model as network - - -FLAGS = flags.FLAGS -flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') - - -class API(wsgi.Middleware): - """Entry point for all requests.""" - - def __init__(self): - super(API, self).__init__(Router(webob.exc.HTTPNotFound())) - - @webob.dec.wsgify - def __call__(self, req): - return self.application - context = {} - if "HTTP_X_AUTH_TOKEN" in req.environ: - context['user'] = manager.AuthManager().get_user_from_access_key( - req.environ['HTTP_X_AUTH_TOKEN']) - if context['user']: - context['project'] = manager.AuthManager().get_project( - context['user'].name) - if "user" not in context: - return webob.exc.HTTPForbidden() - environ['nova.context'] = context - return self.application - - -class Router(wsgi.Router): - """Route requests to the next WSGI application.""" - - def _build_map(self): - """Build routing map for authentication and cloud.""" - self.map.resource("server", "servers", controller=CloudServerAPI()) - #self._connect("/v1.0", controller=AuthenticationAPI()) - #cloud = CloudServerAPI() - #self._connect("/servers", controller=cloud.launch_server, - # conditions={"method": ["POST"]}) - #self._connect("/servers/{server_id}", controller=cloud.delete_server, - # conditions={'method': ["DELETE"]}) - #self._connect("/servers", controller=cloud) - - -class AuthenticationAPI(wsgi.Application): - """Handle all authorization requests through WSGI applications.""" - - @webob.dec.wsgify - def __call__(self, req): # pylint: disable-msg=W0221 - # TODO(todd): make a actual session with a unique token - # just pass the auth key back through for now - res = webob.Response() - res.status = '204 No Content' - res.headers.add('X-Server-Management-Url', req.host_url) - res.headers.add('X-Storage-Url', req.host_url) - res.headers.add('X-CDN-Managment-Url', req.host_url) - res.headers.add('X-Auth-Token', req.headers['X-Auth-Key']) - return res - - -class CloudServerAPI(wsgi.Application): - """Handle all server requests through WSGI applications.""" - - def __init__(self): - super(CloudServerAPI, self).__init__() - self.instdir = compute.InstanceDirectory() - self.network = network.PublicNetworkController() - - @webob.dec.wsgify - def __call__(self, req): # pylint: disable-msg=W0221 - value = {"servers": []} - for inst in self.instdir.all: - value["servers"].append(self.instance_details(inst)) - return json.dumps(value) - - def instance_details(self, inst): # pylint: disable-msg=R0201 - """Build the data structure to represent details for an instance.""" - return { - "id": inst.get("instance_id", None), - "imageId": inst.get("image_id", None), - "flavorId": inst.get("instacne_type", None), - "hostId": inst.get("node_name", None), - "status": inst.get("state", "pending"), - "addresses": { - "public": [network.get_public_ip_for_instance( - inst.get("instance_id", None))], - "private": [inst.get("private_dns_name", None)]}, - - # implemented only by Rackspace, not AWS - "name": inst.get("name", "Not-Specified"), - - # not supported - "progress": "Not-Supported", - "metadata": { - "Server Label": "Not-Supported", - "Image Version": "Not-Supported"}} - - @webob.dec.wsgify - def launch_server(self, req): - """Launch a new instance.""" - data = json.loads(req.body) - 
inst = self.build_server_instance(data, req.environ['nova.context'])
-        rpc.cast(
-            FLAGS.compute_topic, {
-                "method": "run_instance",
-                "args": {"instance_id": inst.instance_id}})
-
-        return json.dumps({"server": self.instance_details(inst)})
-
-    def build_server_instance(self, env, context):
-        """Build instance data structure and save it to the data store."""
-        reservation = utils.generate_uid('r')
-        ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
-        inst = self.instdir.new()
-        inst['name'] = env['server']['name']
-        inst['image_id'] = env['server']['imageId']
-        inst['instance_type'] = env['server']['flavorId']
-        inst['user_id'] = context['user'].id
-        inst['project_id'] = context['project'].id
-        inst['reservation_id'] = reservation
-        inst['launch_time'] = ltime
-        inst['mac_address'] = utils.generate_mac()
-        address = self.network.allocate_ip(
-            inst['user_id'],
-            inst['project_id'],
-            mac=inst['mac_address'])
-        inst['private_dns_name'] = str(address)
-        inst['bridge_name'] = network.BridgedNetwork.get_network_for_project(
-            inst['user_id'],
-            inst['project_id'],
-            'default')['bridge_name']
-        # key_data, key_name, ami_launch_index
-        # TODO(todd): key data or root password
-        inst.save()
-        return inst
-
-    @webob.dec.wsgify
-    @wsgi.route_args
-    def delete_server(self, req, route_args):  # pylint: disable-msg=R0201
-        """Delete an instance."""
-        owner_hostname = None
-        instance = compute.Instance.lookup(route_args['server_id'])
-        if instance:
-            owner_hostname = instance["node_name"]
-        if not owner_hostname:
-            return webob.exc.HTTPNotFound("Did not find image, or it was "
-                                          "not in a running state.")
-        rpc_transport = "%s:%s" % (FLAGS.compute_topic, owner_hostname)
-        rpc.cast(rpc_transport,
-                 {"method": "reboot_instance",
-                  "args": {"instance_id": route_args['server_id']}})
-        req.status = "202 Accepted"
diff --git a/nova/endpoint/rackspace/controllers/base.py b/nova/endpoint/rackspace/controllers/base.py
new file mode 100644
index 000000000..a83925cc3
--- /dev/null
+++ b/nova/endpoint/rackspace/controllers/base.py
@@ -0,0 +1,9 @@
+class BaseController(object):
+    @classmethod
+    def render(cls, instance):
+        # Wrap a list of entities in an {entity_name: [...]} envelope;
+        # return a single entity unchanged.
+        if isinstance(instance, list):
+            return {cls.entity_name: [cls.render(i) for i in instance]}
+        else:
+            return instance
diff --git a/nova/endpoint/rackspace/controllers/flavors.py b/nova/endpoint/rackspace/controllers/flavors.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/nova/endpoint/rackspace/controllers/images.py b/nova/endpoint/rackspace/controllers/images.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/nova/endpoint/rackspace/controllers/servers.py b/nova/endpoint/rackspace/controllers/servers.py
new file mode 100644
index 000000000..af6c958bb
--- /dev/null
+++ b/nova/endpoint/rackspace/controllers/servers.py
@@ -0,0 +1,90 @@
+import time
+
+from nova import flags
+from nova import rpc
+from nova import utils
+from nova.compute import model as compute
+from nova.network import model as network
+from nova.endpoint.rackspace import BaseController
+
+FLAGS = flags.FLAGS
+
+
+class ServerNotFound(Exception):
+    """The requested server was not found."""
+    pass
+
+
+class ServersController(BaseController):
+    entity_name = 'servers'
+
+    def __init__(self):
+        raise NotImplementedError(
+            "You may not create an instance of this class")
+
+    @classmethod
+    def index(cls):
+        # TODO: port instance_details() over from the old endpoint; return
+        # the raw instance records until then.
+        return [inst for inst in compute.InstanceDirectory().all]
+
+    @classmethod
+    def show(cls, **kwargs):
+        instance_id = kwargs['id']
+        return compute.InstanceDirectory().get(instance_id)
+
+    @classmethod
+    def delete(cls, **kwargs):
+        instance_id = kwargs['id']
+        instance = compute.InstanceDirectory().get(instance_id)
+        if not instance:
+            raise ServerNotFound("The requested server was not found")
+        instance.destroy()
+        return True
+
+    @classmethod
+    def create(cls, **kwargs):
+        # build_server_instance() reads the 'server', 'user', and 'project'
+        # keys, so pass the whole kwargs dict through.
+        inst = cls.build_server_instance(kwargs)
+        rpc.cast(
+            FLAGS.compute_topic, {
+                "method": "run_instance",
+                "args": {"instance_id": inst.instance_id}})
+
+    @classmethod
+    def update(cls, **kwargs):
+        instance_id = kwargs['id']
+        instance = compute.InstanceDirectory().get(instance_id)
+        if not instance:
+            raise ServerNotFound("The requested server was not found")
+        instance.update(kwargs['server'])
+        instance.save()
+
+    @classmethod
+    def build_server_instance(cls, env):
+        """Build instance data structure and save it to the data store."""
+        reservation = utils.generate_uid('r')
+        ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
+        inst = compute.InstanceDirectory().new()
+        inst['name'] = env['server']['name']
+        inst['image_id'] = env['server']['imageId']
+        inst['instance_type'] = env['server']['flavorId']
+        inst['user_id'] = env['user']['id']
+        inst['project_id'] = env['project']['id']
+        inst['reservation_id'] = reservation
+        inst['launch_time'] = ltime
+        inst['mac_address'] = utils.generate_mac()
+        address = network.PublicNetworkController().allocate_ip(
+            inst['user_id'],
+            inst['project_id'],
+            mac=inst['mac_address'])
+        inst['private_dns_name'] = str(address)
+        inst['bridge_name'] = network.BridgedNetwork.get_network_for_project(
+            inst['user_id'],
+            inst['project_id'],
+            'default')['bridge_name']
+        # key_data, key_name, ami_launch_index
+        # TODO(todd): key data or root password
+        inst.save()
+        return inst
diff --git a/nova/endpoint/rackspace/controllers/shared_ip_groups.py b/nova/endpoint/rackspace/controllers/shared_ip_groups.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/nova/endpoint/rackspace/rackspace.py b/nova/endpoint/rackspace/rackspace.py
new file mode 100644
index 000000000..75b828e91
--- /dev/null
+++ b/nova/endpoint/rackspace/rackspace.py
@@ -0,0 +1,183 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Rackspace API Endpoint
+"""
+
+import json
+import time
+
+import webob.dec
+import webob.exc
+
+from nova import flags
+from nova import rpc
+from nova import utils
+from nova import wsgi
+from nova.auth import manager
+from nova.compute import model as compute
+from nova.network import model as network
+
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on')
+
+
+class API(wsgi.Middleware):
+    """Entry point for all requests."""
+
+    def __init__(self):
+        super(API, self).__init__(Router(webob.exc.HTTPNotFound()))
+
+    def __call__(self, environ, start_response):
+        context = {}
+        if "HTTP_X_AUTH_TOKEN" in environ:
+            context['user'] = manager.AuthManager().get_user_from_access_key(
+                environ['HTTP_X_AUTH_TOKEN'])
+            if context['user']:
+                context['project'] = manager.AuthManager().get_project(
+                    context['user'].name)
+        if not context.get('user'):
+            return webob.exc.HTTPForbidden()(environ, start_response)
+        environ['nova.context'] = context
+        return self.application(environ, start_response)
+
+
+class Router(wsgi.Router):
+    """Route requests to the next WSGI application."""
+
+    def _build_map(self):
+        """Build routing map for authentication and cloud."""
+        self._connect("/v1.0", controller=AuthenticationAPI())
+        cloud = CloudServerAPI()
+        self._connect("/servers", controller=cloud.launch_server,
+                      conditions={"method": ["POST"]})
+        self._connect("/servers/{server_id}", controller=cloud.delete_server,
+                      conditions={'method': ["DELETE"]})
+        self._connect("/servers", controller=cloud)
+
+
+class AuthenticationAPI(wsgi.Application):
+    """Handle all authorization requests through WSGI applications."""
+
+    @webob.dec.wsgify
+    def __call__(self, req):  # pylint: disable-msg=W0221
+        # TODO(todd): make an actual session with a unique token
+        # just pass the auth key back through for now
+        res = webob.Response()
+        res.status = '204 No Content'
+        res.headers.add('X-Server-Management-Url', req.host_url)
+        res.headers.add('X-Storage-Url', req.host_url)
+        res.headers.add('X-CDN-Management-Url', req.host_url)
+        res.headers.add('X-Auth-Token', req.headers['X-Auth-Key'])
+        return res
+
+
+class CloudServerAPI(wsgi.Application):
+    """Handle all server requests through WSGI applications."""
+
+    def __init__(self):
+        super(CloudServerAPI, self).__init__()
+        self.instdir = compute.InstanceDirectory()
+        self.network = network.PublicNetworkController()
+
+    @webob.dec.wsgify
+    def __call__(self, req):  # pylint: disable-msg=W0221
+        value = {"servers": []}
+        for inst in self.instdir.all:
+            value["servers"].append(self.instance_details(inst))
+        return json.dumps(value)
+
+    def instance_details(self, inst):  # pylint: disable-msg=R0201
+        """Build the data structure to represent details for an instance."""
+        return {
+            "id": inst.get("instance_id", None),
+            "imageId": inst.get("image_id", None),
+            "flavorId": inst.get("instance_type", None),
+            "hostId": inst.get("node_name", None),
+            "status": inst.get("state", "pending"),
+            "addresses": {
+                "public": [network.get_public_ip_for_instance(
+                    inst.get("instance_id", None))],
+                "private": [inst.get("private_dns_name", None)]},
+
+            # implemented only by Rackspace, not AWS
+            "name": inst.get("name", "Not-Specified"),
+
+            # not supported
+            "progress": "Not-Supported",
+            "metadata": {
+                "Server Label": "Not-Supported",
+                "Image Version": "Not-Supported"}}
+
+    @webob.dec.wsgify
+    def launch_server(self, req):
+        """Launch a new instance."""
+        data = json.loads(req.body)
+        inst = self.build_server_instance(data,
+                                          req.environ['nova.context'])
+        rpc.cast(
+            FLAGS.compute_topic, {
+                "method": "run_instance",
+                "args": {"instance_id": inst.instance_id}})
+
+        return json.dumps({"server": self.instance_details(inst)})
+
+    def build_server_instance(self, env, context):
+        """Build instance data structure and save it to the data store."""
+        reservation = utils.generate_uid('r')
+        ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
+        inst = self.instdir.new()
+        inst['name'] = env['server']['name']
+        inst['image_id'] = env['server']['imageId']
+        inst['instance_type'] = env['server']['flavorId']
+        inst['user_id'] = context['user'].id
+        inst['project_id'] = context['project'].id
+        inst['reservation_id'] = reservation
+        inst['launch_time'] = ltime
+        inst['mac_address'] = utils.generate_mac()
+        address = self.network.allocate_ip(
+            inst['user_id'],
+            inst['project_id'],
+            mac=inst['mac_address'])
+        inst['private_dns_name'] = str(address)
+        inst['bridge_name'] = network.BridgedNetwork.get_network_for_project(
+            inst['user_id'],
+            inst['project_id'],
+            'default')['bridge_name']
+        # key_data, key_name, ami_launch_index
+        # TODO(todd): key data or root password
+        inst.save()
+        return inst
+
+    @webob.dec.wsgify
+    @wsgi.route_args
+    def delete_server(self, req, route_args):  # pylint: disable-msg=R0201
+        """Delete an instance."""
+        owner_hostname = None
+        instance = compute.Instance.lookup(route_args['server_id'])
+        if instance:
+            owner_hostname = instance["node_name"]
+        if not owner_hostname:
+            return webob.exc.HTTPNotFound("Did not find server, or it was "
+                                          "not in a running state.")
+        rpc_transport = "%s.%s" % (FLAGS.compute_topic, owner_hostname)
+        rpc.cast(rpc_transport,
+                 {"method": "terminate_instance",
+                  "args": {"instance_id": route_args['server_id']}})
+        req.status = "202 Accepted"
-- cgit
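
The rpc convention used by both the EC2-style cloud controller and the new
Rackspace controllers above is the same throughout: a message is a dict with
"method" and "args" keys, cast either onto a shared topic (e.g.
FLAGS.compute_topic) or onto a single host's queue, addressed as
'<topic>.<node_name>'. The following is a minimal sketch of a caller under
those assumptions; it is not part of the patches, and the function name and
node_name value are illustrative only.

    from nova import flags
    from nova import rpc

    FLAGS = flags.FLAGS


    def reboot_on_host(instance_id, node_name):
        # Address the compute worker on one host: '<topic>.<node_name>',
        # e.g. 'compute.node-1'. Real node names come from the instance
        # record's 'node_name' field.
        topic = '%s.%s' % (FLAGS.compute_topic, node_name)
        # Fire-and-forget: the worker dispatches to the named method with
        # the given keyword arguments.
        rpc.cast(topic, {"method": "reboot_instance",
                         "args": {"instance_id": instance_id}})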