From f5e19272844f2f0d2c72bf55a2bdf533f40d1ea5 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Thu, 22 Jul 2010 12:28:47 -0700 Subject: Check exit codes when spawning processes by default --- bin/nova-manage | 2 +- nova/objectstore/image.py | 6 +++--- nova/utils.py | 11 ++++++++--- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index 56f89ce30..61ac86db6 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -56,7 +56,7 @@ class VpnCommands(object): vpn = self.__vpn_for(project.id) if vpn: - out, err = utils.execute("ping -c1 -w1 %s > /dev/null; echo $?" % vpn['private_dns_name']) + out, err = utils.execute("ping -c1 -w1 %s > /dev/null; echo $?" % vpn['private_dns_name'], check_exit_code=False) if out.strip() == '0': net = 'up' else: diff --git a/nova/objectstore/image.py b/nova/objectstore/image.py index bea2e9637..b98de276c 100644 --- a/nova/objectstore/image.py +++ b/nova/objectstore/image.py @@ -227,13 +227,13 @@ class Image(object): @staticmethod def decrypt_image(encrypted_filename, encrypted_key, encrypted_iv, cloud_private_key, decrypted_filename): - key, err = utils.execute('openssl rsautl -decrypt -inkey %s' % cloud_private_key, encrypted_key) + key, err = utils.execute('openssl rsautl -decrypt -inkey %s' % cloud_private_key, encrypted_key, check_exit_code=False) if err: raise exception.Error("Failed to decrypt private key: %s" % err) - iv, err = utils.execute('openssl rsautl -decrypt -inkey %s' % cloud_private_key, encrypted_iv) + iv, err = utils.execute('openssl rsautl -decrypt -inkey %s' % cloud_private_key, encrypted_iv, check_exit_code=False) if err: raise exception.Error("Failed to decrypt initialization vector: %s" % err) - out, err = utils.execute('openssl enc -d -aes-128-cbc -in %s -K %s -iv %s -out %s' % (encrypted_filename, key, iv, decrypted_filename)) + out, err = utils.execute('openssl enc -d -aes-128-cbc -in %s -K %s -iv %s -out %s' % (encrypted_filename, key, iv, 
decrypted_filename), check_exit_code=False) if err: raise exception.Error("Failed to decrypt image file %s : %s" % (encrypted_filename, err)) diff --git a/nova/utils.py b/nova/utils.py index 9ecceafe0..d01c33042 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -45,7 +45,7 @@ def fetchfile(url, target): # fp.close() execute("curl %s -o %s" % (url, target)) -def execute(cmd, input=None, addl_env=None): +def execute(cmd, input=None, addl_env=None, check_exit_code=True): env = os.environ.copy() if addl_env: env.update(addl_env) @@ -59,6 +59,8 @@ def execute(cmd, input=None, addl_env=None): obj.stdin.close() if obj.returncode: logging.debug("Result was %s" % (obj.returncode)) + if check_exit_code and obj.returncode <> 0: + raise Exception("Unexpected exit code: %s. result=%s" % (obj.returncode, result)) return result @@ -84,9 +86,12 @@ def debug(arg): return arg -def runthis(prompt, cmd): +def runthis(prompt, cmd, check_exit_code = True): logging.debug("Running %s" % (cmd)) - logging.debug(prompt % (subprocess.call(cmd.split(" ")))) + exit_code = subprocess.call(cmd.split(" ")) + logging.debug(prompt % (exit_code)) + if check_exit_code and exit_code <> 0: + raise Exception("Unexpected exit code: %s from cmd: %s" % (exit_code, cmd)) def generate_uid(topic, size=8): -- cgit From 8dad7d1d45599880571689d62857cb437dea182e Mon Sep 17 00:00:00 2001 From: "Joel Moore joelbm24@gmail.com" <> Date: Tue, 27 Jul 2010 15:35:20 -0700 Subject: fixed path to keys directory --- nova/endpoint/cloud.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 8c6c05566..9d81d3bba 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -86,7 +86,7 @@ class CloudController(object): """ Ensure the keychains and folders exist. 
""" # Create keys folder, if it doesn't exist if not os.path.exists(FLAGS.keys_path): - os.makedirs(os.path.abspath(FLAGS.keys_path)) + os.makedirs(FLAGS.keys_path) # Gen root CA, if we don't have one root_ca_path = os.path.join(FLAGS.ca_path, FLAGS.ca_file) if not os.path.exists(root_ca_path): -- cgit From 93aee19fa2f24c4f9c1fd59c0666e024c6891565 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Thu, 29 Jul 2010 14:48:10 -0700 Subject: Added --fail argument to curl invocations, so that HTTP request fails get surfaced as non-zero exit codes --- bin/nova-import-canonical-imagestore | 6 +++--- nova/cloudpipe/bootscript.sh | 4 ++-- nova/utils.py | 2 +- nova/virt/images.py | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/bin/nova-import-canonical-imagestore b/bin/nova-import-canonical-imagestore index 2e79f09b7..8106cc5ca 100755 --- a/bin/nova-import-canonical-imagestore +++ b/bin/nova-import-canonical-imagestore @@ -59,21 +59,21 @@ def download(img): for f in img['files']: if f['kind'] == 'kernel': dest = os.path.join(tempdir, 'kernel') - subprocess.call(['curl', f['url'], '-o', dest]) + subprocess.call(['curl', '--fail', f['url'], '-o', dest]) kernel_id = image.Image.add(dest, description='kernel/' + img['title'], kernel=True) for f in img['files']: if f['kind'] == 'ramdisk': dest = os.path.join(tempdir, 'ramdisk') - subprocess.call(['curl', f['url'], '-o', dest]) + subprocess.call(['curl', '--fail', f['url'], '-o', dest]) ramdisk_id = image.Image.add(dest, description='ramdisk/' + img['title'], ramdisk=True) for f in img['files']: if f['kind'] == 'image': dest = os.path.join(tempdir, 'image') - subprocess.call(['curl', f['url'], '-o', dest]) + subprocess.call(['curl', '--fail', f['url'], '-o', dest]) ramdisk_id = image.Image.add(dest, description=img['title'], kernel=kernel_id, ramdisk=ramdisk_id) diff --git a/nova/cloudpipe/bootscript.sh b/nova/cloudpipe/bootscript.sh index 82ec2012a..30d9ad102 100755 --- a/nova/cloudpipe/bootscript.sh 
+++ b/nova/cloudpipe/bootscript.sh @@ -44,8 +44,8 @@ CSRTEXT=$(python -c "import urllib; print urllib.quote('''$CSRTEXT''')") # SIGN the csr and save as server.crt # CURL fetch to the supervisor, POSTing the CSR text, saving the result as the CRT file -curl $SUPERVISOR -d "cert=$CSRTEXT" > /etc/openvpn/server.crt -curl $SUPERVISOR/getca/ > /etc/openvpn/ca.crt +curl --fail $SUPERVISOR -d "cert=$CSRTEXT" > /etc/openvpn/server.crt +curl --fail $SUPERVISOR/getca/ > /etc/openvpn/ca.crt # Customize the server.conf.template cd /etc/openvpn diff --git a/nova/utils.py b/nova/utils.py index fd30f1f2d..74c7c021c 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -53,7 +53,7 @@ def fetchfile(url, target): # c.perform() # c.close() # fp.close() - execute("curl %s -o %s" % (url, target)) + execute("curl --fail %s -o %s" % (url, target)) def execute(cmd, input=None, addl_env=None, check_exit_code=True): env = os.environ.copy() diff --git a/nova/virt/images.py b/nova/virt/images.py index 92210e242..75fd1625c 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -54,7 +54,7 @@ def _fetch_s3_image(image, path, user): auth = signer.Signer(user.secret.encode()).s3_authorization(headers, 'GET', uri) headers['Authorization'] = 'AWS %s:%s' % (user.access, auth) - cmd = ['/usr/bin/curl', '--silent', url] + cmd = ['/usr/bin/curl', '--fail', '--silent', url] for (k,v) in headers.iteritems(): cmd += ['-H', '%s: %s' % (k,v)] -- cgit From 3897047a2c0f8906c99418ddad6e2c68f0dec5c7 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Fri, 30 Jul 2010 12:05:32 -0700 Subject: Added exit code checking to process.py (twisted process utils). A bit of class refactoring to make it work & cleaner. Also added some more instructive messages to install_venv.py, because otherwise people that don't know what they're doing will install the wrong pip... i.e. 
I did :-) --- nova/process.py | 90 ++++++++++++++++-------------------------- nova/tests/process_unittest.py | 2 +- nova/virt/libvirt_conn.py | 2 +- tools/install_venv.py | 15 ++++--- 4 files changed, 44 insertions(+), 65 deletions(-) diff --git a/nova/process.py b/nova/process.py index 2dc56372f..24ea3eb7f 100644 --- a/nova/process.py +++ b/nova/process.py @@ -54,19 +54,20 @@ class UnexpectedErrorOutput(IOError): IOError.__init__(self, "got stdout: %r\nstderr: %r" % (stdout, stderr)) -# NOTE(termie): this too -class _BackRelay(protocol.ProcessProtocol): +# This is based on _BackRelay from twister.internal.utils, but modified to capture +# both stdout and stderr without odd stderr handling, and also to handle stdin +class BackRelayWithInput(protocol.ProcessProtocol): """ Trivial protocol for communicating with a process and turning its output into the result of a L{Deferred}. @ivar deferred: A L{Deferred} which will be called back with all of stdout - and, if C{errortoo} is true, all of stderr as well (mixed together in - one string). If C{errortoo} is false and any bytes are received over - stderr, this will fire with an L{_UnexpectedErrorOutput} instance and - the attribute will be set to C{None}. + and all of stderr as well (as a tuple). C{terminate_on_stderr} is true + and any bytes are received over stderr, this will fire with an + L{_UnexpectedErrorOutput} instance and the attribute will be set to + C{None}. - @ivar onProcessEnded: If C{errortoo} is false and bytes are received over + @ivar onProcessEnded: If C{terminate_on_stderr} is false and bytes are received over stderr, this attribute will refer to a L{Deferred} which will be called back when the process ends. This C{Deferred} is also associated with the L{_UnexpectedErrorOutput} which C{deferred} fires with earlier in @@ -74,52 +75,43 @@ class _BackRelay(protocol.ProcessProtocol): ended, in addition to knowing when bytes have been received via stderr. 
""" - def __init__(self, deferred, errortoo=0): + def __init__(self, deferred, startedDeferred=None, terminate_on_stderr=False, + check_exit_code=True, input=None): self.deferred = deferred - self.s = StringIO.StringIO() - if errortoo: - self.errReceived = self.errReceivedIsGood - else: - self.errReceived = self.errReceivedIsBad - - def errReceivedIsBad(self, text): - if self.deferred is not None: + self.stdout = StringIO.StringIO() + self.stderr = StringIO.StringIO() + self.startedDeferred = startedDeferred + self.terminate_on_stderr = terminate_on_stderr + self.check_exit_code = check_exit_code + self.input = input + + def errReceived(self, text): + self.sterr.write(text) + if self.terminate_on_stderr and (self.deferred is not None): self.onProcessEnded = defer.Deferred() - err = UnexpectedErrorOutput(text, self.onProcessEnded) - self.deferred.errback(failure.Failure(err)) + self.deferred.errback(UnexpectedErrorOutput(stdout=self.stdout.getvalue(), stderr=self.stderr.getvalue())) self.deferred = None self.transport.loseConnection() - def errReceivedIsGood(self, text): - self.s.write(text) + def errReceived(self, text): + self.stderr.write(text) def outReceived(self, text): - self.s.write(text) + self.stdout.write(text) def processEnded(self, reason): if self.deferred is not None: - self.deferred.callback(self.s.getvalue()) + stdout, stderr = self.stdout.getvalue(), self.stderr.getvalue() + try: + if self.check_exit_code: + reason.trap(error.ProcessDone) + self.deferred.callback((stdout, stderr)) + except: + self.deferred.errback(UnexpectedErrorOutput(stdout, stderr)) elif self.onProcessEnded is not None: self.onProcessEnded.errback(reason) -class BackRelayWithInput(_BackRelay): - def __init__(self, deferred, startedDeferred=None, error_ok=0, - input=None): - # Twisted doesn't use new-style classes in most places :( - _BackRelay.__init__(self, deferred, errortoo=error_ok) - self.error_ok = error_ok - self.input = input - self.stderr = StringIO.StringIO() - 
self.startedDeferred = startedDeferred - - def errReceivedIsBad(self, text): - self.stderr.write(text) - self.transport.loseConnection() - - def errReceivedIsGood(self, text): - self.stderr.write(text) - def connectionMade(self): if self.startedDeferred: self.startedDeferred.callback(self) @@ -127,31 +119,15 @@ class BackRelayWithInput(_BackRelay): self.transport.write(self.input) self.transport.closeStdin() - def processEnded(self, reason): - if self.deferred is not None: - stdout, stderr = self.s.getvalue(), self.stderr.getvalue() - try: - # NOTE(termie): current behavior means if error_ok is True - # we won't throw an error even if the process - # exited with a non-0 status, so you can't be - # okay with stderr output and not with bad exit - # codes. - if not self.error_ok: - reason.trap(error.ProcessDone) - self.deferred.callback((stdout, stderr)) - except: - self.deferred.errback(UnexpectedErrorOutput(stdout, stderr)) - - def getProcessOutput(executable, args=None, env=None, path=None, reactor=None, - error_ok=0, input=None, startedDeferred=None): + check_exit_code=True, input=None, startedDeferred=None): if reactor is None: from twisted.internet import reactor args = args and args or () env = env and env and {} d = defer.Deferred() p = BackRelayWithInput( - d, startedDeferred=startedDeferred, error_ok=error_ok, input=input) + d, startedDeferred=startedDeferred, check_exit_code=check_exit_code, input=input) # NOTE(vish): commands come in as unicode, but self.executes needs # strings or process.spawn raises a deprecation warning executable = str(executable) diff --git a/nova/tests/process_unittest.py b/nova/tests/process_unittest.py index 75187e1fc..25c60c616 100644 --- a/nova/tests/process_unittest.py +++ b/nova/tests/process_unittest.py @@ -48,7 +48,7 @@ class ProcessTestCase(test.TrialTestCase): def test_execute_stderr(self): pool = process.ProcessPool(2) - d = pool.simple_execute('cat BAD_FILE', error_ok=1) + d = pool.simple_execute('cat BAD_FILE', 
check_exit_code=False) def _check(rv): self.assertEqual(rv[0], '') self.assert_('No such file' in rv[1]) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index c545e4190..6cb9acb29 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -197,7 +197,7 @@ class LibvirtConnection(object): execute = lambda cmd, input=None: \ process.simple_execute(cmd=cmd, input=input, - error_ok=1) + check_exit_code=True) key = data['key_data'] net = None diff --git a/tools/install_venv.py b/tools/install_venv.py index 0b35fc8e9..b9eac70e6 100644 --- a/tools/install_venv.py +++ b/tools/install_venv.py @@ -18,7 +18,7 @@ def die(message, *args): sys.exit(1) -def run_command(cmd, redirect_output=True, error_ok=False): +def run_command(cmd, redirect_output=True, check_exit_code=True): # Useful for debugging: #print >>sys.stderr, ' '.join(cmd) if redirect_output: @@ -28,23 +28,26 @@ def run_command(cmd, redirect_output=True, error_ok=False): proc = subprocess.Popen(cmd, stdout=stdout) output = proc.communicate()[0] - if not error_ok and proc.returncode != 0: + if check_exit_code and proc.returncode != 0: die('Command "%s" failed.\n%s', ' '.join(cmd), output) return output def check_dependencies(): """Make sure pip and virtualenv are on the path.""" + # Perl also has a pip program. Hopefully the user has installed the right one! print 'Checking for pip...', - if not run_command(['which', 'pip']).strip(): + if not run_command(['which', 'pip'], check_exit_code=False).strip(): die('ERROR: pip not found.\n\nNova development requires pip,' - ' please install it using your favorite package management tool') + ' please install it using your favorite package management tool ' + ' (e.g. "sudo apt-get install python-pip")') print 'done.' 
print 'Checking for virtualenv...', - if not run_command(['which', 'virtualenv']).strip(): + if not run_command(['which', 'virtualenv'], check_exit_code=False).strip(): die('ERROR: virtualenv not found.\n\nNova development requires virtualenv,' - ' please install it using your favorite package management tool') + ' please install it using your favorite package management tool ' + ' (e.g. "sudo easy_install virtualenv")') print 'done.' -- cgit From 40b2bbcfe6274aca9fd4361c56b2b042ba22e3c2 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Mon, 2 Aug 2010 08:31:19 +0100 Subject: Turn the private _image_url(path) into a public image_url(image). This will be used by virt.xenapi to instruct xapi as to which images to download. As part of this, the value returned became a complete URL, with http:// on the front. This caused the URL parsing to be adjusted. --- nova/virt/images.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/nova/virt/images.py b/nova/virt/images.py index 92210e242..698536324 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -23,6 +23,7 @@ Handling of VM disk images. import os.path import time +import urlparse from nova import flags from nova import process @@ -42,7 +43,7 @@ def fetch(image, path, user): return f(image, path, user) def _fetch_s3_image(image, path, user): - url = _image_url('%s/image' % image) + url = image_url(image) # This should probably move somewhere else, like e.g. 
a download_as # method on User objects and at the same time get rewritten to use @@ -50,8 +51,8 @@ def _fetch_s3_image(image, path, user): headers = {} headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) - uri = '/' + url.partition('/')[2] - auth = signer.Signer(user.secret.encode()).s3_authorization(headers, 'GET', uri) + (_, _, url_path, _, _, _) = urlparse.urlparse(url) + auth = signer.Signer(user.secret.encode()).s3_authorization(headers, 'GET', url_path) headers['Authorization'] = 'AWS %s:%s' % (user.access, auth) cmd = ['/usr/bin/curl', '--silent', url] @@ -68,5 +69,6 @@ def _fetch_local_image(image, path, _): def _image_path(path): return os.path.join(FLAGS.images_path, path) -def _image_url(path): - return "%s:%s/_images/%s" % (FLAGS.s3_host, FLAGS.s3_port, path) +def image_url(image): + return "http://%s:%s/_images/%s/image" % (FLAGS.s3_host, FLAGS.s3_port, + image) -- cgit From 4c8ae5e0a5b30039075a87ba39aec6da64fdd138 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Tue, 3 Aug 2010 00:52:06 +0100 Subject: Added a xapi plugin that can pull images from nova-objectstore, and use that to get a disk, kernel, and ramdisk for the VM. The VM actually boots! --- nova/virt/xenapi.py | 105 ++++++++++++- xenapi/README | 2 + xenapi/etc/xapi.d/plugins/objectstore | 231 ++++++++++++++++++++++++++++ xenapi/etc/xapi.d/plugins/pluginlib_nova.py | 216 ++++++++++++++++++++++++++ 4 files changed, 547 insertions(+), 7 deletions(-) create mode 100644 xenapi/README create mode 100644 xenapi/etc/xapi.d/plugins/objectstore create mode 100755 xenapi/etc/xapi.d/plugins/pluginlib_nova.py diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index dc372e3e3..b84e55138 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -19,6 +19,7 @@ A connection to XenServer or Xen Cloud Platform. 
""" import logging +import xmlrpclib from twisted.internet import defer from twisted.internet import task @@ -26,7 +27,9 @@ from twisted.internet import task from nova import exception from nova import flags from nova import process +from nova.auth.manager import AuthManager from nova.compute import power_state +from nova.virt import images XenAPI = None @@ -71,10 +74,26 @@ class XenAPIConnection(object): @defer.inlineCallbacks @exception.wrap_exception def spawn(self, instance): - vm = self.lookup(instance.name) + vm = yield self.lookup(instance.name) if vm is not None: raise Exception('Attempted to create non-unique name %s' % instance.name) + + user = AuthManager().get_user(instance.datamodel['user_id']) + vdi_uuid = yield self.fetch_image( + instance.datamodel['image_id'], user, True) + kernel = yield self.fetch_image( + instance.datamodel['kernel_id'], user, False) + ramdisk = yield self.fetch_image( + instance.datamodel['ramdisk_id'], user, False) + vdi_ref = yield self._conn.xenapi.VDI.get_by_uuid(vdi_uuid) + + vm_ref = yield self.create_vm(instance, kernel, ramdisk) + yield self.create_vbd(vm_ref, vdi_ref, 0, True) + yield self._conn.xenapi.VM.start(vm_ref, False, False) + + + def create_vm(self, instance, kernel, ramdisk): mem = str(long(instance.datamodel['memory_kb']) * 1024) vcpus = str(instance.datamodel['vcpus']) rec = { @@ -92,9 +111,9 @@ class XenAPIConnection(object): 'actions_after_reboot': 'restart', 'actions_after_crash': 'destroy', 'PV_bootloader': '', - 'PV_kernel': instance.datamodel['kernel_id'], - 'PV_ramdisk': instance.datamodel['ramdisk_id'], - 'PV_args': '', + 'PV_kernel': kernel, + 'PV_ramdisk': ramdisk, + 'PV_args': 'root=/dev/xvda1', 'PV_bootloader_args': '', 'PV_legacy_args': '', 'HVM_boot_policy': '', @@ -106,8 +125,48 @@ class XenAPIConnection(object): 'user_version': '0', 'other_config': {}, } - vm = yield self._conn.xenapi.VM.create(rec) - #yield self._conn.xenapi.VM.start(vm, False, False) + logging.debug('Created VM %s...', 
instance.name) + vm_ref = self._conn.xenapi.VM.create(rec) + logging.debug('Created VM %s as %s.', instance.name, vm_ref) + return vm_ref + + + def create_vbd(self, vm_ref, vdi_ref, userdevice, bootable): + vbd_rec = {} + vbd_rec['VM'] = vm_ref + vbd_rec['VDI'] = vdi_ref + vbd_rec['userdevice'] = str(userdevice) + vbd_rec['bootable'] = bootable + vbd_rec['mode'] = 'RW' + vbd_rec['type'] = 'disk' + vbd_rec['unpluggable'] = True + vbd_rec['empty'] = False + vbd_rec['other_config'] = {} + vbd_rec['qos_algorithm_type'] = '' + vbd_rec['qos_algorithm_params'] = {} + vbd_rec['qos_supported_algorithms'] = [] + logging.debug('Creating VBD for VM %s, VDI %s ... ', vm_ref, vdi_ref) + vbd_ref = self._conn.xenapi.VBD.create(vbd_rec) + logging.debug('Created VBD %s for VM %s, VDI %s.', vbd_ref, vm_ref, + vdi_ref) + return vbd_ref + + + def fetch_image(self, image, user, use_sr): + """use_sr: True to put the image as a VDI in an SR, False to place + it on dom0's filesystem. The former is for VM disks, the latter for + its kernel and ramdisk (if external kernels are being used).""" + + url = images.image_url(image) + logging.debug("Asking xapi to fetch %s as %s" % (url, user.access)) + fn = use_sr and 'get_vdi' or 'get_kernel' + args = {} + args['src_url'] = url + args['username'] = user.access + args['password'] = user.secret + if use_sr: + args['add_partition'] = 'true' + return self._call_plugin('objectstore', fn, args) def reboot(self, instance): @@ -143,10 +202,42 @@ class XenAPIConnection(object): else: return vms[0] + + def _call_plugin(self, plugin, fn, args): + return _unwrap_plugin_exceptions( + self._conn.xenapi.host.call_plugin, + self._get_xenapi_host(), plugin, fn, args) + + + def _get_xenapi_host(self): + return self._conn.xenapi.session.get_this_host(self._conn.handle) + + power_state_from_xenapi = { - 'Halted' : power_state.RUNNING, #FIXME + 'Halted' : power_state.SHUTDOWN, 'Running' : power_state.RUNNING, 'Paused' : power_state.PAUSED, 'Suspended': 
power_state.SHUTDOWN, # FIXME 'Crashed' : power_state.CRASHED } + + +def _unwrap_plugin_exceptions(func, *args, **kwargs): + try: + return func(*args, **kwargs) + except XenAPI.Failure, exn: + logging.debug("Got exception: %s", exn) + if (len(exn.details) == 4 and + exn.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and + exn.details[2] == 'Failure'): + params = None + try: + params = eval(exn.details[3]) + except: + raise exn + raise XenAPI.Failure(params) + else: + raise + except xmlrpclib.ProtocolError, exn: + logging.debug("Got exception: %s", exn) + raise diff --git a/xenapi/README b/xenapi/README new file mode 100644 index 000000000..1fc67aa7a --- /dev/null +++ b/xenapi/README @@ -0,0 +1,2 @@ +This directory contains files that are required for the XenAPI support. They +should be installed in the XenServer / Xen Cloud Platform domain 0. diff --git a/xenapi/etc/xapi.d/plugins/objectstore b/xenapi/etc/xapi.d/plugins/objectstore new file mode 100644 index 000000000..271e7337f --- /dev/null +++ b/xenapi/etc/xapi.d/plugins/objectstore @@ -0,0 +1,231 @@ +#!/usr/bin/env python + +# Copyright (c) 2010 Citrix Systems, Inc. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# +# XenAPI plugin for fetching images from nova-objectstore. 
+# + +import base64 +import errno +import hmac +import os +import os.path +import sha +import time +import urlparse + +import XenAPIPlugin + +from pluginlib_nova import * +configure_logging('objectstore') + + +KERNEL_DIR = '/boot/guest' + +DOWNLOAD_CHUNK_SIZE = 2 * 1024 * 1024 +SECTOR_SIZE = 512 +MBR_SIZE_SECTORS = 63 +MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE + + +def get_vdi(session, args): + src_url = exists(args, 'src_url') + username = exists(args, 'username') + password = exists(args, 'password') + add_partition = validate_bool(args, 'add_partition', 'false') + + (proto, netloc, url_path, _, _, _) = urlparse.urlparse(src_url) + + sr = find_sr(session) + if sr is None: + raise Exception('Cannot find SR to write VDI to') + + virtual_size = \ + get_content_length(proto, netloc, url_path, username, password) + if virtual_size < 0: + raise Exception('Cannot get VDI size') + + vdi_size = virtual_size + if add_partition: + # Make room for MBR. + vdi_size += MBR_SIZE_BYTES + + vdi = create_vdi(session, sr, src_url, vdi_size, False) + with_vdi_in_dom0(session, vdi, False, + lambda dev: get_vdi_(proto, netloc, url_path, + username, password, add_partition, + virtual_size, '/dev/%s' % dev)) + return session.xenapi.VDI.get_uuid(vdi) + + +def get_vdi_(proto, netloc, url_path, username, password, add_partition, + virtual_size, dest): + + if add_partition: + write_partition(virtual_size, dest) + + offset = add_partition and MBR_SIZE_BYTES or 0 + get(proto, netloc, url_path, username, password, dest, offset) + + +def write_partition(virtual_size, dest): + mbr_last = MBR_SIZE_SECTORS - 1 + primary_first = MBR_SIZE_SECTORS + primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1 + + logging.debug('Writing partition table %d %d to %s...', + primary_first, primary_last, dest) + + result = os.system('parted --script %s mklabel msdos' % dest) + if result != 0: + raise Exception('Failed to mklabel') + result = os.system('parted --script %s mkpart primary %ds %ds' 
% + (dest, primary_first, primary_last)) + if result != 0: + raise Exception('Failed to mkpart') + + logging.debug('Writing partition table %s done.', dest) + + +def find_sr(session): + host = get_this_host(session) + srs = session.xenapi.SR.get_all() + for sr in srs: + sr_rec = session.xenapi.SR.get_record(sr) + if not ('i18n-key' in sr_rec['other_config'] and + sr_rec['other_config']['i18n-key'] == 'local-storage'): + continue + for pbd in sr_rec['PBDs']: + pbd_rec = session.xenapi.PBD.get_record(pbd) + if pbd_rec['host'] == host: + return sr + return None + + +def get_kernel(session, args): + src_url = exists(args, 'src_url') + username = exists(args, 'username') + password = exists(args, 'password') + + (proto, netloc, url_path, _, _, _) = urlparse.urlparse(src_url) + + dest = os.path.join(KERNEL_DIR, url_path[1:]) + + # Paranoid check against people using ../ to do rude things. + if os.path.commonprefix([KERNEL_DIR, dest]) != KERNEL_DIR: + raise Exception('Illegal destination %s %s', (url_path, dest)) + + dirname = os.path.dirname(dest) + try: + os.makedirs(dirname) + except os.error, e: + if e.errno != errno.EEXIST: + raise + if not os.path.isdir(dirname): + raise Exception('Cannot make directory %s', dirname) + + try: + os.remove(dest) + except: + pass + + get(proto, netloc, url_path, username, password, dest, 0) + + return dest + + +def get_content_length(proto, netloc, url_path, username, password): + headers = make_headers('HEAD', url_path, username, password) + return with_http_connection( + proto, netloc, + lambda conn: get_content_length_(url_path, headers, conn)) + + +def get_content_length_(url_path, headers, conn): + conn.request('HEAD', url_path, None, headers) + response = conn.getresponse() + if response.status != 200: + raise Exception('%d %s' % (response.status, response.reason)) + + return long(response.getheader('Content-Length', -1)) + + +def get(proto, netloc, url_path, username, password, dest, offset): + headers = make_headers('GET', 
url_path, username, password) + download(proto, netloc, url_path, headers, dest, offset) + + +def make_headers(verb, url_path, username, password): + headers = {} + headers['Date'] = \ + time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) + headers['Authorization'] = \ + 'AWS %s:%s' % (username, + s3_authorization(verb, url_path, password, headers)) + return headers + + +def s3_authorization(verb, path, password, headers): + sha1 = hmac.new(password, digestmod=sha) + sha1.update(plaintext(verb, path, headers)) + return base64.encodestring(sha1.digest()).strip() + + +def plaintext(verb, path, headers): + return '%s\n\n\n%s\n%s' % (verb, + "\n".join([headers[h] for h in headers]), + path) + + +def download(proto, netloc, url_path, headers, dest, offset): + with_http_connection( + proto, netloc, + lambda conn: download_(url_path, dest, offset, headers, conn)) + + +def download_(url_path, dest, offset, headers, conn): + conn.request('GET', url_path, None, headers) + response = conn.getresponse() + if response.status != 200: + raise Exception('%d %s' % (response.status, response.reason)) + + length = response.getheader('Content-Length', -1) + + with_file( + dest, 'a', + lambda dest_file: download_all(response, length, dest_file, offset)) + + +def download_all(response, length, dest_file, offset): + dest_file.seek(offset) + i = 0 + while True: + buf = response.read(DOWNLOAD_CHUNK_SIZE) + if buf: + dest_file.write(buf) + else: + return + i += len(buf) + if length != -1 and i >= length: + return + + +if __name__ == '__main__': + XenAPIPlugin.dispatch({'get_vdi': get_vdi, + 'get_kernel': get_kernel}) diff --git a/xenapi/etc/xapi.d/plugins/pluginlib_nova.py b/xenapi/etc/xapi.d/plugins/pluginlib_nova.py new file mode 100755 index 000000000..2d323a016 --- /dev/null +++ b/xenapi/etc/xapi.d/plugins/pluginlib_nova.py @@ -0,0 +1,216 @@ +# Copyright (c) 2010 Citrix Systems, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# +# Helper functions for the Nova xapi plugins. In time, this will merge +# with the pluginlib.py shipped with xapi, but for now, that file is not +# very stable, so it's easiest just to have a copy of all the functions +# that we need. +# + +import httplib +import logging +import logging.handlers +import re +import time + + +##### Logging setup + +def configure_logging(name): + log = logging.getLogger() + log.setLevel(logging.DEBUG) + sysh = logging.handlers.SysLogHandler('/dev/log') + sysh.setLevel(logging.DEBUG) + formatter = logging.Formatter('%s: %%(levelname)-8s %%(message)s' % name) + sysh.setFormatter(formatter) + log.addHandler(sysh) + + +##### Exceptions + +class PluginError(Exception): + """Base Exception class for all plugin errors.""" + def __init__(self, *args): + Exception.__init__(self, *args) + +class ArgumentError(PluginError): + """Raised when required arguments are missing, argument values are invalid, + or incompatible arguments are given. 
+ """ + def __init__(self, *args): + PluginError.__init__(self, *args) + + +##### Helpers + +def ignore_failure(func, *args, **kwargs): + try: + return func(*args, **kwargs) + except XenAPI.Failure, e: + logging.error('Ignoring XenAPI.Failure %s', e) + return None + + +##### Argument validation + +ARGUMENT_PATTERN = re.compile(r'^[a-zA-Z0-9_:\.\-,]+$') + +def validate_exists(args, key, default=None): + """Validates that a string argument to a RPC method call is given, and + matches the shell-safe regex, with an optional default value in case it + does not exist. + + Returns the string. + """ + if key in args: + if len(args[key]) == 0: + raise ArgumentError('Argument %r value %r is too short.' % (key, args[key])) + if not ARGUMENT_PATTERN.match(args[key]): + raise ArgumentError('Argument %r value %r contains invalid characters.' % (key, args[key])) + if args[key][0] == '-': + raise ArgumentError('Argument %r value %r starts with a hyphen.' % (key, args[key])) + return args[key] + elif default is not None: + return default + else: + raise ArgumentError('Argument %s is required.' % key) + +def validate_bool(args, key, default=None): + """Validates that a string argument to a RPC method call is a boolean string, + with an optional default value in case it does not exist. + + Returns the python boolean value. + """ + value = validate_exists(args, key, default) + if value.lower() == 'true': + return True + elif value.lower() == 'false': + return False + else: + raise ArgumentError("Argument %s may not take value %r. Valid values are ['true', 'false']." % (key, value)) + +def exists(args, key): + """Validates that a freeform string argument to a RPC method call is given. + Returns the string. + """ + if key in args: + return args[key] + else: + raise ArgumentError('Argument %s is required.' 
% key) + +def optional(args, key): + """If the given key is in args, return the corresponding value, otherwise + return None""" + return key in args and args[key] or None + + +def get_this_host(session): + return session.xenapi.session.get_this_host(session.handle) + + +def get_domain_0(session): + this_host_ref = get_this_host(session) + expr = 'field "is_control_domain" = "true" and field "resident_on" = "%s"' % this_host_ref + return session.xenapi.VM.get_all_records_where(expr).keys()[0] + + +def create_vdi(session, sr_ref, name_label, virtual_size, read_only): + vdi_ref = session.xenapi.VDI.create( + { 'name_label': name_label, + 'name_description': '', + 'SR': sr_ref, + 'virtual_size': str(virtual_size), + 'type': 'User', + 'sharable': False, + 'read_only': read_only, + 'xenstore_data': {}, + 'other_config': {}, + 'sm_config': {}, + 'tags': [] }) + logging.debug('Created VDI %s (%s, %s, %s) on %s.', vdi_ref, name_label, + virtual_size, read_only, sr_ref) + return vdi_ref + + +def with_vdi_in_dom0(session, vdi, read_only, f): + dom0 = get_domain_0(session) + vbd_rec = {} + vbd_rec['VM'] = dom0 + vbd_rec['VDI'] = vdi + vbd_rec['userdevice'] = 'autodetect' + vbd_rec['bootable'] = False + vbd_rec['mode'] = read_only and 'RO' or 'RW' + vbd_rec['type'] = 'disk' + vbd_rec['unpluggable'] = True + vbd_rec['empty'] = False + vbd_rec['other_config'] = {} + vbd_rec['qos_algorithm_type'] = '' + vbd_rec['qos_algorithm_params'] = {} + vbd_rec['qos_supported_algorithms'] = [] + logging.debug('Creating VBD for VDI %s ... ', vdi) + vbd = session.xenapi.VBD.create(vbd_rec) + logging.debug('Creating VBD for VDI %s done.', vdi) + try: + logging.debug('Plugging VBD %s ... ', vbd) + session.xenapi.VBD.plug(vbd) + logging.debug('Plugging VBD %s done.', vbd) + return f(session.xenapi.VBD.get_device(vbd)) + finally: + logging.debug('Destroying VBD for VDI %s ... 
', vdi) + vbd_unplug_with_retry(session, vbd) + ignore_failure(session.xenapi.VBD.destroy, vbd) + logging.debug('Destroying VBD for VDI %s done.', vdi) + + +def vbd_unplug_with_retry(session, vbd): + """Call VBD.unplug on the given VBD, with a retry if we get + DEVICE_DETACH_REJECTED. For reasons which I don't understand, we're + seeing the device still in use, even when all processes using the device + should be dead.""" + while True: + try: + session.xenapi.VBD.unplug(vbd) + logging.debug('VBD.unplug successful first time.') + return + except XenAPI.Failure, e: + if (len(e.details) > 0 and + e.details[0] == 'DEVICE_DETACH_REJECTED'): + logging.debug('VBD.unplug rejected: retrying...') + time.sleep(1) + elif (len(e.details) > 0 and + e.details[0] == 'DEVICE_ALREADY_DETACHED'): + logging.debug('VBD.unplug successful eventually.') + return + else: + logging.error('Ignoring XenAPI.Failure in VBD.unplug: %s', e) + return + + +def with_http_connection(proto, netloc, f): + conn = (proto == 'https' and + httplib.HTTPSConnection(netloc) or + httplib.HTTPConnection(netloc)) + try: + return f(conn) + finally: + conn.close() + + +def with_file(dest_path, mode, f): + dest = open(dest_path, mode) + try: + return f(dest) + finally: + dest.close() -- cgit From b31d4f795dbd94bae2c3d8f01aea3b15ed9684b2 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Tue, 3 Aug 2010 15:37:31 +0100 Subject: Define __contains__ on BasicModel, so that we can use "x in datamodel". 
--- nova/datastore.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nova/datastore.py b/nova/datastore.py index 9c2592334..f6c11d2c9 100644 --- a/nova/datastore.py +++ b/nova/datastore.py @@ -168,6 +168,9 @@ class BasicModel(object): def setdefault(self, item, default): return self.state.setdefault(item, default) + def __contains__(self, item): + return item in self.state + def __getitem__(self, item): return self.state[item] -- cgit From 89e057cf2f008ebb7ec1c99605ff99f5849d9b40 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Tue, 3 Aug 2010 15:41:35 +0100 Subject: Implement VIF creation. --- nova/virt/xenapi.py | 57 ++++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 50 insertions(+), 7 deletions(-) diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index b84e55138..b4768cffa 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -79,6 +79,18 @@ class XenAPIConnection(object): raise Exception('Attempted to create non-unique name %s' % instance.name) + if 'bridge_name' in instance.datamodel: + network_ref = \ + yield self._find_network_with_bridge( + instance.datamodel['bridge_name']) + else: + network_ref = None + + if 'mac_address' in instance.datamodel: + mac_address = instance.datamodel['mac_address'] + else: + mac_address = '' + user = AuthManager().get_user(instance.datamodel['user_id']) vdi_uuid = yield self.fetch_image( instance.datamodel['image_id'], user, True) @@ -90,6 +102,8 @@ class XenAPIConnection(object): vm_ref = yield self.create_vm(instance, kernel, ramdisk) yield self.create_vbd(vm_ref, vdi_ref, 0, True) + if network_ref: + yield self._create_vif(vm_ref, network_ref, mac_address) yield self._conn.xenapi.VM.start(vm_ref, False, False) @@ -152,6 +166,35 @@ class XenAPIConnection(object): return vbd_ref + def _create_vif(self, vm_ref, network_ref, mac_address): + vif_rec = {} + vif_rec['device'] = '0' + vif_rec['network']= network_ref + vif_rec['VM'] = vm_ref + vif_rec['MAC'] = mac_address + vif_rec['MTU'] = 
'1500' + vif_rec['other_config'] = {} + vif_rec['qos_algorithm_type'] = '' + vif_rec['qos_algorithm_params'] = {} + logging.debug('Creating VIF for VM %s, network %s ... ', vm_ref, + network_ref) + vif_ref = self._conn.xenapi.VIF.create(vif_rec) + logging.debug('Created VIF %s for VM %s, network %s.', vif_ref, + vm_ref, network_ref) + return vif_ref + + + def _find_network_with_bridge(self, bridge): + expr = 'field "bridge" = "%s"' % bridge + networks = self._conn.xenapi.network.get_all_records_where(expr) + if len(networks) == 1: + return networks.keys()[0] + elif len(networks) > 1: + raise Exception('Found non-unique network for bridge %s' % bridge) + else: + raise Exception('Found no network for bridge %s' % bridge) + + def fetch_image(self, image, user, use_sr): """use_sr: True to put the image as a VDI in an SR, False to place it on dom0's filesystem. The former is for VM disks, the latter for @@ -213,13 +256,13 @@ class XenAPIConnection(object): return self._conn.xenapi.session.get_this_host(self._conn.handle) - power_state_from_xenapi = { - 'Halted' : power_state.SHUTDOWN, - 'Running' : power_state.RUNNING, - 'Paused' : power_state.PAUSED, - 'Suspended': power_state.SHUTDOWN, # FIXME - 'Crashed' : power_state.CRASHED - } +power_state_from_xenapi = { + 'Halted' : power_state.SHUTDOWN, + 'Running' : power_state.RUNNING, + 'Paused' : power_state.PAUSED, + 'Suspended': power_state.SHUTDOWN, # FIXME + 'Crashed' : power_state.CRASHED +} def _unwrap_plugin_exceptions(func, *args, **kwargs): -- cgit From 035f93aa7dc19656bf22de9b7ccfe12b28cde61b Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Tue, 3 Aug 2010 15:42:17 +0100 Subject: Fix exception in get_info. 
--- nova/virt/xenapi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index b4768cffa..c3e84c2b9 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -227,7 +227,7 @@ class XenAPIConnection(object): def get_info(self, instance_id): vm = self.lookup(instance_id) if vm is None: - raise Exception('instance not present %s' % instance.name) + raise Exception('instance not present %s' % instance_id) rec = self._conn.xenapi.VM.get_record(vm) return {'state': power_state_from_xenapi[rec['power_state']], 'max_mem': long(rec['memory_static_max']) >> 10, -- cgit From 8d70245cc78075356ec1ebabc4810df8b07428f6 Mon Sep 17 00:00:00 2001 From: "Joel Moore joelbm24@gmail.com" <> Date: Tue, 3 Aug 2010 11:02:58 -0700 Subject: Get IP doesn't fail of you not connected to the intetnet ------------- This line and the following will be ignored -------------- modified: nova/utils.py unknown: CA/cacert.pem CA/index.txt CA/openssl.cnf CA/serial CA/private/cakey.pem bin/nova@ --- nova/utils.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/nova/utils.py b/nova/utils.py index 0016b656e..da6efd39a 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -119,11 +119,15 @@ def get_my_ip(): ''' if getattr(FLAGS, 'fake_tests', None): return '127.0.0.1' - csock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - csock.connect(('www.google.com', 80)) - (addr, port) = csock.getsockname() - csock.close() - return addr + try: + csock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + csock.connect(('www.google.com', 80)) + (addr, port) = csock.getsockname() + csock.close() + return addr + except socket.gaierror as ex: + logging.warn("Couldn't get IP, using 127.0.0.1 %s", ex) + return "127.0.0.1" def isotime(at=None): if not at: -- cgit From 6187529119ab51a6df7e30ef5190757ee0feca5e Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 3 Aug 2010 15:04:38 -0700 Subject: vblade commands randomly 
toss stuff into stderr, ignore it --- nova/volume/service.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/nova/volume/service.py b/nova/volume/service.py index e12f675a7..9dd63e88f 100644 --- a/nova/volume/service.py +++ b/nova/volume/service.py @@ -227,11 +227,7 @@ class Volume(datastore.BasicModel): @defer.inlineCallbacks def destroy(self): - try: - yield self._remove_export() - except Exception as ex: - logging.debug("Ingnoring failure to remove export %s" % ex) - pass + yield self._remove_export() yield self._delete_lv() super(Volume, self).destroy() @@ -250,7 +246,7 @@ class Volume(datastore.BasicModel): def _delete_lv(self): yield process.simple_execute( "sudo lvremove -f %s/%s" % (FLAGS.volume_group, - self['volume_id'])) + self['volume_id']), error_ok=1) @defer.inlineCallbacks def _setup_export(self): @@ -275,10 +271,10 @@ class Volume(datastore.BasicModel): def _remove_export(self): yield process.simple_execute( "sudo vblade-persist stop %s %s" % (self['shelf_id'], - self['blade_id'])) + self['blade_id']), error_ok=1) yield process.simple_execute( "sudo vblade-persist destroy %s %s" % (self['shelf_id'], - self['blade_id'])) + self['blade_id']), error_ok=1) class FakeVolume(Volume): -- cgit From 24d5113636a92df386fa076cc89cea5b1c8b2580 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Wed, 4 Aug 2010 11:14:11 +0100 Subject: Added note to README. --- xenapi/README | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/xenapi/README b/xenapi/README index 1fc67aa7a..fbd471035 100644 --- a/xenapi/README +++ b/xenapi/README @@ -1,2 +1,6 @@ This directory contains files that are required for the XenAPI support. They should be installed in the XenServer / Xen Cloud Platform domain 0. 
+ +Also, you need to + +chmod u+x /etc/xapi.d/plugins/objectstore -- cgit From 4130a506900c833dba831cabbd0197b7d4b59dc0 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Wed, 4 Aug 2010 23:45:41 +0100 Subject: Move the xenapi top level directory under plugins, as suggested by Jay Pipes. --- plugins/xenapi/README | 6 + plugins/xenapi/etc/xapi.d/plugins/objectstore | 231 +++++++++++++++++++++ .../xenapi/etc/xapi.d/plugins/pluginlib_nova.py | 216 +++++++++++++++++++ xenapi/README | 6 - xenapi/etc/xapi.d/plugins/objectstore | 231 --------------------- xenapi/etc/xapi.d/plugins/pluginlib_nova.py | 216 ------------------- 6 files changed, 453 insertions(+), 453 deletions(-) create mode 100644 plugins/xenapi/README create mode 100644 plugins/xenapi/etc/xapi.d/plugins/objectstore create mode 100755 plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py delete mode 100644 xenapi/README delete mode 100644 xenapi/etc/xapi.d/plugins/objectstore delete mode 100755 xenapi/etc/xapi.d/plugins/pluginlib_nova.py diff --git a/plugins/xenapi/README b/plugins/xenapi/README new file mode 100644 index 000000000..fbd471035 --- /dev/null +++ b/plugins/xenapi/README @@ -0,0 +1,6 @@ +This directory contains files that are required for the XenAPI support. They +should be installed in the XenServer / Xen Cloud Platform domain 0. + +Also, you need to + +chmod u+x /etc/xapi.d/plugins/objectstore diff --git a/plugins/xenapi/etc/xapi.d/plugins/objectstore b/plugins/xenapi/etc/xapi.d/plugins/objectstore new file mode 100644 index 000000000..271e7337f --- /dev/null +++ b/plugins/xenapi/etc/xapi.d/plugins/objectstore @@ -0,0 +1,231 @@ +#!/usr/bin/env python + +# Copyright (c) 2010 Citrix Systems, Inc. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# +# XenAPI plugin for fetching images from nova-objectstore. +# + +import base64 +import errno +import hmac +import os +import os.path +import sha +import time +import urlparse + +import XenAPIPlugin + +from pluginlib_nova import * +configure_logging('objectstore') + + +KERNEL_DIR = '/boot/guest' + +DOWNLOAD_CHUNK_SIZE = 2 * 1024 * 1024 +SECTOR_SIZE = 512 +MBR_SIZE_SECTORS = 63 +MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE + + +def get_vdi(session, args): + src_url = exists(args, 'src_url') + username = exists(args, 'username') + password = exists(args, 'password') + add_partition = validate_bool(args, 'add_partition', 'false') + + (proto, netloc, url_path, _, _, _) = urlparse.urlparse(src_url) + + sr = find_sr(session) + if sr is None: + raise Exception('Cannot find SR to write VDI to') + + virtual_size = \ + get_content_length(proto, netloc, url_path, username, password) + if virtual_size < 0: + raise Exception('Cannot get VDI size') + + vdi_size = virtual_size + if add_partition: + # Make room for MBR. 
+ vdi_size += MBR_SIZE_BYTES + + vdi = create_vdi(session, sr, src_url, vdi_size, False) + with_vdi_in_dom0(session, vdi, False, + lambda dev: get_vdi_(proto, netloc, url_path, + username, password, add_partition, + virtual_size, '/dev/%s' % dev)) + return session.xenapi.VDI.get_uuid(vdi) + + +def get_vdi_(proto, netloc, url_path, username, password, add_partition, + virtual_size, dest): + + if add_partition: + write_partition(virtual_size, dest) + + offset = add_partition and MBR_SIZE_BYTES or 0 + get(proto, netloc, url_path, username, password, dest, offset) + + +def write_partition(virtual_size, dest): + mbr_last = MBR_SIZE_SECTORS - 1 + primary_first = MBR_SIZE_SECTORS + primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1 + + logging.debug('Writing partition table %d %d to %s...', + primary_first, primary_last, dest) + + result = os.system('parted --script %s mklabel msdos' % dest) + if result != 0: + raise Exception('Failed to mklabel') + result = os.system('parted --script %s mkpart primary %ds %ds' % + (dest, primary_first, primary_last)) + if result != 0: + raise Exception('Failed to mkpart') + + logging.debug('Writing partition table %s done.', dest) + + +def find_sr(session): + host = get_this_host(session) + srs = session.xenapi.SR.get_all() + for sr in srs: + sr_rec = session.xenapi.SR.get_record(sr) + if not ('i18n-key' in sr_rec['other_config'] and + sr_rec['other_config']['i18n-key'] == 'local-storage'): + continue + for pbd in sr_rec['PBDs']: + pbd_rec = session.xenapi.PBD.get_record(pbd) + if pbd_rec['host'] == host: + return sr + return None + + +def get_kernel(session, args): + src_url = exists(args, 'src_url') + username = exists(args, 'username') + password = exists(args, 'password') + + (proto, netloc, url_path, _, _, _) = urlparse.urlparse(src_url) + + dest = os.path.join(KERNEL_DIR, url_path[1:]) + + # Paranoid check against people using ../ to do rude things. 
+ if os.path.commonprefix([KERNEL_DIR, dest]) != KERNEL_DIR: + raise Exception('Illegal destination %s %s', (url_path, dest)) + + dirname = os.path.dirname(dest) + try: + os.makedirs(dirname) + except os.error, e: + if e.errno != errno.EEXIST: + raise + if not os.path.isdir(dirname): + raise Exception('Cannot make directory %s', dirname) + + try: + os.remove(dest) + except: + pass + + get(proto, netloc, url_path, username, password, dest, 0) + + return dest + + +def get_content_length(proto, netloc, url_path, username, password): + headers = make_headers('HEAD', url_path, username, password) + return with_http_connection( + proto, netloc, + lambda conn: get_content_length_(url_path, headers, conn)) + + +def get_content_length_(url_path, headers, conn): + conn.request('HEAD', url_path, None, headers) + response = conn.getresponse() + if response.status != 200: + raise Exception('%d %s' % (response.status, response.reason)) + + return long(response.getheader('Content-Length', -1)) + + +def get(proto, netloc, url_path, username, password, dest, offset): + headers = make_headers('GET', url_path, username, password) + download(proto, netloc, url_path, headers, dest, offset) + + +def make_headers(verb, url_path, username, password): + headers = {} + headers['Date'] = \ + time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) + headers['Authorization'] = \ + 'AWS %s:%s' % (username, + s3_authorization(verb, url_path, password, headers)) + return headers + + +def s3_authorization(verb, path, password, headers): + sha1 = hmac.new(password, digestmod=sha) + sha1.update(plaintext(verb, path, headers)) + return base64.encodestring(sha1.digest()).strip() + + +def plaintext(verb, path, headers): + return '%s\n\n\n%s\n%s' % (verb, + "\n".join([headers[h] for h in headers]), + path) + + +def download(proto, netloc, url_path, headers, dest, offset): + with_http_connection( + proto, netloc, + lambda conn: download_(url_path, dest, offset, headers, conn)) + + +def 
download_(url_path, dest, offset, headers, conn): + conn.request('GET', url_path, None, headers) + response = conn.getresponse() + if response.status != 200: + raise Exception('%d %s' % (response.status, response.reason)) + + length = response.getheader('Content-Length', -1) + + with_file( + dest, 'a', + lambda dest_file: download_all(response, length, dest_file, offset)) + + +def download_all(response, length, dest_file, offset): + dest_file.seek(offset) + i = 0 + while True: + buf = response.read(DOWNLOAD_CHUNK_SIZE) + if buf: + dest_file.write(buf) + else: + return + i += len(buf) + if length != -1 and i >= length: + return + + +if __name__ == '__main__': + XenAPIPlugin.dispatch({'get_vdi': get_vdi, + 'get_kernel': get_kernel}) diff --git a/plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py b/plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py new file mode 100755 index 000000000..2d323a016 --- /dev/null +++ b/plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py @@ -0,0 +1,216 @@ +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# +# Helper functions for the Nova xapi plugins. In time, this will merge +# with the pluginlib.py shipped with xapi, but for now, that file is not +# very stable, so it's easiest just to have a copy of all the functions +# that we need. 
+# + +import httplib +import logging +import logging.handlers +import re +import time + + +##### Logging setup + +def configure_logging(name): + log = logging.getLogger() + log.setLevel(logging.DEBUG) + sysh = logging.handlers.SysLogHandler('/dev/log') + sysh.setLevel(logging.DEBUG) + formatter = logging.Formatter('%s: %%(levelname)-8s %%(message)s' % name) + sysh.setFormatter(formatter) + log.addHandler(sysh) + + +##### Exceptions + +class PluginError(Exception): + """Base Exception class for all plugin errors.""" + def __init__(self, *args): + Exception.__init__(self, *args) + +class ArgumentError(PluginError): + """Raised when required arguments are missing, argument values are invalid, + or incompatible arguments are given. + """ + def __init__(self, *args): + PluginError.__init__(self, *args) + + +##### Helpers + +def ignore_failure(func, *args, **kwargs): + try: + return func(*args, **kwargs) + except XenAPI.Failure, e: + logging.error('Ignoring XenAPI.Failure %s', e) + return None + + +##### Argument validation + +ARGUMENT_PATTERN = re.compile(r'^[a-zA-Z0-9_:\.\-,]+$') + +def validate_exists(args, key, default=None): + """Validates that a string argument to a RPC method call is given, and + matches the shell-safe regex, with an optional default value in case it + does not exist. + + Returns the string. + """ + if key in args: + if len(args[key]) == 0: + raise ArgumentError('Argument %r value %r is too short.' % (key, args[key])) + if not ARGUMENT_PATTERN.match(args[key]): + raise ArgumentError('Argument %r value %r contains invalid characters.' % (key, args[key])) + if args[key][0] == '-': + raise ArgumentError('Argument %r value %r starts with a hyphen.' % (key, args[key])) + return args[key] + elif default is not None: + return default + else: + raise ArgumentError('Argument %s is required.' 
% key) + +def validate_bool(args, key, default=None): + """Validates that a string argument to a RPC method call is a boolean string, + with an optional default value in case it does not exist. + + Returns the python boolean value. + """ + value = validate_exists(args, key, default) + if value.lower() == 'true': + return True + elif value.lower() == 'false': + return False + else: + raise ArgumentError("Argument %s may not take value %r. Valid values are ['true', 'false']." % (key, value)) + +def exists(args, key): + """Validates that a freeform string argument to a RPC method call is given. + Returns the string. + """ + if key in args: + return args[key] + else: + raise ArgumentError('Argument %s is required.' % key) + +def optional(args, key): + """If the given key is in args, return the corresponding value, otherwise + return None""" + return key in args and args[key] or None + + +def get_this_host(session): + return session.xenapi.session.get_this_host(session.handle) + + +def get_domain_0(session): + this_host_ref = get_this_host(session) + expr = 'field "is_control_domain" = "true" and field "resident_on" = "%s"' % this_host_ref + return session.xenapi.VM.get_all_records_where(expr).keys()[0] + + +def create_vdi(session, sr_ref, name_label, virtual_size, read_only): + vdi_ref = session.xenapi.VDI.create( + { 'name_label': name_label, + 'name_description': '', + 'SR': sr_ref, + 'virtual_size': str(virtual_size), + 'type': 'User', + 'sharable': False, + 'read_only': read_only, + 'xenstore_data': {}, + 'other_config': {}, + 'sm_config': {}, + 'tags': [] }) + logging.debug('Created VDI %s (%s, %s, %s) on %s.', vdi_ref, name_label, + virtual_size, read_only, sr_ref) + return vdi_ref + + +def with_vdi_in_dom0(session, vdi, read_only, f): + dom0 = get_domain_0(session) + vbd_rec = {} + vbd_rec['VM'] = dom0 + vbd_rec['VDI'] = vdi + vbd_rec['userdevice'] = 'autodetect' + vbd_rec['bootable'] = False + vbd_rec['mode'] = read_only and 'RO' or 'RW' + vbd_rec['type'] = 
'disk' + vbd_rec['unpluggable'] = True + vbd_rec['empty'] = False + vbd_rec['other_config'] = {} + vbd_rec['qos_algorithm_type'] = '' + vbd_rec['qos_algorithm_params'] = {} + vbd_rec['qos_supported_algorithms'] = [] + logging.debug('Creating VBD for VDI %s ... ', vdi) + vbd = session.xenapi.VBD.create(vbd_rec) + logging.debug('Creating VBD for VDI %s done.', vdi) + try: + logging.debug('Plugging VBD %s ... ', vbd) + session.xenapi.VBD.plug(vbd) + logging.debug('Plugging VBD %s done.', vbd) + return f(session.xenapi.VBD.get_device(vbd)) + finally: + logging.debug('Destroying VBD for VDI %s ... ', vdi) + vbd_unplug_with_retry(session, vbd) + ignore_failure(session.xenapi.VBD.destroy, vbd) + logging.debug('Destroying VBD for VDI %s done.', vdi) + + +def vbd_unplug_with_retry(session, vbd): + """Call VBD.unplug on the given VBD, with a retry if we get + DEVICE_DETACH_REJECTED. For reasons which I don't understand, we're + seeing the device still in use, even when all processes using the device + should be dead.""" + while True: + try: + session.xenapi.VBD.unplug(vbd) + logging.debug('VBD.unplug successful first time.') + return + except XenAPI.Failure, e: + if (len(e.details) > 0 and + e.details[0] == 'DEVICE_DETACH_REJECTED'): + logging.debug('VBD.unplug rejected: retrying...') + time.sleep(1) + elif (len(e.details) > 0 and + e.details[0] == 'DEVICE_ALREADY_DETACHED'): + logging.debug('VBD.unplug successful eventually.') + return + else: + logging.error('Ignoring XenAPI.Failure in VBD.unplug: %s', e) + return + + +def with_http_connection(proto, netloc, f): + conn = (proto == 'https' and + httplib.HTTPSConnection(netloc) or + httplib.HTTPConnection(netloc)) + try: + return f(conn) + finally: + conn.close() + + +def with_file(dest_path, mode, f): + dest = open(dest_path, mode) + try: + return f(dest) + finally: + dest.close() diff --git a/xenapi/README b/xenapi/README deleted file mode 100644 index fbd471035..000000000 --- a/xenapi/README +++ /dev/null @@ -1,6 +0,0 @@ 
-This directory contains files that are required for the XenAPI support. They -should be installed in the XenServer / Xen Cloud Platform domain 0. - -Also, you need to - -chmod u+x /etc/xapi.d/plugins/objectstore diff --git a/xenapi/etc/xapi.d/plugins/objectstore b/xenapi/etc/xapi.d/plugins/objectstore deleted file mode 100644 index 271e7337f..000000000 --- a/xenapi/etc/xapi.d/plugins/objectstore +++ /dev/null @@ -1,231 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2010 Citrix Systems, Inc. -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# -# XenAPI plugin for fetching images from nova-objectstore. 
-# - -import base64 -import errno -import hmac -import os -import os.path -import sha -import time -import urlparse - -import XenAPIPlugin - -from pluginlib_nova import * -configure_logging('objectstore') - - -KERNEL_DIR = '/boot/guest' - -DOWNLOAD_CHUNK_SIZE = 2 * 1024 * 1024 -SECTOR_SIZE = 512 -MBR_SIZE_SECTORS = 63 -MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE - - -def get_vdi(session, args): - src_url = exists(args, 'src_url') - username = exists(args, 'username') - password = exists(args, 'password') - add_partition = validate_bool(args, 'add_partition', 'false') - - (proto, netloc, url_path, _, _, _) = urlparse.urlparse(src_url) - - sr = find_sr(session) - if sr is None: - raise Exception('Cannot find SR to write VDI to') - - virtual_size = \ - get_content_length(proto, netloc, url_path, username, password) - if virtual_size < 0: - raise Exception('Cannot get VDI size') - - vdi_size = virtual_size - if add_partition: - # Make room for MBR. - vdi_size += MBR_SIZE_BYTES - - vdi = create_vdi(session, sr, src_url, vdi_size, False) - with_vdi_in_dom0(session, vdi, False, - lambda dev: get_vdi_(proto, netloc, url_path, - username, password, add_partition, - virtual_size, '/dev/%s' % dev)) - return session.xenapi.VDI.get_uuid(vdi) - - -def get_vdi_(proto, netloc, url_path, username, password, add_partition, - virtual_size, dest): - - if add_partition: - write_partition(virtual_size, dest) - - offset = add_partition and MBR_SIZE_BYTES or 0 - get(proto, netloc, url_path, username, password, dest, offset) - - -def write_partition(virtual_size, dest): - mbr_last = MBR_SIZE_SECTORS - 1 - primary_first = MBR_SIZE_SECTORS - primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1 - - logging.debug('Writing partition table %d %d to %s...', - primary_first, primary_last, dest) - - result = os.system('parted --script %s mklabel msdos' % dest) - if result != 0: - raise Exception('Failed to mklabel') - result = os.system('parted --script %s mkpart primary %ds %ds' 
% - (dest, primary_first, primary_last)) - if result != 0: - raise Exception('Failed to mkpart') - - logging.debug('Writing partition table %s done.', dest) - - -def find_sr(session): - host = get_this_host(session) - srs = session.xenapi.SR.get_all() - for sr in srs: - sr_rec = session.xenapi.SR.get_record(sr) - if not ('i18n-key' in sr_rec['other_config'] and - sr_rec['other_config']['i18n-key'] == 'local-storage'): - continue - for pbd in sr_rec['PBDs']: - pbd_rec = session.xenapi.PBD.get_record(pbd) - if pbd_rec['host'] == host: - return sr - return None - - -def get_kernel(session, args): - src_url = exists(args, 'src_url') - username = exists(args, 'username') - password = exists(args, 'password') - - (proto, netloc, url_path, _, _, _) = urlparse.urlparse(src_url) - - dest = os.path.join(KERNEL_DIR, url_path[1:]) - - # Paranoid check against people using ../ to do rude things. - if os.path.commonprefix([KERNEL_DIR, dest]) != KERNEL_DIR: - raise Exception('Illegal destination %s %s', (url_path, dest)) - - dirname = os.path.dirname(dest) - try: - os.makedirs(dirname) - except os.error, e: - if e.errno != errno.EEXIST: - raise - if not os.path.isdir(dirname): - raise Exception('Cannot make directory %s', dirname) - - try: - os.remove(dest) - except: - pass - - get(proto, netloc, url_path, username, password, dest, 0) - - return dest - - -def get_content_length(proto, netloc, url_path, username, password): - headers = make_headers('HEAD', url_path, username, password) - return with_http_connection( - proto, netloc, - lambda conn: get_content_length_(url_path, headers, conn)) - - -def get_content_length_(url_path, headers, conn): - conn.request('HEAD', url_path, None, headers) - response = conn.getresponse() - if response.status != 200: - raise Exception('%d %s' % (response.status, response.reason)) - - return long(response.getheader('Content-Length', -1)) - - -def get(proto, netloc, url_path, username, password, dest, offset): - headers = make_headers('GET', 
url_path, username, password) - download(proto, netloc, url_path, headers, dest, offset) - - -def make_headers(verb, url_path, username, password): - headers = {} - headers['Date'] = \ - time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) - headers['Authorization'] = \ - 'AWS %s:%s' % (username, - s3_authorization(verb, url_path, password, headers)) - return headers - - -def s3_authorization(verb, path, password, headers): - sha1 = hmac.new(password, digestmod=sha) - sha1.update(plaintext(verb, path, headers)) - return base64.encodestring(sha1.digest()).strip() - - -def plaintext(verb, path, headers): - return '%s\n\n\n%s\n%s' % (verb, - "\n".join([headers[h] for h in headers]), - path) - - -def download(proto, netloc, url_path, headers, dest, offset): - with_http_connection( - proto, netloc, - lambda conn: download_(url_path, dest, offset, headers, conn)) - - -def download_(url_path, dest, offset, headers, conn): - conn.request('GET', url_path, None, headers) - response = conn.getresponse() - if response.status != 200: - raise Exception('%d %s' % (response.status, response.reason)) - - length = response.getheader('Content-Length', -1) - - with_file( - dest, 'a', - lambda dest_file: download_all(response, length, dest_file, offset)) - - -def download_all(response, length, dest_file, offset): - dest_file.seek(offset) - i = 0 - while True: - buf = response.read(DOWNLOAD_CHUNK_SIZE) - if buf: - dest_file.write(buf) - else: - return - i += len(buf) - if length != -1 and i >= length: - return - - -if __name__ == '__main__': - XenAPIPlugin.dispatch({'get_vdi': get_vdi, - 'get_kernel': get_kernel}) diff --git a/xenapi/etc/xapi.d/plugins/pluginlib_nova.py b/xenapi/etc/xapi.d/plugins/pluginlib_nova.py deleted file mode 100755 index 2d323a016..000000000 --- a/xenapi/etc/xapi.d/plugins/pluginlib_nova.py +++ /dev/null @@ -1,216 +0,0 @@ -# Copyright (c) 2010 Citrix Systems, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# -# Helper functions for the Nova xapi plugins. In time, this will merge -# with the pluginlib.py shipped with xapi, but for now, that file is not -# very stable, so it's easiest just to have a copy of all the functions -# that we need. -# - -import httplib -import logging -import logging.handlers -import re -import time - - -##### Logging setup - -def configure_logging(name): - log = logging.getLogger() - log.setLevel(logging.DEBUG) - sysh = logging.handlers.SysLogHandler('/dev/log') - sysh.setLevel(logging.DEBUG) - formatter = logging.Formatter('%s: %%(levelname)-8s %%(message)s' % name) - sysh.setFormatter(formatter) - log.addHandler(sysh) - - -##### Exceptions - -class PluginError(Exception): - """Base Exception class for all plugin errors.""" - def __init__(self, *args): - Exception.__init__(self, *args) - -class ArgumentError(PluginError): - """Raised when required arguments are missing, argument values are invalid, - or incompatible arguments are given. 
- """ - def __init__(self, *args): - PluginError.__init__(self, *args) - - -##### Helpers - -def ignore_failure(func, *args, **kwargs): - try: - return func(*args, **kwargs) - except XenAPI.Failure, e: - logging.error('Ignoring XenAPI.Failure %s', e) - return None - - -##### Argument validation - -ARGUMENT_PATTERN = re.compile(r'^[a-zA-Z0-9_:\.\-,]+$') - -def validate_exists(args, key, default=None): - """Validates that a string argument to a RPC method call is given, and - matches the shell-safe regex, with an optional default value in case it - does not exist. - - Returns the string. - """ - if key in args: - if len(args[key]) == 0: - raise ArgumentError('Argument %r value %r is too short.' % (key, args[key])) - if not ARGUMENT_PATTERN.match(args[key]): - raise ArgumentError('Argument %r value %r contains invalid characters.' % (key, args[key])) - if args[key][0] == '-': - raise ArgumentError('Argument %r value %r starts with a hyphen.' % (key, args[key])) - return args[key] - elif default is not None: - return default - else: - raise ArgumentError('Argument %s is required.' % key) - -def validate_bool(args, key, default=None): - """Validates that a string argument to a RPC method call is a boolean string, - with an optional default value in case it does not exist. - - Returns the python boolean value. - """ - value = validate_exists(args, key, default) - if value.lower() == 'true': - return True - elif value.lower() == 'false': - return False - else: - raise ArgumentError("Argument %s may not take value %r. Valid values are ['true', 'false']." % (key, value)) - -def exists(args, key): - """Validates that a freeform string argument to a RPC method call is given. - Returns the string. - """ - if key in args: - return args[key] - else: - raise ArgumentError('Argument %s is required.' 
% key) - -def optional(args, key): - """If the given key is in args, return the corresponding value, otherwise - return None""" - return key in args and args[key] or None - - -def get_this_host(session): - return session.xenapi.session.get_this_host(session.handle) - - -def get_domain_0(session): - this_host_ref = get_this_host(session) - expr = 'field "is_control_domain" = "true" and field "resident_on" = "%s"' % this_host_ref - return session.xenapi.VM.get_all_records_where(expr).keys()[0] - - -def create_vdi(session, sr_ref, name_label, virtual_size, read_only): - vdi_ref = session.xenapi.VDI.create( - { 'name_label': name_label, - 'name_description': '', - 'SR': sr_ref, - 'virtual_size': str(virtual_size), - 'type': 'User', - 'sharable': False, - 'read_only': read_only, - 'xenstore_data': {}, - 'other_config': {}, - 'sm_config': {}, - 'tags': [] }) - logging.debug('Created VDI %s (%s, %s, %s) on %s.', vdi_ref, name_label, - virtual_size, read_only, sr_ref) - return vdi_ref - - -def with_vdi_in_dom0(session, vdi, read_only, f): - dom0 = get_domain_0(session) - vbd_rec = {} - vbd_rec['VM'] = dom0 - vbd_rec['VDI'] = vdi - vbd_rec['userdevice'] = 'autodetect' - vbd_rec['bootable'] = False - vbd_rec['mode'] = read_only and 'RO' or 'RW' - vbd_rec['type'] = 'disk' - vbd_rec['unpluggable'] = True - vbd_rec['empty'] = False - vbd_rec['other_config'] = {} - vbd_rec['qos_algorithm_type'] = '' - vbd_rec['qos_algorithm_params'] = {} - vbd_rec['qos_supported_algorithms'] = [] - logging.debug('Creating VBD for VDI %s ... ', vdi) - vbd = session.xenapi.VBD.create(vbd_rec) - logging.debug('Creating VBD for VDI %s done.', vdi) - try: - logging.debug('Plugging VBD %s ... ', vbd) - session.xenapi.VBD.plug(vbd) - logging.debug('Plugging VBD %s done.', vbd) - return f(session.xenapi.VBD.get_device(vbd)) - finally: - logging.debug('Destroying VBD for VDI %s ... 
', vdi) - vbd_unplug_with_retry(session, vbd) - ignore_failure(session.xenapi.VBD.destroy, vbd) - logging.debug('Destroying VBD for VDI %s done.', vdi) - - -def vbd_unplug_with_retry(session, vbd): - """Call VBD.unplug on the given VBD, with a retry if we get - DEVICE_DETACH_REJECTED. For reasons which I don't understand, we're - seeing the device still in use, even when all processes using the device - should be dead.""" - while True: - try: - session.xenapi.VBD.unplug(vbd) - logging.debug('VBD.unplug successful first time.') - return - except XenAPI.Failure, e: - if (len(e.details) > 0 and - e.details[0] == 'DEVICE_DETACH_REJECTED'): - logging.debug('VBD.unplug rejected: retrying...') - time.sleep(1) - elif (len(e.details) > 0 and - e.details[0] == 'DEVICE_ALREADY_DETACHED'): - logging.debug('VBD.unplug successful eventually.') - return - else: - logging.error('Ignoring XenAPI.Failure in VBD.unplug: %s', e) - return - - -def with_http_connection(proto, netloc, f): - conn = (proto == 'https' and - httplib.HTTPSConnection(netloc) or - httplib.HTTPConnection(netloc)) - try: - return f(conn) - finally: - conn.close() - - -def with_file(dest_path, mode, f): - dest = open(dest_path, mode) - try: - return f(dest) - finally: - dest.close() -- cgit From d79fd0df0bf9c59483b30c0d8c3a811580a1ee39 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 5 Aug 2010 04:31:21 -0700 Subject: Changed volumes to use a pool instead of globbing filesystem for concurrency reasons. Fixed broken tests. --- nova/tests/volume_unittest.py | 77 +++++++++++++++++++++++-------- nova/volume/service.py | 102 +++++++++++++++++++++++------------------- 2 files changed, 114 insertions(+), 65 deletions(-) diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py index 0f4f0e34d..2a07afe69 100644 --- a/nova/tests/volume_unittest.py +++ b/nova/tests/volume_unittest.py @@ -17,6 +17,10 @@ # under the License. 
import logging +import shutil +import tempfile + +from twisted.internet import defer from nova import compute from nova import exception @@ -34,10 +38,16 @@ class VolumeTestCase(test.TrialTestCase): super(VolumeTestCase, self).setUp() self.compute = compute.service.ComputeService() self.volume = None + self.tempdir = tempfile.mkdtemp() self.flags(connection_type='fake', - fake_storage=True) + fake_storage=True, + aoe_export_dir=self.tempdir) self.volume = volume_service.VolumeService() + def tearDown(self): + shutil.rmtree(self.tempdir) + + @defer.inlineCallbacks def test_run_create_volume(self): vol_size = '0' user_id = 'fake' @@ -48,34 +58,40 @@ class VolumeTestCase(test.TrialTestCase): volume_service.get_volume(volume_id)['volume_id']) rv = self.volume.delete_volume(volume_id) - self.assertFailure(volume_service.get_volume(volume_id), - exception.Error) + self.assertRaises(exception.Error, volume_service.get_volume, volume_id) + @defer.inlineCallbacks def test_too_big_volume(self): vol_size = '1001' user_id = 'fake' project_id = 'fake' - self.assertRaises(TypeError, - self.volume.create_volume, - vol_size, user_id, project_id) + try: + yield self.volume.create_volume(vol_size, user_id, project_id) + self.fail("Should have thrown TypeError") + except TypeError: + pass + @defer.inlineCallbacks def test_too_many_volumes(self): vol_size = '1' user_id = 'fake' project_id = 'fake' num_shelves = FLAGS.last_shelf_id - FLAGS.first_shelf_id + 1 - total_slots = FLAGS.slots_per_shelf * num_shelves + total_slots = FLAGS.blades_per_shelf * num_shelves vols = [] + from nova import datastore + redis = datastore.Redis.instance() for i in xrange(total_slots): vid = yield self.volume.create_volume(vol_size, user_id, project_id) vols.append(vid) self.assertFailure(self.volume.create_volume(vol_size, user_id, project_id), - volume_service.NoMoreVolumes) + volume_service.NoMoreBlades) for id in vols: yield self.volume.delete_volume(id) + @defer.inlineCallbacks def 
test_run_attach_detach_volume(self): # Create one volume and one compute to test with instance_id = "storage-test" @@ -84,22 +100,26 @@ class VolumeTestCase(test.TrialTestCase): project_id = 'fake' mountpoint = "/dev/sdf" volume_id = yield self.volume.create_volume(vol_size, user_id, project_id) - volume_obj = volume_service.get_volume(volume_id) volume_obj.start_attach(instance_id, mountpoint) - rv = yield self.compute.attach_volume(volume_id, - instance_id, - mountpoint) + if FLAGS.fake_tests: + volume_obj.finish_attach() + else: + rv = yield self.compute.attach_volume(instance_id, + volume_id, + mountpoint) self.assertEqual(volume_obj['status'], "in-use") - self.assertEqual(volume_obj['attachStatus'], "attached") + self.assertEqual(volume_obj['attach_status'], "attached") self.assertEqual(volume_obj['instance_id'], instance_id) self.assertEqual(volume_obj['mountpoint'], mountpoint) - self.assertRaises(exception.Error, - self.volume.delete_volume, - volume_id) - - rv = yield self.volume.detach_volume(volume_id) + self.assertFailure(self.volume.delete_volume(volume_id), exception.Error) + volume_obj.start_detach() + if FLAGS.fake_tests: + volume_obj.finish_detach() + else: + rv = yield self.volume.detach_volume(instance_id, + volume_id) volume_obj = volume_service.get_volume(volume_id) self.assertEqual(volume_obj['status'], "available") @@ -108,6 +128,27 @@ class VolumeTestCase(test.TrialTestCase): volume_service.get_volume, volume_id) + @defer.inlineCallbacks + def test_multiple_volume_race_condition(self): + vol_size = "5" + user_id = "fake" + project_id = 'fake' + shelf_blades = [] + def _check(volume_id): + vol = volume_service.get_volume(volume_id) + shelf_blade = '%s.%s' % (vol['shelf_id'], vol['blade_id']) + self.assert_(shelf_blade not in shelf_blades) + shelf_blades.append(shelf_blade) + logging.debug("got %s" % shelf_blade) + vol.destroy() + deferreds = [] + for i in range(5): + d = self.volume.create_volume(vol_size, user_id, project_id) + 
d.addCallback(_check) + d.addErrback(self.fail) + deferreds.append(d) + yield defer.DeferredList(deferreds) + def test_multi_node(self): # TODO(termie): Figure out how to test with two nodes, # each of them having a different FLAG for storage_node diff --git a/nova/volume/service.py b/nova/volume/service.py index 9dd63e88f..9c52ee469 100644 --- a/nova/volume/service.py +++ b/nova/volume/service.py @@ -22,12 +22,8 @@ destroying persistent storage volumes, ala EBS. Currently uses Ata-over-Ethernet. """ -import glob import logging import os -import shutil -import socket -import tempfile from twisted.internet import defer @@ -47,9 +43,6 @@ flags.DEFINE_string('volume_group', 'nova-volumes', 'Name for the VG that will contain exported volumes') flags.DEFINE_string('aoe_eth_dev', 'eth0', 'Which device to export the volumes on') -flags.DEFINE_string('storage_name', - socket.gethostname(), - 'name of this service') flags.DEFINE_integer('first_shelf_id', utils.last_octet(utils.get_my_ip()) * 10, 'AoE starting shelf_id for this service') @@ -59,9 +52,9 @@ flags.DEFINE_integer('last_shelf_id', flags.DEFINE_string('aoe_export_dir', '/var/lib/vblade-persist/vblades', 'AoE directory where exports are created') -flags.DEFINE_integer('slots_per_shelf', +flags.DEFINE_integer('blades_per_shelf', 16, - 'Number of AoE slots per shelf') + 'Number of AoE blades per shelf') flags.DEFINE_string('storage_availability_zone', 'nova', 'availability zone of this service') @@ -69,7 +62,7 @@ flags.DEFINE_boolean('fake_storage', False, 'Should we make real storage volumes to attach?') -class NoMoreVolumes(exception.Error): +class NoMoreBlades(exception.Error): pass def get_volume(volume_id): @@ -77,8 +70,9 @@ def get_volume(volume_id): volume_class = Volume if FLAGS.fake_storage: volume_class = FakeVolume - if datastore.Redis.instance().sismember('volumes', volume_id): - return volume_class(volume_id=volume_id) + vol = volume_class.lookup(volume_id) + if vol: + return vol raise 
exception.Error("Volume does not exist") class VolumeService(service.Service): @@ -91,18 +85,9 @@ class VolumeService(service.Service): super(VolumeService, self).__init__() self.volume_class = Volume if FLAGS.fake_storage: - FLAGS.aoe_export_dir = tempfile.mkdtemp() self.volume_class = FakeVolume self._init_volume_group() - def __del__(self): - # TODO(josh): Get rid of this destructor, volumes destroy themselves - if FLAGS.fake_storage: - try: - shutil.rmtree(FLAGS.aoe_export_dir) - except Exception, err: - pass - @defer.inlineCallbacks @validate.rangetest(size=(0, 1000)) def create_volume(self, size, user_id, project_id): @@ -113,8 +98,6 @@ class VolumeService(service.Service): """ logging.debug("Creating volume of size: %s" % (size)) vol = yield self.volume_class.create(size, user_id, project_id) - datastore.Redis.instance().sadd('volumes', vol['volume_id']) - datastore.Redis.instance().sadd('volumes:%s' % (FLAGS.storage_name), vol['volume_id']) logging.debug("restarting exports") yield self._restart_exports() defer.returnValue(vol['volume_id']) @@ -134,13 +117,11 @@ class VolumeService(service.Service): def delete_volume(self, volume_id): logging.debug("Deleting volume with id of: %s" % (volume_id)) vol = get_volume(volume_id) - if vol['status'] == "attached": + if vol['attach_status'] == "attached": raise exception.Error("Volume is still attached") - if vol['node_name'] != FLAGS.storage_name: + if vol['node_name'] != FLAGS.node_name: raise exception.Error("Volume is not local to this node") yield vol.destroy() - datastore.Redis.instance().srem('volumes', vol['volume_id']) - datastore.Redis.instance().srem('volumes:%s' % (FLAGS.storage_name), vol['volume_id']) defer.returnValue(True) @defer.inlineCallbacks @@ -172,14 +153,15 @@ class Volume(datastore.BasicModel): return self.volume_id def default_state(self): - return {"volume_id": self.volume_id} + return {"volume_id": self.volume_id, + "node_name": "unassigned"} @classmethod @defer.inlineCallbacks def 
create(cls, size, user_id, project_id): volume_id = utils.generate_uid('vol') vol = cls(volume_id) - vol['node_name'] = FLAGS.storage_name + vol['node_name'] = FLAGS.node_name vol['size'] = size vol['user_id'] = user_id vol['project_id'] = project_id @@ -225,10 +207,31 @@ class Volume(datastore.BasicModel): self['attach_status'] = "detached" self.save() + def save(self): + is_new = self.is_new_record() + super(Volume, self).save() + if is_new: + redis = datastore.Redis.instance() + key = self.__devices_key + # TODO(vish): these should be added by admin commands + more = redis.scard(self._redis_association_name("node", + self['node_name'])) + if (not redis.exists(key) and not more): + for shelf_id in range(FLAGS.first_shelf_id, + FLAGS.last_shelf_id + 1): + for blade_id in range(FLAGS.blades_per_shelf): + redis.sadd(key, "%s.%s" % (shelf_id, blade_id)) + self.associate_with("node", self['node_name']) + @defer.inlineCallbacks def destroy(self): yield self._remove_export() yield self._delete_lv() + self.unassociate_with("node", self['node_name']) + if self.get('shelf_id', None) and self.get('blade_id', None): + redis = datastore.Redis.instance() + key = self.__devices_key + redis.sadd(key, "%s.%s" % (self['shelf_id'], self['blade_id'])) super(Volume, self).destroy() @defer.inlineCallbacks @@ -248,17 +251,26 @@ class Volume(datastore.BasicModel): "sudo lvremove -f %s/%s" % (FLAGS.volume_group, self['volume_id']), error_ok=1) + @property + def __devices_key(self): + return 'volume_devices:%s' % FLAGS.node_name + @defer.inlineCallbacks def _setup_export(self): - (shelf_id, blade_id) = get_next_aoe_numbers() + redis = datastore.Redis.instance() + key = self.__devices_key + device = redis.spop(key) + if not device: + raise NoMoreBlades() + (shelf_id, blade_id) = device.split('.') self['aoe_device'] = "e%s.%s" % (shelf_id, blade_id) self['shelf_id'] = shelf_id self['blade_id'] = blade_id self.save() - yield self._exec_export() + yield self._exec_setup_export() 
@defer.inlineCallbacks - def _exec_export(self): + def _exec_setup_export(self): yield process.simple_execute( "sudo vblade-persist setup %s %s %s /dev/%s/%s" % (self['shelf_id'], @@ -269,6 +281,13 @@ class Volume(datastore.BasicModel): @defer.inlineCallbacks def _remove_export(self): + if not self.get('shelf_id', None) or not self.get('blade_id', None): + defer.returnValue(False) + yield self._exec_remove_export() + defer.returnValue(True) + + @defer.inlineCallbacks + def _exec_remove_export(self): yield process.simple_execute( "sudo vblade-persist stop %s %s" % (self['shelf_id'], self['blade_id']), error_ok=1) @@ -277,29 +296,18 @@ class Volume(datastore.BasicModel): self['blade_id']), error_ok=1) + class FakeVolume(Volume): def _create_lv(self): pass - def _exec_export(self): + def _exec_setup_export(self): fname = os.path.join(FLAGS.aoe_export_dir, self['aoe_device']) f = file(fname, "w") f.close() - def _remove_export(self): - pass + def _exec_remove_export(self): + os.unlink(os.path.join(FLAGS.aoe_export_dir, self['aoe_device'])) def _delete_lv(self): pass - -def get_next_aoe_numbers(): - for shelf_id in xrange(FLAGS.first_shelf_id, FLAGS.last_shelf_id + 1): - aoes = glob.glob("%s/e%s.*" % (FLAGS.aoe_export_dir, shelf_id)) - if not aoes: - blade_id = 0 - else: - blade_id = int(max([int(a.rpartition('.')[2]) for a in aoes])) + 1 - if blade_id < FLAGS.slots_per_shelf: - logging.debug("Next shelf.blade is %s.%s", shelf_id, blade_id) - return (shelf_id, blade_id) - raise NoMoreVolumes() -- cgit From a33dce2da8dc8e25d0943732adfa6b14b1e48c7b Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 6 Aug 2010 15:48:46 -0700 Subject: a few more commands were putting output on stderr. 
In general, exceptions on stderr output seems like a bad idea --- nova/volume/service.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/nova/volume/service.py b/nova/volume/service.py index 9c52ee469..66163a812 100644 --- a/nova/volume/service.py +++ b/nova/volume/service.py @@ -128,8 +128,8 @@ class VolumeService(service.Service): def _restart_exports(self): if FLAGS.fake_storage: return - yield process.simple_execute("sudo vblade-persist auto all") - # NOTE(vish): this command sometimes sends output to stderr for warnings + # NOTE(vish): these commands sometimes sends output to stderr for warnings + yield process.simple_execute("sudo vblade-persist auto all", error_ok=1) yield process.simple_execute("sudo vblade-persist start all", error_ok=1) @defer.inlineCallbacks @@ -243,7 +243,8 @@ class Volume(datastore.BasicModel): yield process.simple_execute( "sudo lvcreate -L %s -n %s %s" % (sizestr, self['volume_id'], - FLAGS.volume_group)) + FLAGS.volume_group), + error_ok=1) @defer.inlineCallbacks def _delete_lv(self): @@ -277,7 +278,7 @@ class Volume(datastore.BasicModel): self['blade_id'], FLAGS.aoe_eth_dev, FLAGS.volume_group, - self['volume_id'])) + self['volume_id']), error_ok=1) @defer.inlineCallbacks def _remove_export(self): -- cgit From 86a7e62f0b72763088b0a34516ffc30f22ca937e Mon Sep 17 00:00:00 2001 From: Joe Heck Date: Sun, 8 Aug 2010 09:49:47 -0700 Subject: adding pep8 and pylint for regular cleanup tasks --- tools/pip-requires | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/pip-requires b/tools/pip-requires index 4eb47ca2b..24aefb25e 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -1,3 +1,5 @@ +pep8==0.5.0 +pylint==0.21.1 IPy==0.70 M2Crypto==0.20.2 amqplib==0.6.1 -- cgit From abd9bed8f7f88617c0a402faef47da13963ccea7 Mon Sep 17 00:00:00 2001 From: Joe Heck Date: Sun, 8 Aug 2010 09:50:22 -0700 Subject: attempting some cleanup work --- nova/endpoint/cloud.py | 77 
++++++++++++++++++++++++++------------------------ 1 file changed, 40 insertions(+), 37 deletions(-) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 878d54a15..ee22863a9 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -47,6 +47,7 @@ FLAGS = flags.FLAGS flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') + def _gen_key(user_id, key_name): """ Tuck this into AuthManager """ try: @@ -102,15 +103,16 @@ class CloudController(object): result = {} for instance in self.instdir.all: if instance['project_id'] == project_id: - line = '%s slots=%d' % (instance['private_dns_name'], INSTANCE_TYPES[instance['instance_type']]['vcpus']) + line = '%s slots=%d' % (instance['private_dns_name'], + INSTANCE_TYPES[instance['instance_type']]['vcpus']) if instance['key_name'] in result: result[instance['key_name']].append(line) else: result[instance['key_name']] = [line] return result - def get_metadata(self, ip): - i = self.get_instance_by_ip(ip) + def get_metadata(self, ipaddress): + i = self.get_instance_by_ip(ipaddress) if i is None: return None mpi = self._get_mpi_data(i['project_id']) @@ -147,7 +149,7 @@ class CloudController(object): }, 'public-hostname': i.get('dns_name', ''), 'public-ipv4': i.get('dns_name', ''), # TODO: switch to IP - 'public-keys' : keys, + 'public-keys': keys, 'ramdisk-id': i.get('ramdisk_id', ''), 'reservation-id': i['reservation_id'], 'security-groups': i.get('groups', ''), @@ -203,7 +205,7 @@ class CloudController(object): 'keyFingerprint': key_pair.fingerprint, }) - return { 'keypairsSet': result } + return {'keypairsSet': result} @rbac.allow('all') def create_key_pair(self, context, key_name, **kwargs): @@ -232,7 +234,7 @@ class CloudController(object): @rbac.allow('all') def describe_security_groups(self, context, group_names, **kwargs): - groups = { 'securityGroupSet': [] } + groups = {'securityGroupSet': []} # Stubbed for now to unblock other things. 
return groups @@ -251,7 +253,7 @@ class CloudController(object): instance = self._get_instance(context, instance_id[0]) return rpc.call('%s.%s' % (FLAGS.compute_topic, instance['node_name']), {"method": "get_console_output", - "args" : {"instance_id": instance_id[0]}}) + "args": {"instance_id": instance_id[0]}}) def _get_user_id(self, context): if context and context.user: @@ -285,10 +287,10 @@ class CloudController(object): if volume['attach_status'] == 'attached': v['attachmentSet'] = [{'attachTime': volume['attach_time'], 'deleteOnTermination': volume['delete_on_termination'], - 'device' : volume['mountpoint'], - 'instanceId' : volume['instance_id'], - 'status' : 'attached', - 'volume_id' : volume['volume_id']}] + 'device': volume['mountpoint'], + 'instanceId': volume['instance_id'], + 'status': 'attached', + 'volume_id': volume['volume_id']}] else: v['attachmentSet'] = [{}] return v @@ -298,7 +300,7 @@ class CloudController(object): def create_volume(self, context, size, **kwargs): # TODO(vish): refactor this to create the volume object here and tell service to create it result = yield rpc.call(FLAGS.volume_topic, {"method": "create_volume", - "args" : {"size": size, + "args": {"size": size, "user_id": context.user.id, "project_id": context.project.id}}) # NOTE(vish): rpc returned value is in the result key in the dictionary @@ -348,15 +350,15 @@ class CloudController(object): compute_node = instance['node_name'] rpc.cast('%s.%s' % (FLAGS.compute_topic, compute_node), {"method": "attach_volume", - "args" : {"volume_id": volume_id, - "instance_id" : instance_id, - "mountpoint" : device}}) - return defer.succeed({'attachTime' : volume['attach_time'], - 'device' : volume['mountpoint'], - 'instanceId' : instance_id, - 'requestId' : context.request_id, - 'status' : volume['attach_status'], - 'volumeId' : volume_id}) + "args": {"volume_id": volume_id, + "instance_id": instance_id, + "mountpoint": device}}) + return defer.succeed({'attachTime': volume['attach_time'], 
+ 'device': volume['mountpoint'], + 'instanceId': instance_id, + 'requestId': context.request_id, + 'status': volume['attach_status'], + 'volumeId': volume_id}) @rbac.allow('projectmanager', 'sysadmin') @@ -372,18 +374,18 @@ class CloudController(object): instance = self._get_instance(context, instance_id) rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), {"method": "detach_volume", - "args" : {"instance_id": instance_id, + "args": {"instance_id": instance_id, "volume_id": volume_id}}) except exception.NotFound: # If the instance doesn't exist anymore, # then we need to call detach blind volume.finish_detach() - return defer.succeed({'attachTime' : volume['attach_time'], - 'device' : volume['mountpoint'], - 'instanceId' : instance_id, - 'requestId' : context.request_id, - 'status' : volume['attach_status'], - 'volumeId' : volume_id}) + return defer.succeed({'attachTime': volume['attach_time'], + 'device': volume['mountpoint'], + 'instanceId': instance_id, + 'requestId': context.request_id, + 'status': volume['attach_status'], + 'volumeId': volume_id}) def _convert_to_set(self, lst, label): if lst == None or lst == []: @@ -425,7 +427,8 @@ class CloudController(object): i['key_name'] = instance.get('key_name', None) if context.user.is_admin(): i['key_name'] = '%s (%s, %s)' % (i['key_name'], - instance.get('project_id', None), instance.get('node_name','')) + instance.get('project_id', None), + instance.get('node_name', '')) i['product_codes_set'] = self._convert_to_set( instance.get('product_codes', None), 'product_code') i['instance_type'] = instance.get('instance_type', None) @@ -442,7 +445,7 @@ class CloudController(object): reservations[res_id] = r reservations[res_id]['instances_set'].append(i) - instance_response = {'reservationSet' : list(reservations.values()) } + instance_response = {'reservationSet': list(reservations.values())} return instance_response @rbac.allow('all') @@ -457,7 +460,7 @@ class CloudController(object): address['project_id'] 
== context.project.id): address_rv = { 'public_ip': address['address'], - 'instance_id' : address.get('instance_id', 'free') + 'instance_id': address.get('instance_id', 'free') } if context.user.is_admin(): address_rv['instance_id'] = "%s (%s, %s)" % ( @@ -477,7 +480,7 @@ class CloudController(object): "args": {"user_id": context.user.id, "project_id": context.project.id}}) public_ip = alloc_result['result'] - defer.returnValue({'addressSet': [{'publicIp' : public_ip}]}) + defer.returnValue({'addressSet': [{'publicIp': public_ip}]}) @rbac.allow('netadmin') @defer.inlineCallbacks @@ -591,7 +594,7 @@ class CloudController(object): inst.save() rpc.cast(FLAGS.compute_topic, {"method": "run_instance", - "args": {"instance_id" : inst.instance_id}}) + "args": {"instance_id": inst.instance_id}}) logging.debug("Casting to node for %s's instance with IP of %s" % (context.user.name, inst['private_dns_name'])) # TODO: Make Network figure out the network name from ip. @@ -646,7 +649,7 @@ class CloudController(object): instance = self._get_instance(context, i) rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), {"method": "reboot_instance", - "args" : {"instance_id": i}}) + "args": {"instance_id": i}}) return defer.succeed(True) @rbac.allow('projectmanager', 'sysadmin') @@ -656,7 +659,7 @@ class CloudController(object): volume_node = volume['node_name'] rpc.cast('%s.%s' % (FLAGS.volume_topic, volume_node), {"method": "delete_volume", - "args" : {"volume_id": volume_id}}) + "args": {"volume_id": volume_id}}) return defer.succeed(True) @rbac.allow('all') @@ -689,9 +692,9 @@ class CloudController(object): image = images.list(context, image_id)[0] except IndexError: raise exception.ApiError('invalid id: %s' % image_id) - result = { 'image_id': image_id, 'launchPermission': [] } + result = {'image_id': image_id, 'launchPermission': []} if image['isPublic']: - result['launchPermission'].append({ 'group': 'all' }) + result['launchPermission'].append({'group': 'all'}) 
return defer.succeed(result) @rbac.allow('projectmanager', 'sysadmin') -- cgit From e59b769cf1ad12f63788d2e90fd3a4412f9db6f4 Mon Sep 17 00:00:00 2001 From: Joe Heck Date: Sun, 8 Aug 2010 11:39:14 -0700 Subject: variable name cleanup --- nova/endpoint/cloud.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index ee22863a9..8b937306e 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -210,18 +210,18 @@ class CloudController(object): @rbac.allow('all') def create_key_pair(self, context, key_name, **kwargs): try: - d = defer.Deferred() - p = context.handler.application.settings.get('pool') + dcall = defer.Deferred() + pool = context.handler.application.settings.get('pool') def _complete(kwargs): if 'exception' in kwargs: - d.errback(kwargs['exception']) + dcall.errback(kwargs['exception']) return - d.callback({'keyName': key_name, + dcall.callback({'keyName': key_name, 'keyFingerprint': kwargs['fingerprint'], 'keyMaterial': kwargs['private_key']}) - p.apply_async(_gen_key, [context.user.id, key_name], + pool.apply_async(_gen_key, [context.user.id, key_name], callback=_complete) - return d + return dcall except manager.UserError as e: raise -- cgit From 3fe167e1e398b3d602699b8219dcbfc8fec86859 Mon Sep 17 00:00:00 2001 From: Joe Heck Date: Sun, 8 Aug 2010 11:40:03 -0700 Subject: removing what appears to be an unused try/except statement - nova.auth.manager.UserError doesn't exist in this codebase. Leftover? Something intended to be there but never added? 
--- nova/endpoint/cloud.py | 28 ++++++++++++---------------- 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 8b937306e..ad9188ff3 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -209,22 +209,18 @@ class CloudController(object): @rbac.allow('all') def create_key_pair(self, context, key_name, **kwargs): - try: - dcall = defer.Deferred() - pool = context.handler.application.settings.get('pool') - def _complete(kwargs): - if 'exception' in kwargs: - dcall.errback(kwargs['exception']) - return - dcall.callback({'keyName': key_name, - 'keyFingerprint': kwargs['fingerprint'], - 'keyMaterial': kwargs['private_key']}) - pool.apply_async(_gen_key, [context.user.id, key_name], - callback=_complete) - return dcall - - except manager.UserError as e: - raise + dcall = defer.Deferred() + pool = context.handler.application.settings.get('pool') + def _complete(kwargs): + if 'exception' in kwargs: + dcall.errback(kwargs['exception']) + return + dcall.callback({'keyName': key_name, + 'keyFingerprint': kwargs['fingerprint'], + 'keyMaterial': kwargs['private_key']}) + pool.apply_async(_gen_key, [context.user.id, key_name], + callback=_complete) + return dcall @rbac.allow('all') def delete_key_pair(self, context, key_name, **kwargs): -- cgit From 0ee7d2f74a959bcf1cf611f63842302866774475 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Sun, 8 Aug 2010 12:57:33 -0700 Subject: Greater compliance with pep8/pylint style checks --- bin/nova-manage | 4 +- nova/objectstore/image.py | 15 +++++-- nova/process.py | 108 +++++++++++++++++++++++----------------------- nova/utils.py | 12 +++--- nova/virt/libvirt_conn.py | 4 +- tools/install_venv.py | 2 +- 6 files changed, 80 insertions(+), 65 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index 1f7f808f1..36dc1dde9 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -56,7 +56,9 @@ class VpnCommands(object): vpn = self.__vpn_for(project.id) 
if vpn: - out, err = utils.execute("ping -c1 -w1 %s > /dev/null; echo $?" % vpn['private_dns_name'], check_exit_code=False) + out, err = utils.execute( + "ping -c1 -w1 %s > /dev/null; echo $?" + % vpn['private_dns_name'], check_exit_code=False) if out.strip() == '0': net = 'up' else: diff --git a/nova/objectstore/image.py b/nova/objectstore/image.py index b98de276c..5dbf37133 100644 --- a/nova/objectstore/image.py +++ b/nova/objectstore/image.py @@ -227,13 +227,22 @@ class Image(object): @staticmethod def decrypt_image(encrypted_filename, encrypted_key, encrypted_iv, cloud_private_key, decrypted_filename): - key, err = utils.execute('openssl rsautl -decrypt -inkey %s' % cloud_private_key, encrypted_key, check_exit_code=False) + key, err = utils.execute( + 'openssl rsautl -decrypt -inkey %s' % cloud_private_key, + process_input=encrypted_key, + check_exit_code=False) if err: raise exception.Error("Failed to decrypt private key: %s" % err) - iv, err = utils.execute('openssl rsautl -decrypt -inkey %s' % cloud_private_key, encrypted_iv, check_exit_code=False) + iv, err = utils.execute( + 'openssl rsautl -decrypt -inkey %s' % cloud_private_key, + process_input=encrypted_iv, + check_exit_code=False) if err: raise exception.Error("Failed to decrypt initialization vector: %s" % err) - out, err = utils.execute('openssl enc -d -aes-128-cbc -in %s -K %s -iv %s -out %s' % (encrypted_filename, key, iv, decrypted_filename), check_exit_code=False) + _out, err = utils.execute( + 'openssl enc -d -aes-128-cbc -in %s -K %s -iv %s -out %s' + % (encrypted_filename, key, iv, decrypted_filename), + check_exit_code=False) if err: raise exception.Error("Failed to decrypt image file %s : %s" % (encrypted_filename, err)) diff --git a/nova/process.py b/nova/process.py index 9e9de2ee8..37ab538ee 100644 --- a/nova/process.py +++ b/nova/process.py @@ -2,6 +2,7 @@ # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. 
+# Copyright 2010 FathomDB Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -20,16 +21,11 @@ Process pool, still buggy right now. """ -import logging -import multiprocessing import StringIO from twisted.internet import defer from twisted.internet import error -from twisted.internet import process from twisted.internet import protocol from twisted.internet import reactor -from twisted.internet import threads -from twisted.python import failure from nova import flags @@ -54,8 +50,9 @@ class UnexpectedErrorOutput(IOError): IOError.__init__(self, "got stdout: %r\nstderr: %r" % (stdout, stderr)) -# This is based on _BackRelay from twister.internal.utils, but modified to capture -# both stdout and stderr without odd stderr handling, and also to handle stdin +# This is based on _BackRelay from twister.internal.utils, but modified to +# capture both stdout and stderr, without odd stderr handling, and also to +# handle stdin class BackRelayWithInput(protocol.ProcessProtocol): """ Trivial protocol for communicating with a process and turning its output @@ -67,35 +64,37 @@ class BackRelayWithInput(protocol.ProcessProtocol): L{_UnexpectedErrorOutput} instance and the attribute will be set to C{None}. - @ivar onProcessEnded: If C{terminate_on_stderr} is false and bytes are received over - stderr, this attribute will refer to a L{Deferred} which will be called - back when the process ends. This C{Deferred} is also associated with - the L{_UnexpectedErrorOutput} which C{deferred} fires with earlier in - this case so that users can determine when the process has actually - ended, in addition to knowing when bytes have been received via stderr. + @ivar onProcessEnded: If C{terminate_on_stderr} is false and bytes are + received over stderr, this attribute will refer to a L{Deferred} which + will be called back when the process ends. 
This C{Deferred} is also + associated with the L{_UnexpectedErrorOutput} which C{deferred} fires + with earlier in this case so that users can determine when the process + has actually ended, in addition to knowing when bytes have been received + via stderr. """ - def __init__(self, deferred, startedDeferred=None, terminate_on_stderr=False, - check_exit_code=True, input=None): + def __init__(self, deferred, started_deferred=None, + terminate_on_stderr=False, check_exit_code=True, + process_input=None): self.deferred = deferred self.stdout = StringIO.StringIO() self.stderr = StringIO.StringIO() - self.startedDeferred = startedDeferred + self.started_deferred = started_deferred self.terminate_on_stderr = terminate_on_stderr self.check_exit_code = check_exit_code - self.input = input + self.process_input = process_input + self.on_process_ended = None def errReceived(self, text): - self.sterr.write(text) + self.stderr.write(text) if self.terminate_on_stderr and (self.deferred is not None): - self.onProcessEnded = defer.Deferred() - self.deferred.errback(UnexpectedErrorOutput(stdout=self.stdout.getvalue(), stderr=self.stderr.getvalue())) + self.on_process_ended = defer.Deferred() + self.deferred.errback(UnexpectedErrorOutput( + stdout=self.stdout.getvalue(), + stderr=self.stderr.getvalue())) self.deferred = None self.transport.loseConnection() - def errReceived(self, text): - self.stderr.write(text) - def outReceived(self, text): self.stdout.write(text) @@ -107,37 +106,40 @@ class BackRelayWithInput(protocol.ProcessProtocol): reason.trap(error.ProcessDone) self.deferred.callback((stdout, stderr)) except: - # This logic is a little suspicious to me (justinsb)... - # If the callback throws an exception, then errback will be called also. - # However, this is what the unit tests test for... + # NOTE(justinsb): This logic is a little suspicious to me... + # If the callback throws an exception, then errback will be + # called also. 
However, this is what the unit tests test for... self.deferred.errback(UnexpectedErrorOutput(stdout, stderr)) - elif self.onProcessEnded is not None: - self.onProcessEnded.errback(reason) + elif self.on_process_ended is not None: + self.on_process_ended.errback(reason) def connectionMade(self): - if self.startedDeferred: - self.startedDeferred.callback(self) - if self.input: - self.transport.write(self.input) + if self.started_deferred: + self.started_deferred.callback(self) + if self.process_input: + self.transport.write(self.process_input) self.transport.closeStdin() -def getProcessOutput(executable, args=None, env=None, path=None, reactor=None, - check_exit_code=True, input=None, startedDeferred=None): - if reactor is None: - from twisted.internet import reactor +def get_process_output(executable, args=None, env=None, path=None, + process_reactor=None, check_exit_code=True, + process_input=None, started_deferred=None): + if process_reactor is None: + process_reactor = reactor args = args and args or () env = env and env and {} - d = defer.Deferred() - p = BackRelayWithInput( - d, startedDeferred=startedDeferred, check_exit_code=check_exit_code, input=input) + deferred = defer.Deferred() + process_handler = BackRelayWithInput( + deferred, started_deferred=started_deferred, + check_exit_code=check_exit_code, process_input=process_input) # NOTE(vish): commands come in as unicode, but self.executes needs # strings or process.spawn raises a deprecation warning executable = str(executable) if not args is None: args = [str(x) for x in args] - reactor.spawnProcess(p, executable, (executable,)+tuple(args), env, path) - return d + process_reactor.spawnProcess( process_handler, executable, + (executable,)+tuple(args), env, path) + return deferred class ProcessPool(object): @@ -163,26 +165,26 @@ class ProcessPool(object): return self.execute(executable, args, **kw) def execute(self, *args, **kw): - d = self._pool.acquire() + deferred = self._pool.acquire() - def 
_associateProcess(proto): - d.process = proto.transport + def _associate_process(proto): + deferred.process = proto.transport return proto.transport started = defer.Deferred() - started.addCallback(_associateProcess) - kw.setdefault('startedDeferred', started) + started.addCallback(_associate_process) + kw.setdefault('started_deferred', started) - d.process = None - d.started = started + deferred.process = None + deferred.started = started - d.addCallback(lambda _: getProcessOutput(*args, **kw)) - d.addBoth(self._release) - return d + deferred.addCallback(lambda _: get_process_output(*args, **kw)) + deferred.addBoth(self._release) + return deferred - def _release(self, rv=None): + def _release(self, retval=None): self._pool.release() - return rv + return retval class SharedPool(object): _instance = None diff --git a/nova/utils.py b/nova/utils.py index 74c7c021c..1acc205b5 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -55,22 +55,23 @@ def fetchfile(url, target): # fp.close() execute("curl --fail %s -o %s" % (url, target)) -def execute(cmd, input=None, addl_env=None, check_exit_code=True): +def execute(cmd, process_input=None, addl_env=None, check_exit_code=True): env = os.environ.copy() if addl_env: env.update(addl_env) obj = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) result = None - if input != None: - result = obj.communicate(input) + if process_input != None: + result = obj.communicate(process_input) else: result = obj.communicate() obj.stdin.close() if obj.returncode: logging.debug("Result was %s" % (obj.returncode)) if check_exit_code and obj.returncode <> 0: - raise Exception("Unexpected exit code: %s. result=%s" % (obj.returncode, result)) + raise Exception( "Unexpected exit code: %s. 
result=%s" + % (obj.returncode, result)) return result @@ -101,7 +102,8 @@ def runthis(prompt, cmd, check_exit_code = True): exit_code = subprocess.call(cmd.split(" ")) logging.debug(prompt % (exit_code)) if check_exit_code and exit_code <> 0: - raise Exception("Unexpected exit code: %s from cmd: %s" % (exit_code, cmd)) + raise Exception( "Unexpected exit code: %s from cmd: %s" + % (exit_code, cmd)) def generate_uid(topic, size=8): diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 6cb9acb29..e36bfc7f5 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -194,9 +194,9 @@ class LibvirtConnection(object): if not os.path.exists(basepath('ramdisk')): yield images.fetch(data['ramdisk_id'], basepath('ramdisk'), user) - execute = lambda cmd, input=None: \ + execute = lambda cmd, process_input=None: \ process.simple_execute(cmd=cmd, - input=input, + process_input=process_input, check_exit_code=True) key = data['key_data'] diff --git a/tools/install_venv.py b/tools/install_venv.py index b9eac70e6..3cff8051d 100644 --- a/tools/install_venv.py +++ b/tools/install_venv.py @@ -35,7 +35,7 @@ def run_command(cmd, redirect_output=True, check_exit_code=True): def check_dependencies(): """Make sure pip and virtualenv are on the path.""" - # Perl also has a pip program. Hopefully the user has installed the right one! + # Perl also has a pip program. Hopefully the user has installed the right one! 
print 'Checking for pip...', if not run_command(['which', 'pip'], check_exit_code=False).strip(): die('ERROR: pip not found.\n\nNova development requires pip,' -- cgit From 993563b6cc9db9f24480678cf8b2d0750aee7a92 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Sun, 8 Aug 2010 13:05:24 -0700 Subject: Used new (clearer) flag names when calling processes --- nova/process.py | 10 +++++++--- nova/volume/service.py | 3 ++- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/nova/process.py b/nova/process.py index 37ab538ee..d36de0410 100644 --- a/nova/process.py +++ b/nova/process.py @@ -123,15 +123,19 @@ class BackRelayWithInput(protocol.ProcessProtocol): def get_process_output(executable, args=None, env=None, path=None, process_reactor=None, check_exit_code=True, - process_input=None, started_deferred=None): + process_input=None, started_deferred=None, + terminate_on_stderr=False): if process_reactor is None: process_reactor = reactor args = args and args or () env = env and env and {} deferred = defer.Deferred() process_handler = BackRelayWithInput( - deferred, started_deferred=started_deferred, - check_exit_code=check_exit_code, process_input=process_input) + deferred, + started_deferred=started_deferred, + check_exit_code=check_exit_code, + process_input=process_input, + terminate_on_stderr=terminate_on_stderr) # NOTE(vish): commands come in as unicode, but self.executes needs # strings or process.spawn raises a deprecation warning executable = str(executable) diff --git a/nova/volume/service.py b/nova/volume/service.py index e12f675a7..98cd0d3bf 100644 --- a/nova/volume/service.py +++ b/nova/volume/service.py @@ -149,7 +149,8 @@ class VolumeService(service.Service): return yield process.simple_execute("sudo vblade-persist auto all") # NOTE(vish): this command sometimes sends output to stderr for warnings - yield process.simple_execute("sudo vblade-persist start all", error_ok=1) + yield process.simple_execute("sudo vblade-persist start all", + 
terminate_on_stderr=False) @defer.inlineCallbacks def _init_volume_group(self): -- cgit From 86150042191005a9bf04ef243396667cb9dad1b0 Mon Sep 17 00:00:00 2001 From: Joe Heck Date: Sun, 8 Aug 2010 13:20:50 -0700 Subject: convention and variable naming cleanup for pylint/pep8 --- nova/network/model.py | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/nova/network/model.py b/nova/network/model.py index daac035e4..eada776c7 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -97,11 +97,11 @@ class Vlan(datastore.BasicModel): def dict_by_vlan(cls): """a hash of vlan:project""" set_name = cls._redis_set_name(cls.__name__) - rv = {} - h = datastore.Redis.instance().hgetall(set_name) - for v in h.keys(): - rv[h[v]] = v - return rv + retvals = {} + hashset = datastore.Redis.instance().hgetall(set_name) + for val in hashset.keys(): + retvals[hashset[val]] = val + return retvals @classmethod @datastore.absorb_connection_error @@ -136,7 +136,8 @@ class Vlan(datastore.BasicModel): # CLEANUP: # TODO(ja): Save the IPs at the top of each subnet for cloudpipe vpn clients -# TODO(ja): does vlanpool "keeper" need to know the min/max - shouldn't FLAGS always win? +# TODO(ja): does vlanpool "keeper" need to know the min/max - +# shouldn't FLAGS always win? 
# TODO(joshua): Save the IPs at the top of each subnet for cloudpipe vpn clients class BaseNetwork(datastore.BasicModel): @@ -217,7 +218,9 @@ class BaseNetwork(datastore.BasicModel): def available(self): # the .2 address is always CloudPipe # and the top are for vpn clients - for idx in range(self.num_static_ips, len(self.network)-(1 + FLAGS.cnt_vpn_clients)): + num_ips = self.num_static_ips + num_clients = FLAGS.cnt_vpn_clients + for idx in range(num_ips, len(self.network)-(1 + num_clients)): address = str(self.network[idx]) if not address in self.hosts.keys(): yield address @@ -338,8 +341,9 @@ class DHCPNetwork(BridgedNetwork): private_ip = str(self.network[2]) linux_net.confirm_rule("FORWARD -d %s -p udp --dport 1194 -j ACCEPT" % (private_ip, )) - linux_net.confirm_rule("PREROUTING -t nat -d %s -p udp --dport %s -j DNAT --to %s:1194" - % (self.project.vpn_ip, self.project.vpn_port, private_ip)) + linux_net.confirm_rule( + "PREROUTING -t nat -d %s -p udp --dport %s -j DNAT --to %s:1194" + % (self.project.vpn_ip, self.project.vpn_port, private_ip)) def deexpress(self, address=None): # if this is the last address, stop dns @@ -374,13 +378,14 @@ class PublicAddress(datastore.BasicModel): return addr -DEFAULT_PORTS = [("tcp",80), ("tcp",22), ("udp",1194), ("tcp",443)] +DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)] class PublicNetworkController(BaseNetwork): override_type = 'network' def __init__(self, *args, **kwargs): network_id = "public:default" - super(PublicNetworkController, self).__init__(network_id, FLAGS.public_range) + super(PublicNetworkController, self).__init__(network_id, + FLAGS.public_range) self['user_id'] = "public" self['project_id'] = "public" self["create_time"] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) @@ -415,7 +420,7 @@ class PublicNetworkController(BaseNetwork): def deallocate_ip(self, ip_str): # NOTE(vish): cleanup is now done on release by the parent class - self.release_ip(ip_str) + 
self.release_ip(ip_str) def associate_address(self, public_ip, private_ip, instance_id): if not public_ip in self.assigned: @@ -461,8 +466,9 @@ class PublicNetworkController(BaseNetwork): linux_net.confirm_rule("FORWARD -d %s -p icmp -j ACCEPT" % (private_ip)) for (protocol, port) in DEFAULT_PORTS: - linux_net.confirm_rule("FORWARD -d %s -p %s --dport %s -j ACCEPT" - % (private_ip, protocol, port)) + linux_net.confirm_rule( + "FORWARD -d %s -p %s --dport %s -j ACCEPT" + % (private_ip, protocol, port)) def deexpress(self, address=None): addr = self.get_host(address) -- cgit From 21c1d379199c528024c5e85571609e77e53c6ee7 Mon Sep 17 00:00:00 2001 From: Joe Heck Date: Sun, 8 Aug 2010 13:31:40 -0700 Subject: light cleanup - convention stuff mostly --- nova/auth/manager.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/nova/auth/manager.py b/nova/auth/manager.py index d44ed52b2..e5efbca24 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -29,15 +29,17 @@ import uuid import zipfile from nova import crypto -from nova import datastore from nova import exception from nova import flags -from nova import objectstore # for flags from nova import utils -from nova.auth import ldapdriver # for flags from nova.auth import signer from nova.network import vpn +#unused imports +#from nova import datastore +#from nova.auth import ldapdriver # for flags +#from nova import objectstore # for flags + FLAGS = flags.FLAGS # NOTE(vish): a user with one of these roles will be a superuser and @@ -99,6 +101,7 @@ class AuthBase(object): class User(AuthBase): """Object representing a user""" def __init__(self, id, name, access, secret, admin): + AuthBase.__init__(self) self.id = id self.name = name self.access = access @@ -159,6 +162,7 @@ class KeyPair(AuthBase): fingerprint is stored. The user's private key is not saved. 
""" def __init__(self, id, name, owner_id, public_key, fingerprint): + AuthBase.__init__(self) self.id = id self.name = name self.owner_id = owner_id @@ -176,6 +180,7 @@ class KeyPair(AuthBase): class Project(AuthBase): """Represents a Project returned from the datastore""" def __init__(self, id, name, project_manager_id, description, member_ids): + AuthBase.__init__(self) self.id = id self.name = name self.project_manager_id = project_manager_id @@ -234,7 +239,7 @@ class AuthManager(object): AuthManager also manages associated data related to Auth objects that need to be more accessible, such as vpn ips and ports. """ - _instance=None + _instance = None def __new__(cls, *args, **kwargs): """Returns the AuthManager singleton""" if not cls._instance: @@ -248,7 +253,7 @@ class AuthManager(object): reset the driver if it is not set or a new driver is specified. """ if driver or not getattr(self, 'driver', None): - self.driver = utils.import_class(driver or FLAGS.auth_driver) + self.driver = utils.import_class(driver or FLAGS.auth_driver) def authenticate(self, access, signature, params, verb='GET', server_string='127.0.0.1:8773', path='/', -- cgit From d1977a820db3dad7e907e976c5502ffd37e1b719 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Mon, 9 Aug 2010 13:23:19 +0100 Subject: Move the xenapi top level directory under plugins, as suggested by Jay Pipes. 
--- plugins/xenapi/README | 2 + plugins/xenapi/etc/xapi.d/plugins/objectstore | 231 +++++++++++++++++++++ .../xenapi/etc/xapi.d/plugins/pluginlib_nova.py | 216 +++++++++++++++++++ xenapi/README | 2 - xenapi/etc/xapi.d/plugins/objectstore | 231 --------------------- xenapi/etc/xapi.d/plugins/pluginlib_nova.py | 216 ------------------- 6 files changed, 449 insertions(+), 449 deletions(-) create mode 100644 plugins/xenapi/README create mode 100644 plugins/xenapi/etc/xapi.d/plugins/objectstore create mode 100755 plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py delete mode 100644 xenapi/README delete mode 100644 xenapi/etc/xapi.d/plugins/objectstore delete mode 100755 xenapi/etc/xapi.d/plugins/pluginlib_nova.py diff --git a/plugins/xenapi/README b/plugins/xenapi/README new file mode 100644 index 000000000..1fc67aa7a --- /dev/null +++ b/plugins/xenapi/README @@ -0,0 +1,2 @@ +This directory contains files that are required for the XenAPI support. They +should be installed in the XenServer / Xen Cloud Platform domain 0. diff --git a/plugins/xenapi/etc/xapi.d/plugins/objectstore b/plugins/xenapi/etc/xapi.d/plugins/objectstore new file mode 100644 index 000000000..271e7337f --- /dev/null +++ b/plugins/xenapi/etc/xapi.d/plugins/objectstore @@ -0,0 +1,231 @@ +#!/usr/bin/env python + +# Copyright (c) 2010 Citrix Systems, Inc. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +# +# XenAPI plugin for fetching images from nova-objectstore. +# + +import base64 +import errno +import hmac +import os +import os.path +import sha +import time +import urlparse + +import XenAPIPlugin + +from pluginlib_nova import * +configure_logging('objectstore') + + +KERNEL_DIR = '/boot/guest' + +DOWNLOAD_CHUNK_SIZE = 2 * 1024 * 1024 +SECTOR_SIZE = 512 +MBR_SIZE_SECTORS = 63 +MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE + + +def get_vdi(session, args): + src_url = exists(args, 'src_url') + username = exists(args, 'username') + password = exists(args, 'password') + add_partition = validate_bool(args, 'add_partition', 'false') + + (proto, netloc, url_path, _, _, _) = urlparse.urlparse(src_url) + + sr = find_sr(session) + if sr is None: + raise Exception('Cannot find SR to write VDI to') + + virtual_size = \ + get_content_length(proto, netloc, url_path, username, password) + if virtual_size < 0: + raise Exception('Cannot get VDI size') + + vdi_size = virtual_size + if add_partition: + # Make room for MBR. 
+ vdi_size += MBR_SIZE_BYTES + + vdi = create_vdi(session, sr, src_url, vdi_size, False) + with_vdi_in_dom0(session, vdi, False, + lambda dev: get_vdi_(proto, netloc, url_path, + username, password, add_partition, + virtual_size, '/dev/%s' % dev)) + return session.xenapi.VDI.get_uuid(vdi) + + +def get_vdi_(proto, netloc, url_path, username, password, add_partition, + virtual_size, dest): + + if add_partition: + write_partition(virtual_size, dest) + + offset = add_partition and MBR_SIZE_BYTES or 0 + get(proto, netloc, url_path, username, password, dest, offset) + + +def write_partition(virtual_size, dest): + mbr_last = MBR_SIZE_SECTORS - 1 + primary_first = MBR_SIZE_SECTORS + primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1 + + logging.debug('Writing partition table %d %d to %s...', + primary_first, primary_last, dest) + + result = os.system('parted --script %s mklabel msdos' % dest) + if result != 0: + raise Exception('Failed to mklabel') + result = os.system('parted --script %s mkpart primary %ds %ds' % + (dest, primary_first, primary_last)) + if result != 0: + raise Exception('Failed to mkpart') + + logging.debug('Writing partition table %s done.', dest) + + +def find_sr(session): + host = get_this_host(session) + srs = session.xenapi.SR.get_all() + for sr in srs: + sr_rec = session.xenapi.SR.get_record(sr) + if not ('i18n-key' in sr_rec['other_config'] and + sr_rec['other_config']['i18n-key'] == 'local-storage'): + continue + for pbd in sr_rec['PBDs']: + pbd_rec = session.xenapi.PBD.get_record(pbd) + if pbd_rec['host'] == host: + return sr + return None + + +def get_kernel(session, args): + src_url = exists(args, 'src_url') + username = exists(args, 'username') + password = exists(args, 'password') + + (proto, netloc, url_path, _, _, _) = urlparse.urlparse(src_url) + + dest = os.path.join(KERNEL_DIR, url_path[1:]) + + # Paranoid check against people using ../ to do rude things. 
+ if os.path.commonprefix([KERNEL_DIR, dest]) != KERNEL_DIR: + raise Exception('Illegal destination %s %s', (url_path, dest)) + + dirname = os.path.dirname(dest) + try: + os.makedirs(dirname) + except os.error, e: + if e.errno != errno.EEXIST: + raise + if not os.path.isdir(dirname): + raise Exception('Cannot make directory %s', dirname) + + try: + os.remove(dest) + except: + pass + + get(proto, netloc, url_path, username, password, dest, 0) + + return dest + + +def get_content_length(proto, netloc, url_path, username, password): + headers = make_headers('HEAD', url_path, username, password) + return with_http_connection( + proto, netloc, + lambda conn: get_content_length_(url_path, headers, conn)) + + +def get_content_length_(url_path, headers, conn): + conn.request('HEAD', url_path, None, headers) + response = conn.getresponse() + if response.status != 200: + raise Exception('%d %s' % (response.status, response.reason)) + + return long(response.getheader('Content-Length', -1)) + + +def get(proto, netloc, url_path, username, password, dest, offset): + headers = make_headers('GET', url_path, username, password) + download(proto, netloc, url_path, headers, dest, offset) + + +def make_headers(verb, url_path, username, password): + headers = {} + headers['Date'] = \ + time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) + headers['Authorization'] = \ + 'AWS %s:%s' % (username, + s3_authorization(verb, url_path, password, headers)) + return headers + + +def s3_authorization(verb, path, password, headers): + sha1 = hmac.new(password, digestmod=sha) + sha1.update(plaintext(verb, path, headers)) + return base64.encodestring(sha1.digest()).strip() + + +def plaintext(verb, path, headers): + return '%s\n\n\n%s\n%s' % (verb, + "\n".join([headers[h] for h in headers]), + path) + + +def download(proto, netloc, url_path, headers, dest, offset): + with_http_connection( + proto, netloc, + lambda conn: download_(url_path, dest, offset, headers, conn)) + + +def 
download_(url_path, dest, offset, headers, conn): + conn.request('GET', url_path, None, headers) + response = conn.getresponse() + if response.status != 200: + raise Exception('%d %s' % (response.status, response.reason)) + + length = response.getheader('Content-Length', -1) + + with_file( + dest, 'a', + lambda dest_file: download_all(response, length, dest_file, offset)) + + +def download_all(response, length, dest_file, offset): + dest_file.seek(offset) + i = 0 + while True: + buf = response.read(DOWNLOAD_CHUNK_SIZE) + if buf: + dest_file.write(buf) + else: + return + i += len(buf) + if length != -1 and i >= length: + return + + +if __name__ == '__main__': + XenAPIPlugin.dispatch({'get_vdi': get_vdi, + 'get_kernel': get_kernel}) diff --git a/plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py b/plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py new file mode 100755 index 000000000..2d323a016 --- /dev/null +++ b/plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py @@ -0,0 +1,216 @@ +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# +# Helper functions for the Nova xapi plugins. In time, this will merge +# with the pluginlib.py shipped with xapi, but for now, that file is not +# very stable, so it's easiest just to have a copy of all the functions +# that we need. 
+# + +import httplib +import logging +import logging.handlers +import re +import time + + +##### Logging setup + +def configure_logging(name): + log = logging.getLogger() + log.setLevel(logging.DEBUG) + sysh = logging.handlers.SysLogHandler('/dev/log') + sysh.setLevel(logging.DEBUG) + formatter = logging.Formatter('%s: %%(levelname)-8s %%(message)s' % name) + sysh.setFormatter(formatter) + log.addHandler(sysh) + + +##### Exceptions + +class PluginError(Exception): + """Base Exception class for all plugin errors.""" + def __init__(self, *args): + Exception.__init__(self, *args) + +class ArgumentError(PluginError): + """Raised when required arguments are missing, argument values are invalid, + or incompatible arguments are given. + """ + def __init__(self, *args): + PluginError.__init__(self, *args) + + +##### Helpers + +def ignore_failure(func, *args, **kwargs): + try: + return func(*args, **kwargs) + except XenAPI.Failure, e: + logging.error('Ignoring XenAPI.Failure %s', e) + return None + + +##### Argument validation + +ARGUMENT_PATTERN = re.compile(r'^[a-zA-Z0-9_:\.\-,]+$') + +def validate_exists(args, key, default=None): + """Validates that a string argument to a RPC method call is given, and + matches the shell-safe regex, with an optional default value in case it + does not exist. + + Returns the string. + """ + if key in args: + if len(args[key]) == 0: + raise ArgumentError('Argument %r value %r is too short.' % (key, args[key])) + if not ARGUMENT_PATTERN.match(args[key]): + raise ArgumentError('Argument %r value %r contains invalid characters.' % (key, args[key])) + if args[key][0] == '-': + raise ArgumentError('Argument %r value %r starts with a hyphen.' % (key, args[key])) + return args[key] + elif default is not None: + return default + else: + raise ArgumentError('Argument %s is required.' 
% key) + +def validate_bool(args, key, default=None): + """Validates that a string argument to a RPC method call is a boolean string, + with an optional default value in case it does not exist. + + Returns the python boolean value. + """ + value = validate_exists(args, key, default) + if value.lower() == 'true': + return True + elif value.lower() == 'false': + return False + else: + raise ArgumentError("Argument %s may not take value %r. Valid values are ['true', 'false']." % (key, value)) + +def exists(args, key): + """Validates that a freeform string argument to a RPC method call is given. + Returns the string. + """ + if key in args: + return args[key] + else: + raise ArgumentError('Argument %s is required.' % key) + +def optional(args, key): + """If the given key is in args, return the corresponding value, otherwise + return None""" + return key in args and args[key] or None + + +def get_this_host(session): + return session.xenapi.session.get_this_host(session.handle) + + +def get_domain_0(session): + this_host_ref = get_this_host(session) + expr = 'field "is_control_domain" = "true" and field "resident_on" = "%s"' % this_host_ref + return session.xenapi.VM.get_all_records_where(expr).keys()[0] + + +def create_vdi(session, sr_ref, name_label, virtual_size, read_only): + vdi_ref = session.xenapi.VDI.create( + { 'name_label': name_label, + 'name_description': '', + 'SR': sr_ref, + 'virtual_size': str(virtual_size), + 'type': 'User', + 'sharable': False, + 'read_only': read_only, + 'xenstore_data': {}, + 'other_config': {}, + 'sm_config': {}, + 'tags': [] }) + logging.debug('Created VDI %s (%s, %s, %s) on %s.', vdi_ref, name_label, + virtual_size, read_only, sr_ref) + return vdi_ref + + +def with_vdi_in_dom0(session, vdi, read_only, f): + dom0 = get_domain_0(session) + vbd_rec = {} + vbd_rec['VM'] = dom0 + vbd_rec['VDI'] = vdi + vbd_rec['userdevice'] = 'autodetect' + vbd_rec['bootable'] = False + vbd_rec['mode'] = read_only and 'RO' or 'RW' + vbd_rec['type'] = 
'disk' + vbd_rec['unpluggable'] = True + vbd_rec['empty'] = False + vbd_rec['other_config'] = {} + vbd_rec['qos_algorithm_type'] = '' + vbd_rec['qos_algorithm_params'] = {} + vbd_rec['qos_supported_algorithms'] = [] + logging.debug('Creating VBD for VDI %s ... ', vdi) + vbd = session.xenapi.VBD.create(vbd_rec) + logging.debug('Creating VBD for VDI %s done.', vdi) + try: + logging.debug('Plugging VBD %s ... ', vbd) + session.xenapi.VBD.plug(vbd) + logging.debug('Plugging VBD %s done.', vbd) + return f(session.xenapi.VBD.get_device(vbd)) + finally: + logging.debug('Destroying VBD for VDI %s ... ', vdi) + vbd_unplug_with_retry(session, vbd) + ignore_failure(session.xenapi.VBD.destroy, vbd) + logging.debug('Destroying VBD for VDI %s done.', vdi) + + +def vbd_unplug_with_retry(session, vbd): + """Call VBD.unplug on the given VBD, with a retry if we get + DEVICE_DETACH_REJECTED. For reasons which I don't understand, we're + seeing the device still in use, even when all processes using the device + should be dead.""" + while True: + try: + session.xenapi.VBD.unplug(vbd) + logging.debug('VBD.unplug successful first time.') + return + except XenAPI.Failure, e: + if (len(e.details) > 0 and + e.details[0] == 'DEVICE_DETACH_REJECTED'): + logging.debug('VBD.unplug rejected: retrying...') + time.sleep(1) + elif (len(e.details) > 0 and + e.details[0] == 'DEVICE_ALREADY_DETACHED'): + logging.debug('VBD.unplug successful eventually.') + return + else: + logging.error('Ignoring XenAPI.Failure in VBD.unplug: %s', e) + return + + +def with_http_connection(proto, netloc, f): + conn = (proto == 'https' and + httplib.HTTPSConnection(netloc) or + httplib.HTTPConnection(netloc)) + try: + return f(conn) + finally: + conn.close() + + +def with_file(dest_path, mode, f): + dest = open(dest_path, mode) + try: + return f(dest) + finally: + dest.close() diff --git a/xenapi/README b/xenapi/README deleted file mode 100644 index 1fc67aa7a..000000000 --- a/xenapi/README +++ /dev/null @@ -1,2 +0,0 @@ 
-This directory contains files that are required for the XenAPI support. They -should be installed in the XenServer / Xen Cloud Platform domain 0. diff --git a/xenapi/etc/xapi.d/plugins/objectstore b/xenapi/etc/xapi.d/plugins/objectstore deleted file mode 100644 index 271e7337f..000000000 --- a/xenapi/etc/xapi.d/plugins/objectstore +++ /dev/null @@ -1,231 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2010 Citrix Systems, Inc. -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# -# XenAPI plugin for fetching images from nova-objectstore. 
-# - -import base64 -import errno -import hmac -import os -import os.path -import sha -import time -import urlparse - -import XenAPIPlugin - -from pluginlib_nova import * -configure_logging('objectstore') - - -KERNEL_DIR = '/boot/guest' - -DOWNLOAD_CHUNK_SIZE = 2 * 1024 * 1024 -SECTOR_SIZE = 512 -MBR_SIZE_SECTORS = 63 -MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE - - -def get_vdi(session, args): - src_url = exists(args, 'src_url') - username = exists(args, 'username') - password = exists(args, 'password') - add_partition = validate_bool(args, 'add_partition', 'false') - - (proto, netloc, url_path, _, _, _) = urlparse.urlparse(src_url) - - sr = find_sr(session) - if sr is None: - raise Exception('Cannot find SR to write VDI to') - - virtual_size = \ - get_content_length(proto, netloc, url_path, username, password) - if virtual_size < 0: - raise Exception('Cannot get VDI size') - - vdi_size = virtual_size - if add_partition: - # Make room for MBR. - vdi_size += MBR_SIZE_BYTES - - vdi = create_vdi(session, sr, src_url, vdi_size, False) - with_vdi_in_dom0(session, vdi, False, - lambda dev: get_vdi_(proto, netloc, url_path, - username, password, add_partition, - virtual_size, '/dev/%s' % dev)) - return session.xenapi.VDI.get_uuid(vdi) - - -def get_vdi_(proto, netloc, url_path, username, password, add_partition, - virtual_size, dest): - - if add_partition: - write_partition(virtual_size, dest) - - offset = add_partition and MBR_SIZE_BYTES or 0 - get(proto, netloc, url_path, username, password, dest, offset) - - -def write_partition(virtual_size, dest): - mbr_last = MBR_SIZE_SECTORS - 1 - primary_first = MBR_SIZE_SECTORS - primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1 - - logging.debug('Writing partition table %d %d to %s...', - primary_first, primary_last, dest) - - result = os.system('parted --script %s mklabel msdos' % dest) - if result != 0: - raise Exception('Failed to mklabel') - result = os.system('parted --script %s mkpart primary %ds %ds' 
% - (dest, primary_first, primary_last)) - if result != 0: - raise Exception('Failed to mkpart') - - logging.debug('Writing partition table %s done.', dest) - - -def find_sr(session): - host = get_this_host(session) - srs = session.xenapi.SR.get_all() - for sr in srs: - sr_rec = session.xenapi.SR.get_record(sr) - if not ('i18n-key' in sr_rec['other_config'] and - sr_rec['other_config']['i18n-key'] == 'local-storage'): - continue - for pbd in sr_rec['PBDs']: - pbd_rec = session.xenapi.PBD.get_record(pbd) - if pbd_rec['host'] == host: - return sr - return None - - -def get_kernel(session, args): - src_url = exists(args, 'src_url') - username = exists(args, 'username') - password = exists(args, 'password') - - (proto, netloc, url_path, _, _, _) = urlparse.urlparse(src_url) - - dest = os.path.join(KERNEL_DIR, url_path[1:]) - - # Paranoid check against people using ../ to do rude things. - if os.path.commonprefix([KERNEL_DIR, dest]) != KERNEL_DIR: - raise Exception('Illegal destination %s %s', (url_path, dest)) - - dirname = os.path.dirname(dest) - try: - os.makedirs(dirname) - except os.error, e: - if e.errno != errno.EEXIST: - raise - if not os.path.isdir(dirname): - raise Exception('Cannot make directory %s', dirname) - - try: - os.remove(dest) - except: - pass - - get(proto, netloc, url_path, username, password, dest, 0) - - return dest - - -def get_content_length(proto, netloc, url_path, username, password): - headers = make_headers('HEAD', url_path, username, password) - return with_http_connection( - proto, netloc, - lambda conn: get_content_length_(url_path, headers, conn)) - - -def get_content_length_(url_path, headers, conn): - conn.request('HEAD', url_path, None, headers) - response = conn.getresponse() - if response.status != 200: - raise Exception('%d %s' % (response.status, response.reason)) - - return long(response.getheader('Content-Length', -1)) - - -def get(proto, netloc, url_path, username, password, dest, offset): - headers = make_headers('GET', 
url_path, username, password) - download(proto, netloc, url_path, headers, dest, offset) - - -def make_headers(verb, url_path, username, password): - headers = {} - headers['Date'] = \ - time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) - headers['Authorization'] = \ - 'AWS %s:%s' % (username, - s3_authorization(verb, url_path, password, headers)) - return headers - - -def s3_authorization(verb, path, password, headers): - sha1 = hmac.new(password, digestmod=sha) - sha1.update(plaintext(verb, path, headers)) - return base64.encodestring(sha1.digest()).strip() - - -def plaintext(verb, path, headers): - return '%s\n\n\n%s\n%s' % (verb, - "\n".join([headers[h] for h in headers]), - path) - - -def download(proto, netloc, url_path, headers, dest, offset): - with_http_connection( - proto, netloc, - lambda conn: download_(url_path, dest, offset, headers, conn)) - - -def download_(url_path, dest, offset, headers, conn): - conn.request('GET', url_path, None, headers) - response = conn.getresponse() - if response.status != 200: - raise Exception('%d %s' % (response.status, response.reason)) - - length = response.getheader('Content-Length', -1) - - with_file( - dest, 'a', - lambda dest_file: download_all(response, length, dest_file, offset)) - - -def download_all(response, length, dest_file, offset): - dest_file.seek(offset) - i = 0 - while True: - buf = response.read(DOWNLOAD_CHUNK_SIZE) - if buf: - dest_file.write(buf) - else: - return - i += len(buf) - if length != -1 and i >= length: - return - - -if __name__ == '__main__': - XenAPIPlugin.dispatch({'get_vdi': get_vdi, - 'get_kernel': get_kernel}) diff --git a/xenapi/etc/xapi.d/plugins/pluginlib_nova.py b/xenapi/etc/xapi.d/plugins/pluginlib_nova.py deleted file mode 100755 index 2d323a016..000000000 --- a/xenapi/etc/xapi.d/plugins/pluginlib_nova.py +++ /dev/null @@ -1,216 +0,0 @@ -# Copyright (c) 2010 Citrix Systems, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# -# Helper functions for the Nova xapi plugins. In time, this will merge -# with the pluginlib.py shipped with xapi, but for now, that file is not -# very stable, so it's easiest just to have a copy of all the functions -# that we need. -# - -import httplib -import logging -import logging.handlers -import re -import time - - -##### Logging setup - -def configure_logging(name): - log = logging.getLogger() - log.setLevel(logging.DEBUG) - sysh = logging.handlers.SysLogHandler('/dev/log') - sysh.setLevel(logging.DEBUG) - formatter = logging.Formatter('%s: %%(levelname)-8s %%(message)s' % name) - sysh.setFormatter(formatter) - log.addHandler(sysh) - - -##### Exceptions - -class PluginError(Exception): - """Base Exception class for all plugin errors.""" - def __init__(self, *args): - Exception.__init__(self, *args) - -class ArgumentError(PluginError): - """Raised when required arguments are missing, argument values are invalid, - or incompatible arguments are given. 
- """ - def __init__(self, *args): - PluginError.__init__(self, *args) - - -##### Helpers - -def ignore_failure(func, *args, **kwargs): - try: - return func(*args, **kwargs) - except XenAPI.Failure, e: - logging.error('Ignoring XenAPI.Failure %s', e) - return None - - -##### Argument validation - -ARGUMENT_PATTERN = re.compile(r'^[a-zA-Z0-9_:\.\-,]+$') - -def validate_exists(args, key, default=None): - """Validates that a string argument to a RPC method call is given, and - matches the shell-safe regex, with an optional default value in case it - does not exist. - - Returns the string. - """ - if key in args: - if len(args[key]) == 0: - raise ArgumentError('Argument %r value %r is too short.' % (key, args[key])) - if not ARGUMENT_PATTERN.match(args[key]): - raise ArgumentError('Argument %r value %r contains invalid characters.' % (key, args[key])) - if args[key][0] == '-': - raise ArgumentError('Argument %r value %r starts with a hyphen.' % (key, args[key])) - return args[key] - elif default is not None: - return default - else: - raise ArgumentError('Argument %s is required.' % key) - -def validate_bool(args, key, default=None): - """Validates that a string argument to a RPC method call is a boolean string, - with an optional default value in case it does not exist. - - Returns the python boolean value. - """ - value = validate_exists(args, key, default) - if value.lower() == 'true': - return True - elif value.lower() == 'false': - return False - else: - raise ArgumentError("Argument %s may not take value %r. Valid values are ['true', 'false']." % (key, value)) - -def exists(args, key): - """Validates that a freeform string argument to a RPC method call is given. - Returns the string. - """ - if key in args: - return args[key] - else: - raise ArgumentError('Argument %s is required.' 
% key) - -def optional(args, key): - """If the given key is in args, return the corresponding value, otherwise - return None""" - return key in args and args[key] or None - - -def get_this_host(session): - return session.xenapi.session.get_this_host(session.handle) - - -def get_domain_0(session): - this_host_ref = get_this_host(session) - expr = 'field "is_control_domain" = "true" and field "resident_on" = "%s"' % this_host_ref - return session.xenapi.VM.get_all_records_where(expr).keys()[0] - - -def create_vdi(session, sr_ref, name_label, virtual_size, read_only): - vdi_ref = session.xenapi.VDI.create( - { 'name_label': name_label, - 'name_description': '', - 'SR': sr_ref, - 'virtual_size': str(virtual_size), - 'type': 'User', - 'sharable': False, - 'read_only': read_only, - 'xenstore_data': {}, - 'other_config': {}, - 'sm_config': {}, - 'tags': [] }) - logging.debug('Created VDI %s (%s, %s, %s) on %s.', vdi_ref, name_label, - virtual_size, read_only, sr_ref) - return vdi_ref - - -def with_vdi_in_dom0(session, vdi, read_only, f): - dom0 = get_domain_0(session) - vbd_rec = {} - vbd_rec['VM'] = dom0 - vbd_rec['VDI'] = vdi - vbd_rec['userdevice'] = 'autodetect' - vbd_rec['bootable'] = False - vbd_rec['mode'] = read_only and 'RO' or 'RW' - vbd_rec['type'] = 'disk' - vbd_rec['unpluggable'] = True - vbd_rec['empty'] = False - vbd_rec['other_config'] = {} - vbd_rec['qos_algorithm_type'] = '' - vbd_rec['qos_algorithm_params'] = {} - vbd_rec['qos_supported_algorithms'] = [] - logging.debug('Creating VBD for VDI %s ... ', vdi) - vbd = session.xenapi.VBD.create(vbd_rec) - logging.debug('Creating VBD for VDI %s done.', vdi) - try: - logging.debug('Plugging VBD %s ... ', vbd) - session.xenapi.VBD.plug(vbd) - logging.debug('Plugging VBD %s done.', vbd) - return f(session.xenapi.VBD.get_device(vbd)) - finally: - logging.debug('Destroying VBD for VDI %s ... 
', vdi) - vbd_unplug_with_retry(session, vbd) - ignore_failure(session.xenapi.VBD.destroy, vbd) - logging.debug('Destroying VBD for VDI %s done.', vdi) - - -def vbd_unplug_with_retry(session, vbd): - """Call VBD.unplug on the given VBD, with a retry if we get - DEVICE_DETACH_REJECTED. For reasons which I don't understand, we're - seeing the device still in use, even when all processes using the device - should be dead.""" - while True: - try: - session.xenapi.VBD.unplug(vbd) - logging.debug('VBD.unplug successful first time.') - return - except XenAPI.Failure, e: - if (len(e.details) > 0 and - e.details[0] == 'DEVICE_DETACH_REJECTED'): - logging.debug('VBD.unplug rejected: retrying...') - time.sleep(1) - elif (len(e.details) > 0 and - e.details[0] == 'DEVICE_ALREADY_DETACHED'): - logging.debug('VBD.unplug successful eventually.') - return - else: - logging.error('Ignoring XenAPI.Failure in VBD.unplug: %s', e) - return - - -def with_http_connection(proto, netloc, f): - conn = (proto == 'https' and - httplib.HTTPSConnection(netloc) or - httplib.HTTPConnection(netloc)) - try: - return f(conn) - finally: - conn.close() - - -def with_file(dest_path, mode, f): - dest = open(dest_path, mode) - try: - return f(dest) - finally: - dest.close() -- cgit From 09b5be11e7b61aa0ae344cec669e4f62dd18c0b2 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Mon, 9 Aug 2010 17:45:00 -0400 Subject: Run correctly even if called while in tools/ directory, as 'python install_venv.py' --- tools/install_venv.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/install_venv.py b/tools/install_venv.py index 0b35fc8e9..96bb12efb 100644 --- a/tools/install_venv.py +++ b/tools/install_venv.py @@ -7,7 +7,7 @@ import subprocess import sys -ROOT = os.path.dirname(os.path.dirname(__file__)) +ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) VENV = os.path.join(ROOT, '.nova-venv') PIP_REQUIRES = os.path.join(ROOT, 'tools', 'pip-requires') 
TWISTED_NOVA='http://nova.openstack.org/Twisted-10.0.0Nova.tar.gz' -- cgit From e3f8aa57873b7de69980c57cd05e3f1bdf6f7d08 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Mon, 9 Aug 2010 23:22:59 +0100 Subject: Implement the same fix as lp:~vishvananda/nova/fix-curl-project, but for virt.xenapi. --- nova/virt/xenapi.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index c3e84c2b9..9fe15644f 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -92,12 +92,13 @@ class XenAPIConnection(object): mac_address = '' user = AuthManager().get_user(instance.datamodel['user_id']) + project = AuthManager().get_project(instance.datamodel['project_id']) vdi_uuid = yield self.fetch_image( - instance.datamodel['image_id'], user, True) + instance.datamodel['image_id'], user, project, True) kernel = yield self.fetch_image( - instance.datamodel['kernel_id'], user, False) + instance.datamodel['kernel_id'], user, project, False) ramdisk = yield self.fetch_image( - instance.datamodel['ramdisk_id'], user, False) + instance.datamodel['ramdisk_id'], user, project, False) vdi_ref = yield self._conn.xenapi.VDI.get_by_uuid(vdi_uuid) vm_ref = yield self.create_vm(instance, kernel, ramdisk) @@ -195,17 +196,18 @@ class XenAPIConnection(object): raise Exception('Found no network for bridge %s' % bridge) - def fetch_image(self, image, user, use_sr): + def fetch_image(self, image, user, project, use_sr): """use_sr: True to put the image as a VDI in an SR, False to place it on dom0's filesystem. 
The former is for VM disks, the latter for its kernel and ramdisk (if external kernels are being used).""" url = images.image_url(image) - logging.debug("Asking xapi to fetch %s as %s" % (url, user.access)) + access = AuthManager().get_access_key(user, project) + logging.debug("Asking xapi to fetch %s as %s" % (url, access)) fn = use_sr and 'get_vdi' or 'get_kernel' args = {} args['src_url'] = url - args['username'] = user.access + args['username'] = access args['password'] = user.secret if use_sr: args['add_partition'] = 'true' -- cgit From bd0645153fb1b60a551c50c657a7837713da54a9 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 9 Aug 2010 15:34:05 -0700 Subject: initial cleanup of tests for network --- nova/network/model.py | 39 ++++++++------- nova/network/vpn.py | 26 ++++++---- nova/tests/network_unittest.py | 106 ++++++++++++++++++++++++++--------------- 3 files changed, 107 insertions(+), 64 deletions(-) diff --git a/nova/network/model.py b/nova/network/model.py index daac035e4..a70671632 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -141,7 +141,6 @@ class Vlan(datastore.BasicModel): class BaseNetwork(datastore.BasicModel): override_type = 'network' - NUM_STATIC_IPS = 3 # Network, Gateway, and CloudPipe @property def identifier(self): @@ -215,16 +214,19 @@ class BaseNetwork(datastore.BasicModel): @property def available(self): - # the .2 address is always CloudPipe - # and the top are for vpn clients - for idx in range(self.num_static_ips, len(self.network)-(1 + FLAGS.cnt_vpn_clients)): + for idx in range(self.num_bottom_reserved_ips, + len(self.network) - self.num_top_reserved_ips): address = str(self.network[idx]) if not address in self.hosts.keys(): yield address @property - def num_static_ips(self): - return BaseNetwork.NUM_STATIC_IPS + def num_bottom_reserved_ips(self): + return 2 # Network, Gateway + + @property + def num_top_reserved_ips(self): + return 1 # Broadcast def allocate_ip(self, user_id, project_id, mac): for 
address in self.available: @@ -306,9 +308,9 @@ class DHCPNetwork(BridgedNetwork): def __init__(self, *args, **kwargs): super(DHCPNetwork, self).__init__(*args, **kwargs) # logging.debug("Initing DHCPNetwork object...") - self.dhcp_listen_address = self.network[1] - self.dhcp_range_start = self.network[3] - self.dhcp_range_end = self.network[-(1 + FLAGS.cnt_vpn_clients)] + self.dhcp_listen_address = self.gateway + self.dhcp_range_start = self.network[self.num_bottom_reserved_ips] + self.dhcp_range_end = self.network[-self.num_top_reserved_ips] try: os.makedirs(FLAGS.networks_path) # NOTE(todd): I guess this is a lazy way to not have to check if the @@ -318,6 +320,16 @@ class DHCPNetwork(BridgedNetwork): except Exception, err: pass + @property + def num_bottom_reserved_ips(self): + # For cloudpipe + return super(DHCPNetwork, self).num_bottom_reserved_ips + 1 + + @property + def num_top_reserved_ips(self): + return super(DHCPNetwork, self).num_top_reserved_ips + \ + FLAGS.cnt_vpn_clients + def express(self, address=None): super(DHCPNetwork, self).express(address=address) if len(self.assigned) > 0: @@ -388,13 +400,6 @@ class PublicNetworkController(BaseNetwork): self.save() self.express() - @property - def available(self): - for idx in range(2, len(self.network)-1): - address = str(self.network[idx]) - if not address in self.hosts.keys(): - yield address - @property def host_objs(self): for address in self.assigned: @@ -415,7 +420,7 @@ class PublicNetworkController(BaseNetwork): def deallocate_ip(self, ip_str): # NOTE(vish): cleanup is now done on release by the parent class - self.release_ip(ip_str) + self.release_ip(ip_str) def associate_address(self, public_ip, private_ip, instance_id): if not public_ip in self.assigned: diff --git a/nova/network/vpn.py b/nova/network/vpn.py index cec84287c..1b6dd7a56 100644 --- a/nova/network/vpn.py +++ b/nova/network/vpn.py @@ -74,23 +74,31 @@ class NetworkData(datastore.BasicModel): # similar to an association, but we are just # 
storing a set of values instead of keys that # should be turned into objects. - redis = datastore.Redis.instance() - key = 'ip:%s:ports' % ip - # TODO(vish): these ports should be allocated through an admin - # command instead of a flag - if (not redis.exists(key) and - not redis.exists(cls._redis_association_name('ip', ip))): - for i in range(FLAGS.vpn_start_port, FLAGS.vpn_end_port + 1): - redis.sadd(key, i) + cls._ensure_set_exists(ip) - port = redis.spop(key) + port = datastore.Redis.instance().spop(cls._redis_ports_key(ip)) if not port: raise NoMorePorts() return port + @classmethod + def _redis_ports_key(cls, ip): + return 'ip:%s:ports' % ip + + @classmethod + def _ensure_set_exists(cls, ip): + # TODO(vish): these ports should be allocated through an admin + # command instead of a flag + redis = datastore.Redis.instance() + if (not redis.exists(cls._redis_ports_key(ip)) and + not redis.exists(cls._redis_association_name('ip', ip))): + for i in range(FLAGS.vpn_start_port, FLAGS.vpn_end_port + 1): + redis.sadd(cls._redis_ports_key(ip), i) + @classmethod def num_ports_for_ip(cls, ip): """Calculates the number of free ports for a given ip""" + cls._ensure_set_exists(ip) return datastore.Redis.instance().scard('ip:%s:ports' % ip) @property diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index 879ee02a4..94d10200e 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -54,6 +54,7 @@ class NetworkTestCase(test.TrialTestCase): self.projects.append(self.manager.create_project(name, 'netuser', name)) + vpn.NetworkData.create(self.projects[i].id) self.network = model.PublicNetworkController() self.service = service.VlanNetworkService() @@ -70,7 +71,7 @@ class NetworkTestCase(test.TrialTestCase): self.assertTrue(IPy.IP(address) in self.network.network) def test_allocate_deallocate_fixed_ip(self): - result = yield self.service.allocate_fixed_ip( + result = self.service.allocate_fixed_ip( self.user.id, 
self.projects[0].id) address = result['private_dns_name'] mac = result['mac_address'] @@ -89,11 +90,11 @@ class NetworkTestCase(test.TrialTestCase): def test_range_allocation(self): hostname = "test-host" - result = yield self.service.allocate_fixed_ip( + result = self.service.allocate_fixed_ip( self.user.id, self.projects[0].id) mac = result['mac_address'] address = result['private_dns_name'] - result = yield self.service.allocate_fixed_ip( + result = self.service.allocate_fixed_ip( self.user, self.projects[1].id) secondmac = result['mac_address'] secondaddress = result['private_dns_name'] @@ -123,21 +124,21 @@ class NetworkTestCase(test.TrialTestCase): self.assertEqual(False, is_in_project(secondaddress, self.projects[1].id)) def test_subnet_edge(self): - result = yield self.service.allocate_fixed_ip(self.user.id, + result = self.service.allocate_fixed_ip(self.user.id, self.projects[0].id) firstaddress = result['private_dns_name'] hostname = "toomany-hosts" for i in range(1,5): project_id = self.projects[i].id - result = yield self.service.allocate_fixed_ip( + result = self.service.allocate_fixed_ip( self.user, project_id) mac = result['mac_address'] address = result['private_dns_name'] - result = yield self.service.allocate_fixed_ip( + result = self.service.allocate_fixed_ip( self.user, project_id) mac2 = result['mac_address'] address2 = result['private_dns_name'] - result = yield self.service.allocate_fixed_ip( + result = self.service.allocate_fixed_ip( self.user, project_id) mac3 = result['mac_address'] address3 = result['private_dns_name'] @@ -155,8 +156,7 @@ class NetworkTestCase(test.TrialTestCase): rv = self.service.deallocate_fixed_ip(firstaddress) self.dnsmasq.release_ip(mac, firstaddress, hostname, net.bridge_name) - def test_212_vpn_ip_and_port_looks_valid(self): - vpn.NetworkData.create(self.projects[0].id) + def test_vpn_ip_and_port_looks_valid(self): self.assert_(self.projects[0].vpn_ip) self.assert_(self.projects[0].vpn_port >= 
FLAGS.vpn_start_port) self.assert_(self.projects[0].vpn_port <= FLAGS.vpn_end_port) @@ -169,55 +169,85 @@ class NetworkTestCase(test.TrialTestCase): for network_datum in vpns: network_datum.destroy() - def test_release_before_deallocate(self): - pass + def test_ips_are_reused(self): + """Makes sure that ip addresses that are deallocated get reused""" - def test_deallocate_before_issued(self): - pass + result = self.service.allocate_fixed_ip( + self.user.id, self.projects[0].id) + mac = result['mac_address'] + address = result['private_dns_name'] - def test_too_many_addresses(self): - """ - Here, we test that a proper NoMoreAddresses exception is raised. + hostname = "reuse-host" + net = model.get_project_network(self.projects[0].id, "default") + + self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name) + rv = self.service.deallocate_fixed_ip(address) + self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name) - However, the number of available IP addresses depends on the test + result = self.service.allocate_fixed_ip( + self.user, self.projects[0].id) + secondmac = result['mac_address'] + secondaddress = result['private_dns_name'] + self.assertEqual(address, secondaddress) + rv = self.service.deallocate_fixed_ip(secondaddress) + self.dnsmasq.issue_ip(secondmac, + secondaddress, + hostname, + net.bridge_name) + self.dnsmasq.release_ip(secondmac, + secondaddress, + hostname, + net.bridge_name) + + def test_available_ips(self): + """Make sure the number of available ips for the network is correct + + The number of available IP addresses depends on the test environment's setup. Network size is set in test fixture's setUp method. 
- There are FLAGS.cnt_vpn_clients addresses reserved for VPN (NUM_RESERVED_VPN_IPS) - - And there are NUM_STATIC_IPS that are always reserved by Nova for the necessary - services (gateway, CloudPipe, etc) - - So we should get flags.network_size - (NUM_STATIC_IPS + - NUM_PREALLOCATED_IPS + - NUM_RESERVED_VPN_IPS) - usable addresses + There are ips reserved at the bottom and top of the range. + services (network, gateway, CloudPipe, broadcast) """ net = model.get_project_network(self.projects[0].id, "default") - - # Determine expected number of available IP addresses - num_static_ips = net.num_static_ips num_preallocated_ips = len(net.hosts.keys()) - num_reserved_vpn_ips = flags.FLAGS.cnt_vpn_clients - num_available_ips = flags.FLAGS.network_size - (num_static_ips + + num_available_ips = flags.FLAGS.network_size - (net.num_bottom_reserved_ips + num_preallocated_ips + - num_reserved_vpn_ips) + net.num_top_reserved_ips) + self.assertEqual(num_available_ips, len(list(net.available))) + + def test_too_many_addresses(self): + """Test for a NoMoreAddresses exception when all fixed ips are used. 
+ """ + net = model.get_project_network(self.projects[0].id, "default") hostname = "toomany-hosts" macs = {} addresses = {} - for i in range(0, (num_available_ips - 1)): - result = yield self.service.allocate_fixed_ip(self.user.id, self.projects[0].id) + # Number of availaible ips is len of the available list + num_available_ips = len(list(net.available)) + for i in range(num_available_ips): + result = self.service.allocate_fixed_ip(self.user.id, + self.projects[0].id) macs[i] = result['mac_address'] addresses[i] = result['private_dns_name'] - self.dnsmasq.issue_ip(macs[i], addresses[i], hostname, net.bridge_name) + self.dnsmasq.issue_ip(macs[i], + addresses[i], + hostname, + net.bridge_name) - self.assertFailure(self.service.allocate_fixed_ip(self.user.id, self.projects[0].id), NoMoreAddresses) + self.assertEqual(len(list(net.available)), 0) + self.assertRaises(NoMoreAddresses, self.service.allocate_fixed_ip, + self.user.id, self.projects[0].id) - for i in range(0, (num_available_ips - 1)): + for i in range(len(addresses)): rv = self.service.deallocate_fixed_ip(addresses[i]) - self.dnsmasq.release_ip(macs[i], addresses[i], hostname, net.bridge_name) + self.dnsmasq.release_ip(macs[i], + addresses[i], + hostname, + net.bridge_name) + self.assertEqual(len(list(net.available)), num_available_ips) def is_in_project(address, project_id): return address in model.get_project_network(project_id).list_addresses() -- cgit From 8990a62b0e654dcacac06246733a17fa0502bcc7 Mon Sep 17 00:00:00 2001 From: Joe Heck Date: Mon, 9 Aug 2010 17:53:10 -0700 Subject: fixing - removing unused imports per Eric & Jay review --- nova/auth/manager.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/nova/auth/manager.py b/nova/auth/manager.py index e5efbca24..6d71a7ad6 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -35,10 +35,6 @@ from nova import utils from nova.auth import signer from nova.network import vpn -#unused imports -#from nova import datastore -#from nova.auth 
import ldapdriver # for flags -#from nova import objectstore # for flags FLAGS = flags.FLAGS -- cgit From d8c1a74342af9af442e4ef0508fa1f66eac48bb5 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 9 Aug 2010 23:02:06 -0700 Subject: fix releasing to work properly --- nova/network/model.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/network/model.py b/nova/network/model.py index a70671632..109ae64c7 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -243,12 +243,12 @@ class BaseNetwork(datastore.BasicModel): def release_ip(self, ip_str): if not ip_str in self.assigned: raise exception.AddressNotAllocated() - self.deexpress(address=ip_str) self._rem_host(ip_str) + self.deexpress(address=ip_str) def deallocate_ip(self, ip_str): - # Do nothing for now, cleanup on ip release - pass + # go ahead and remove ip + self.release_ip(ip_str) def list_addresses(self): for address in self.hosts: -- cgit From 8c7558ed5ae7dd0b78a91a385dbd9b044ec7c8db Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> Date: Tue, 10 Aug 2010 12:44:38 -0400 Subject: Changes the run_tests.sh and /tools/install_venv.py scripts to be more user-friendly and not depend on PIP while not in the virtual environment. Running run_tests.sh should not just work out of the box on all systems supporting easy_install... --- run_tests.sh | 7 +++--- tools/install_venv.py | 59 ++++++++++++++++++++++++++++++++++++++------------- 2 files changed, 47 insertions(+), 19 deletions(-) diff --git a/run_tests.sh b/run_tests.sh index 9b2de7aea..85d7c8834 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -6,8 +6,7 @@ with_venv=tools/with_venv.sh if [ -e ${venv} ]; then ${with_venv} python run_tests.py $@ else - echo "You need to install the Nova virtualenv before you can run this." 
- echo "" - echo "Please run tools/install_venv.py" - exit 1 + echo "No virtual environment found...creating one" + python tools/install_venv.py + ${with_venv} python run_tests.py $@ fi diff --git a/tools/install_venv.py b/tools/install_venv.py index 0b35fc8e9..adf24b365 100644 --- a/tools/install_venv.py +++ b/tools/install_venv.py @@ -1,3 +1,23 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Copyright 2010 OpenStack, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ """ Installation script for Nova's development virtualenv """ @@ -12,15 +32,14 @@ VENV = os.path.join(ROOT, '.nova-venv') PIP_REQUIRES = os.path.join(ROOT, 'tools', 'pip-requires') TWISTED_NOVA='http://nova.openstack.org/Twisted-10.0.0Nova.tar.gz' - def die(message, *args): print >>sys.stderr, message % args sys.exit(1) - def run_command(cmd, redirect_output=True, error_ok=False): - # Useful for debugging: - #print >>sys.stderr, ' '.join(cmd) + """Runs a command in an out-of-process shell, returning the + output of that command + """ if redirect_output: stdout = subprocess.PIPE else: @@ -32,33 +51,43 @@ def run_command(cmd, redirect_output=True, error_ok=False): die('Command "%s" failed.\n%s', ' '.join(cmd), output) return output +HAS_EASY_INSTALL = bool(run_command(['which', 'easy_install']).strip()) +HAS_VIRTUALENV = bool(run_command(['which', 'virtualenv']).strip()) def check_dependencies(): - """Make sure pip and virtualenv are on the path.""" - print 'Checking for pip...', - if not run_command(['which', 'pip']).strip(): - die('ERROR: pip not found.\n\nNova development requires pip,' - ' please install it using your favorite package management tool') - print 'done.' + """Make sure virtualenv is in the path.""" print 'Checking for virtualenv...', - if not run_command(['which', 'virtualenv']).strip(): - die('ERROR: virtualenv not found.\n\nNova development requires virtualenv,' - ' please install it using your favorite package management tool') + if not HAS_VIRTUALENV: + print 'not found.' + # Try installing it via easy_install... + if HAS_EASY_INSTALL: + if not run_command(['which', 'easy_install']): + print 'Installing virtualenv via easy_install...', + die('ERROR: virtualenv not found.\n\nNova development requires virtualenv,' + ' please install it using your favorite package management tool') + print 'done.' print 'done.' 
def create_virtualenv(venv=VENV): + """Creates the virtual environment and installs PIP only into the + virtual environment + """ print 'Creating venv...', run_command(['virtualenv', '-q', '--no-site-packages', VENV]) print 'done.' + print 'Installing pip in virtualenv...', + if not run_command(['tools/with_venv.sh', 'easy_install', 'pip']).strip(): + die("Failed to install pip.") + print 'done.' def install_dependencies(venv=VENV): print 'Installing dependencies with pip (this can take a while)...' - run_command(['pip', 'install', '-E', venv, '-r', PIP_REQUIRES], + run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, '-r', PIP_REQUIRES], redirect_output=False) - run_command(['pip', 'install', '-E', venv, TWISTED_NOVA], + run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, TWISTED_NOVA], redirect_output=False) -- cgit From f5695429db27110d8a95df3b66e4045c59d88c6a Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> Date: Tue, 10 Aug 2010 12:51:03 -0400 Subject: Quick fix on location of printouts when trying to install virtualenv. --- tools/install_venv.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/install_venv.py b/tools/install_venv.py index adf24b365..494535b5e 100644 --- a/tools/install_venv.py +++ b/tools/install_venv.py @@ -36,6 +36,7 @@ def die(message, *args): print >>sys.stderr, message % args sys.exit(1) + def run_command(cmd, redirect_output=True, error_ok=False): """Runs a command in an out-of-process shell, returning the output of that command @@ -51,9 +52,11 @@ def run_command(cmd, redirect_output=True, error_ok=False): die('Command "%s" failed.\n%s', ' '.join(cmd), output) return output + HAS_EASY_INSTALL = bool(run_command(['which', 'easy_install']).strip()) HAS_VIRTUALENV = bool(run_command(['which', 'virtualenv']).strip()) + def check_dependencies(): """Make sure virtualenv is in the path.""" @@ -62,8 +65,8 @@ def check_dependencies(): print 'not found.' 
# Try installing it via easy_install... if HAS_EASY_INSTALL: + print 'Installing virtualenv via easy_install...', if not run_command(['which', 'easy_install']): - print 'Installing virtualenv via easy_install...', die('ERROR: virtualenv not found.\n\nNova development requires virtualenv,' ' please install it using your favorite package management tool') print 'done.' -- cgit From 7a1709561f1fed6e46a1c31aaa8e3ac54b9eebd3 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 10 Aug 2010 10:25:52 -0700 Subject: rename create_zip to zipfile so lazy match works --- bin/nova-manage | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/nova-manage b/bin/nova-manage index 2dd569df0..6af092922 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -203,7 +203,7 @@ class ProjectCommands(object): arguments: project user""" self.manager.remove_from_project(user, project) - def create_zip(self, project_id, user_id, filename='nova.zip'): + def zipfile(self, project_id, user_id, filename='nova.zip'): """Exports credentials for project to a zip file arguments: project_id user_id [filename='nova.zip]""" zip_file = self.manager.get_credentials(project_id, user_id) -- cgit From fadaf1d9842abb991b093b04c031fa9947d82fbc Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 10 Aug 2010 11:48:14 -0700 Subject: pep8 cleanup --- nova/network/exception.py | 5 ++++- nova/network/linux_net.py | 46 ++++++++++++++++++++++++++++---------- nova/network/model.py | 39 +++++++++++++++++++------------- nova/network/service.py | 13 +++++++---- nova/network/vpn.py | 2 +- nova/tests/network_unittest.py | 50 +++++++++++++++++++++++------------------- 6 files changed, 100 insertions(+), 55 deletions(-) diff --git a/nova/network/exception.py b/nova/network/exception.py index 5722e9672..884ea54b4 100644 --- a/nova/network/exception.py +++ b/nova/network/exception.py @@ -26,15 +26,18 @@ from nova.exception import Error class NoMoreAddresses(Error): pass + class 
AddressNotAllocated(Error): pass + class AddressAlreadyAssociated(Error): pass + class AddressNotAssociated(Error): pass + class NotValidNetworkSize(Error): pass - diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 4a4b4c8a8..35bfded49 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -23,15 +23,16 @@ import subprocess # todo(ja): does the definition of network_path belong here? +from nova import flags from nova import utils -from nova import flags -FLAGS=flags.FLAGS +FLAGS = flags.FLAGS flags.DEFINE_string('dhcpbridge_flagfile', '/etc/nova/nova-dhcpbridge.conf', 'location of flagfile for dhcpbridge') + def execute(cmd, addl_env=None): if FLAGS.fake_network: logging.debug("FAKE NET: %s" % cmd) @@ -39,11 +40,13 @@ def execute(cmd, addl_env=None): else: return utils.execute(cmd, addl_env=addl_env) + def runthis(desc, cmd): if FLAGS.fake_network: return execute(cmd) else: - return utils.runthis(desc,cmd) + return utils.runthis(desc, cmd) + def Popen(cmd): if FLAGS.fake_network: @@ -56,18 +59,25 @@ def device_exists(device): (out, err) = execute("ifconfig %s" % device) return not err + def confirm_rule(cmd): execute("sudo iptables --delete %s" % (cmd)) execute("sudo iptables -I %s" % (cmd)) + def remove_rule(cmd): execute("sudo iptables --delete %s" % (cmd)) + def bind_public_ip(ip, interface): - runthis("Binding IP to interface: %s", "sudo ip addr add %s dev %s" % (ip, interface)) + runthis("Binding IP to interface: %s", + "sudo ip addr add %s dev %s" % (ip, interface)) + def unbind_public_ip(ip, interface): - runthis("Binding IP to interface: %s", "sudo ip addr del %s dev %s" % (ip, interface)) + runthis("Binding IP to interface: %s", + "sudo ip addr del %s dev %s" % (ip, interface)) + def vlan_create(net): """ create a vlan on on a bridge device unless vlan already exists """ @@ -77,6 +87,7 @@ def vlan_create(net): execute("sudo vconfig add %s %s" % (FLAGS.bridge_dev, net['vlan'])) execute("sudo ifconfig vlan%s up" % 
(net['vlan'])) + def bridge_create(net): """ create a bridge on a vlan unless it already exists """ if not device_exists(net['bridge_name']): @@ -85,14 +96,17 @@ def bridge_create(net): execute("sudo brctl setfd %s 0" % (net.bridge_name)) # execute("sudo brctl setageing %s 10" % (net.bridge_name)) execute("sudo brctl stp %s off" % (net['bridge_name'])) - execute("sudo brctl addif %s vlan%s" % (net['bridge_name'], net['vlan'])) + execute("sudo brctl addif %s vlan%s" % (net['bridge_name'], + net['vlan'])) if net.bridge_gets_ip: execute("sudo ifconfig %s %s broadcast %s netmask %s up" % \ (net['bridge_name'], net.gateway, net.broadcast, net.netmask)) - confirm_rule("FORWARD --in-interface %s -j ACCEPT" % (net['bridge_name'])) + confirm_rule("FORWARD --in-interface %s -j ACCEPT" % + (net['bridge_name'])) else: execute("sudo ifconfig %s up" % net['bridge_name']) + def dnsmasq_cmd(net): cmd = ['sudo -E dnsmasq', ' --strict-order', @@ -107,12 +121,15 @@ def dnsmasq_cmd(net): ' --leasefile-ro'] return ''.join(cmd) + def hostDHCP(network, host, mac): - idx = host.split(".")[-1] # Logically, the idx of instances they've launched in this net + # Logically, the idx of instances they've launched in this net + idx = host.split(".")[-1] return "%s,%s-%s-%s.novalocal,%s" % \ (mac, network['user_id'], network['vlan'], idx, host) -# todo(ja): if the system has restarted or pid numbers have wrapped + +# TODO(ja): if the system has restarted or pid numbers have wrapped # then you cannot be certain that the pid refers to the # dnsmasq. As well, sending a HUP only reloads the hostfile, # so any configuration options (like dchp-range, vlan, ...) 
@@ -125,13 +142,15 @@ def start_dnsmasq(network): """ with open(dhcp_file(network['vlan'], 'conf'), 'w') as f: for host_name in network.hosts: - f.write("%s\n" % hostDHCP(network, host_name, network.hosts[host_name])) + f.write("%s\n" % hostDHCP(network, + host_name, + network.hosts[host_name])) pid = dnsmasq_pid_for(network) # if dnsmasq is already running, then tell it to reload if pid: - # todo(ja): use "/proc/%d/cmdline" % (pid) to determine if pid refers + # TODO(ja): use "/proc/%d/cmdline" % (pid) to determine if pid refers # correct dnsmasq process try: os.kill(pid, signal.SIGHUP) @@ -148,6 +167,7 @@ def start_dnsmasq(network): 'DNSMASQ_INTERFACE': network['bridge_name']} execute(dnsmasq_cmd(network), addl_env=env) + def stop_dnsmasq(network): """ stops the dnsmasq instance for a given network """ pid = dnsmasq_pid_for(network) @@ -158,14 +178,17 @@ def stop_dnsmasq(network): except Exception, e: logging.debug("Killing dnsmasq threw %s", e) + def dhcp_file(vlan, kind): """ return path to a pid, leases or conf file for a vlan """ return os.path.abspath("%s/nova-%s.%s" % (FLAGS.networks_path, vlan, kind)) + def bin_file(script): return os.path.abspath(os.path.join(__file__, "../../../bin", script)) + def dnsmasq_pid_for(network): """ the pid for prior dnsmasq instance for a vlan, returns None if no pid file exists @@ -178,4 +201,3 @@ def dnsmasq_pid_for(network): if os.path.exists(pid_file): with open(pid_file, 'r') as f: return int(f.read()) - diff --git a/nova/network/model.py b/nova/network/model.py index 2074a6d46..734a3f7a9 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -119,7 +119,9 @@ class Vlan(datastore.BasicModel): default way of saving into "vlan:ID" and adding to a set of "vlans". 
""" set_name = self._redis_set_name(self.__class__.__name__) - datastore.Redis.instance().hset(set_name, self.project_id, self.vlan_id) + datastore.Redis.instance().hset(set_name, + self.project_id, + self.vlan_id) @datastore.absorb_connection_error def destroy(self): @@ -129,17 +131,16 @@ class Vlan(datastore.BasicModel): def subnet(self): vlan = int(self.vlan_id) network = IPy.IP(FLAGS.private_range) - start = (vlan-FLAGS.vlan_start) * FLAGS.network_size + start = (vlan - FLAGS.vlan_start) * FLAGS.network_size # minus one for the gateway. return "%s-%s" % (network[start], network[start + FLAGS.network_size - 1]) + # CLEANUP: # TODO(ja): Save the IPs at the top of each subnet for cloudpipe vpn clients # TODO(ja): does vlanpool "keeper" need to know the min/max - # shouldn't FLAGS always win? -# TODO(joshua): Save the IPs at the top of each subnet for cloudpipe vpn clients - class BaseNetwork(datastore.BasicModel): override_type = 'network' @@ -223,11 +224,11 @@ class BaseNetwork(datastore.BasicModel): @property def num_bottom_reserved_ips(self): - return 2 # Network, Gateway + return 2 # Network, Gateway @property def num_top_reserved_ips(self): - return 1 # Broadcast + return 1 # Broadcast def allocate_ip(self, user_id, project_id, mac): for address in self.available: @@ -257,8 +258,11 @@ class BaseNetwork(datastore.BasicModel): for address in self.hosts: yield address - def express(self, address=None): pass - def deexpress(self, address=None): pass + def express(self, address=None): + pass + + def deexpress(self, address=None): + pass class BridgedNetwork(BaseNetwork): @@ -298,6 +302,7 @@ class BridgedNetwork(BaseNetwork): linux_net.vlan_create(self) linux_net.bridge_create(self) + class DHCPNetwork(BridgedNetwork): """ properties: @@ -365,6 +370,7 @@ class DHCPNetwork(BridgedNetwork): else: linux_net.start_dnsmasq(self) + class PublicAddress(datastore.BasicModel): override_type = "address" @@ -391,6 +397,8 @@ class PublicAddress(datastore.BasicModel): 
DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)] + + class PublicNetworkController(BaseNetwork): override_type = 'network' @@ -400,7 +408,8 @@ class PublicNetworkController(BaseNetwork): FLAGS.public_range) self['user_id'] = "public" self['project_id'] = "public" - self["create_time"] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) + self["create_time"] = time.strftime('%Y-%m-%dT%H:%M:%SZ', + time.gmtime()) self["vlan"] = FLAGS.public_vlan self.save() self.express() @@ -458,7 +467,7 @@ class PublicNetworkController(BaseNetwork): if address: addresses = [self.get_host(address)] for addr in addresses: - if addr.get('private_ip','available') == 'available': + if addr.get('private_ip', 'available') == 'available': continue public_ip = addr['address'] private_ip = addr['private_ip'] @@ -490,8 +499,9 @@ class PublicNetworkController(BaseNetwork): % (private_ip, protocol, port)) -# FIXME(todd): does this present a race condition, or is there some piece of -# architecture that mitigates it (only one queue listener per net)? +# FIXME(todd): does this present a race condition, or is there some +# piece of architecture that mitigates it (only one queue +# listener per net)? def get_vlan_for_project(project_id): """ Allocate vlan IDs to individual users. 
@@ -502,7 +512,7 @@ def get_vlan_for_project(project_id): known_vlans = Vlan.dict_by_vlan() for vnum in range(FLAGS.vlan_start, FLAGS.vlan_end): vstr = str(vnum) - if not known_vlans.has_key(vstr): + if not vstr in known_vlans: return Vlan.create(project_id, vnum) old_project_id = known_vlans[vstr] if not manager.AuthManager().get_project(old_project_id): @@ -526,6 +536,7 @@ def get_vlan_for_project(project_id): return Vlan.create(project_id, vnum) raise exception.AddressNotAllocated("Out of VLANs") + def get_project_network(project_id, security_group='default'): """ get a project's private network, allocating one if needed """ project = manager.AuthManager().get_project(project_id) @@ -556,10 +567,8 @@ def get_network_by_interface(iface, security_group='default'): return get_project_network(project_id, security_group) - def get_public_ip_for_instance(instance_id): # FIXME: this should be a lookup - iteration won't scale for address_record in PublicAddress.all(): if address_record.get('instance_id', 'available') == instance_id: return address_record['address'] - diff --git a/nova/network/service.py b/nova/network/service.py index 1a61f49d4..f13324103 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -38,7 +38,7 @@ flags.DEFINE_string('network_type', flags.DEFINE_string('flat_network_bridge', 'br100', 'Bridge for simple network instances') flags.DEFINE_list('flat_network_ips', - ['192.168.0.2','192.168.0.3','192.168.0.4'], + ['192.168.0.2', '192.168.0.3', '192.168.0.4'], 'Available ips for simple network') flags.DEFINE_string('flat_network_network', '192.168.0.0', 'Network for simple network') @@ -51,17 +51,21 @@ flags.DEFINE_string('flat_network_broadcast', '192.168.0.255', flags.DEFINE_string('flat_network_dns', '8.8.4.4', 'Dns for simple network') + def type_to_class(network_type): if network_type == 'flat': return FlatNetworkService - elif network_type == 'vlan': + elif network_type == 'vlan': return VlanNetworkService raise 
NotFound("Couldn't find %s network type" % network_type) def setup_compute_network(network_type, user_id, project_id, security_group): srv = type_to_class(network_type) - srv.setup_compute_network(network_type, user_id, project_id, security_group) + srv.setup_compute_network(network_type, + user_id, + project_id, + security_group) def get_host_for_project(project_id): @@ -175,6 +179,7 @@ class FlatNetworkService(BaseNetworkService): """Returns an ip to the pool""" datastore.Redis.instance().sadd('ips', fixed_ip) + class VlanNetworkService(BaseNetworkService): """Vlan network with dhcp""" # NOTE(vish): A lot of the interactions with network/model.py can be @@ -194,7 +199,7 @@ class VlanNetworkService(BaseNetworkService): return {'network_type': FLAGS.network_type, 'bridge_name': net['bridge_name'], 'mac_address': mac, - 'private_dns_name' : fixed_ip} + 'private_dns_name': fixed_ip} def deallocate_fixed_ip(self, fixed_ip, *args, **kwargs): diff --git a/nova/network/vpn.py b/nova/network/vpn.py index 1b6dd7a56..74eebf9a8 100644 --- a/nova/network/vpn.py +++ b/nova/network/vpn.py @@ -33,6 +33,7 @@ flags.DEFINE_integer('vpn_start_port', 1000, flags.DEFINE_integer('vpn_end_port', 2000, 'End port for the cloudpipe VPN servers') + class NoMorePorts(exception.Error): pass @@ -121,4 +122,3 @@ class NetworkData(datastore.BasicModel): self.unassociate_with('ip', self.ip) datastore.Redis.instance().sadd('ip:%s:ports' % self.ip, self.port) super(NetworkData, self).destroy() - diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index 94d10200e..9aa39e516 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -31,6 +31,7 @@ from nova.network.exception import NoMoreAddresses FLAGS = flags.FLAGS + class NetworkTestCase(test.TrialTestCase): def setUp(self): super(NetworkTestCase, self).setUp() @@ -66,12 +67,14 @@ class NetworkTestCase(test.TrialTestCase): def test_public_network_allocation(self): pubnet = 
IPy.IP(flags.FLAGS.public_range) - address = self.network.allocate_ip(self.user.id, self.projects[0].id, "public") + address = self.network.allocate_ip(self.user.id, + self.projects[0].id, + "public") self.assertTrue(IPy.IP(address) in pubnet) self.assertTrue(IPy.IP(address) in self.network.network) def test_allocate_deallocate_fixed_ip(self): - result = self.service.allocate_fixed_ip( + result = self.service.allocate_fixed_ip( self.user.id, self.projects[0].id) address = result['private_dns_name'] mac = result['mac_address'] @@ -103,7 +106,8 @@ class NetworkTestCase(test.TrialTestCase): secondnet = model.get_project_network(self.projects[1].id, "default") self.assertEqual(True, is_in_project(address, self.projects[0].id)) - self.assertEqual(True, is_in_project(secondaddress, self.projects[1].id)) + self.assertEqual(True, is_in_project(secondaddress, + self.projects[1].id)) self.assertEqual(False, is_in_project(address, self.projects[1].id)) # Addresses are allocated before they're issued @@ -116,19 +120,21 @@ class NetworkTestCase(test.TrialTestCase): self.assertEqual(False, is_in_project(address, self.projects[0].id)) # First address release shouldn't affect the second - self.assertEqual(True, is_in_project(secondaddress, self.projects[1].id)) + self.assertEqual(True, is_in_project(secondaddress, + self.projects[1].id)) rv = self.service.deallocate_fixed_ip(secondaddress) self.dnsmasq.release_ip(secondmac, secondaddress, hostname, secondnet.bridge_name) - self.assertEqual(False, is_in_project(secondaddress, self.projects[1].id)) + self.assertEqual(False, is_in_project(secondaddress, + self.projects[1].id)) def test_subnet_edge(self): result = self.service.allocate_fixed_ip(self.user.id, self.projects[0].id) firstaddress = result['private_dns_name'] hostname = "toomany-hosts" - for i in range(1,5): + for i in range(1, 5): project_id = self.projects[i].id result = self.service.allocate_fixed_ip( self.user, project_id) @@ -142,9 +148,12 @@ class 
NetworkTestCase(test.TrialTestCase): self.user, project_id) mac3 = result['mac_address'] address3 = result['private_dns_name'] - self.assertEqual(False, is_in_project(address, self.projects[0].id)) - self.assertEqual(False, is_in_project(address2, self.projects[0].id)) - self.assertEqual(False, is_in_project(address3, self.projects[0].id)) + self.assertEqual(False, is_in_project(address, + self.projects[0].id)) + self.assertEqual(False, is_in_project(address2, + self.projects[0].id)) + self.assertEqual(False, is_in_project(address3, + self.projects[0].id)) rv = self.service.deallocate_fixed_ip(address) rv = self.service.deallocate_fixed_ip(address2) rv = self.service.deallocate_fixed_ip(address3) @@ -212,9 +221,10 @@ class NetworkTestCase(test.TrialTestCase): """ net = model.get_project_network(self.projects[0].id, "default") num_preallocated_ips = len(net.hosts.keys()) - num_available_ips = flags.FLAGS.network_size - (net.num_bottom_reserved_ips + - num_preallocated_ips + - net.num_top_reserved_ips) + net_size = flags.FLAGS.network_size + num_available_ips = net_size - (net.num_bottom_reserved_ips + + num_preallocated_ips + + net.num_top_reserved_ips) self.assertEqual(num_available_ips, len(list(net.available))) def test_too_many_addresses(self): @@ -249,25 +259,22 @@ class NetworkTestCase(test.TrialTestCase): net.bridge_name) self.assertEqual(len(list(net.available)), num_available_ips) + def is_in_project(address, project_id): return address in model.get_project_network(project_id).list_addresses() -def _get_project_addresses(project_id): - project_addresses = [] - for addr in model.get_project_network(project_id).list_addresses(): - project_addresses.append(addr) - return project_addresses def binpath(script): return os.path.abspath(os.path.join(__file__, "../../../bin", script)) + class FakeDNSMasq(object): def issue_ip(self, mac, ip, hostname, interface): cmd = "%s add %s %s %s" % (binpath('nova-dhcpbridge'), mac, ip, hostname) env = {'DNSMASQ_INTERFACE': 
interface, - 'TESTING' : '1', - 'FLAGFILE' : FLAGS.dhcpbridge_flagfile} + 'TESTING': '1', + 'FLAGFILE': FLAGS.dhcpbridge_flagfile} (out, err) = utils.execute(cmd, addl_env=env) logging.debug("ISSUE_IP: %s, %s " % (out, err)) @@ -275,8 +282,7 @@ class FakeDNSMasq(object): cmd = "%s del %s %s %s" % (binpath('nova-dhcpbridge'), mac, ip, hostname) env = {'DNSMASQ_INTERFACE': interface, - 'TESTING' : '1', - 'FLAGFILE' : FLAGS.dhcpbridge_flagfile} + 'TESTING': '1', + 'FLAGFILE': FLAGS.dhcpbridge_flagfile} (out, err) = utils.execute(cmd, addl_env=env) logging.debug("RELEASE_IP: %s, %s " % (out, err)) - -- cgit From c4f6500a4c33d4ad093d29f971c139b63984a0a5 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 10 Aug 2010 12:27:06 -0700 Subject: pylint cleanup --- bin/nova-dhcpbridge | 5 ++-- nova/network/exception.py | 5 ++++ nova/network/linux_net.py | 66 ++++++++++++++++++++++++++--------------------- 3 files changed, 44 insertions(+), 32 deletions(-) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index 7789dac98..6a9115fcb 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -70,8 +70,9 @@ def init_leases(interface): net = model.get_network_by_interface(interface) res = "" for host_name in net.hosts: - res += "%s\n" % linux_net.hostDHCP(net, host_name, - net.hosts[host_name]) + res += "%s\n" % linux_net.host_dhcp(net, + host_name, + net.hosts[host_name]) return res diff --git a/nova/network/exception.py b/nova/network/exception.py index 884ea54b4..8d7aa1498 100644 --- a/nova/network/exception.py +++ b/nova/network/exception.py @@ -24,20 +24,25 @@ from nova.exception import Error class NoMoreAddresses(Error): + """No More Addresses are available in the network""" pass class AddressNotAllocated(Error): + """The specified address has not been allocated""" pass class AddressAlreadyAssociated(Error): + """The specified address has already been associated""" pass class AddressNotAssociated(Error): + """The specified address is not associated""" pass 
class NotValidNetworkSize(Error): + """The network size is not valid""" pass diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 35bfded49..2f6a9638d 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -15,11 +15,13 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +""" +Implements vlans, bridges, and iptables rules using linux utilities. +""" import logging import signal import os -import subprocess # todo(ja): does the definition of network_path belong here? @@ -34,53 +36,53 @@ flags.DEFINE_string('dhcpbridge_flagfile', def execute(cmd, addl_env=None): + """Wrapper around utils.execute for fake_network""" if FLAGS.fake_network: - logging.debug("FAKE NET: %s" % cmd) + logging.debug("FAKE NET: %s", cmd) return "fake", 0 else: return utils.execute(cmd, addl_env=addl_env) def runthis(desc, cmd): + """Wrapper around utils.runthis for fake_network""" if FLAGS.fake_network: return execute(cmd) else: return utils.runthis(desc, cmd) -def Popen(cmd): - if FLAGS.fake_network: - execute(' '.join(cmd)) - else: - subprocess.Popen(cmd) - - def device_exists(device): - (out, err) = execute("ifconfig %s" % device) + """Check if ethernet device exists""" + (_out, err) = execute("ifconfig %s" % device) return not err def confirm_rule(cmd): + """Delete and re-add iptables rule""" execute("sudo iptables --delete %s" % (cmd)) execute("sudo iptables -I %s" % (cmd)) def remove_rule(cmd): + """Remove iptables rule""" execute("sudo iptables --delete %s" % (cmd)) -def bind_public_ip(ip, interface): +def bind_public_ip(public_ip, interface): + """Bind ip to an interface""" runthis("Binding IP to interface: %s", - "sudo ip addr add %s dev %s" % (ip, interface)) + "sudo ip addr add %s dev %s" % (public_ip, interface)) -def unbind_public_ip(ip, interface): +def unbind_public_ip(public_ip, interface): + """Unbind a public ip from 
an interface""" runthis("Binding IP to interface: %s", - "sudo ip addr del %s dev %s" % (ip, interface)) + "sudo ip addr del %s dev %s" % (public_ip, interface)) def vlan_create(net): - """ create a vlan on on a bridge device unless vlan already exists """ + """Create a vlan on on a bridge device unless vlan already exists""" if not device_exists("vlan%s" % net['vlan']): logging.debug("Starting VLAN inteface for %s network", (net['vlan'])) execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD") @@ -89,7 +91,7 @@ def vlan_create(net): def bridge_create(net): - """ create a bridge on a vlan unless it already exists """ + """Create a bridge on a vlan unless it already exists""" if not device_exists(net['bridge_name']): logging.debug("Starting Bridge inteface for %s network", (net['vlan'])) execute("sudo brctl addbr %s" % (net['bridge_name'])) @@ -107,7 +109,8 @@ def bridge_create(net): execute("sudo ifconfig %s up" % net['bridge_name']) -def dnsmasq_cmd(net): +def _dnsmasq_cmd(net): + """Builds dnsmasq command""" cmd = ['sudo -E dnsmasq', ' --strict-order', ' --bind-interfaces', @@ -122,7 +125,8 @@ def dnsmasq_cmd(net): return ''.join(cmd) -def hostDHCP(network, host, mac): +def host_dhcp(network, host, mac): + """Return a host string for a network, host, and mac""" # Logically, the idx of instances they've launched in this net idx = host.split(".")[-1] return "%s,%s-%s-%s.novalocal,%s" % \ @@ -135,14 +139,14 @@ def hostDHCP(network, host, mac): # so any configuration options (like dchp-range, vlan, ...) 
# aren't reloaded def start_dnsmasq(network): - """ (re)starts a dnsmasq server for a given network + """(Re)starts a dnsmasq server for a given network if a dnsmasq instance is already running then send a HUP signal causing it to reload, otherwise spawn a new instance """ with open(dhcp_file(network['vlan'], 'conf'), 'w') as f: for host_name in network.hosts: - f.write("%s\n" % hostDHCP(network, + f.write("%s\n" % host_dhcp(network, host_name, network.hosts[host_name])) @@ -154,8 +158,8 @@ def start_dnsmasq(network): # correct dnsmasq process try: os.kill(pid, signal.SIGHUP) - except Exception, e: - logging.debug("Hupping dnsmasq threw %s", e) + except Exception as exc: # pylint: disable-msg=W0703 + logging.debug("Hupping dnsmasq threw %s", exc) # otherwise delete the existing leases file and start dnsmasq lease_file = dhcp_file(network['vlan'], 'leases') @@ -165,35 +169,37 @@ def start_dnsmasq(network): # FLAGFILE and DNSMASQ_INTERFACE in env env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile, 'DNSMASQ_INTERFACE': network['bridge_name']} - execute(dnsmasq_cmd(network), addl_env=env) + execute(_dnsmasq_cmd(network), addl_env=env) def stop_dnsmasq(network): - """ stops the dnsmasq instance for a given network """ + """Stops the dnsmasq instance for a given network""" pid = dnsmasq_pid_for(network) if pid: try: os.kill(pid, signal.SIGTERM) - except Exception, e: - logging.debug("Killing dnsmasq threw %s", e) + except Exception as exc: # pylint: disable-msg=W0703 + logging.debug("Killing dnsmasq threw %s", exc) def dhcp_file(vlan, kind): - """ return path to a pid, leases or conf file for a vlan """ + """Return path to a pid, leases or conf file for a vlan""" return os.path.abspath("%s/nova-%s.%s" % (FLAGS.networks_path, vlan, kind)) def bin_file(script): + """Return the absolute path to scipt in the bin directory""" return os.path.abspath(os.path.join(__file__, "../../../bin", script)) def dnsmasq_pid_for(network): - """ the pid for prior dnsmasq instance for a vlan, - 
returns None if no pid file exists + """Returns he pid for prior dnsmasq instance for a vlan + + Returns None if no pid file exists - if machine has rebooted pid might be incorrect (caller should check) + If machine has rebooted pid might be incorrect (caller should check) """ pid_file = dhcp_file(network['vlan'], 'pid') -- cgit From 538fe868a8c89f892bffbfc0001b64e3bf1c9cf5 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Tue, 10 Aug 2010 15:28:35 -0400 Subject: Oops, we need eventlet as well. --- tools/pip-requires | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/pip-requires b/tools/pip-requires index 4eb47ca2b..e3591e92d 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -4,6 +4,7 @@ amqplib==0.6.1 anyjson==0.2.4 boto==2.0b1 carrot==0.10.5 +eventlet==0.9.10 lockfile==0.8 python-daemon==1.5.5 python-gflags==1.3 -- cgit From e0983caad1c3ff7ca451094f8778b1a62bf91531 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 10 Aug 2010 12:46:40 -0700 Subject: Further pylint cleanup --- nova/endpoint/cloud.py | 10 +++++----- nova/network/linux_net.py | 4 ++-- nova/network/service.py | 24 ++++++++++++++++-------- 3 files changed, 23 insertions(+), 15 deletions(-) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index ad9188ff3..02969c8e9 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -103,7 +103,7 @@ class CloudController(object): result = {} for instance in self.instdir.all: if instance['project_id'] == project_id: - line = '%s slots=%d' % (instance['private_dns_name'], + line = '%s slots=%d' % (instance['private_dns_name'], INSTANCE_TYPES[instance['instance_type']]['vcpus']) if instance['key_name'] in result: result[instance['key_name']].append(line) @@ -423,7 +423,7 @@ class CloudController(object): i['key_name'] = instance.get('key_name', None) if context.user.is_admin(): i['key_name'] = '%s (%s, %s)' % (i['key_name'], - instance.get('project_id', None), + instance.get('project_id', None), 
instance.get('node_name', '')) i['product_codes_set'] = self._convert_to_set( instance.get('product_codes', None), 'product_code') @@ -560,15 +560,15 @@ class CloudController(object): # TODO: Get the real security group of launch in here security_group = "default" for num in range(int(kwargs['max_count'])): - vpn = False + is_vpn = False if image_id == FLAGS.vpn_image_id: - vpn = True + is_vpn = True allocate_result = yield rpc.call(network_topic, {"method": "allocate_fixed_ip", "args": {"user_id": context.user.id, "project_id": context.project.id, "security_group": security_group, - "vpn": vpn}}) + "is_vpn": is_vpn}}) allocate_data = allocate_result['result'] inst = self.instdir.new() inst['image_id'] = image_id diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 2f6a9638d..56b4a9dd2 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -158,7 +158,7 @@ def start_dnsmasq(network): # correct dnsmasq process try: os.kill(pid, signal.SIGHUP) - except Exception as exc: # pylint: disable-msg=W0703 + except Exception as exc: # pylint: disable=W0703 logging.debug("Hupping dnsmasq threw %s", exc) # otherwise delete the existing leases file and start dnsmasq @@ -179,7 +179,7 @@ def stop_dnsmasq(network): if pid: try: os.kill(pid, signal.SIGTERM) - except Exception as exc: # pylint: disable-msg=W0703 + except Exception as exc: # pylint: disable=W0703 logging.debug("Killing dnsmasq threw %s", exc) diff --git a/nova/network/service.py b/nova/network/service.py index f13324103..fd45496c9 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -17,7 +17,7 @@ # under the License. 
""" -Network Nodes are responsible for allocating ips and setting up network +Network Hosts are responsible for allocating ips and setting up network """ from nova import datastore @@ -53,6 +53,7 @@ flags.DEFINE_string('flat_network_dns', '8.8.4.4', def type_to_class(network_type): + """Convert a network_type string into an actual Python class""" if network_type == 'flat': return FlatNetworkService elif network_type == 'vlan': @@ -61,6 +62,7 @@ def type_to_class(network_type): def setup_compute_network(network_type, user_id, project_id, security_group): + """Sets up the network on a compute host""" srv = type_to_class(network_type) srv.setup_compute_network(network_type, user_id, @@ -69,12 +71,14 @@ def setup_compute_network(network_type, user_id, project_id, security_group): def get_host_for_project(project_id): + """Get host allocated to project from datastore""" redis = datastore.Redis.instance() return redis.get(_host_key(project_id)) def _host_key(project_id): - return "network_host:%s" % project_id + """Returns redis host key for network""" + return "networkhost:%s" % project_id class BaseNetworkService(service.Service): @@ -84,6 +88,7 @@ class BaseNetworkService(service.Service): """ def __init__(self, *args, **kwargs): self.network = model.PublicNetworkController() + super(BaseNetworkService, self).__init__(*args, **kwargs) def set_network_host(self, user_id, project_id, *args, **kwargs): """Safely sets the host of the projects network""" @@ -113,7 +118,7 @@ class BaseNetworkService(service.Service): pass @classmethod - def setup_compute_network(self, user_id, project_id, security_group, + def setup_compute_network(cls, user_id, project_id, security_group, *args, **kwargs): """Sets up matching network for compute hosts""" raise NotImplementedError() @@ -142,7 +147,7 @@ class FlatNetworkService(BaseNetworkService): """Basic network where no vlans are used""" @classmethod - def setup_compute_network(self, user_id, project_id, security_group, + def 
setup_compute_network(cls, user_id, project_id, security_group, *args, **kwargs): """Network is created manually""" pass @@ -186,13 +191,14 @@ class VlanNetworkService(BaseNetworkService): # simplified and improved. Also there it may be useful # to support vlans separately from dhcp, instead of having # both of them together in this class. + # pylint: disable=W0221 def allocate_fixed_ip(self, user_id, project_id, security_group='default', - vpn=False, *args, **kwargs): - """Gets a fixed ip from the pool """ + is_vpn=False, *args, **kwargs): + """Gets a fixed ip from the pool""" mac = utils.generate_mac() net = model.get_project_network(project_id) - if vpn: + if is_vpn: fixed_ip = net.allocate_vpn_ip(user_id, project_id, mac) else: fixed_ip = net.allocate_ip(user_id, project_id, mac) @@ -207,9 +213,11 @@ class VlanNetworkService(BaseNetworkService): return model.get_network_by_address(fixed_ip).deallocate_ip(fixed_ip) def lease_ip(self, address): + """Called by bridge when ip is leased""" return model.get_network_by_address(address).lease_ip(address) def release_ip(self, address): + """Called by bridge when ip is released""" return model.get_network_by_address(address).release_ip(address) def restart_nets(self): @@ -223,7 +231,7 @@ class VlanNetworkService(BaseNetworkService): vpn.NetworkData.create(project_id) @classmethod - def setup_compute_network(self, user_id, project_id, security_group, + def setup_compute_network(cls, user_id, project_id, security_group, *args, **kwargs): """Sets up matching network for compute hosts""" # NOTE(vish): Use BridgedNetwork instead of DHCPNetwork because -- cgit From 712b6e41d40303a7a3e9d0ce21dde628361417ae Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 10 Aug 2010 12:51:42 -0700 Subject: Pylint clean of vpn.py --- nova/network/vpn.py | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/nova/network/vpn.py b/nova/network/vpn.py index 74eebf9a8..a0e2a7fa1 100644 --- 
a/nova/network/vpn.py +++ b/nova/network/vpn.py @@ -35,6 +35,7 @@ flags.DEFINE_integer('vpn_end_port', 2000, class NoMorePorts(exception.Error): + """No ports available to allocate for the given ip""" pass @@ -68,42 +69,44 @@ class NetworkData(datastore.BasicModel): return network_data @classmethod - def find_free_port_for_ip(cls, ip): + def find_free_port_for_ip(cls, vpn_ip): """Finds a free port for a given ip from the redis set""" # TODO(vish): these redis commands should be generalized and # placed into a base class. Conceptually, it is # similar to an association, but we are just # storing a set of values instead of keys that # should be turned into objects. - cls._ensure_set_exists(ip) + cls._ensure_set_exists(vpn_ip) - port = datastore.Redis.instance().spop(cls._redis_ports_key(ip)) + port = datastore.Redis.instance().spop(cls._redis_ports_key(vpn_ip)) if not port: raise NoMorePorts() return port @classmethod - def _redis_ports_key(cls, ip): - return 'ip:%s:ports' % ip + def _redis_ports_key(cls, vpn_ip): + """Key that ports are stored under in redis""" + return 'ip:%s:ports' % vpn_ip @classmethod - def _ensure_set_exists(cls, ip): + def _ensure_set_exists(cls, vpn_ip): + """Creates the set of ports for the ip if it doesn't already exist""" # TODO(vish): these ports should be allocated through an admin # command instead of a flag redis = datastore.Redis.instance() - if (not redis.exists(cls._redis_ports_key(ip)) and - not redis.exists(cls._redis_association_name('ip', ip))): + if (not redis.exists(cls._redis_ports_key(vpn_ip)) and + not redis.exists(cls._redis_association_name('ip', vpn_ip))): for i in range(FLAGS.vpn_start_port, FLAGS.vpn_end_port + 1): - redis.sadd(cls._redis_ports_key(ip), i) + redis.sadd(cls._redis_ports_key(vpn_ip), i) @classmethod - def num_ports_for_ip(cls, ip): + def num_ports_for_ip(cls, vpn_ip): """Calculates the number of free ports for a given ip""" - cls._ensure_set_exists(ip) - return 
datastore.Redis.instance().scard('ip:%s:ports' % ip) + cls._ensure_set_exists(vpn_ip) + return datastore.Redis.instance().scard('ip:%s:ports' % vpn_ip) @property - def ip(self): + def ip(self): # pylint: disable=C0103 """The ip assigned to the project""" return self['ip'] -- cgit From 049b89babe10068d3976f3f3a99b7dce120e2962 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Tue, 10 Aug 2010 18:17:44 -0400 Subject: work on a router that works with wsgi and non-wsgi routing --- nova/endpoint/rackspace.py | 27 ++++++++-------- nova/wsgi.py | 76 ++++++++++++++++++++++++++++++++++++++++++++++ tools/pip-requires | 3 ++ 3 files changed, 94 insertions(+), 12 deletions(-) diff --git a/nova/endpoint/rackspace.py b/nova/endpoint/rackspace.py index 75b828e91..b4e6cd823 100644 --- a/nova/endpoint/rackspace.py +++ b/nova/endpoint/rackspace.py @@ -45,18 +45,20 @@ class API(wsgi.Middleware): def __init__(self): super(API, self).__init__(Router(webob.exc.HTTPNotFound())) - def __call__(self, environ, start_response): + @webob.dec.wsgify + def __call__(self, req): + return self.application context = {} - if "HTTP_X_AUTH_TOKEN" in environ: + if "HTTP_X_AUTH_TOKEN" in req.environ: context['user'] = manager.AuthManager().get_user_from_access_key( - environ['HTTP_X_AUTH_TOKEN']) + req.environ['HTTP_X_AUTH_TOKEN']) if context['user']: context['project'] = manager.AuthManager().get_project( context['user'].name) if "user" not in context: - return webob.exc.HTTPForbidden()(environ, start_response) + return webob.exc.HTTPForbidden() environ['nova.context'] = context - return self.application(environ, start_response) + return self.application class Router(wsgi.Router): @@ -64,13 +66,14 @@ class Router(wsgi.Router): def _build_map(self): """Build routing map for authentication and cloud.""" - self._connect("/v1.0", controller=AuthenticationAPI()) - cloud = CloudServerAPI() - self._connect("/servers", controller=cloud.launch_server, - conditions={"method": ["POST"]}) - 
self._connect("/servers/{server_id}", controller=cloud.delete_server, - conditions={'method': ["DELETE"]}) - self._connect("/servers", controller=cloud) + self.map.resource("server", "servers", controller=CloudServerAPI()) + #self._connect("/v1.0", controller=AuthenticationAPI()) + #cloud = CloudServerAPI() + #self._connect("/servers", controller=cloud.launch_server, + # conditions={"method": ["POST"]}) + #self._connect("/servers/{server_id}", controller=cloud.delete_server, + # conditions={'method': ["DELETE"]}) + #self._connect("/servers", controller=cloud) class AuthenticationAPI(wsgi.Application): diff --git a/nova/wsgi.py b/nova/wsgi.py index 4fd6e59e3..271648105 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -40,6 +40,7 @@ def run_server(application, port): eventlet.wsgi.server(sock, application) +# TODO(gundlach): I think we should toss this class, now that it has no purpose. class Application(object): """Base WSGI application wrapper. Subclasses need to implement __call__.""" @@ -140,6 +141,81 @@ class ParsedRoutes(Middleware): app = environ['wsgiorg.routing_args'][1]['controller'] return app(environ, start_response) +class MichaelRouter(object): + """ + My attempt at a routing class. Just override __init__ to call + super, then set up routes in self.map. + """ + + def __init__(self): + self.map = routes.Mapper() + self._router = routes.middleware.RoutesMiddleware(self._proceed, self.map) + + @webob.dec.wsgify + def __call__(self, req): + """ + Route the incoming request to a controller based on self.map. + If no match, return a 404. + """ + return self._router + + @webob.dec.wsgify + def _proceed(self, req): + """ + Called by self._router after matching the incoming request to a route + and putting the information into req.environ. 
+ """ + if req.environ['routes.route'] is None: + return webob.exc.HTTPNotFound() + match = environ['wsgiorg.routing_args'][1] + if match.get('_is_wsgi', False): + wsgiapp = match['controller'] + return req.get_response(wsgiapp) + else: + # TODO(gundlach): doubt this is the right way -- and it really + # feels like this code should exist somewhere already on the + # internet + controller, action = match['controller'], match['action'] + delete match['controller'] + delete match['action'] + return _as_response(getattr(controller, action)(**match)) + + controller = environ['wsgiorg.routing_args'][1]['controller'] + self._dispatch(controller) + + def _as_response(self, result): + """ + When routing to a non-wsgi controller+action, its result will + be passed here before returning up the WSGI chain to be converted + into a webob.Response + + + + + +class ApiVersionRouter(MichaelRouter): + + def __init__(self): + super(ApiVersionRouter, self).__init__(self) + + self.map.connect(None, "/v1.0/{path_info:.*}", controller=RsApiRouter()) + self.map.connect(None, "/ec2/{path_info:.*}", controller=Ec2ApiRouter()) + +class RsApiRouter(MichaelRouter): + def __init__(self): + super(RsApiRouter, self).__init__(self) + + self.map.resource("server", "servers", controller=CloudServersServerApi()) + self.map.resource("image", "images", controller=CloudServersImageApi()) + self.map.resource("flavor", "flavors", controller=CloudServersFlavorApi()) + self.map.resource("sharedipgroup", "sharedipgroups", + controller=CloudServersSharedIpGroupApi()) + +class Ec2ApiRouter(object): + def __getattr__(self, key): + return lambda *x: {'dummy response': 'i am a dummy response'} +CloudServersServerApi = CloudServersImageApi = CloudServersFlavorApi = \ + CloudServersSharedIpGroupApi = Ec2ApiRouter class Router(Middleware): # pylint: disable-msg=R0921 """Wrapper to help setup routes.middleware.RoutesMiddleware.""" diff --git a/tools/pip-requires b/tools/pip-requires index 4eb47ca2b..2317907d1 100644 
--- a/tools/pip-requires +++ b/tools/pip-requires @@ -4,11 +4,14 @@ amqplib==0.6.1 anyjson==0.2.4 boto==2.0b1 carrot==0.10.5 +eventlet==0.9.10 lockfile==0.8 python-daemon==1.5.5 python-gflags==1.3 redis==2.0.0 +routes==1.12.3 tornado==1.0 +webob==0.9.8 wsgiref==0.1.2 zope.interface==3.6.1 mox==0.5.0 -- cgit From 47bf3ed11f2f372a07ea3b1b8deb9f7684cc2e5d Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 10 Aug 2010 15:45:24 -0700 Subject: lots more pylint fixes --- nova/network/linux_net.py | 2 +- nova/network/model.py | 131 ++++++++++++++++++++++++------------ nova/tests/network_unittest.py | 147 ++++++++++++++++++++--------------------- 3 files changed, 160 insertions(+), 120 deletions(-) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 56b4a9dd2..0e8ddcc6a 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -118,7 +118,7 @@ def _dnsmasq_cmd(net): ' --pid-file=%s' % dhcp_file(net['vlan'], 'pid'), ' --listen-address=%s' % net.dhcp_listen_address, ' --except-interface=lo', - ' --dhcp-range=%s,static,600s' % (net.dhcp_range_start), + ' --dhcp-range=%s,static,600s' % net.dhcp_range_start, ' --dhcp-hostsfile=%s' % dhcp_file(net['vlan'], 'conf'), ' --dhcp-script=%s' % bin_file('nova-dhcpbridge'), ' --leasefile-ro'] diff --git a/nova/network/model.py b/nova/network/model.py index 734a3f7a9..7b1e16f26 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -57,7 +57,8 @@ logging.getLogger().setLevel(logging.DEBUG) class Vlan(datastore.BasicModel): - def __init__(self, project, vlan): + """Tracks vlans assigned to project it the datastore""" + def __init__(self, project, vlan): # pylint: disable=W0231 """ Since we don't want to try and find a vlan by its identifier, but by a project id, we don't call super-init. 
@@ -67,10 +68,12 @@ class Vlan(datastore.BasicModel): @property def identifier(self): + """Datastore identifier""" return "%s:%s" % (self.project_id, self.vlan_id) @classmethod def create(cls, project, vlan): + """Create a Vlan object""" instance = cls(project, vlan) instance.save() return instance @@ -78,6 +81,7 @@ class Vlan(datastore.BasicModel): @classmethod @datastore.absorb_connection_error def lookup(cls, project): + """Returns object by project if it exists in datastore or None""" set_name = cls._redis_set_name(cls.__name__) vlan = datastore.Redis.instance().hget(set_name, project) if vlan: @@ -88,19 +92,19 @@ class Vlan(datastore.BasicModel): @classmethod @datastore.absorb_connection_error def dict_by_project(cls): - """a hash of project:vlan""" + """A hash of project:vlan""" set_name = cls._redis_set_name(cls.__name__) - return datastore.Redis.instance().hgetall(set_name) + return datastore.Redis.instance().hgetall(set_name) or {} @classmethod @datastore.absorb_connection_error def dict_by_vlan(cls): - """a hash of vlan:project""" + """A hash of vlan:project""" set_name = cls._redis_set_name(cls.__name__) retvals = {} - hashset = datastore.Redis.instance().hgetall(set_name) - for val in hashset.keys(): - retvals[hashset[val]] = val + hashset = datastore.Redis.instance().hgetall(set_name) or {} + for (key, val) in hashset.iteritems(): + retvals[val] = key return retvals @classmethod @@ -125,10 +129,12 @@ class Vlan(datastore.BasicModel): @datastore.absorb_connection_error def destroy(self): + """Removes the object from the datastore""" set_name = self._redis_set_name(self.__class__.__name__) datastore.Redis.instance().hdel(set_name, self.project_id) def subnet(self): + """Returns a string containing the subnet""" vlan = int(self.vlan_id) network = IPy.IP(FLAGS.private_range) start = (vlan - FLAGS.vlan_start) * FLAGS.network_size @@ -142,17 +148,22 @@ class Vlan(datastore.BasicModel): # TODO(ja): does vlanpool "keeper" need to know the min/max - # shouldn't 
FLAGS always win? class BaseNetwork(datastore.BasicModel): + """Implements basic logic for allocating ips in a network""" override_type = 'network' @property def identifier(self): + """Datastore identifier""" return self.network_id def default_state(self): + """Default values for new objects""" return {'network_id': self.network_id, 'network_str': self.network_str} @classmethod + # pylint: disable=R0913 def create(cls, user_id, project_id, security_group, vlan, network_str): + """Create a BaseNetwork object""" network_id = "%s:%s" % (project_id, security_group) net = cls(network_id, network_str) net['user_id'] = user_id @@ -170,52 +181,65 @@ class BaseNetwork(datastore.BasicModel): @property def network(self): + """Returns a string representing the network""" return IPy.IP(self['network_str']) @property def netmask(self): + """Returns the netmask of this network""" return self.network.netmask() @property def gateway(self): + """Returns the network gateway address""" return self.network[1] @property def broadcast(self): + """Returns the network broadcast address""" return self.network.broadcast() @property def bridge_name(self): + """Returns the bridge associated with this network""" return "br%s" % (self["vlan"]) @property def user(self): + """Returns the user associated with this network""" return manager.AuthManager().get_user(self['user_id']) @property def project(self): + """Returns the project associated with this network""" return manager.AuthManager().get_project(self['project_id']) @property def _hosts_key(self): + """Datastore key where hosts are stored""" return "network:%s:hosts" % (self['network_str']) @property def hosts(self): + """Returns a hash of all hosts allocated in this network""" return datastore.Redis.instance().hgetall(self._hosts_key) or {} def _add_host(self, _user_id, _project_id, host, target): + """Add a host to the datastore""" datastore.Redis.instance().hset(self._hosts_key, host, target) def _rem_host(self, host): + """Remove a host 
from the datastore""" datastore.Redis.instance().hdel(self._hosts_key, host) @property def assigned(self): + """Returns a list of all assigned keys""" return datastore.Redis.instance().hkeys(self._hosts_key) @property def available(self): + """Returns a list of all available addresses in the network""" for idx in range(self.num_bottom_reserved_ips, len(self.network) - self.num_top_reserved_ips): address = str(self.network[idx]) @@ -224,15 +248,18 @@ class BaseNetwork(datastore.BasicModel): @property def num_bottom_reserved_ips(self): + """Returns number of ips reserved at the bottom of the range""" return 2 # Network, Gateway @property def num_top_reserved_ips(self): + """Returns number of ips reserved at the top of the range""" return 1 # Broadcast def allocate_ip(self, user_id, project_id, mac): + """Allocates an ip to a mac address""" for address in self.available: - logging.debug("Allocating IP %s to %s" % (address, project_id)) + logging.debug("Allocating IP %s to %s", address, project_id) self._add_host(user_id, project_id, address, mac) self.express(address=address) return address @@ -240,28 +267,37 @@ class BaseNetwork(datastore.BasicModel): (project_id, str(self.network))) def lease_ip(self, ip_str): - logging.debug("Leasing allocated IP %s" % (ip_str)) + """Called when DHCP lease is activated""" + logging.debug("Leasing allocated IP %s", ip_str) def release_ip(self, ip_str): + """Called when DHCP lease expires + + Removes the ip from the assigned list""" if not ip_str in self.assigned: raise exception.AddressNotAllocated() self._rem_host(ip_str) self.deexpress(address=ip_str) + logging.debug("Releasing IP %s", ip_str) def deallocate_ip(self, ip_str): + """Deallocates an allocated ip""" # NOTE(vish): Perhaps we should put the ip into an intermediate # state, so we know that we are pending waiting for # dnsmasq to confirm that it has been released. 
- pass + logging.debug("Deallocating allocated IP %s", ip_str) def list_addresses(self): + """List all allocated addresses""" for address in self.hosts: yield address def express(self, address=None): + """Set up network. Implemented in subclasses""" pass def deexpress(self, address=None): + """Tear down network. Implemented in subclasses""" pass @@ -286,7 +322,11 @@ class BridgedNetwork(BaseNetwork): override_type = 'network' @classmethod - def get_network_for_project(cls, user_id, project_id, security_group): + def get_network_for_project(cls, + user_id, + project_id, + security_group='default'): + """Returns network for a given project""" vlan = get_vlan_for_project(project_id) network_str = vlan.subnet() return cls.create(user_id, project_id, security_group, vlan.vlan_id, @@ -304,29 +344,14 @@ class BridgedNetwork(BaseNetwork): class DHCPNetwork(BridgedNetwork): - """ - properties: - dhcp_listen_address: the ip of the gateway / dhcp host - dhcp_range_start: the first ip to give out - dhcp_range_end: the last ip to give out - """ + """Network supporting DHCP""" bridge_gets_ip = True override_type = 'network' def __init__(self, *args, **kwargs): super(DHCPNetwork, self).__init__(*args, **kwargs) - # logging.debug("Initing DHCPNetwork object...") - self.dhcp_listen_address = self.gateway - self.dhcp_range_start = self.network[self.num_bottom_reserved_ips] - self.dhcp_range_end = self.network[-self.num_top_reserved_ips] - try: + if not(os.path.exists(FLAGS.networks_path)): os.makedirs(FLAGS.networks_path) - # NOTE(todd): I guess this is a lazy way to not have to check if the - # directory exists, but shouldn't we be smarter about - # telling the difference between existing directory and - # permission denied? 
(Errno 17 vs 13, OSError) - except Exception, err: - pass @property def num_bottom_reserved_ips(self): @@ -338,6 +363,16 @@ class DHCPNetwork(BridgedNetwork): return super(DHCPNetwork, self).num_top_reserved_ips + \ FLAGS.cnt_vpn_clients + @property + def dhcp_listen_address(self): + """Address where dhcp server should listen""" + return self.gateway + + @property + def dhcp_range_start(self): + """Starting address dhcp server should use""" + return self.network[self.num_bottom_reserved_ips] + def express(self, address=None): super(DHCPNetwork, self).express(address=address) if len(self.assigned) > 0: @@ -346,15 +381,17 @@ class DHCPNetwork(BridgedNetwork): linux_net.start_dnsmasq(self) else: logging.debug("Not launching dnsmasq: no hosts.") - self.express_cloudpipe() + self.express_vpn() def allocate_vpn_ip(self, user_id, project_id, mac): + """Allocates the reserved ip to a vpn instance""" address = str(self.network[2]) self._add_host(user_id, project_id, address, mac) self.express(address=address) return address - def express_cloudpipe(self): + def express_vpn(self): + """Sets up routing rules for vpn""" private_ip = str(self.network[2]) linux_net.confirm_rule("FORWARD -d %s -p udp --dport 1194 -j ACCEPT" % (private_ip, )) @@ -372,6 +409,7 @@ class DHCPNetwork(BridgedNetwork): class PublicAddress(datastore.BasicModel): + """Represents an elastic ip in the datastore""" override_type = "address" def __init__(self, address): @@ -387,6 +425,7 @@ class PublicAddress(datastore.BasicModel): @classmethod def create(cls, user_id, project_id, address): + """Creates a PublicAddress object""" addr = cls(address) addr['user_id'] = user_id addr['project_id'] = project_id @@ -400,12 +439,13 @@ DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)] class PublicNetworkController(BaseNetwork): + """Handles elastic ips""" override_type = 'network' def __init__(self, *args, **kwargs): network_id = "public:default" super(PublicNetworkController, 
self).__init__(network_id, - FLAGS.public_range) + FLAGS.public_range, *args, **kwargs) self['user_id'] = "public" self['project_id'] = "public" self["create_time"] = time.strftime('%Y-%m-%dT%H:%M:%SZ', @@ -416,12 +456,14 @@ class PublicNetworkController(BaseNetwork): @property def host_objs(self): + """Returns assigned addresses as PublicAddress objects""" for address in self.assigned: yield PublicAddress(address) - def get_host(self, host): - if host in self.assigned: - return PublicAddress(host) + def get_host(self, public_ip): + """Returns a specific public ip as PublicAddress object""" + if public_ip in self.assigned: + return PublicAddress(public_ip) return None def _add_host(self, user_id, project_id, host, _target): @@ -437,9 +479,10 @@ class PublicNetworkController(BaseNetwork): self.release_ip(ip_str) def associate_address(self, public_ip, private_ip, instance_id): + """Associates a public ip to a private ip and instance id""" if not public_ip in self.assigned: raise exception.AddressNotAllocated() - # TODO(joshua): Keep an index going both ways + # TODO(josh): Keep an index going both ways for addr in self.host_objs: if addr.get('private_ip', None) == private_ip: raise exception.AddressAlreadyAssociated() @@ -452,6 +495,7 @@ class PublicNetworkController(BaseNetwork): self.express(address=public_ip) def disassociate_address(self, public_ip): + """Disassociates a public ip with its private ip""" if not public_ip in self.assigned: raise exception.AddressNotAllocated() addr = self.get_host(public_ip) @@ -476,7 +520,7 @@ class PublicNetworkController(BaseNetwork): % (public_ip, private_ip)) linux_net.confirm_rule("POSTROUTING -t nat -s %s -j SNAT --to %s" % (private_ip, public_ip)) - # TODO: Get these from the secgroup datastore entries + # TODO(joshua): Get these from the secgroup datastore entries linux_net.confirm_rule("FORWARD -d %s -p icmp -j ACCEPT" % (private_ip)) for (protocol, port) in DEFAULT_PORTS: @@ -503,9 +547,7 @@ class 
PublicNetworkController(BaseNetwork): # piece of architecture that mitigates it (only one queue # listener per net)? def get_vlan_for_project(project_id): - """ - Allocate vlan IDs to individual users. - """ + """Allocate vlan IDs to individual users""" vlan = Vlan.lookup(project_id) if vlan: return vlan @@ -538,7 +580,7 @@ def get_vlan_for_project(project_id): def get_project_network(project_id, security_group='default'): - """ get a project's private network, allocating one if needed """ + """Gets a project's private network, allocating one if needed""" project = manager.AuthManager().get_project(project_id) if not project: raise nova_exception.NotFound("Project %s doesn't exist." % project_id) @@ -549,26 +591,29 @@ def get_project_network(project_id, security_group='default'): def get_network_by_address(address): + """Gets the network for a given private ip""" # TODO(vish): This is completely the wrong way to do this, but # I'm getting the network binary working before I # tackle doing this the right way. 
- logging.debug("Get Network By Address: %s" % address) + logging.debug("Get Network By Address: %s", address) for project in manager.AuthManager().get_projects(): net = get_project_network(project.id) if address in net.assigned: - logging.debug("Found %s in %s" % (address, project.id)) + logging.debug("Found %s in %s", address, project.id) return net raise exception.AddressNotAllocated() def get_network_by_interface(iface, security_group='default'): + """Gets the network for a given interface""" vlan = iface.rpartition("br")[2] project_id = Vlan.dict_by_vlan().get(vlan) return get_project_network(project_id, security_group) def get_public_ip_for_instance(instance_id): - # FIXME: this should be a lookup - iteration won't scale + """Gets the public ip for a given instance""" + # FIXME(josh): this should be a lookup - iteration won't scale for address_record in PublicAddress.all(): if address_record.get('instance_id', 'available') == instance_id: return address_record['address'] diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index 9aa39e516..5671a8886 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -15,7 +15,9 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
- +""" +Unit Tests for network code +""" import IPy import os import logging @@ -33,7 +35,8 @@ FLAGS = flags.FLAGS class NetworkTestCase(test.TrialTestCase): - def setUp(self): + """Test cases for network code""" + def setUp(self): # pylint: disable=C0103 super(NetworkTestCase, self).setUp() # NOTE(vish): if you change these flags, make sure to change the # flags in the corresponding section in nova-dhcpbridge @@ -44,7 +47,6 @@ class NetworkTestCase(test.TrialTestCase): network_size=32) logging.getLogger().setLevel(logging.DEBUG) self.manager = manager.AuthManager() - self.dnsmasq = FakeDNSMasq() self.user = self.manager.create_user('netuser', 'netuser', 'netuser') self.projects = [] self.projects.append(self.manager.create_project('netuser', @@ -56,49 +58,48 @@ class NetworkTestCase(test.TrialTestCase): 'netuser', name)) vpn.NetworkData.create(self.projects[i].id) - self.network = model.PublicNetworkController() self.service = service.VlanNetworkService() - def tearDown(self): + def tearDown(self): # pylint: disable=C0103 super(NetworkTestCase, self).tearDown() for project in self.projects: self.manager.delete_project(project) self.manager.delete_user(self.user) def test_public_network_allocation(self): + """Makes sure that we can allocaate a public ip""" pubnet = IPy.IP(flags.FLAGS.public_range) - address = self.network.allocate_ip(self.user.id, - self.projects[0].id, - "public") + address = self.service.allocate_elastic_ip(self.user.id, + self.projects[0].id) self.assertTrue(IPy.IP(address) in pubnet) - self.assertTrue(IPy.IP(address) in self.network.network) def test_allocate_deallocate_fixed_ip(self): + """Makes sure that we can allocate and deallocate a fixed ip""" result = self.service.allocate_fixed_ip( self.user.id, self.projects[0].id) address = result['private_dns_name'] mac = result['mac_address'] - logging.debug("Was allocated %s" % (address)) net = model.get_project_network(self.projects[0].id, "default") self.assertEqual(True, is_in_project(address, 
self.projects[0].id)) hostname = "test-host" - self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name) - rv = self.service.deallocate_fixed_ip(address) + issue_ip(mac, address, hostname, net.bridge_name) + self.service.deallocate_fixed_ip(address) # Doesn't go away until it's dhcp released self.assertEqual(True, is_in_project(address, self.projects[0].id)) - self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name) + release_ip(mac, address, hostname, net.bridge_name) self.assertEqual(False, is_in_project(address, self.projects[0].id)) - def test_range_allocation(self): - hostname = "test-host" - result = self.service.allocate_fixed_ip( - self.user.id, self.projects[0].id) + def test_side_effects(self): + """Ensures allocating and releasing has no side effects""" + hostname = "side-effect-host" + result = self.service.allocate_fixed_ip(self.user.id, + self.projects[0].id) mac = result['mac_address'] address = result['private_dns_name'] - result = self.service.allocate_fixed_ip( - self.user, self.projects[1].id) + result = self.service.allocate_fixed_ip(self.user, + self.projects[1].id) secondmac = result['mac_address'] secondaddress = result['private_dns_name'] @@ -111,25 +112,24 @@ class NetworkTestCase(test.TrialTestCase): self.assertEqual(False, is_in_project(address, self.projects[1].id)) # Addresses are allocated before they're issued - self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name) - self.dnsmasq.issue_ip(secondmac, secondaddress, - hostname, secondnet.bridge_name) + issue_ip(mac, address, hostname, net.bridge_name) + issue_ip(secondmac, secondaddress, hostname, secondnet.bridge_name) - rv = self.service.deallocate_fixed_ip(address) - self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name) + self.service.deallocate_fixed_ip(address) + release_ip(mac, address, hostname, net.bridge_name) self.assertEqual(False, is_in_project(address, self.projects[0].id)) # First address release shouldn't affect the second 
self.assertEqual(True, is_in_project(secondaddress, self.projects[1].id)) - rv = self.service.deallocate_fixed_ip(secondaddress) - self.dnsmasq.release_ip(secondmac, secondaddress, - hostname, secondnet.bridge_name) + self.service.deallocate_fixed_ip(secondaddress) + release_ip(secondmac, secondaddress, hostname, secondnet.bridge_name) self.assertEqual(False, is_in_project(secondaddress, self.projects[1].id)) def test_subnet_edge(self): + """Makes sure that private ips don't overlap""" result = self.service.allocate_fixed_ip(self.user.id, self.projects[0].id) firstaddress = result['private_dns_name'] @@ -148,29 +148,34 @@ class NetworkTestCase(test.TrialTestCase): self.user, project_id) mac3 = result['mac_address'] address3 = result['private_dns_name'] + net = model.get_project_network(project_id, "default") + issue_ip(mac, address, hostname, net.bridge_name) + issue_ip(mac2, address2, hostname, net.bridge_name) + issue_ip(mac3, address3, hostname, net.bridge_name) self.assertEqual(False, is_in_project(address, self.projects[0].id)) self.assertEqual(False, is_in_project(address2, self.projects[0].id)) self.assertEqual(False, is_in_project(address3, self.projects[0].id)) - rv = self.service.deallocate_fixed_ip(address) - rv = self.service.deallocate_fixed_ip(address2) - rv = self.service.deallocate_fixed_ip(address3) - net = model.get_project_network(project_id, "default") - self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name) - self.dnsmasq.release_ip(mac2, address2, hostname, net.bridge_name) - self.dnsmasq.release_ip(mac3, address3, hostname, net.bridge_name) + self.service.deallocate_fixed_ip(address) + self.service.deallocate_fixed_ip(address2) + self.service.deallocate_fixed_ip(address3) + release_ip(mac, address, hostname, net.bridge_name) + release_ip(mac2, address2, hostname, net.bridge_name) + release_ip(mac3, address3, hostname, net.bridge_name) net = model.get_project_network(self.projects[0].id, "default") - rv = 
self.service.deallocate_fixed_ip(firstaddress) - self.dnsmasq.release_ip(mac, firstaddress, hostname, net.bridge_name) + self.service.deallocate_fixed_ip(firstaddress) + release_ip(mac, firstaddress, hostname, net.bridge_name) def test_vpn_ip_and_port_looks_valid(self): + """Ensure the vpn ip and port are reasonable""" self.assert_(self.projects[0].vpn_ip) self.assert_(self.projects[0].vpn_port >= FLAGS.vpn_start_port) self.assert_(self.projects[0].vpn_port <= FLAGS.vpn_end_port) def test_too_many_vpns(self): + """Ensure error is raised if we run out of vpn ports""" vpns = [] for i in xrange(vpn.NetworkData.num_ports_for_ip(FLAGS.vpn_ip)): vpns.append(vpn.NetworkData.create("vpnuser%s" % i)) @@ -180,7 +185,6 @@ class NetworkTestCase(test.TrialTestCase): def test_ips_are_reused(self): """Makes sure that ip addresses that are deallocated get reused""" - result = self.service.allocate_fixed_ip( self.user.id, self.projects[0].id) mac = result['mac_address'] @@ -189,24 +193,18 @@ class NetworkTestCase(test.TrialTestCase): hostname = "reuse-host" net = model.get_project_network(self.projects[0].id, "default") - self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name) - rv = self.service.deallocate_fixed_ip(address) - self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name) + issue_ip(mac, address, hostname, net.bridge_name) + self.service.deallocate_fixed_ip(address) + release_ip(mac, address, hostname, net.bridge_name) result = self.service.allocate_fixed_ip( self.user, self.projects[0].id) secondmac = result['mac_address'] secondaddress = result['private_dns_name'] self.assertEqual(address, secondaddress) - rv = self.service.deallocate_fixed_ip(secondaddress) - self.dnsmasq.issue_ip(secondmac, - secondaddress, - hostname, - net.bridge_name) - self.dnsmasq.release_ip(secondmac, - secondaddress, - hostname, - net.bridge_name) + self.service.deallocate_fixed_ip(secondaddress) + issue_ip(secondmac, secondaddress, hostname, net.bridge_name) + 
release_ip(secondmac, secondaddress, hostname, net.bridge_name) def test_available_ips(self): """Make sure the number of available ips for the network is correct @@ -242,47 +240,44 @@ class NetworkTestCase(test.TrialTestCase): self.projects[0].id) macs[i] = result['mac_address'] addresses[i] = result['private_dns_name'] - self.dnsmasq.issue_ip(macs[i], - addresses[i], - hostname, - net.bridge_name) + issue_ip(macs[i], addresses[i], hostname, net.bridge_name) self.assertEqual(len(list(net.available)), 0) self.assertRaises(NoMoreAddresses, self.service.allocate_fixed_ip, self.user.id, self.projects[0].id) for i in range(len(addresses)): - rv = self.service.deallocate_fixed_ip(addresses[i]) - self.dnsmasq.release_ip(macs[i], - addresses[i], - hostname, - net.bridge_name) + self.service.deallocate_fixed_ip(addresses[i]) + release_ip(macs[i], addresses[i], hostname, net.bridge_name) self.assertEqual(len(list(net.available)), num_available_ips) def is_in_project(address, project_id): + """Returns true if address is in specified project""" return address in model.get_project_network(project_id).list_addresses() def binpath(script): + """Returns the absolute path to a script in bin""" return os.path.abspath(os.path.join(__file__, "../../../bin", script)) -class FakeDNSMasq(object): - def issue_ip(self, mac, ip, hostname, interface): - cmd = "%s add %s %s %s" % (binpath('nova-dhcpbridge'), - mac, ip, hostname) - env = {'DNSMASQ_INTERFACE': interface, - 'TESTING': '1', - 'FLAGFILE': FLAGS.dhcpbridge_flagfile} - (out, err) = utils.execute(cmd, addl_env=env) - logging.debug("ISSUE_IP: %s, %s " % (out, err)) - - def release_ip(self, mac, ip, hostname, interface): - cmd = "%s del %s %s %s" % (binpath('nova-dhcpbridge'), - mac, ip, hostname) - env = {'DNSMASQ_INTERFACE': interface, - 'TESTING': '1', - 'FLAGFILE': FLAGS.dhcpbridge_flagfile} - (out, err) = utils.execute(cmd, addl_env=env) - logging.debug("RELEASE_IP: %s, %s " % (out, err)) +def issue_ip(mac, private_ip, hostname, 
interface): + """Run add command on dhcpbridge""" + cmd = "%s add %s %s %s" % (binpath('nova-dhcpbridge'), + mac, private_ip, hostname) + env = {'DNSMASQ_INTERFACE': interface, + 'TESTING': '1', + 'FLAGFILE': FLAGS.dhcpbridge_flagfile} + (out, err) = utils.execute(cmd, addl_env=env) + logging.debug("ISSUE_IP: %s, %s ", out, err) + +def release_ip(mac, private_ip, hostname, interface): + """Run del command on dhcpbridge""" + cmd = "%s del %s %s %s" % (binpath('nova-dhcpbridge'), + mac, private_ip, hostname) + env = {'DNSMASQ_INTERFACE': interface, + 'TESTING': '1', + 'FLAGFILE': FLAGS.dhcpbridge_flagfile} + (out, err) = utils.execute(cmd, addl_env=env) + logging.debug("RELEASE_IP: %s, %s ", out, err) -- cgit From 14c7bca9cb8451e2ec8224fb5699c6f2ad480dac Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 10 Aug 2010 17:34:20 -0700 Subject: Adds get_roles commands to manager and driver classes --- nova/auth/ldapdriver.py | 34 +++++++++++++++++++++++++++------- nova/auth/manager.py | 18 ++++++++++++++++++ nova/tests/auth_unittest.py | 18 +++++++++++++++++- 3 files changed, 62 insertions(+), 8 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index ec739e134..aaaf8553c 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -181,7 +181,7 @@ class LdapDriver(object): if member_uids != None: for member_uid in member_uids: if not self.__user_exists(member_uid): - raise exception.NotFound("Project can't be created " + raise exception.NotFound("Project can't be created " "because user %s doesn't exist" % member_uid) members.append(self.__uid_to_dn(member_uid)) # always add the manager as a member because members is required @@ -236,6 +236,26 @@ class LdapDriver(object): role_dn = self.__role_to_dn(role, project_id) return self.__remove_from_group(uid, role_dn) + def get_user_roles(self, uid, project_id=None): + """Retrieve list of roles for user (or user and project)""" + if project_id is None: + # NOTE(vish): This is 
unneccesarily slow, but since we can't + # guarantee that the global roles are located + # together in the ldap tree, we're doing this version. + roles = [] + for role in FLAGS.allowed_roles: + role_dn = self.__role_to_dn(role) + if self.__is_in_group(uid, role_dn): + roles.append(role) + return roles + else: + project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree) + roles = self.__find_objects(project_dn, + '(&(&(objectclass=groupOfNames)' + '(!(objectclass=novaProject)))' + '(member=%s))' % self.__uid_to_dn(uid)) + return [role['cn'][0] for role in roles] + def delete_user(self, uid): """Delete a user""" if not self.__user_exists(uid): @@ -253,24 +273,24 @@ class LdapDriver(object): self.conn.delete_s('cn=%s,uid=%s,%s' % (key_name, uid, FLAGS.ldap_user_subtree)) - def delete_project(self, name): + def delete_project(self, project_id): """Delete a project""" - project_dn = 'cn=%s,%s' % (name, FLAGS.ldap_project_subtree) + project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree) self.__delete_roles(project_dn) self.__delete_group(project_dn) - def __user_exists(self, name): + def __user_exists(self, uid): """Check if user exists""" - return self.get_user(name) != None + return self.get_user(uid) != None def __key_pair_exists(self, uid, key_name): """Check if key pair exists""" return self.get_user(uid) != None return self.get_key_pair(uid, key_name) != None - def __project_exists(self, name): + def __project_exists(self, project_id): """Check if project exists""" - return self.get_project(name) != None + return self.get_project(project_id) != None def __find_object(self, dn, query=None, scope=None): """Find an object by dn and query""" diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 6d71a7ad6..8195182fc 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -38,6 +38,10 @@ from nova.network import vpn FLAGS = flags.FLAGS +flags.DEFINE_list('allowed_roles', + ['cloudadmin', 'itsec', 'sysadmin', 'netadmin', 'developer'], 
+ 'Allowed roles for project') + # NOTE(vish): a user with one of these roles will be a superuser and # have access to all api commands flags.DEFINE_list('superuser_roles', ['cloudadmin'], @@ -455,6 +459,20 @@ class AuthManager(object): with self.driver() as drv: drv.remove_role(User.safe_id(user), role, Project.safe_id(project)) + def get_roles(self): + """Get list of allowed roles""" + return FLAGS.allowed_roles + + def get_user_roles(self, user, project=None): + """Get user global or per-project roles""" + roles = [] + with self.driver() as drv: + roles = drv.get_user_roles(User.safe_id(user), + Project.safe_id(project)) + if project is not None and self.is_project_manager(user, project): + roles.append('projectmanager') + return roles + def get_project(self, pid): """Get project object by id""" with self.driver() as drv: diff --git a/nova/tests/auth_unittest.py b/nova/tests/auth_unittest.py index f7e0625a3..2d99c8e36 100644 --- a/nova/tests/auth_unittest.py +++ b/nova/tests/auth_unittest.py @@ -179,7 +179,23 @@ class AuthTestCase(test.BaseTestCase): project.add_role('test1', 'sysadmin') self.assertTrue(project.has_role('test1', 'sysadmin')) - def test_211_can_remove_project_role(self): + def test_211_can_list_project_roles(self): + project = self.manager.get_project('testproj') + user = self.manager.get_user('test1') + self.manager.add_role(user, 'netadmin', project) + roles = self.manager.get_user_roles(user) + self.assertTrue('sysadmin' in roles) + self.assertFalse('netadmin' in roles) + self.assertFalse('projectmanager' in roles) + project_roles = self.manager.get_user_roles(user, project) + self.assertTrue('sysadmin' in project_roles) + self.assertTrue('netadmin' in project_roles) + self.assertTrue('projectmanager' in project_roles) + # has role should be false because global role is missing + self.assertFalse(self.manager.has_role(user, 'netadmin', project)) + + + def test_212_can_remove_project_role(self): project = self.manager.get_project('testproj') 
self.assertTrue(project.has_role('test1', 'sysadmin')) project.remove_role('test1', 'sysadmin') -- cgit From 19b9164c4eaae0c2c9144f9e839fbafcac7c3ed3 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 10 Aug 2010 17:42:58 -0700 Subject: Throw exceptions for illegal roles on role add --- nova/auth/manager.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 8195182fc..e338dfc83 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -436,6 +436,10 @@ class AuthManager(object): @type project: Project or project_id @param project: Project in which to add local role. """ + if role not in FLAGS.allowed_roles: + raise exception.NotFound("The %s role can not be found" % role) + if project is not None and role in FLAGS.global_roles: + raise exception.NotFound("The %s role is global only" % role) with self.driver() as drv: drv.add_role(User.safe_id(user), role, Project.safe_id(project)) -- cgit From cff3cccc342c7d09cd2ec6c95431e1b373eba620 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 10 Aug 2010 18:04:23 -0700 Subject: change get_roles to have a flag for project_roles or not. 
Don't show 'projectmanager' in list of roles --- nova/auth/manager.py | 15 +++++++-------- nova/tests/auth_unittest.py | 2 -- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/nova/auth/manager.py b/nova/auth/manager.py index e338dfc83..064fd78bc 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -463,19 +463,18 @@ class AuthManager(object): with self.driver() as drv: drv.remove_role(User.safe_id(user), role, Project.safe_id(project)) - def get_roles(self): + def get_roles(self, project_roles=True): """Get list of allowed roles""" - return FLAGS.allowed_roles + if project_roles: + return list(set(FLAGS.allowed_roles) - set(FLAGS.global_roles)) + else: + return FLAGS.allowed_roles def get_user_roles(self, user, project=None): """Get user global or per-project roles""" - roles = [] with self.driver() as drv: - roles = drv.get_user_roles(User.safe_id(user), - Project.safe_id(project)) - if project is not None and self.is_project_manager(user, project): - roles.append('projectmanager') - return roles + return drv.get_user_roles(User.safe_id(user), + Project.safe_id(project)) def get_project(self, pid): """Get project object by id""" diff --git a/nova/tests/auth_unittest.py b/nova/tests/auth_unittest.py index 2d99c8e36..0b404bfdc 100644 --- a/nova/tests/auth_unittest.py +++ b/nova/tests/auth_unittest.py @@ -186,11 +186,9 @@ class AuthTestCase(test.BaseTestCase): roles = self.manager.get_user_roles(user) self.assertTrue('sysadmin' in roles) self.assertFalse('netadmin' in roles) - self.assertFalse('projectmanager' in roles) project_roles = self.manager.get_user_roles(user, project) self.assertTrue('sysadmin' in project_roles) self.assertTrue('netadmin' in project_roles) - self.assertTrue('projectmanager' in project_roles) # has role should be false because global role is missing self.assertFalse(self.manager.has_role(user, 'netadmin', project)) -- cgit From 253cc1f683dfcfe75b1a5c1eb3a93f07e85bb041 Mon Sep 17 00:00:00 2001 From: Devin Carlen Date: 
Tue, 10 Aug 2010 18:46:49 -0700 Subject: Wired up admin api for user roles --- nova/adminclient.py | 36 +++++++++++++++++++++++++++++++++++- nova/endpoint/admin.py | 13 +++++++++++++ 2 files changed, 48 insertions(+), 1 deletion(-) diff --git a/nova/adminclient.py b/nova/adminclient.py index 25d5e71cb..5aa8ff9c2 100644 --- a/nova/adminclient.py +++ b/nova/adminclient.py @@ -57,6 +57,28 @@ class UserInfo(object): elif name == 'secretkey': self.secretkey = str(value) +class UserRole(object): + """ + Information about a Nova user's role, as parsed through SAX. + Fields include: + role + """ + def __init__(self, connection=None): + self.connection = connection + self.role = None + + def __repr__(self): + return 'UserRole:%s' % self.role + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'role': + self.role = value + else: + setattr(self, name, str(value)) + class ProjectInfo(object): """ Information about a Nova project, as parsed through SAX @@ -114,7 +136,6 @@ class ProjectMember(object): else: setattr(self, name, str(value)) - class HostInfo(object): """ Information about a Nova Host, as parsed through SAX: @@ -196,6 +217,19 @@ class NovaAdminClient(object): """ deletes a user """ return self.apiconn.get_object('DeregisterUser', {'Name': username}, UserInfo) + def get_user_roles(self, user, project=None): + """ + Returns a list of roles for the given user. + Omitting project will return any global roles that the user has. + Specifying project will return only project specific roles. + """ + params = {'User':user} + if project: + params['Project'] = project + return self.apiconn.get_list('DescribeUserRoles', + params, + [('item', UserRole)]) + def add_user_role(self, user, role, project=None): """ Add a role to a user either globally or for a specific project. 
diff --git a/nova/endpoint/admin.py b/nova/endpoint/admin.py index c4b8c05ca..a3114c0a3 100644 --- a/nova/endpoint/admin.py +++ b/nova/endpoint/admin.py @@ -102,6 +102,19 @@ class AdminController(object): return True + @admin_only + def describe_roles(self, context, project_roles=True, **kwargs): + """Returns a list of allowed roles.""" + return manager.AuthManager().get_roles(project_roles) + + @admin_only + def describe_user_roles(self, context, user, project=None, **kwargs): + """Returns a list of roles for the given user. + Omitting project will return any global roles that the user has. + Specifying project will return only project specific roles. + """ + return manager.AuthManager().get_user_roles(user, project=project) + @admin_only def modify_user_role(self, context, user, role, project=None, operation='add', **kwargs): -- cgit From 2955018b58a731f48dcdee64d889b4be104250f1 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 10 Aug 2010 19:00:35 -0700 Subject: fix spacing issue in ldapdriver --- nova/auth/ldapdriver.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index aaaf8553c..453fa196c 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -181,8 +181,9 @@ class LdapDriver(object): if member_uids != None: for member_uid in member_uids: if not self.__user_exists(member_uid): - raise exception.NotFound("Project can't be created " - "because user %s doesn't exist" % member_uid) + raise exception.NotFound("Project can't be created " + "because user %s doesn't exist" + % member_uid) members.append(self.__uid_to_dn(member_uid)) # always add the manager as a member because members is required if not manager_dn in members: -- cgit From cf2002486d651576a28a4c53c6b49bb30c047108 Mon Sep 17 00:00:00 2001 From: Devin Carlen Date: Tue, 10 Aug 2010 19:01:40 -0700 Subject: Fixed admin api for user roles --- nova/adminclient.py | 13 +++++++++---- nova/endpoint/admin.py | 6 
++++-- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/nova/adminclient.py b/nova/adminclient.py index 5aa8ff9c2..242298a75 100644 --- a/nova/adminclient.py +++ b/nova/adminclient.py @@ -217,11 +217,16 @@ class NovaAdminClient(object): """ deletes a user """ return self.apiconn.get_object('DeregisterUser', {'Name': username}, UserInfo) + def get_roles(self, project_roles=True): + """Returns a list of available roles.""" + return self.apiconn.get_list('DescribeRoles', + {'ProjectRoles': project_roles}, + [('item', UserRole)]) + def get_user_roles(self, user, project=None): - """ - Returns a list of roles for the given user. - Omitting project will return any global roles that the user has. - Specifying project will return only project specific roles. + """Returns a list of roles for the given user. + Omitting project will return any global roles that the user has. + Specifying project will return only project specific roles. """ params = {'User':user} if project: diff --git a/nova/endpoint/admin.py b/nova/endpoint/admin.py index a3114c0a3..4f4824fca 100644 --- a/nova/endpoint/admin.py +++ b/nova/endpoint/admin.py @@ -105,7 +105,8 @@ class AdminController(object): @admin_only def describe_roles(self, context, project_roles=True, **kwargs): """Returns a list of allowed roles.""" - return manager.AuthManager().get_roles(project_roles) + roles = manager.AuthManager().get_roles(project_roles) + return { 'roles': [{'role': r} for r in roles]} @admin_only def describe_user_roles(self, context, user, project=None, **kwargs): @@ -113,7 +114,8 @@ class AdminController(object): Omitting project will return any global roles that the user has. Specifying project will return only project specific roles. 
""" - return manager.AuthManager().get_user_roles(user, project=project) + roles = manager.AuthManager().get_user_roles(user, project=project) + return { 'roles': [{'role': r} for r in roles]} @admin_only def modify_user_role(self, context, user, role, project=None, -- cgit From 0ccd10283b922cb9822872b89713aad1a5da214e Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 10 Aug 2010 21:51:18 -0700 Subject: support a hostname that can be looked up --- bin/nova-dhcpbridge | 6 ++---- nova/compute/model.py | 10 +++++++++- nova/endpoint/cloud.py | 6 +++--- nova/network/linux_net.py | 21 +++++++++++---------- 4 files changed, 25 insertions(+), 18 deletions(-) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index 6a9115fcb..0dac2672a 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -69,10 +69,8 @@ def init_leases(interface): """Get the list of hosts for an interface.""" net = model.get_network_by_interface(interface) res = "" - for host_name in net.hosts: - res += "%s\n" % linux_net.host_dhcp(net, - host_name, - net.hosts[host_name]) + for fixed_ip in net.hosts: + res += "%s\n" % linux_net.host_dhcp(fixed_ip, net.hosts[fixed_ip]) return res diff --git a/nova/compute/model.py b/nova/compute/model.py index 266a93b9a..94fe43c1a 100644 --- a/nova/compute/model.py +++ b/nova/compute/model.py @@ -123,7 +123,15 @@ class Instance(datastore.BasicModel): 'node_name': 'unassigned', 'project_id': 'unassigned', 'user_id': 'unassigned', - 'private_dns_name': 'unassigned'} + 'private_dns_name': 'unassigned', + 'hostname': self.instance_id} + + + @property + def hostname(self): + # NOTE(vish): this is to be backward compatible with instances that may + # not have been created with a hostname + return self.get('hostname', self.instance_id) @property def identifier(self): diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 02969c8e9..26071cfed 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -137,17 +137,17 @@ class 
CloudController(object): 'root': '/dev/sda1', 'swap': 'sda3' }, - 'hostname': i['private_dns_name'], # is this public sometimes? + 'hostname': i.hostname, 'instance-action': 'none', 'instance-id': i['instance_id'], 'instance-type': i.get('instance_type', ''), - 'local-hostname': i['private_dns_name'], + 'local-hostname': i.hostname, 'local-ipv4': i['private_dns_name'], # TODO: switch to IP 'kernel-id': i.get('kernel_id', ''), 'placement': { 'availaibility-zone': i.get('availability_zone', 'nova'), }, - 'public-hostname': i.get('dns_name', ''), + 'public-hostname': i.hostname, 'public-ipv4': i.get('dns_name', ''), # TODO: switch to IP 'public-keys': keys, 'ramdisk-id': i.get('ramdisk_id', ''), diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 0e8ddcc6a..8a8fff225 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -27,6 +27,7 @@ import os from nova import flags from nova import utils +from nova.compute import model FLAGS = flags.FLAGS @@ -125,12 +126,14 @@ def _dnsmasq_cmd(net): return ''.join(cmd) -def host_dhcp(network, host, mac): - """Return a host string for a network, host, and mac""" - # Logically, the idx of instances they've launched in this net - idx = host.split(".")[-1] - return "%s,%s-%s-%s.novalocal,%s" % \ - (mac, network['user_id'], network['vlan'], idx, host) +def host_dhcp(fixed_ip, mac): + """Return a host string for a fixed_ip and mac""" + instance = model.InstanceDirectory().by_ip(fixed_ip) + if instance is None: + hostname = 'ip-%s' % fixed_ip.replace('.', '-') + else: + hostname = instance.hostname + return "%s,%s.novalocal,%s" % (mac, hostname, fixed_ip) # TODO(ja): if the system has restarted or pid numbers have wrapped @@ -145,10 +148,8 @@ def start_dnsmasq(network): signal causing it to reload, otherwise spawn a new instance """ with open(dhcp_file(network['vlan'], 'conf'), 'w') as f: - for host_name in network.hosts: - f.write("%s\n" % host_dhcp(network, - host_name, - 
network.hosts[host_name])) + for fixed_ip in network.hosts: + f.write("%s\n" % host_dhcp(fixed_ip, network.hosts[fixed_ip])) pid = dnsmasq_pid_for(network) -- cgit From 24f8cb89f8b92563d364186b80c7d73d28b26bea Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 11 Aug 2010 01:20:21 -0700 Subject: Actually pass in hostname and create a proper model for data in network code --- bin/nova-dhcpbridge | 4 +- nova/compute/model.py | 10 +-- nova/datastore.py | 12 ++- nova/endpoint/cloud.py | 18 ++-- nova/network/linux_net.py | 20 ++--- nova/network/model.py | 181 ++++++++++++++++++++--------------------- nova/network/service.py | 26 ++++-- nova/tests/network_unittest.py | 7 +- 8 files changed, 145 insertions(+), 133 deletions(-) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index 0dac2672a..b1ad1c8fe 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -69,8 +69,8 @@ def init_leases(interface): """Get the list of hosts for an interface.""" net = model.get_network_by_interface(interface) res = "" - for fixed_ip in net.hosts: - res += "%s\n" % linux_net.host_dhcp(fixed_ip, net.hosts[fixed_ip]) + for address in net.address_objs: + res += "%s\n" % linux_net.host_dhcp(address) return res diff --git a/nova/compute/model.py b/nova/compute/model.py index 94fe43c1a..266a93b9a 100644 --- a/nova/compute/model.py +++ b/nova/compute/model.py @@ -123,15 +123,7 @@ class Instance(datastore.BasicModel): 'node_name': 'unassigned', 'project_id': 'unassigned', 'user_id': 'unassigned', - 'private_dns_name': 'unassigned', - 'hostname': self.instance_id} - - - @property - def hostname(self): - # NOTE(vish): this is to be backward compatible with instances that may - # not have been created with a hostname - return self.get('hostname', self.instance_id) + 'private_dns_name': 'unassigned'} @property def identifier(self): diff --git a/nova/datastore.py b/nova/datastore.py index 51ef7a758..926e41f67 100644 --- a/nova/datastore.py +++ b/nova/datastore.py @@ -124,12 +124,16 
@@ class BasicModel(object): yield cls(identifier) @classmethod - @absorb_connection_error def associated_to(cls, foreign_type, foreign_id): - redis_set = cls._redis_association_name(foreign_type, foreign_id) - for identifier in Redis.instance().smembers(redis_set): + for identifier in cls.associated_keys(foreign_type, foreign_id): yield cls(identifier) + @classmethod + @absorb_connection_error + def associated_keys(cls, foreign_type, foreign_id): + redis_set = cls._redis_association_name(foreign_type, foreign_id) + return Redis.instance().smembers(redis_set) or [] + @classmethod def _redis_set_name(cls, kls_name): # stupidly pluralize (for compatiblity with previous codebase) @@ -138,7 +142,7 @@ class BasicModel(object): @classmethod def _redis_association_name(cls, foreign_type, foreign_id): return cls._redis_set_name("%s:%s:%s" % - (foreign_type, foreign_id, cls.__name__)) + (foreign_type, foreign_id, cls._redis_name())) @property def identifier(self): diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 26071cfed..c79e96f5d 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -125,6 +125,12 @@ class CloudController(object): } else: keys = '' + + address_record = network_model.Address(i['private_dns_name']) + if address_record: + hostname = address_record['hostname'] + else: + hostname = 'ip-%s' % i['private_dns_name'].replace('.', '-') data = { 'user-data': base64.b64decode(i['user_data']), 'meta-data': { @@ -137,17 +143,17 @@ class CloudController(object): 'root': '/dev/sda1', 'swap': 'sda3' }, - 'hostname': i.hostname, + 'hostname': hostname, 'instance-action': 'none', 'instance-id': i['instance_id'], 'instance-type': i.get('instance_type', ''), - 'local-hostname': i.hostname, + 'local-hostname': hostname, 'local-ipv4': i['private_dns_name'], # TODO: switch to IP 'kernel-id': i.get('kernel_id', ''), 'placement': { 'availaibility-zone': i.get('availability_zone', 'nova'), }, - 'public-hostname': i.hostname, + 'public-hostname': 
hostname, 'public-ipv4': i.get('dns_name', ''), # TODO: switch to IP 'public-keys': keys, 'ramdisk-id': i.get('ramdisk_id', ''), @@ -563,14 +569,15 @@ class CloudController(object): is_vpn = False if image_id == FLAGS.vpn_image_id: is_vpn = True + inst = self.instdir.new() allocate_result = yield rpc.call(network_topic, {"method": "allocate_fixed_ip", "args": {"user_id": context.user.id, "project_id": context.project.id, "security_group": security_group, - "is_vpn": is_vpn}}) + "is_vpn": is_vpn, + "hostname": inst.instance_id}}) allocate_data = allocate_result['result'] - inst = self.instdir.new() inst['image_id'] = image_id inst['kernel_id'] = kernel_id inst['ramdisk_id'] = ramdisk_id @@ -584,6 +591,7 @@ class CloudController(object): inst['project_id'] = context.project.id inst['ami_launch_index'] = num inst['security_group'] = security_group + inst['hostname'] = inst.instance_id for (key, value) in allocate_data.iteritems(): inst[key] = value diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 8a8fff225..4ebc2097b 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. 
@@ -27,7 +25,6 @@ import os from nova import flags from nova import utils -from nova.compute import model FLAGS = flags.FLAGS @@ -126,14 +123,11 @@ def _dnsmasq_cmd(net): return ''.join(cmd) -def host_dhcp(fixed_ip, mac): - """Return a host string for a fixed_ip and mac""" - instance = model.InstanceDirectory().by_ip(fixed_ip) - if instance is None: - hostname = 'ip-%s' % fixed_ip.replace('.', '-') - else: - hostname = instance.hostname - return "%s,%s.novalocal,%s" % (mac, hostname, fixed_ip) +def host_dhcp(address): + """Return a host string for an address object""" + return "%s,%s.novalocal,%s" % (address['mac'], + address['hostname'], + address.address) # TODO(ja): if the system has restarted or pid numbers have wrapped @@ -148,8 +142,8 @@ def start_dnsmasq(network): signal causing it to reload, otherwise spawn a new instance """ with open(dhcp_file(network['vlan'], 'conf'), 'w') as f: - for fixed_ip in network.hosts: - f.write("%s\n" % host_dhcp(fixed_ip, network.hosts[fixed_ip])) + for address in network.assigned_objs: + f.write("%s\n" % host_dhcp(address)) pid = dnsmasq_pid_for(network) diff --git a/nova/network/model.py b/nova/network/model.py index 7b1e16f26..ce9345067 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -143,13 +143,64 @@ class Vlan(datastore.BasicModel): network[start + FLAGS.network_size - 1]) +class Address(datastore.BasicModel): + """Represents a fixed ip in the datastore""" + override_type = "address" + + def __init__(self, address): + self.address = address + super(Address, self).__init__() + + @property + def identifier(self): + return self.address + + def default_state(self): + return {'address': self.address} + + @classmethod + # pylint: disable=R0913 + def create(cls, user_id, project_id, address, mac, hostname, network_id): + """Creates an Address object""" + addr = cls(address) + addr['user_id'] = user_id + addr['project_id'] = project_id + addr['mac'] = mac + if hostname is None: + hostname = "ip-%s" % 
address.replace('.', '-') + addr['hostname'] = hostname + addr['network_id'] = network_id + addr.save() + return addr + + def save(self): + is_new = self.is_new_record() + success = super(Address, self).save() + if success and is_new: + self.associate_with("network", self['network_id']) + + def destroy(self): + self.unassociate_with("network", self['network_id']) + super(Address, self).destroy() + + +class PublicAddress(Address): + """Represents an elastic ip in the datastore""" + override_type = "address" + + def default_state(self): + return {'address': self.address, + 'instance_id': 'available', + 'private_ip': 'available'} + + # CLEANUP: -# TODO(ja): Save the IPs at the top of each subnet for cloudpipe vpn clients # TODO(ja): does vlanpool "keeper" need to know the min/max - # shouldn't FLAGS always win? class BaseNetwork(datastore.BasicModel): """Implements basic logic for allocating ips in a network""" override_type = 'network' + address_class = Address @property def identifier(self): @@ -214,28 +265,31 @@ class BaseNetwork(datastore.BasicModel): """Returns the project associated with this network""" return manager.AuthManager().get_project(self['project_id']) - @property - def _hosts_key(self): - """Datastore key where hosts are stored""" - return "network:%s:hosts" % (self['network_str']) - - @property - def hosts(self): - """Returns a hash of all hosts allocated in this network""" - return datastore.Redis.instance().hgetall(self._hosts_key) or {} - - def _add_host(self, _user_id, _project_id, host, target): + # pylint: disable=R0913 + def _add_host(self, user_id, project_id, ip_address, mac, hostname): """Add a host to the datastore""" - datastore.Redis.instance().hset(self._hosts_key, host, target) + Address.create(user_id, project_id, ip_address, + mac, hostname, self.identifier) - def _rem_host(self, host): + def _rem_host(self, ip_address): """Remove a host from the datastore""" - datastore.Redis.instance().hdel(self._hosts_key, host) + 
Address(ip_address).destroy() @property def assigned(self): - """Returns a list of all assigned keys""" - return datastore.Redis.instance().hkeys(self._hosts_key) + """Returns a list of all assigned addresses""" + return self.address_class.associated_keys('network', self.identifier) + + @property + def assigned_objs(self): + """Returns a list of all assigned addresses as objects""" + return self.address_class.associated_to('network', self.identifier) + + def get_address(self, ip_address): + """Returns a specific ip as an object""" + if ip_address in self.assigned: + return self.address_class(ip_address) + return None @property def available(self): @@ -243,7 +297,7 @@ class BaseNetwork(datastore.BasicModel): for idx in range(self.num_bottom_reserved_ips, len(self.network) - self.num_top_reserved_ips): address = str(self.network[idx]) - if not address in self.hosts.keys(): + if not address in self.assigned: yield address @property @@ -256,11 +310,11 @@ class BaseNetwork(datastore.BasicModel): """Returns number of ips reserved at the top of the range""" return 1 # Broadcast - def allocate_ip(self, user_id, project_id, mac): + def allocate_ip(self, user_id, project_id, mac, hostname=None): """Allocates an ip to a mac address""" for address in self.available: logging.debug("Allocating IP %s to %s", address, project_id) - self._add_host(user_id, project_id, address, mac) + self._add_host(user_id, project_id, address, mac, hostname) self.express(address=address) return address raise exception.NoMoreAddresses("Project %s with network %s" % @@ -287,11 +341,6 @@ class BaseNetwork(datastore.BasicModel): # dnsmasq to confirm that it has been released. logging.debug("Deallocating allocated IP %s", ip_str) - def list_addresses(self): - """List all allocated addresses""" - for address in self.hosts: - yield address - def express(self, address=None): """Set up network. 
Implemented in subclasses""" pass @@ -383,10 +432,10 @@ class DHCPNetwork(BridgedNetwork): logging.debug("Not launching dnsmasq: no hosts.") self.express_vpn() - def allocate_vpn_ip(self, user_id, project_id, mac): + def allocate_vpn_ip(self, user_id, project_id, mac, hostname=None): """Allocates the reserved ip to a vpn instance""" address = str(self.network[2]) - self._add_host(user_id, project_id, address, mac) + self._add_host(user_id, project_id, address, mac, hostname) self.express(address=address) return address @@ -407,40 +456,13 @@ class DHCPNetwork(BridgedNetwork): else: linux_net.start_dnsmasq(self) - -class PublicAddress(datastore.BasicModel): - """Represents an elastic ip in the datastore""" - override_type = "address" - - def __init__(self, address): - self.address = address - super(PublicAddress, self).__init__() - - @property - def identifier(self): - return self.address - - def default_state(self): - return {'address': self.address} - - @classmethod - def create(cls, user_id, project_id, address): - """Creates a PublicAddress object""" - addr = cls(address) - addr['user_id'] = user_id - addr['project_id'] = project_id - addr['instance_id'] = 'available' - addr['private_ip'] = 'available' - addr.save() - return addr - - DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)] class PublicNetworkController(BaseNetwork): """Handles elastic ips""" override_type = 'network' + address_class = PublicAddress def __init__(self, *args, **kwargs): network_id = "public:default" @@ -454,26 +476,6 @@ class PublicNetworkController(BaseNetwork): self.save() self.express() - @property - def host_objs(self): - """Returns assigned addresses as PublicAddress objects""" - for address in self.assigned: - yield PublicAddress(address) - - def get_host(self, public_ip): - """Returns a specific public ip as PublicAddress object""" - if public_ip in self.assigned: - return PublicAddress(public_ip) - return None - - def _add_host(self, user_id, project_id, 
host, _target): - datastore.Redis.instance().hset(self._hosts_key, host, project_id) - PublicAddress.create(user_id, project_id, host) - - def _rem_host(self, host): - PublicAddress(host).destroy() - datastore.Redis.instance().hdel(self._hosts_key, host) - def deallocate_ip(self, ip_str): # NOTE(vish): cleanup is now done on release by the parent class self.release_ip(ip_str) @@ -483,10 +485,10 @@ class PublicNetworkController(BaseNetwork): if not public_ip in self.assigned: raise exception.AddressNotAllocated() # TODO(josh): Keep an index going both ways - for addr in self.host_objs: + for addr in self.assigned_objs: if addr.get('private_ip', None) == private_ip: raise exception.AddressAlreadyAssociated() - addr = self.get_host(public_ip) + addr = self.get_address(public_ip) if addr.get('private_ip', 'available') != 'available': raise exception.AddressAlreadyAssociated() addr['private_ip'] = private_ip @@ -498,7 +500,7 @@ class PublicNetworkController(BaseNetwork): """Disassociates a public ip with its private ip""" if not public_ip in self.assigned: raise exception.AddressNotAllocated() - addr = self.get_host(public_ip) + addr = self.get_address(public_ip) if addr.get('private_ip', 'available') == 'available': raise exception.AddressNotAssociated() self.deexpress(address=public_ip) @@ -507,9 +509,12 @@ class PublicNetworkController(BaseNetwork): addr.save() def express(self, address=None): - addresses = self.host_objs if address: - addresses = [self.get_host(address)] + if not address in self.assigned: + raise exception.AddressNotAllocated() + addresses = [self.get_address(address)] + else: + addresses = self.assigned_objs for addr in addresses: if addr.get('private_ip', 'available') == 'available': continue @@ -529,7 +534,7 @@ class PublicNetworkController(BaseNetwork): % (private_ip, protocol, port)) def deexpress(self, address=None): - addr = self.get_host(address) + addr = self.get_address(address) private_ip = addr['private_ip'] 
linux_net.unbind_public_ip(address, FLAGS.public_interface) linux_net.remove_rule("PREROUTING -t nat -d %s -j DNAT --to %s" @@ -592,16 +597,10 @@ def get_project_network(project_id, security_group='default'): def get_network_by_address(address): """Gets the network for a given private ip""" - # TODO(vish): This is completely the wrong way to do this, but - # I'm getting the network binary working before I - # tackle doing this the right way. - logging.debug("Get Network By Address: %s", address) - for project in manager.AuthManager().get_projects(): - net = get_project_network(project.id) - if address in net.assigned: - logging.debug("Found %s in %s", address, project.id) - return net - raise exception.AddressNotAllocated() + address_record = Address.lookup(address) + if not address_record: + raise exception.AddressNotAllocated() + return get_project_network(address_record['project_id']) def get_network_by_interface(iface, security_group='default'): diff --git a/nova/network/service.py b/nova/network/service.py index fd45496c9..9c0f5520b 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -152,7 +152,9 @@ class FlatNetworkService(BaseNetworkService): """Network is created manually""" pass - def allocate_fixed_ip(self, user_id, project_id, + def allocate_fixed_ip(self, + user_id, + project_id, security_group='default', *args, **kwargs): """Gets a fixed ip from the pool @@ -161,7 +163,7 @@ class FlatNetworkService(BaseNetworkService): """ # NOTE(vish): Some automation could be done here. For example, # creating the flat_network_bridge and setting up - # a gateway. This is all done manually atm + # a gateway. This is all done manually atm. 
redis = datastore.Redis.instance() if not redis.exists('ips') and not len(redis.keys('instances:*')): for fixed_ip in FLAGS.flat_network_ips: @@ -169,6 +171,8 @@ class FlatNetworkService(BaseNetworkService): fixed_ip = redis.spop('ips') if not fixed_ip: raise exception.NoMoreAddresses() + # TODO(vish): some sort of dns handling for hostname should + # probably be done here. return {'inject_network': True, 'network_type': FLAGS.network_type, 'mac_address': utils.generate_mac(), @@ -192,16 +196,26 @@ class VlanNetworkService(BaseNetworkService): # to support vlans separately from dhcp, instead of having # both of them together in this class. # pylint: disable=W0221 - def allocate_fixed_ip(self, user_id, project_id, + def allocate_fixed_ip(self, + user_id, + project_id, security_group='default', - is_vpn=False, *args, **kwargs): + is_vpn=False, + hostname=None, + *args, **kwargs): """Gets a fixed ip from the pool""" mac = utils.generate_mac() net = model.get_project_network(project_id) if is_vpn: - fixed_ip = net.allocate_vpn_ip(user_id, project_id, mac) + fixed_ip = net.allocate_vpn_ip(user_id, + project_id, + mac, + hostname) else: - fixed_ip = net.allocate_ip(user_id, project_id, mac) + fixed_ip = net.allocate_ip(user_id, + project_id, + mac, + hostname) return {'network_type': FLAGS.network_type, 'bridge_name': net['bridge_name'], 'mac_address': mac, diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index 5671a8886..039509809 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -202,8 +202,8 @@ class NetworkTestCase(test.TrialTestCase): secondmac = result['mac_address'] secondaddress = result['private_dns_name'] self.assertEqual(address, secondaddress) - self.service.deallocate_fixed_ip(secondaddress) issue_ip(secondmac, secondaddress, hostname, net.bridge_name) + self.service.deallocate_fixed_ip(secondaddress) release_ip(secondmac, secondaddress, hostname, net.bridge_name) def test_available_ips(self): @@ 
-218,7 +218,7 @@ class NetworkTestCase(test.TrialTestCase): services (network, gateway, CloudPipe, broadcast) """ net = model.get_project_network(self.projects[0].id, "default") - num_preallocated_ips = len(net.hosts.keys()) + num_preallocated_ips = len(net.assigned) net_size = flags.FLAGS.network_size num_available_ips = net_size - (net.num_bottom_reserved_ips + num_preallocated_ips + @@ -254,7 +254,7 @@ class NetworkTestCase(test.TrialTestCase): def is_in_project(address, project_id): """Returns true if address is in specified project""" - return address in model.get_project_network(project_id).list_addresses() + return address in model.get_project_network(project_id).assigned def binpath(script): @@ -272,6 +272,7 @@ def issue_ip(mac, private_ip, hostname, interface): (out, err) = utils.execute(cmd, addl_env=env) logging.debug("ISSUE_IP: %s, %s ", out, err) + def release_ip(mac, private_ip, hostname, interface): """Run del command on dhcpbridge""" cmd = "%s del %s %s %s" % (binpath('nova-dhcpbridge'), -- cgit From 1637c33927672a6edc9ad7a994787669ea47f602 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 11 Aug 2010 09:46:08 -0400 Subject: Serializing in middleware after all... by tying to the router. maybe a good idea? --- nova/wsgi.py | 113 +++++++++++++++++++++++++++++++++++++---------------------- 1 file changed, 72 insertions(+), 41 deletions(-) diff --git a/nova/wsgi.py b/nova/wsgi.py index 271648105..c511a3f06 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -141,15 +141,24 @@ class ParsedRoutes(Middleware): app = environ['wsgiorg.routing_args'][1]['controller'] return app(environ, start_response) -class MichaelRouter(object): +class MichaelRouterMiddleware(object): """ - My attempt at a routing class. Just override __init__ to call - super, then set up routes in self.map. + Router that maps incoming requests to WSGI apps or to standard + controllers+actions. 
The response will be a WSGI response; standard + controllers+actions will by default have their results serialized + to the requested Content Type, or you can subclass and override + _to_webob_response to customize this. """ - def __init__(self): - self.map = routes.Mapper() - self._router = routes.middleware.RoutesMiddleware(self._proceed, self.map) + def __init__(self, map): + """ + Create a router for the given routes.Mapper. It may contain standard + routes (i.e. specifying controllers and actions), or may route to a + WSGI app by instead specifying a wsgi_app=SomeApp() parameter in + map.connect(). + """ + self.map = map + self._router = routes.middleware.RoutesMiddleware(self.__proceed, self.map) @webob.dec.wsgify def __call__(self, req): @@ -160,62 +169,84 @@ class MichaelRouter(object): return self._router @webob.dec.wsgify - def _proceed(self, req): - """ - Called by self._router after matching the incoming request to a route - and putting the information into req.environ. - """ + @staticmethod + def __proceed(req): + # Called by self._router after matching the incoming request to a route + # and putting the information into req.environ. Either returns 404, the + # routed WSGI app, or _to_webob_response(the action result). 
+ if req.environ['routes.route'] is None: return webob.exc.HTTPNotFound() match = environ['wsgiorg.routing_args'][1] - if match.get('_is_wsgi', False): - wsgiapp = match['controller'] - return req.get_response(wsgiapp) + if 'wsgi_app' in match: + return match['wsgi_app'] else: - # TODO(gundlach): doubt this is the right way -- and it really - # feels like this code should exist somewhere already on the - # internet + kwargs = match.copy() controller, action = match['controller'], match['action'] - delete match['controller'] - delete match['action'] - return _as_response(getattr(controller, action)(**match)) + delete kwargs['controller'] + delete kwargs['action'] + return _to_webob_response(req, getattr(controller, action)(**kwargs)) - controller = environ['wsgiorg.routing_args'][1]['controller'] - self._dispatch(controller) - - def _as_response(self, result): + def _to_webob_response(self, req, result): + """ + When routing to a non-WSGI controller+action, the webob.Request and the + action's result will be passed here to be converted into a + webob.Response before returning up the WSGI chain. By default it + serializes to the requested Content Type. """ - When routing to a non-wsgi controller+action, its result will - be passed here before returning up the WSGI chain to be converted - into a webob.Response + return Serializer(req).serialize(result) +class Serializer(object): + """ + Serializes a dictionary to a Content Type specified by a WSGI environment. 
+ """ + def __init__(self, environ): + """Create a serializer based on the given WSGI environment.""" + self.environ = environ + def serialize(self, data): + req = webob.Request(environ) + # TODO(gundlach): temp + if 'applicatio/json' in req.accept): + import json + return json.dumps(result) + else: + return '' + repr(data) + '' -class ApiVersionRouter(MichaelRouter): +class ApiVersionRouter(MichaelRouterMiddleware): def __init__(self): - super(ApiVersionRouter, self).__init__(self) + map = routes.Mapper() - self.map.connect(None, "/v1.0/{path_info:.*}", controller=RsApiRouter()) - self.map.connect(None, "/ec2/{path_info:.*}", controller=Ec2ApiRouter()) + map.connect(None, "/v1.0/{path_info:.*}", wsgi_app=RsApiRouter()) + map.connect(None, "/ec2/{path_info:.*}", wsgi_app=Ec2ApiRouter()) -class RsApiRouter(MichaelRouter): + super(ApiVersionRouter, self).__init__(self, map) + +class RsApiRouter(MichaelRouterMiddleware): def __init__(self): - super(RsApiRouter, self).__init__(self) + map = routes.Mapper() + + map.resource("server", "servers", controller=ServerController()) + map.resource("image", "images", controller=ImageController()) + map.resource("flavor", "flavors", controller=FlavorController()) + map.resource("sharedipgroup", "sharedipgroups", + controller=SharedIpGroupController()) - self.map.resource("server", "servers", controller=CloudServersServerApi()) - self.map.resource("image", "images", controller=CloudServersImageApi()) - self.map.resource("flavor", "flavors", controller=CloudServersFlavorApi()) - self.map.resource("sharedipgroup", "sharedipgroups", - controller=CloudServersSharedIpGroupApi()) + super(RsApiRouter, self).__init__(self, map) class Ec2ApiRouter(object): + @webob.dec.wsgify + def __call__(self, req): + return 'dummy response' + +class ServerController(object): def __getattr__(self, key): - return lambda *x: {'dummy response': 'i am a dummy response'} -CloudServersServerApi = CloudServersImageApi = CloudServersFlavorApi = \ - 
CloudServersSharedIpGroupApi = Ec2ApiRouter + return {'dummy': 'dummy response'} +ImageController = FlavorController = SharedIpGroupController = ServerController + class Router(Middleware): # pylint: disable-msg=R0921 """Wrapper to help setup routes.middleware.RoutesMiddleware.""" -- cgit From a0fb0fdf1e899488f0717bea6ee2cad58120070b Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 11 Aug 2010 14:46:43 -0400 Subject: Working router that can target WSGI middleware or a standard controller+action --- nova/wsgi.py | 205 ++++++++++++++++++++++++++++------------------------------- 1 file changed, 98 insertions(+), 107 deletions(-) diff --git a/nova/wsgi.py b/nova/wsgi.py index c511a3f06..81890499e 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -29,6 +29,8 @@ import eventlet.wsgi eventlet.patcher.monkey_patch(all=False, socket=True) import routes import routes.middleware +import webob.dec +import webob.exc logging.getLogger("routes.middleware").addHandler(logging.StreamHandler()) @@ -89,75 +91,80 @@ class Middleware(Application): # pylint: disable-msg=W0223 class Debug(Middleware): - """Helper class that can be insertd into any WSGI application chain + """Helper class that can be inserted into any WSGI application chain to get information about the request and response.""" - def __call__(self, environ, start_response): - for key, value in environ.items(): + @webob.dec.wsgify + def __call__(self, req): + print ("*" * 40) + " REQUEST ENVIRON" + for key, value in req.environ.items(): print key, "=", value print - wrapper = debug_start_response(start_response) - return debug_print_body(self.application(environ, wrapper)) - - -def debug_start_response(start_response): - """Wrap the start_response to capture when called.""" + resp = req.get_response(self.application) - def wrapper(status, headers, exc_info=None): - """Print out all headers when start_response is called.""" - print status - for (key, value) in headers: + print ("*" * 40) + " RESPONSE HEADERS" + for 
(key, value) in resp.headers: print key, "=", value print - start_response(status, headers, exc_info) - return wrapper + resp.app_iter = self.print_generator(resp.app_iter) + return resp -def debug_print_body(body): - """Print the body of the response as it is sent back.""" + @staticmethod + def print_generator(app_iter): + """ + Iterator that prints the contents of a wrapper string iterator + when iterated. + """ + print ("*" * 40) + "BODY" + for part in app_iter: + sys.stdout.write(part) + sys.stdout.flush() + yield part + print - class Wrapper(object): - """Iterate through all the body parts and print before returning.""" - def __iter__(self): - for part in body: - sys.stdout.write(part) - sys.stdout.flush() - yield part - print +class Router(object): + """ + WSGI middleware that maps incoming requests to targets. + + Non-WSGI-app targets have their results converted to a WSGI response + automatically -- by default, they are serialized according to the Content + Type from the request. This behavior can be changed by overriding + _to_webob_response(). + """ + + def __init__(self, map, targets): + """ + Create a router for the given routes.Mapper `map`. - return Wrapper() + Each route in `map` must contain either + - a 'wsgi_app' string or + - a 'controller' string and an 'action' string. + 'wsgi_app' is a key into the `target` dictionary whose value + is a WSGI app. 'controller' is a key into `target' whose value is + a class instance containing the method specified by 'action'. 
-class ParsedRoutes(Middleware): - """Processed parsed routes from routes.middleware.RoutesMiddleware - and call either the controller if found or the default application - otherwise.""" + Examples: + map = routes.Mapper() + targets = { "servers": ServerController(), "blog": BlogWsgiApp() } - def __call__(self, environ, start_response): - if environ['routes.route'] is None: - return self.application(environ, start_response) - app = environ['wsgiorg.routing_args'][1]['controller'] - return app(environ, start_response) + # Explicit mapping of one route to a controller+action + map.connect(None, "/serverlist", controller="servers", action="list") -class MichaelRouterMiddleware(object): - """ - Router that maps incoming requests to WSGI apps or to standard - controllers+actions. The response will be a WSGI response; standard - controllers+actions will by default have their results serialized - to the requested Content Type, or you can subclass and override - _to_webob_response to customize this. - """ - - def __init__(self, map): - """ - Create a router for the given routes.Mapper. It may contain standard - routes (i.e. specifying controllers and actions), or may route to a - WSGI app by instead specifying a wsgi_app=SomeApp() parameter in - map.connect(). + # Controller string is implicitly equal to 2nd param here, and + # actions are all implicitly defined + map.resource("server", "servers") + + # Pointing to a WSGI app. You'll need to specify the {path_info:.*} + # parameter so the target app can work with just his section of the + # URL. 
+ map.connect(None, "/v1.0/{path_info:.*}", wsgi_app="blog") """ self.map = map + self.targets = targets self._router = routes.middleware.RoutesMiddleware(self.__proceed, self.map) @webob.dec.wsgify @@ -169,23 +176,28 @@ class MichaelRouterMiddleware(object): return self._router @webob.dec.wsgify - @staticmethod - def __proceed(req): + def __proceed(self, req): # Called by self._router after matching the incoming request to a route # and putting the information into req.environ. Either returns 404, the # routed WSGI app, or _to_webob_response(the action result). if req.environ['routes.route'] is None: return webob.exc.HTTPNotFound() - match = environ['wsgiorg.routing_args'][1] + match = req.environ['wsgiorg.routing_args'][1] if 'wsgi_app' in match: - return match['wsgi_app'] + app_name = match['wsgi_app'] + app = self.targets[app_name] + return app else: kwargs = match.copy() - controller, action = match['controller'], match['action'] - delete kwargs['controller'] - delete kwargs['action'] - return _to_webob_response(req, getattr(controller, action)(**kwargs)) + controller_name, action = match['controller'], match['action'] + del kwargs['controller'] + del kwargs['action'] + + controller = self.targets[controller_name] + method = getattr(controller, action) + result = method(**kwargs) + return self._to_webob_response(req, result) def _to_webob_response(self, req, result): """ @@ -194,7 +206,8 @@ class MichaelRouterMiddleware(object): webob.Response before returning up the WSGI chain. By default it serializes to the requested Content Type. 
""" - return Serializer(req).serialize(result) + return Serializer(req.environ).serialize(result) + class Serializer(object): """ @@ -206,75 +219,53 @@ class Serializer(object): self.environ = environ def serialize(self, data): - req = webob.Request(environ) + req = webob.Request(self.environ) # TODO(gundlach): temp - if 'applicatio/json' in req.accept): + if req.accept and 'application/json' in req.accept: import json - return json.dumps(result) + return json.dumps(data) else: return '' + repr(data) + '' -class ApiVersionRouter(MichaelRouterMiddleware): +class ApiVersionRouter(Router): def __init__(self): map = routes.Mapper() - map.connect(None, "/v1.0/{path_info:.*}", wsgi_app=RsApiRouter()) - map.connect(None, "/ec2/{path_info:.*}", wsgi_app=Ec2ApiRouter()) + map.connect(None, "/v1.0/{path_info:.*}", wsgi_app="rs") + map.connect(None, "/ec2/{path_info:.*}", wsgi_app="ec2") + + targets = { "rs": RsApiRouter(), "ec2": Ec2ApiRouter() } - super(ApiVersionRouter, self).__init__(self, map) + super(ApiVersionRouter, self).__init__(map, targets) -class RsApiRouter(MichaelRouterMiddleware): +class RsApiRouter(Router): def __init__(self): map = routes.Mapper() - map.resource("server", "servers", controller=ServerController()) - map.resource("image", "images", controller=ImageController()) - map.resource("flavor", "flavors", controller=FlavorController()) - map.resource("sharedipgroup", "sharedipgroups", - controller=SharedIpGroupController()) + map.resource("server", "servers") + map.resource("image", "images") + map.resource("flavor", "flavors") + map.resource("sharedipgroup", "sharedipgroups") - super(RsApiRouter, self).__init__(self, map) + targets = { + 'servers': ServerController(), + 'images': ImageController(), + 'flavors': FlavorController(), + 'sharedipgroups': SharedIpGroupController() + } + super(RsApiRouter, self).__init__(map, targets) + +# TODO(gundlach): temp class Ec2ApiRouter(object): @webob.dec.wsgify def __call__(self, req): return 'dummy response' - 
+# TODO(gundlach): temp class ServerController(object): def __getattr__(self, key): - return {'dummy': 'dummy response'} + return lambda **args: {key: 'dummy response for %s' % repr(args)} +# TODO(gundlach): temp ImageController = FlavorController = SharedIpGroupController = ServerController - - -class Router(Middleware): # pylint: disable-msg=R0921 - """Wrapper to help setup routes.middleware.RoutesMiddleware.""" - - def __init__(self, application): - self.map = routes.Mapper() - self._build_map() - application = ParsedRoutes(application) - application = routes.middleware.RoutesMiddleware(application, self.map) - super(Router, self).__init__(application) - - def __call__(self, environ, start_response): - return self.application(environ, start_response) - - def _build_map(self): - """Method to create new connections for the routing map.""" - raise NotImplementedError("You must implement _build_map") - - def _connect(self, *args, **kwargs): - """Wrapper for the map.connect method.""" - self.map.connect(*args, **kwargs) - - -def route_args(application): - """Decorator to make grabbing routing args more convenient.""" - - def wrapper(self, req): - """Call application with req and parsed routing args from.""" - return application(self, req, req.environ['wsgiorg.routing_args'][1]) - - return wrapper -- cgit From 2e753b033dae6270674c0397be8e01bd2ff47980 Mon Sep 17 00:00:00 2001 From: Matthew Dietz Date: Wed, 11 Aug 2010 15:27:27 -0500 Subject: Prototype implementation of Servers controller --- nova/endpoint/aws/cloud.py | 729 +++++++++++++++++++++ nova/endpoint/aws/images.py | 95 +++ nova/endpoint/cloud.py | 729 --------------------- nova/endpoint/images.py | 95 --- nova/endpoint/rackspace.py | 186 ------ nova/endpoint/rackspace/controllers/base.py | 9 + nova/endpoint/rackspace/controllers/flavors.py | 0 nova/endpoint/rackspace/controllers/images.py | 0 nova/endpoint/rackspace/controllers/servers.py | 72 ++ .../rackspace/controllers/shared_ip_groups.py | 0 
nova/endpoint/rackspace/rackspace.py | 183 ++++++ 11 files changed, 1088 insertions(+), 1010 deletions(-) create mode 100644 nova/endpoint/aws/cloud.py create mode 100644 nova/endpoint/aws/images.py delete mode 100644 nova/endpoint/cloud.py delete mode 100644 nova/endpoint/images.py delete mode 100644 nova/endpoint/rackspace.py create mode 100644 nova/endpoint/rackspace/controllers/base.py create mode 100644 nova/endpoint/rackspace/controllers/flavors.py create mode 100644 nova/endpoint/rackspace/controllers/images.py create mode 100644 nova/endpoint/rackspace/controllers/servers.py create mode 100644 nova/endpoint/rackspace/controllers/shared_ip_groups.py create mode 100644 nova/endpoint/rackspace/rackspace.py diff --git a/nova/endpoint/aws/cloud.py b/nova/endpoint/aws/cloud.py new file mode 100644 index 000000000..878d54a15 --- /dev/null +++ b/nova/endpoint/aws/cloud.py @@ -0,0 +1,729 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Cloud Controller: Implementation of EC2 REST API calls, which are +dispatched to other nodes via AMQP RPC. State is via distributed +datastore. 
+""" + +import base64 +import logging +import os +import time +from twisted.internet import defer + +from nova import datastore +from nova import exception +from nova import flags +from nova import rpc +from nova import utils +from nova.auth import rbac +from nova.auth import manager +from nova.compute import model +from nova.compute.instance_types import INSTANCE_TYPES +from nova.endpoint import images +from nova.network import service as network_service +from nova.network import model as network_model +from nova.volume import service + + +FLAGS = flags.FLAGS + +flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') + +def _gen_key(user_id, key_name): + """ Tuck this into AuthManager """ + try: + mgr = manager.AuthManager() + private_key, fingerprint = mgr.generate_key_pair(user_id, key_name) + except Exception as ex: + return {'exception': ex} + return {'private_key': private_key, 'fingerprint': fingerprint} + + +class CloudController(object): + """ CloudController provides the critical dispatch between + inbound API calls through the endpoint and messages + sent to the other nodes. +""" + def __init__(self): + self.instdir = model.InstanceDirectory() + self.setup() + + @property + def instances(self): + """ All instances in the system, as dicts """ + return self.instdir.all + + @property + def volumes(self): + """ returns a list of all volumes """ + for volume_id in datastore.Redis.instance().smembers("volumes"): + volume = service.get_volume(volume_id) + yield volume + + def __str__(self): + return 'CloudController' + + def setup(self): + """ Ensure the keychains and folders exist. 
""" + # Create keys folder, if it doesn't exist + if not os.path.exists(FLAGS.keys_path): + os.makedirs(os.path.abspath(FLAGS.keys_path)) + # Gen root CA, if we don't have one + root_ca_path = os.path.join(FLAGS.ca_path, FLAGS.ca_file) + if not os.path.exists(root_ca_path): + start = os.getcwd() + os.chdir(FLAGS.ca_path) + utils.runthis("Generating root CA: %s", "sh genrootca.sh") + os.chdir(start) + # TODO: Do this with M2Crypto instead + + def get_instance_by_ip(self, ip): + return self.instdir.by_ip(ip) + + def _get_mpi_data(self, project_id): + result = {} + for instance in self.instdir.all: + if instance['project_id'] == project_id: + line = '%s slots=%d' % (instance['private_dns_name'], INSTANCE_TYPES[instance['instance_type']]['vcpus']) + if instance['key_name'] in result: + result[instance['key_name']].append(line) + else: + result[instance['key_name']] = [line] + return result + + def get_metadata(self, ip): + i = self.get_instance_by_ip(ip) + if i is None: + return None + mpi = self._get_mpi_data(i['project_id']) + if i['key_name']: + keys = { + '0': { + '_name': i['key_name'], + 'openssh-key': i['key_data'] + } + } + else: + keys = '' + data = { + 'user-data': base64.b64decode(i['user_data']), + 'meta-data': { + 'ami-id': i['image_id'], + 'ami-launch-index': i['ami_launch_index'], + 'ami-manifest-path': 'FIXME', # image property + 'block-device-mapping': { # TODO: replace with real data + 'ami': 'sda1', + 'ephemeral0': 'sda2', + 'root': '/dev/sda1', + 'swap': 'sda3' + }, + 'hostname': i['private_dns_name'], # is this public sometimes? 
+ 'instance-action': 'none', + 'instance-id': i['instance_id'], + 'instance-type': i.get('instance_type', ''), + 'local-hostname': i['private_dns_name'], + 'local-ipv4': i['private_dns_name'], # TODO: switch to IP + 'kernel-id': i.get('kernel_id', ''), + 'placement': { + 'availaibility-zone': i.get('availability_zone', 'nova'), + }, + 'public-hostname': i.get('dns_name', ''), + 'public-ipv4': i.get('dns_name', ''), # TODO: switch to IP + 'public-keys' : keys, + 'ramdisk-id': i.get('ramdisk_id', ''), + 'reservation-id': i['reservation_id'], + 'security-groups': i.get('groups', ''), + 'mpi': mpi + } + } + if False: # TODO: store ancestor ids + data['ancestor-ami-ids'] = [] + if i.get('product_codes', None): + data['product-codes'] = i['product_codes'] + return data + + @rbac.allow('all') + def describe_availability_zones(self, context, **kwargs): + return {'availabilityZoneInfo': [{'zoneName': 'nova', + 'zoneState': 'available'}]} + + @rbac.allow('all') + def describe_regions(self, context, region_name=None, **kwargs): + # TODO(vish): region_name is an array. 
Support filtering + return {'regionInfo': [{'regionName': 'nova', + 'regionUrl': FLAGS.ec2_url}]} + + @rbac.allow('all') + def describe_snapshots(self, + context, + snapshot_id=None, + owner=None, + restorable_by=None, + **kwargs): + return {'snapshotSet': [{'snapshotId': 'fixme', + 'volumeId': 'fixme', + 'status': 'fixme', + 'startTime': 'fixme', + 'progress': 'fixme', + 'ownerId': 'fixme', + 'volumeSize': 0, + 'description': 'fixme'}]} + + @rbac.allow('all') + def describe_key_pairs(self, context, key_name=None, **kwargs): + key_pairs = context.user.get_key_pairs() + if not key_name is None: + key_pairs = [x for x in key_pairs if x.name in key_name] + + result = [] + for key_pair in key_pairs: + # filter out the vpn keys + suffix = FLAGS.vpn_key_suffix + if context.user.is_admin() or not key_pair.name.endswith(suffix): + result.append({ + 'keyName': key_pair.name, + 'keyFingerprint': key_pair.fingerprint, + }) + + return { 'keypairsSet': result } + + @rbac.allow('all') + def create_key_pair(self, context, key_name, **kwargs): + try: + d = defer.Deferred() + p = context.handler.application.settings.get('pool') + def _complete(kwargs): + if 'exception' in kwargs: + d.errback(kwargs['exception']) + return + d.callback({'keyName': key_name, + 'keyFingerprint': kwargs['fingerprint'], + 'keyMaterial': kwargs['private_key']}) + p.apply_async(_gen_key, [context.user.id, key_name], + callback=_complete) + return d + + except manager.UserError as e: + raise + + @rbac.allow('all') + def delete_key_pair(self, context, key_name, **kwargs): + context.user.delete_key_pair(key_name) + # aws returns true even if the key doens't exist + return True + + @rbac.allow('all') + def describe_security_groups(self, context, group_names, **kwargs): + groups = { 'securityGroupSet': [] } + + # Stubbed for now to unblock other things. 
+ return groups + + @rbac.allow('netadmin') + def create_security_group(self, context, group_name, **kwargs): + return True + + @rbac.allow('netadmin') + def delete_security_group(self, context, group_name, **kwargs): + return True + + @rbac.allow('projectmanager', 'sysadmin') + def get_console_output(self, context, instance_id, **kwargs): + # instance_id is passed in as a list of instances + instance = self._get_instance(context, instance_id[0]) + return rpc.call('%s.%s' % (FLAGS.compute_topic, instance['node_name']), + {"method": "get_console_output", + "args" : {"instance_id": instance_id[0]}}) + + def _get_user_id(self, context): + if context and context.user: + return context.user.id + else: + return None + + @rbac.allow('projectmanager', 'sysadmin') + def describe_volumes(self, context, **kwargs): + volumes = [] + for volume in self.volumes: + if context.user.is_admin() or volume['project_id'] == context.project.id: + v = self.format_volume(context, volume) + volumes.append(v) + return defer.succeed({'volumeSet': volumes}) + + def format_volume(self, context, volume): + v = {} + v['volumeId'] = volume['volume_id'] + v['status'] = volume['status'] + v['size'] = volume['size'] + v['availabilityZone'] = volume['availability_zone'] + v['createTime'] = volume['create_time'] + if context.user.is_admin(): + v['status'] = '%s (%s, %s, %s, %s)' % ( + volume.get('status', None), + volume.get('user_id', None), + volume.get('node_name', None), + volume.get('instance_id', ''), + volume.get('mountpoint', '')) + if volume['attach_status'] == 'attached': + v['attachmentSet'] = [{'attachTime': volume['attach_time'], + 'deleteOnTermination': volume['delete_on_termination'], + 'device' : volume['mountpoint'], + 'instanceId' : volume['instance_id'], + 'status' : 'attached', + 'volume_id' : volume['volume_id']}] + else: + v['attachmentSet'] = [{}] + return v + + @rbac.allow('projectmanager', 'sysadmin') + @defer.inlineCallbacks + def create_volume(self, context, size, **kwargs): 
+ # TODO(vish): refactor this to create the volume object here and tell service to create it + result = yield rpc.call(FLAGS.volume_topic, {"method": "create_volume", + "args" : {"size": size, + "user_id": context.user.id, + "project_id": context.project.id}}) + # NOTE(vish): rpc returned value is in the result key in the dictionary + volume = self._get_volume(context, result['result']) + defer.returnValue({'volumeSet': [self.format_volume(context, volume)]}) + + def _get_address(self, context, public_ip): + # FIXME(vish) this should move into network.py + address = network_model.PublicAddress.lookup(public_ip) + if address and (context.user.is_admin() or address['project_id'] == context.project.id): + return address + raise exception.NotFound("Address at ip %s not found" % public_ip) + + def _get_image(self, context, image_id): + """passes in context because + objectstore does its own authorization""" + result = images.list(context, [image_id]) + if not result: + raise exception.NotFound('Image %s could not be found' % image_id) + image = result[0] + return image + + def _get_instance(self, context, instance_id): + for instance in self.instdir.all: + if instance['instance_id'] == instance_id: + if context.user.is_admin() or instance['project_id'] == context.project.id: + return instance + raise exception.NotFound('Instance %s could not be found' % instance_id) + + def _get_volume(self, context, volume_id): + volume = service.get_volume(volume_id) + if context.user.is_admin() or volume['project_id'] == context.project.id: + return volume + raise exception.NotFound('Volume %s could not be found' % volume_id) + + @rbac.allow('projectmanager', 'sysadmin') + def attach_volume(self, context, volume_id, instance_id, device, **kwargs): + volume = self._get_volume(context, volume_id) + if volume['status'] == "attached": + raise exception.ApiError("Volume is already attached") + # TODO(vish): looping through all volumes is slow. 
We should probably maintain an index + for vol in self.volumes: + if vol['instance_id'] == instance_id and vol['mountpoint'] == device: + raise exception.ApiError("Volume %s is already attached to %s" % (vol['volume_id'], vol['mountpoint'])) + volume.start_attach(instance_id, device) + instance = self._get_instance(context, instance_id) + compute_node = instance['node_name'] + rpc.cast('%s.%s' % (FLAGS.compute_topic, compute_node), + {"method": "attach_volume", + "args" : {"volume_id": volume_id, + "instance_id" : instance_id, + "mountpoint" : device}}) + return defer.succeed({'attachTime' : volume['attach_time'], + 'device' : volume['mountpoint'], + 'instanceId' : instance_id, + 'requestId' : context.request_id, + 'status' : volume['attach_status'], + 'volumeId' : volume_id}) + + + @rbac.allow('projectmanager', 'sysadmin') + def detach_volume(self, context, volume_id, **kwargs): + volume = self._get_volume(context, volume_id) + instance_id = volume.get('instance_id', None) + if not instance_id: + raise exception.Error("Volume isn't attached to anything!") + if volume['status'] == "available": + raise exception.Error("Volume is already detached") + try: + volume.start_detach() + instance = self._get_instance(context, instance_id) + rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), + {"method": "detach_volume", + "args" : {"instance_id": instance_id, + "volume_id": volume_id}}) + except exception.NotFound: + # If the instance doesn't exist anymore, + # then we need to call detach blind + volume.finish_detach() + return defer.succeed({'attachTime' : volume['attach_time'], + 'device' : volume['mountpoint'], + 'instanceId' : instance_id, + 'requestId' : context.request_id, + 'status' : volume['attach_status'], + 'volumeId' : volume_id}) + + def _convert_to_set(self, lst, label): + if lst == None or lst == []: + return None + if not isinstance(lst, list): + lst = [lst] + return [{label: x} for x in lst] + + @rbac.allow('all') + def 
describe_instances(self, context, **kwargs): + return defer.succeed(self._format_instances(context)) + + def _format_instances(self, context, reservation_id = None): + reservations = {} + if context.user.is_admin(): + instgenerator = self.instdir.all + else: + instgenerator = self.instdir.by_project(context.project.id) + for instance in instgenerator: + res_id = instance.get('reservation_id', 'Unknown') + if reservation_id != None and reservation_id != res_id: + continue + if not context.user.is_admin(): + if instance['image_id'] == FLAGS.vpn_image_id: + continue + i = {} + i['instance_id'] = instance.get('instance_id', None) + i['image_id'] = instance.get('image_id', None) + i['instance_state'] = { + 'code': instance.get('state', 0), + 'name': instance.get('state_description', 'pending') + } + i['public_dns_name'] = network_model.get_public_ip_for_instance( + i['instance_id']) + i['private_dns_name'] = instance.get('private_dns_name', None) + if not i['public_dns_name']: + i['public_dns_name'] = i['private_dns_name'] + i['dns_name'] = instance.get('dns_name', None) + i['key_name'] = instance.get('key_name', None) + if context.user.is_admin(): + i['key_name'] = '%s (%s, %s)' % (i['key_name'], + instance.get('project_id', None), instance.get('node_name','')) + i['product_codes_set'] = self._convert_to_set( + instance.get('product_codes', None), 'product_code') + i['instance_type'] = instance.get('instance_type', None) + i['launch_time'] = instance.get('launch_time', None) + i['ami_launch_index'] = instance.get('ami_launch_index', + None) + if not reservations.has_key(res_id): + r = {} + r['reservation_id'] = res_id + r['owner_id'] = instance.get('project_id', None) + r['group_set'] = self._convert_to_set( + instance.get('groups', None), 'group_id') + r['instances_set'] = [] + reservations[res_id] = r + reservations[res_id]['instances_set'].append(i) + + instance_response = {'reservationSet' : list(reservations.values()) } + return instance_response + + 
@rbac.allow('all') + def describe_addresses(self, context, **kwargs): + return self.format_addresses(context) + + def format_addresses(self, context): + addresses = [] + for address in network_model.PublicAddress.all(): + # TODO(vish): implement a by_project iterator for addresses + if (context.user.is_admin() or + address['project_id'] == context.project.id): + address_rv = { + 'public_ip': address['address'], + 'instance_id' : address.get('instance_id', 'free') + } + if context.user.is_admin(): + address_rv['instance_id'] = "%s (%s, %s)" % ( + address['instance_id'], + address['user_id'], + address['project_id'], + ) + addresses.append(address_rv) + return {'addressesSet': addresses} + + @rbac.allow('netadmin') + @defer.inlineCallbacks + def allocate_address(self, context, **kwargs): + network_topic = yield self._get_network_topic(context) + alloc_result = yield rpc.call(network_topic, + {"method": "allocate_elastic_ip", + "args": {"user_id": context.user.id, + "project_id": context.project.id}}) + public_ip = alloc_result['result'] + defer.returnValue({'addressSet': [{'publicIp' : public_ip}]}) + + @rbac.allow('netadmin') + @defer.inlineCallbacks + def release_address(self, context, public_ip, **kwargs): + # NOTE(vish): Should we make sure this works? 
+ network_topic = yield self._get_network_topic(context) + rpc.cast(network_topic, + {"method": "deallocate_elastic_ip", + "args": {"elastic_ip": public_ip}}) + defer.returnValue({'releaseResponse': ["Address released."]}) + + @rbac.allow('netadmin') + @defer.inlineCallbacks + def associate_address(self, context, instance_id, public_ip, **kwargs): + instance = self._get_instance(context, instance_id) + address = self._get_address(context, public_ip) + network_topic = yield self._get_network_topic(context) + rpc.cast(network_topic, + {"method": "associate_elastic_ip", + "args": {"elastic_ip": address['address'], + "fixed_ip": instance['private_dns_name'], + "instance_id": instance['instance_id']}}) + defer.returnValue({'associateResponse': ["Address associated."]}) + + @rbac.allow('netadmin') + @defer.inlineCallbacks + def disassociate_address(self, context, public_ip, **kwargs): + address = self._get_address(context, public_ip) + network_topic = yield self._get_network_topic(context) + rpc.cast(network_topic, + {"method": "disassociate_elastic_ip", + "args": {"elastic_ip": address['address']}}) + defer.returnValue({'disassociateResponse': ["Address disassociated."]}) + + @defer.inlineCallbacks + def _get_network_topic(self, context): + """Retrieves the network host for a project""" + host = network_service.get_host_for_project(context.project.id) + if not host: + result = yield rpc.call(FLAGS.network_topic, + {"method": "set_network_host", + "args": {"user_id": context.user.id, + "project_id": context.project.id}}) + host = result['result'] + defer.returnValue('%s.%s' %(FLAGS.network_topic, host)) + + @rbac.allow('projectmanager', 'sysadmin') + @defer.inlineCallbacks + def run_instances(self, context, **kwargs): + # make sure user can access the image + # vpn image is private so it doesn't show up on lists + if kwargs['image_id'] != FLAGS.vpn_image_id: + image = self._get_image(context, kwargs['image_id']) + + # FIXME(ja): if image is cloudpipe, this breaks + + # 
get defaults from imagestore + image_id = image['imageId'] + kernel_id = image.get('kernelId', FLAGS.default_kernel) + ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk) + + # API parameters overrides of defaults + kernel_id = kwargs.get('kernel_id', kernel_id) + ramdisk_id = kwargs.get('ramdisk_id', ramdisk_id) + + # make sure we have access to kernel and ramdisk + self._get_image(context, kernel_id) + self._get_image(context, ramdisk_id) + + logging.debug("Going to run instances...") + reservation_id = utils.generate_uid('r') + launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) + key_data = None + if kwargs.has_key('key_name'): + key_pair = context.user.get_key_pair(kwargs['key_name']) + if not key_pair: + raise exception.ApiError('Key Pair %s not found' % + kwargs['key_name']) + key_data = key_pair.public_key + network_topic = yield self._get_network_topic(context) + # TODO: Get the real security group of launch in here + security_group = "default" + for num in range(int(kwargs['max_count'])): + vpn = False + if image_id == FLAGS.vpn_image_id: + vpn = True + allocate_result = yield rpc.call(network_topic, + {"method": "allocate_fixed_ip", + "args": {"user_id": context.user.id, + "project_id": context.project.id, + "security_group": security_group, + "vpn": vpn}}) + allocate_data = allocate_result['result'] + inst = self.instdir.new() + inst['image_id'] = image_id + inst['kernel_id'] = kernel_id + inst['ramdisk_id'] = ramdisk_id + inst['user_data'] = kwargs.get('user_data', '') + inst['instance_type'] = kwargs.get('instance_type', 'm1.small') + inst['reservation_id'] = reservation_id + inst['launch_time'] = launch_time + inst['key_data'] = key_data or '' + inst['key_name'] = kwargs.get('key_name', '') + inst['user_id'] = context.user.id + inst['project_id'] = context.project.id + inst['ami_launch_index'] = num + inst['security_group'] = security_group + for (key, value) in allocate_data.iteritems(): + inst[key] = value + + inst.save() + 
rpc.cast(FLAGS.compute_topic, + {"method": "run_instance", + "args": {"instance_id" : inst.instance_id}}) + logging.debug("Casting to node for %s's instance with IP of %s" % + (context.user.name, inst['private_dns_name'])) + # TODO: Make Network figure out the network name from ip. + defer.returnValue(self._format_instances(context, reservation_id)) + + @rbac.allow('projectmanager', 'sysadmin') + @defer.inlineCallbacks + def terminate_instances(self, context, instance_id, **kwargs): + logging.debug("Going to start terminating instances") + network_topic = yield self._get_network_topic(context) + for i in instance_id: + logging.debug("Going to try and terminate %s" % i) + try: + instance = self._get_instance(context, i) + except exception.NotFound: + logging.warning("Instance %s was not found during terminate" + % i) + continue + elastic_ip = network_model.get_public_ip_for_instance(i) + if elastic_ip: + logging.debug("Disassociating address %s" % elastic_ip) + # NOTE(vish): Right now we don't really care if the ip is + # disassociated. We may need to worry about + # checking this later. Perhaps in the scheduler? + rpc.cast(network_topic, + {"method": "disassociate_elastic_ip", + "args": {"elastic_ip": elastic_ip}}) + + fixed_ip = instance.get('private_dns_name', None) + if fixed_ip: + logging.debug("Deallocating address %s" % fixed_ip) + # NOTE(vish): Right now we don't really care if the ip is + # actually removed. We may need to worry about + # checking this later. Perhaps in the scheduler? 
+ rpc.cast(network_topic, + {"method": "deallocate_fixed_ip", + "args": {"fixed_ip": fixed_ip}}) + + if instance.get('node_name', 'unassigned') != 'unassigned': + # NOTE(joshua?): It's also internal default + rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), + {"method": "terminate_instance", + "args": {"instance_id": i}}) + else: + instance.destroy() + defer.returnValue(True) + + @rbac.allow('projectmanager', 'sysadmin') + def reboot_instances(self, context, instance_id, **kwargs): + """instance_id is a list of instance ids""" + for i in instance_id: + instance = self._get_instance(context, i) + rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), + {"method": "reboot_instance", + "args" : {"instance_id": i}}) + return defer.succeed(True) + + @rbac.allow('projectmanager', 'sysadmin') + def delete_volume(self, context, volume_id, **kwargs): + # TODO: return error if not authorized + volume = self._get_volume(context, volume_id) + volume_node = volume['node_name'] + rpc.cast('%s.%s' % (FLAGS.volume_topic, volume_node), + {"method": "delete_volume", + "args" : {"volume_id": volume_id}}) + return defer.succeed(True) + + @rbac.allow('all') + def describe_images(self, context, image_id=None, **kwargs): + # The objectstore does its own authorization for describe + imageSet = images.list(context, image_id) + return defer.succeed({'imagesSet': imageSet}) + + @rbac.allow('projectmanager', 'sysadmin') + def deregister_image(self, context, image_id, **kwargs): + # FIXME: should the objectstore be doing these authorization checks? + images.deregister(context, image_id) + return defer.succeed({'imageId': image_id}) + + @rbac.allow('projectmanager', 'sysadmin') + def register_image(self, context, image_location=None, **kwargs): + # FIXME: should the objectstore be doing these authorization checks? 
+ if image_location is None and kwargs.has_key('name'): + image_location = kwargs['name'] + image_id = images.register(context, image_location) + logging.debug("Registered %s as %s" % (image_location, image_id)) + + return defer.succeed({'imageId': image_id}) + + @rbac.allow('all') + def describe_image_attribute(self, context, image_id, attribute, **kwargs): + if attribute != 'launchPermission': + raise exception.ApiError('attribute not supported: %s' % attribute) + try: + image = images.list(context, image_id)[0] + except IndexError: + raise exception.ApiError('invalid id: %s' % image_id) + result = { 'image_id': image_id, 'launchPermission': [] } + if image['isPublic']: + result['launchPermission'].append({ 'group': 'all' }) + return defer.succeed(result) + + @rbac.allow('projectmanager', 'sysadmin') + def modify_image_attribute(self, context, image_id, attribute, operation_type, **kwargs): + # TODO(devcamcar): Support users and groups other than 'all'. + if attribute != 'launchPermission': + raise exception.ApiError('attribute not supported: %s' % attribute) + if not 'user_group' in kwargs: + raise exception.ApiError('user or group not specified') + if len(kwargs['user_group']) != 1 and kwargs['user_group'][0] != 'all': + raise exception.ApiError('only group "all" is supported') + if not operation_type in ['add', 'remove']: + raise exception.ApiError('operation_type must be add or remove') + result = images.modify(context, image_id, operation_type) + return defer.succeed(result) + + def update_state(self, topic, value): + """ accepts status reports from the queue and consolidates them """ + # TODO(jmc): if an instance has disappeared from + # the node, call instance_death + if topic == "instances": + return defer.succeed(True) + aggregate_state = getattr(self, topic) + node_name = value.keys()[0] + items = value[node_name] + + logging.debug("Updating %s state for %s" % (topic, node_name)) + + for item_id in items.keys(): + if (aggregate_state.has_key('pending') 
and + aggregate_state['pending'].has_key(item_id)): + del aggregate_state['pending'][item_id] + aggregate_state[node_name] = items + + return defer.succeed(True) diff --git a/nova/endpoint/aws/images.py b/nova/endpoint/aws/images.py new file mode 100644 index 000000000..fe7cb5d11 --- /dev/null +++ b/nova/endpoint/aws/images.py @@ -0,0 +1,95 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Proxy AMI-related calls from the cloud controller, to the running +objectstore daemon. 
+""" + +import boto.s3.connection +import json +import urllib + +from nova import flags +from nova import utils +from nova.auth import manager + + +FLAGS = flags.FLAGS + +def modify(context, image_id, operation): + conn(context).make_request( + method='POST', + bucket='_images', + query_args=qs({'image_id': image_id, 'operation': operation})) + + return True + + +def register(context, image_location): + """ rpc call to register a new image based from a manifest """ + + image_id = utils.generate_uid('ami') + conn(context).make_request( + method='PUT', + bucket='_images', + query_args=qs({'image_location': image_location, + 'image_id': image_id})) + + return image_id + +def list(context, filter_list=[]): + """ return a list of all images that a user can see + + optionally filtered by a list of image_id """ + + # FIXME: send along the list of only_images to check for + response = conn(context).make_request( + method='GET', + bucket='_images') + + result = json.loads(response.read()) + if not filter_list is None: + return [i for i in result if i['imageId'] in filter_list] + return result + +def deregister(context, image_id): + """ unregister an image """ + conn(context).make_request( + method='DELETE', + bucket='_images', + query_args=qs({'image_id': image_id})) + +def conn(context): + access = manager.AuthManager().get_access_key(context.user, + context.project) + secret = str(context.user.secret) + calling = boto.s3.connection.OrdinaryCallingFormat() + return boto.s3.connection.S3Connection(aws_access_key_id=access, + aws_secret_access_key=secret, + is_secure=False, + calling_format=calling, + port=FLAGS.s3_port, + host=FLAGS.s3_host) + + +def qs(params): + pairs = [] + for key in params.keys(): + pairs.append(key + '=' + urllib.quote(params[key])) + return '&'.join(pairs) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py deleted file mode 100644 index 878d54a15..000000000 --- a/nova/endpoint/cloud.py +++ /dev/null @@ -1,729 +0,0 @@ -# vim: tabstop=4 
shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Cloud Controller: Implementation of EC2 REST API calls, which are -dispatched to other nodes via AMQP RPC. State is via distributed -datastore. -""" - -import base64 -import logging -import os -import time -from twisted.internet import defer - -from nova import datastore -from nova import exception -from nova import flags -from nova import rpc -from nova import utils -from nova.auth import rbac -from nova.auth import manager -from nova.compute import model -from nova.compute.instance_types import INSTANCE_TYPES -from nova.endpoint import images -from nova.network import service as network_service -from nova.network import model as network_model -from nova.volume import service - - -FLAGS = flags.FLAGS - -flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') - -def _gen_key(user_id, key_name): - """ Tuck this into AuthManager """ - try: - mgr = manager.AuthManager() - private_key, fingerprint = mgr.generate_key_pair(user_id, key_name) - except Exception as ex: - return {'exception': ex} - return {'private_key': private_key, 'fingerprint': fingerprint} - - -class CloudController(object): - """ CloudController provides the critical dispatch between - inbound API calls through the endpoint and messages - sent to 
the other nodes. -""" - def __init__(self): - self.instdir = model.InstanceDirectory() - self.setup() - - @property - def instances(self): - """ All instances in the system, as dicts """ - return self.instdir.all - - @property - def volumes(self): - """ returns a list of all volumes """ - for volume_id in datastore.Redis.instance().smembers("volumes"): - volume = service.get_volume(volume_id) - yield volume - - def __str__(self): - return 'CloudController' - - def setup(self): - """ Ensure the keychains and folders exist. """ - # Create keys folder, if it doesn't exist - if not os.path.exists(FLAGS.keys_path): - os.makedirs(os.path.abspath(FLAGS.keys_path)) - # Gen root CA, if we don't have one - root_ca_path = os.path.join(FLAGS.ca_path, FLAGS.ca_file) - if not os.path.exists(root_ca_path): - start = os.getcwd() - os.chdir(FLAGS.ca_path) - utils.runthis("Generating root CA: %s", "sh genrootca.sh") - os.chdir(start) - # TODO: Do this with M2Crypto instead - - def get_instance_by_ip(self, ip): - return self.instdir.by_ip(ip) - - def _get_mpi_data(self, project_id): - result = {} - for instance in self.instdir.all: - if instance['project_id'] == project_id: - line = '%s slots=%d' % (instance['private_dns_name'], INSTANCE_TYPES[instance['instance_type']]['vcpus']) - if instance['key_name'] in result: - result[instance['key_name']].append(line) - else: - result[instance['key_name']] = [line] - return result - - def get_metadata(self, ip): - i = self.get_instance_by_ip(ip) - if i is None: - return None - mpi = self._get_mpi_data(i['project_id']) - if i['key_name']: - keys = { - '0': { - '_name': i['key_name'], - 'openssh-key': i['key_data'] - } - } - else: - keys = '' - data = { - 'user-data': base64.b64decode(i['user_data']), - 'meta-data': { - 'ami-id': i['image_id'], - 'ami-launch-index': i['ami_launch_index'], - 'ami-manifest-path': 'FIXME', # image property - 'block-device-mapping': { # TODO: replace with real data - 'ami': 'sda1', - 'ephemeral0': 'sda2', - 'root': 
'/dev/sda1', - 'swap': 'sda3' - }, - 'hostname': i['private_dns_name'], # is this public sometimes? - 'instance-action': 'none', - 'instance-id': i['instance_id'], - 'instance-type': i.get('instance_type', ''), - 'local-hostname': i['private_dns_name'], - 'local-ipv4': i['private_dns_name'], # TODO: switch to IP - 'kernel-id': i.get('kernel_id', ''), - 'placement': { - 'availaibility-zone': i.get('availability_zone', 'nova'), - }, - 'public-hostname': i.get('dns_name', ''), - 'public-ipv4': i.get('dns_name', ''), # TODO: switch to IP - 'public-keys' : keys, - 'ramdisk-id': i.get('ramdisk_id', ''), - 'reservation-id': i['reservation_id'], - 'security-groups': i.get('groups', ''), - 'mpi': mpi - } - } - if False: # TODO: store ancestor ids - data['ancestor-ami-ids'] = [] - if i.get('product_codes', None): - data['product-codes'] = i['product_codes'] - return data - - @rbac.allow('all') - def describe_availability_zones(self, context, **kwargs): - return {'availabilityZoneInfo': [{'zoneName': 'nova', - 'zoneState': 'available'}]} - - @rbac.allow('all') - def describe_regions(self, context, region_name=None, **kwargs): - # TODO(vish): region_name is an array. 
Support filtering - return {'regionInfo': [{'regionName': 'nova', - 'regionUrl': FLAGS.ec2_url}]} - - @rbac.allow('all') - def describe_snapshots(self, - context, - snapshot_id=None, - owner=None, - restorable_by=None, - **kwargs): - return {'snapshotSet': [{'snapshotId': 'fixme', - 'volumeId': 'fixme', - 'status': 'fixme', - 'startTime': 'fixme', - 'progress': 'fixme', - 'ownerId': 'fixme', - 'volumeSize': 0, - 'description': 'fixme'}]} - - @rbac.allow('all') - def describe_key_pairs(self, context, key_name=None, **kwargs): - key_pairs = context.user.get_key_pairs() - if not key_name is None: - key_pairs = [x for x in key_pairs if x.name in key_name] - - result = [] - for key_pair in key_pairs: - # filter out the vpn keys - suffix = FLAGS.vpn_key_suffix - if context.user.is_admin() or not key_pair.name.endswith(suffix): - result.append({ - 'keyName': key_pair.name, - 'keyFingerprint': key_pair.fingerprint, - }) - - return { 'keypairsSet': result } - - @rbac.allow('all') - def create_key_pair(self, context, key_name, **kwargs): - try: - d = defer.Deferred() - p = context.handler.application.settings.get('pool') - def _complete(kwargs): - if 'exception' in kwargs: - d.errback(kwargs['exception']) - return - d.callback({'keyName': key_name, - 'keyFingerprint': kwargs['fingerprint'], - 'keyMaterial': kwargs['private_key']}) - p.apply_async(_gen_key, [context.user.id, key_name], - callback=_complete) - return d - - except manager.UserError as e: - raise - - @rbac.allow('all') - def delete_key_pair(self, context, key_name, **kwargs): - context.user.delete_key_pair(key_name) - # aws returns true even if the key doens't exist - return True - - @rbac.allow('all') - def describe_security_groups(self, context, group_names, **kwargs): - groups = { 'securityGroupSet': [] } - - # Stubbed for now to unblock other things. 
- return groups - - @rbac.allow('netadmin') - def create_security_group(self, context, group_name, **kwargs): - return True - - @rbac.allow('netadmin') - def delete_security_group(self, context, group_name, **kwargs): - return True - - @rbac.allow('projectmanager', 'sysadmin') - def get_console_output(self, context, instance_id, **kwargs): - # instance_id is passed in as a list of instances - instance = self._get_instance(context, instance_id[0]) - return rpc.call('%s.%s' % (FLAGS.compute_topic, instance['node_name']), - {"method": "get_console_output", - "args" : {"instance_id": instance_id[0]}}) - - def _get_user_id(self, context): - if context and context.user: - return context.user.id - else: - return None - - @rbac.allow('projectmanager', 'sysadmin') - def describe_volumes(self, context, **kwargs): - volumes = [] - for volume in self.volumes: - if context.user.is_admin() or volume['project_id'] == context.project.id: - v = self.format_volume(context, volume) - volumes.append(v) - return defer.succeed({'volumeSet': volumes}) - - def format_volume(self, context, volume): - v = {} - v['volumeId'] = volume['volume_id'] - v['status'] = volume['status'] - v['size'] = volume['size'] - v['availabilityZone'] = volume['availability_zone'] - v['createTime'] = volume['create_time'] - if context.user.is_admin(): - v['status'] = '%s (%s, %s, %s, %s)' % ( - volume.get('status', None), - volume.get('user_id', None), - volume.get('node_name', None), - volume.get('instance_id', ''), - volume.get('mountpoint', '')) - if volume['attach_status'] == 'attached': - v['attachmentSet'] = [{'attachTime': volume['attach_time'], - 'deleteOnTermination': volume['delete_on_termination'], - 'device' : volume['mountpoint'], - 'instanceId' : volume['instance_id'], - 'status' : 'attached', - 'volume_id' : volume['volume_id']}] - else: - v['attachmentSet'] = [{}] - return v - - @rbac.allow('projectmanager', 'sysadmin') - @defer.inlineCallbacks - def create_volume(self, context, size, **kwargs): 
- # TODO(vish): refactor this to create the volume object here and tell service to create it - result = yield rpc.call(FLAGS.volume_topic, {"method": "create_volume", - "args" : {"size": size, - "user_id": context.user.id, - "project_id": context.project.id}}) - # NOTE(vish): rpc returned value is in the result key in the dictionary - volume = self._get_volume(context, result['result']) - defer.returnValue({'volumeSet': [self.format_volume(context, volume)]}) - - def _get_address(self, context, public_ip): - # FIXME(vish) this should move into network.py - address = network_model.PublicAddress.lookup(public_ip) - if address and (context.user.is_admin() or address['project_id'] == context.project.id): - return address - raise exception.NotFound("Address at ip %s not found" % public_ip) - - def _get_image(self, context, image_id): - """passes in context because - objectstore does its own authorization""" - result = images.list(context, [image_id]) - if not result: - raise exception.NotFound('Image %s could not be found' % image_id) - image = result[0] - return image - - def _get_instance(self, context, instance_id): - for instance in self.instdir.all: - if instance['instance_id'] == instance_id: - if context.user.is_admin() or instance['project_id'] == context.project.id: - return instance - raise exception.NotFound('Instance %s could not be found' % instance_id) - - def _get_volume(self, context, volume_id): - volume = service.get_volume(volume_id) - if context.user.is_admin() or volume['project_id'] == context.project.id: - return volume - raise exception.NotFound('Volume %s could not be found' % volume_id) - - @rbac.allow('projectmanager', 'sysadmin') - def attach_volume(self, context, volume_id, instance_id, device, **kwargs): - volume = self._get_volume(context, volume_id) - if volume['status'] == "attached": - raise exception.ApiError("Volume is already attached") - # TODO(vish): looping through all volumes is slow. 
We should probably maintain an index - for vol in self.volumes: - if vol['instance_id'] == instance_id and vol['mountpoint'] == device: - raise exception.ApiError("Volume %s is already attached to %s" % (vol['volume_id'], vol['mountpoint'])) - volume.start_attach(instance_id, device) - instance = self._get_instance(context, instance_id) - compute_node = instance['node_name'] - rpc.cast('%s.%s' % (FLAGS.compute_topic, compute_node), - {"method": "attach_volume", - "args" : {"volume_id": volume_id, - "instance_id" : instance_id, - "mountpoint" : device}}) - return defer.succeed({'attachTime' : volume['attach_time'], - 'device' : volume['mountpoint'], - 'instanceId' : instance_id, - 'requestId' : context.request_id, - 'status' : volume['attach_status'], - 'volumeId' : volume_id}) - - - @rbac.allow('projectmanager', 'sysadmin') - def detach_volume(self, context, volume_id, **kwargs): - volume = self._get_volume(context, volume_id) - instance_id = volume.get('instance_id', None) - if not instance_id: - raise exception.Error("Volume isn't attached to anything!") - if volume['status'] == "available": - raise exception.Error("Volume is already detached") - try: - volume.start_detach() - instance = self._get_instance(context, instance_id) - rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), - {"method": "detach_volume", - "args" : {"instance_id": instance_id, - "volume_id": volume_id}}) - except exception.NotFound: - # If the instance doesn't exist anymore, - # then we need to call detach blind - volume.finish_detach() - return defer.succeed({'attachTime' : volume['attach_time'], - 'device' : volume['mountpoint'], - 'instanceId' : instance_id, - 'requestId' : context.request_id, - 'status' : volume['attach_status'], - 'volumeId' : volume_id}) - - def _convert_to_set(self, lst, label): - if lst == None or lst == []: - return None - if not isinstance(lst, list): - lst = [lst] - return [{label: x} for x in lst] - - @rbac.allow('all') - def 
describe_instances(self, context, **kwargs): - return defer.succeed(self._format_instances(context)) - - def _format_instances(self, context, reservation_id = None): - reservations = {} - if context.user.is_admin(): - instgenerator = self.instdir.all - else: - instgenerator = self.instdir.by_project(context.project.id) - for instance in instgenerator: - res_id = instance.get('reservation_id', 'Unknown') - if reservation_id != None and reservation_id != res_id: - continue - if not context.user.is_admin(): - if instance['image_id'] == FLAGS.vpn_image_id: - continue - i = {} - i['instance_id'] = instance.get('instance_id', None) - i['image_id'] = instance.get('image_id', None) - i['instance_state'] = { - 'code': instance.get('state', 0), - 'name': instance.get('state_description', 'pending') - } - i['public_dns_name'] = network_model.get_public_ip_for_instance( - i['instance_id']) - i['private_dns_name'] = instance.get('private_dns_name', None) - if not i['public_dns_name']: - i['public_dns_name'] = i['private_dns_name'] - i['dns_name'] = instance.get('dns_name', None) - i['key_name'] = instance.get('key_name', None) - if context.user.is_admin(): - i['key_name'] = '%s (%s, %s)' % (i['key_name'], - instance.get('project_id', None), instance.get('node_name','')) - i['product_codes_set'] = self._convert_to_set( - instance.get('product_codes', None), 'product_code') - i['instance_type'] = instance.get('instance_type', None) - i['launch_time'] = instance.get('launch_time', None) - i['ami_launch_index'] = instance.get('ami_launch_index', - None) - if not reservations.has_key(res_id): - r = {} - r['reservation_id'] = res_id - r['owner_id'] = instance.get('project_id', None) - r['group_set'] = self._convert_to_set( - instance.get('groups', None), 'group_id') - r['instances_set'] = [] - reservations[res_id] = r - reservations[res_id]['instances_set'].append(i) - - instance_response = {'reservationSet' : list(reservations.values()) } - return instance_response - - 
@rbac.allow('all') - def describe_addresses(self, context, **kwargs): - return self.format_addresses(context) - - def format_addresses(self, context): - addresses = [] - for address in network_model.PublicAddress.all(): - # TODO(vish): implement a by_project iterator for addresses - if (context.user.is_admin() or - address['project_id'] == context.project.id): - address_rv = { - 'public_ip': address['address'], - 'instance_id' : address.get('instance_id', 'free') - } - if context.user.is_admin(): - address_rv['instance_id'] = "%s (%s, %s)" % ( - address['instance_id'], - address['user_id'], - address['project_id'], - ) - addresses.append(address_rv) - return {'addressesSet': addresses} - - @rbac.allow('netadmin') - @defer.inlineCallbacks - def allocate_address(self, context, **kwargs): - network_topic = yield self._get_network_topic(context) - alloc_result = yield rpc.call(network_topic, - {"method": "allocate_elastic_ip", - "args": {"user_id": context.user.id, - "project_id": context.project.id}}) - public_ip = alloc_result['result'] - defer.returnValue({'addressSet': [{'publicIp' : public_ip}]}) - - @rbac.allow('netadmin') - @defer.inlineCallbacks - def release_address(self, context, public_ip, **kwargs): - # NOTE(vish): Should we make sure this works? 
- network_topic = yield self._get_network_topic(context) - rpc.cast(network_topic, - {"method": "deallocate_elastic_ip", - "args": {"elastic_ip": public_ip}}) - defer.returnValue({'releaseResponse': ["Address released."]}) - - @rbac.allow('netadmin') - @defer.inlineCallbacks - def associate_address(self, context, instance_id, public_ip, **kwargs): - instance = self._get_instance(context, instance_id) - address = self._get_address(context, public_ip) - network_topic = yield self._get_network_topic(context) - rpc.cast(network_topic, - {"method": "associate_elastic_ip", - "args": {"elastic_ip": address['address'], - "fixed_ip": instance['private_dns_name'], - "instance_id": instance['instance_id']}}) - defer.returnValue({'associateResponse': ["Address associated."]}) - - @rbac.allow('netadmin') - @defer.inlineCallbacks - def disassociate_address(self, context, public_ip, **kwargs): - address = self._get_address(context, public_ip) - network_topic = yield self._get_network_topic(context) - rpc.cast(network_topic, - {"method": "disassociate_elastic_ip", - "args": {"elastic_ip": address['address']}}) - defer.returnValue({'disassociateResponse': ["Address disassociated."]}) - - @defer.inlineCallbacks - def _get_network_topic(self, context): - """Retrieves the network host for a project""" - host = network_service.get_host_for_project(context.project.id) - if not host: - result = yield rpc.call(FLAGS.network_topic, - {"method": "set_network_host", - "args": {"user_id": context.user.id, - "project_id": context.project.id}}) - host = result['result'] - defer.returnValue('%s.%s' %(FLAGS.network_topic, host)) - - @rbac.allow('projectmanager', 'sysadmin') - @defer.inlineCallbacks - def run_instances(self, context, **kwargs): - # make sure user can access the image - # vpn image is private so it doesn't show up on lists - if kwargs['image_id'] != FLAGS.vpn_image_id: - image = self._get_image(context, kwargs['image_id']) - - # FIXME(ja): if image is cloudpipe, this breaks - - # 
get defaults from imagestore - image_id = image['imageId'] - kernel_id = image.get('kernelId', FLAGS.default_kernel) - ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk) - - # API parameters overrides of defaults - kernel_id = kwargs.get('kernel_id', kernel_id) - ramdisk_id = kwargs.get('ramdisk_id', ramdisk_id) - - # make sure we have access to kernel and ramdisk - self._get_image(context, kernel_id) - self._get_image(context, ramdisk_id) - - logging.debug("Going to run instances...") - reservation_id = utils.generate_uid('r') - launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) - key_data = None - if kwargs.has_key('key_name'): - key_pair = context.user.get_key_pair(kwargs['key_name']) - if not key_pair: - raise exception.ApiError('Key Pair %s not found' % - kwargs['key_name']) - key_data = key_pair.public_key - network_topic = yield self._get_network_topic(context) - # TODO: Get the real security group of launch in here - security_group = "default" - for num in range(int(kwargs['max_count'])): - vpn = False - if image_id == FLAGS.vpn_image_id: - vpn = True - allocate_result = yield rpc.call(network_topic, - {"method": "allocate_fixed_ip", - "args": {"user_id": context.user.id, - "project_id": context.project.id, - "security_group": security_group, - "vpn": vpn}}) - allocate_data = allocate_result['result'] - inst = self.instdir.new() - inst['image_id'] = image_id - inst['kernel_id'] = kernel_id - inst['ramdisk_id'] = ramdisk_id - inst['user_data'] = kwargs.get('user_data', '') - inst['instance_type'] = kwargs.get('instance_type', 'm1.small') - inst['reservation_id'] = reservation_id - inst['launch_time'] = launch_time - inst['key_data'] = key_data or '' - inst['key_name'] = kwargs.get('key_name', '') - inst['user_id'] = context.user.id - inst['project_id'] = context.project.id - inst['ami_launch_index'] = num - inst['security_group'] = security_group - for (key, value) in allocate_data.iteritems(): - inst[key] = value - - inst.save() - 
rpc.cast(FLAGS.compute_topic, - {"method": "run_instance", - "args": {"instance_id" : inst.instance_id}}) - logging.debug("Casting to node for %s's instance with IP of %s" % - (context.user.name, inst['private_dns_name'])) - # TODO: Make Network figure out the network name from ip. - defer.returnValue(self._format_instances(context, reservation_id)) - - @rbac.allow('projectmanager', 'sysadmin') - @defer.inlineCallbacks - def terminate_instances(self, context, instance_id, **kwargs): - logging.debug("Going to start terminating instances") - network_topic = yield self._get_network_topic(context) - for i in instance_id: - logging.debug("Going to try and terminate %s" % i) - try: - instance = self._get_instance(context, i) - except exception.NotFound: - logging.warning("Instance %s was not found during terminate" - % i) - continue - elastic_ip = network_model.get_public_ip_for_instance(i) - if elastic_ip: - logging.debug("Disassociating address %s" % elastic_ip) - # NOTE(vish): Right now we don't really care if the ip is - # disassociated. We may need to worry about - # checking this later. Perhaps in the scheduler? - rpc.cast(network_topic, - {"method": "disassociate_elastic_ip", - "args": {"elastic_ip": elastic_ip}}) - - fixed_ip = instance.get('private_dns_name', None) - if fixed_ip: - logging.debug("Deallocating address %s" % fixed_ip) - # NOTE(vish): Right now we don't really care if the ip is - # actually removed. We may need to worry about - # checking this later. Perhaps in the scheduler? 
- rpc.cast(network_topic, - {"method": "deallocate_fixed_ip", - "args": {"fixed_ip": fixed_ip}}) - - if instance.get('node_name', 'unassigned') != 'unassigned': - # NOTE(joshua?): It's also internal default - rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), - {"method": "terminate_instance", - "args": {"instance_id": i}}) - else: - instance.destroy() - defer.returnValue(True) - - @rbac.allow('projectmanager', 'sysadmin') - def reboot_instances(self, context, instance_id, **kwargs): - """instance_id is a list of instance ids""" - for i in instance_id: - instance = self._get_instance(context, i) - rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), - {"method": "reboot_instance", - "args" : {"instance_id": i}}) - return defer.succeed(True) - - @rbac.allow('projectmanager', 'sysadmin') - def delete_volume(self, context, volume_id, **kwargs): - # TODO: return error if not authorized - volume = self._get_volume(context, volume_id) - volume_node = volume['node_name'] - rpc.cast('%s.%s' % (FLAGS.volume_topic, volume_node), - {"method": "delete_volume", - "args" : {"volume_id": volume_id}}) - return defer.succeed(True) - - @rbac.allow('all') - def describe_images(self, context, image_id=None, **kwargs): - # The objectstore does its own authorization for describe - imageSet = images.list(context, image_id) - return defer.succeed({'imagesSet': imageSet}) - - @rbac.allow('projectmanager', 'sysadmin') - def deregister_image(self, context, image_id, **kwargs): - # FIXME: should the objectstore be doing these authorization checks? - images.deregister(context, image_id) - return defer.succeed({'imageId': image_id}) - - @rbac.allow('projectmanager', 'sysadmin') - def register_image(self, context, image_location=None, **kwargs): - # FIXME: should the objectstore be doing these authorization checks? 
- if image_location is None and kwargs.has_key('name'): - image_location = kwargs['name'] - image_id = images.register(context, image_location) - logging.debug("Registered %s as %s" % (image_location, image_id)) - - return defer.succeed({'imageId': image_id}) - - @rbac.allow('all') - def describe_image_attribute(self, context, image_id, attribute, **kwargs): - if attribute != 'launchPermission': - raise exception.ApiError('attribute not supported: %s' % attribute) - try: - image = images.list(context, image_id)[0] - except IndexError: - raise exception.ApiError('invalid id: %s' % image_id) - result = { 'image_id': image_id, 'launchPermission': [] } - if image['isPublic']: - result['launchPermission'].append({ 'group': 'all' }) - return defer.succeed(result) - - @rbac.allow('projectmanager', 'sysadmin') - def modify_image_attribute(self, context, image_id, attribute, operation_type, **kwargs): - # TODO(devcamcar): Support users and groups other than 'all'. - if attribute != 'launchPermission': - raise exception.ApiError('attribute not supported: %s' % attribute) - if not 'user_group' in kwargs: - raise exception.ApiError('user or group not specified') - if len(kwargs['user_group']) != 1 and kwargs['user_group'][0] != 'all': - raise exception.ApiError('only group "all" is supported') - if not operation_type in ['add', 'remove']: - raise exception.ApiError('operation_type must be add or remove') - result = images.modify(context, image_id, operation_type) - return defer.succeed(result) - - def update_state(self, topic, value): - """ accepts status reports from the queue and consolidates them """ - # TODO(jmc): if an instance has disappeared from - # the node, call instance_death - if topic == "instances": - return defer.succeed(True) - aggregate_state = getattr(self, topic) - node_name = value.keys()[0] - items = value[node_name] - - logging.debug("Updating %s state for %s" % (topic, node_name)) - - for item_id in items.keys(): - if (aggregate_state.has_key('pending') 
and - aggregate_state['pending'].has_key(item_id)): - del aggregate_state['pending'][item_id] - aggregate_state[node_name] = items - - return defer.succeed(True) diff --git a/nova/endpoint/images.py b/nova/endpoint/images.py deleted file mode 100644 index fe7cb5d11..000000000 --- a/nova/endpoint/images.py +++ /dev/null @@ -1,95 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Proxy AMI-related calls from the cloud controller, to the running -objectstore daemon. 
-""" - -import boto.s3.connection -import json -import urllib - -from nova import flags -from nova import utils -from nova.auth import manager - - -FLAGS = flags.FLAGS - -def modify(context, image_id, operation): - conn(context).make_request( - method='POST', - bucket='_images', - query_args=qs({'image_id': image_id, 'operation': operation})) - - return True - - -def register(context, image_location): - """ rpc call to register a new image based from a manifest """ - - image_id = utils.generate_uid('ami') - conn(context).make_request( - method='PUT', - bucket='_images', - query_args=qs({'image_location': image_location, - 'image_id': image_id})) - - return image_id - -def list(context, filter_list=[]): - """ return a list of all images that a user can see - - optionally filtered by a list of image_id """ - - # FIXME: send along the list of only_images to check for - response = conn(context).make_request( - method='GET', - bucket='_images') - - result = json.loads(response.read()) - if not filter_list is None: - return [i for i in result if i['imageId'] in filter_list] - return result - -def deregister(context, image_id): - """ unregister an image """ - conn(context).make_request( - method='DELETE', - bucket='_images', - query_args=qs({'image_id': image_id})) - -def conn(context): - access = manager.AuthManager().get_access_key(context.user, - context.project) - secret = str(context.user.secret) - calling = boto.s3.connection.OrdinaryCallingFormat() - return boto.s3.connection.S3Connection(aws_access_key_id=access, - aws_secret_access_key=secret, - is_secure=False, - calling_format=calling, - port=FLAGS.s3_port, - host=FLAGS.s3_host) - - -def qs(params): - pairs = [] - for key in params.keys(): - pairs.append(key + '=' + urllib.quote(params[key])) - return '&'.join(pairs) diff --git a/nova/endpoint/rackspace.py b/nova/endpoint/rackspace.py deleted file mode 100644 index b4e6cd823..000000000 --- a/nova/endpoint/rackspace.py +++ /dev/null @@ -1,186 +0,0 @@ -# vim: 
tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Rackspace API Endpoint -""" - -import json -import time - -import webob.dec -import webob.exc - -from nova import flags -from nova import rpc -from nova import utils -from nova import wsgi -from nova.auth import manager -from nova.compute import model as compute -from nova.network import model as network - - -FLAGS = flags.FLAGS -flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') - - -class API(wsgi.Middleware): - """Entry point for all requests.""" - - def __init__(self): - super(API, self).__init__(Router(webob.exc.HTTPNotFound())) - - @webob.dec.wsgify - def __call__(self, req): - return self.application - context = {} - if "HTTP_X_AUTH_TOKEN" in req.environ: - context['user'] = manager.AuthManager().get_user_from_access_key( - req.environ['HTTP_X_AUTH_TOKEN']) - if context['user']: - context['project'] = manager.AuthManager().get_project( - context['user'].name) - if "user" not in context: - return webob.exc.HTTPForbidden() - environ['nova.context'] = context - return self.application - - -class Router(wsgi.Router): - """Route requests to the next WSGI application.""" - - def _build_map(self): - """Build routing map for authentication and cloud.""" - self.map.resource("server", "servers", 
controller=CloudServerAPI()) - #self._connect("/v1.0", controller=AuthenticationAPI()) - #cloud = CloudServerAPI() - #self._connect("/servers", controller=cloud.launch_server, - # conditions={"method": ["POST"]}) - #self._connect("/servers/{server_id}", controller=cloud.delete_server, - # conditions={'method': ["DELETE"]}) - #self._connect("/servers", controller=cloud) - - -class AuthenticationAPI(wsgi.Application): - """Handle all authorization requests through WSGI applications.""" - - @webob.dec.wsgify - def __call__(self, req): # pylint: disable-msg=W0221 - # TODO(todd): make a actual session with a unique token - # just pass the auth key back through for now - res = webob.Response() - res.status = '204 No Content' - res.headers.add('X-Server-Management-Url', req.host_url) - res.headers.add('X-Storage-Url', req.host_url) - res.headers.add('X-CDN-Managment-Url', req.host_url) - res.headers.add('X-Auth-Token', req.headers['X-Auth-Key']) - return res - - -class CloudServerAPI(wsgi.Application): - """Handle all server requests through WSGI applications.""" - - def __init__(self): - super(CloudServerAPI, self).__init__() - self.instdir = compute.InstanceDirectory() - self.network = network.PublicNetworkController() - - @webob.dec.wsgify - def __call__(self, req): # pylint: disable-msg=W0221 - value = {"servers": []} - for inst in self.instdir.all: - value["servers"].append(self.instance_details(inst)) - return json.dumps(value) - - def instance_details(self, inst): # pylint: disable-msg=R0201 - """Build the data structure to represent details for an instance.""" - return { - "id": inst.get("instance_id", None), - "imageId": inst.get("image_id", None), - "flavorId": inst.get("instacne_type", None), - "hostId": inst.get("node_name", None), - "status": inst.get("state", "pending"), - "addresses": { - "public": [network.get_public_ip_for_instance( - inst.get("instance_id", None))], - "private": [inst.get("private_dns_name", None)]}, - - # implemented only by Rackspace, 
not AWS - "name": inst.get("name", "Not-Specified"), - - # not supported - "progress": "Not-Supported", - "metadata": { - "Server Label": "Not-Supported", - "Image Version": "Not-Supported"}} - - @webob.dec.wsgify - def launch_server(self, req): - """Launch a new instance.""" - data = json.loads(req.body) - inst = self.build_server_instance(data, req.environ['nova.context']) - rpc.cast( - FLAGS.compute_topic, { - "method": "run_instance", - "args": {"instance_id": inst.instance_id}}) - - return json.dumps({"server": self.instance_details(inst)}) - - def build_server_instance(self, env, context): - """Build instance data structure and save it to the data store.""" - reservation = utils.generate_uid('r') - ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) - inst = self.instdir.new() - inst['name'] = env['server']['name'] - inst['image_id'] = env['server']['imageId'] - inst['instance_type'] = env['server']['flavorId'] - inst['user_id'] = context['user'].id - inst['project_id'] = context['project'].id - inst['reservation_id'] = reservation - inst['launch_time'] = ltime - inst['mac_address'] = utils.generate_mac() - address = self.network.allocate_ip( - inst['user_id'], - inst['project_id'], - mac=inst['mac_address']) - inst['private_dns_name'] = str(address) - inst['bridge_name'] = network.BridgedNetwork.get_network_for_project( - inst['user_id'], - inst['project_id'], - 'default')['bridge_name'] - # key_data, key_name, ami_launch_index - # TODO(todd): key data or root password - inst.save() - return inst - - @webob.dec.wsgify - @wsgi.route_args - def delete_server(self, req, route_args): # pylint: disable-msg=R0201 - """Delete an instance.""" - owner_hostname = None - instance = compute.Instance.lookup(route_args['server_id']) - if instance: - owner_hostname = instance["node_name"] - if not owner_hostname: - return webob.exc.HTTPNotFound("Did not find image, or it was " - "not in a running state.") - rpc_transport = "%s:%s" % (FLAGS.compute_topic, 
owner_hostname) - rpc.cast(rpc_transport, - {"method": "reboot_instance", - "args": {"instance_id": route_args['server_id']}}) - req.status = "202 Accepted" diff --git a/nova/endpoint/rackspace/controllers/base.py b/nova/endpoint/rackspace/controllers/base.py new file mode 100644 index 000000000..a83925cc3 --- /dev/null +++ b/nova/endpoint/rackspace/controllers/base.py @@ -0,0 +1,9 @@ +class BaseController(object): + @classmethod + def render(cls, instance): + if isinstance(instance, list): + return [ cls.entity_name : { cls.render(instance) } + else + return + + diff --git a/nova/endpoint/rackspace/controllers/flavors.py b/nova/endpoint/rackspace/controllers/flavors.py new file mode 100644 index 000000000..e69de29bb diff --git a/nova/endpoint/rackspace/controllers/images.py b/nova/endpoint/rackspace/controllers/images.py new file mode 100644 index 000000000..e69de29bb diff --git a/nova/endpoint/rackspace/controllers/servers.py b/nova/endpoint/rackspace/controllers/servers.py new file mode 100644 index 000000000..af6c958bb --- /dev/null +++ b/nova/endpoint/rackspace/controllers/servers.py @@ -0,0 +1,72 @@ +from nova import rpc +from nova.compute import model as compute +from nova.endpoint.rackspace import BaseController + +class ServersController(BaseController): + entity_name = 'servers' + + def __init__(self): + raise NotImplemented("You may not create an instance of this class") + + @classmethod + def index(cls): + return [instance_details(inst) for inst in compute.InstanceDirectory().all] + + @classmethod + def show(cls, **kwargs): + instance_id = kwargs['id'] + return compute.InstanceDirectory().get(instance_id) + + @classmethod + def delete(cls, **kwargs): + instance_id = kwargs['id'] + instance = compute.InstanceDirectory().get(instance_id) + if not instance + raise ServerNotFound("The requested server was not found") + instance.destroy() + return True + + @classmethod + def create(cls, **kwargs): + inst = self.build_server_instance(kwargs['server']) + 
rpc.cast( + FLAGS.compute_topic, { + "method": "run_instance", + "args": {"instance_id": inst.instance_id}}) + + @classmethod + def update(cls, **kwargs): + instance_id = kwargs['id'] + instance = compute.InstanceDirectory().get(instance_id) + if not instance: + raise ServerNotFound("The requested server was not found") + instance.update(kwargs['server']) + instance.save() + + @classmethod + def build_server_instance(self, env): + """Build instance data structure and save it to the data store.""" + reservation = utils.generate_uid('r') + ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) + inst = self.instdir.new() + inst['name'] = env['server']['name'] + inst['image_id'] = env['server']['imageId'] + inst['instance_type'] = env['server']['flavorId'] + inst['user_id'] = env['user']['id'] + inst['project_id'] = env['project']['id'] + inst['reservation_id'] = reservation + inst['launch_time'] = ltime + inst['mac_address'] = utils.generate_mac() + address = self.network.allocate_ip( + inst['user_id'], + inst['project_id'], + mac=inst['mac_address']) + inst['private_dns_name'] = str(address) + inst['bridge_name'] = network.BridgedNetwork.get_network_for_project( + inst['user_id'], + inst['project_id'], + 'default')['bridge_name'] + # key_data, key_name, ami_launch_index + # TODO(todd): key data or root password + inst.save() + return inst diff --git a/nova/endpoint/rackspace/controllers/shared_ip_groups.py b/nova/endpoint/rackspace/controllers/shared_ip_groups.py new file mode 100644 index 000000000..e69de29bb diff --git a/nova/endpoint/rackspace/rackspace.py b/nova/endpoint/rackspace/rackspace.py new file mode 100644 index 000000000..75b828e91 --- /dev/null +++ b/nova/endpoint/rackspace/rackspace.py @@ -0,0 +1,183 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Rackspace API Endpoint +""" + +import json +import time + +import webob.dec +import webob.exc + +from nova import flags +from nova import rpc +from nova import utils +from nova import wsgi +from nova.auth import manager +from nova.compute import model as compute +from nova.network import model as network + + +FLAGS = flags.FLAGS +flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') + + +class API(wsgi.Middleware): + """Entry point for all requests.""" + + def __init__(self): + super(API, self).__init__(Router(webob.exc.HTTPNotFound())) + + def __call__(self, environ, start_response): + context = {} + if "HTTP_X_AUTH_TOKEN" in environ: + context['user'] = manager.AuthManager().get_user_from_access_key( + environ['HTTP_X_AUTH_TOKEN']) + if context['user']: + context['project'] = manager.AuthManager().get_project( + context['user'].name) + if "user" not in context: + return webob.exc.HTTPForbidden()(environ, start_response) + environ['nova.context'] = context + return self.application(environ, start_response) + + +class Router(wsgi.Router): + """Route requests to the next WSGI application.""" + + def _build_map(self): + """Build routing map for authentication and cloud.""" + self._connect("/v1.0", controller=AuthenticationAPI()) + cloud = CloudServerAPI() + self._connect("/servers", controller=cloud.launch_server, + conditions={"method": ["POST"]}) + self._connect("/servers/{server_id}", 
controller=cloud.delete_server, + conditions={'method': ["DELETE"]}) + self._connect("/servers", controller=cloud) + + +class AuthenticationAPI(wsgi.Application): + """Handle all authorization requests through WSGI applications.""" + + @webob.dec.wsgify + def __call__(self, req): # pylint: disable-msg=W0221 + # TODO(todd): make a actual session with a unique token + # just pass the auth key back through for now + res = webob.Response() + res.status = '204 No Content' + res.headers.add('X-Server-Management-Url', req.host_url) + res.headers.add('X-Storage-Url', req.host_url) + res.headers.add('X-CDN-Managment-Url', req.host_url) + res.headers.add('X-Auth-Token', req.headers['X-Auth-Key']) + return res + + +class CloudServerAPI(wsgi.Application): + """Handle all server requests through WSGI applications.""" + + def __init__(self): + super(CloudServerAPI, self).__init__() + self.instdir = compute.InstanceDirectory() + self.network = network.PublicNetworkController() + + @webob.dec.wsgify + def __call__(self, req): # pylint: disable-msg=W0221 + value = {"servers": []} + for inst in self.instdir.all: + value["servers"].append(self.instance_details(inst)) + return json.dumps(value) + + def instance_details(self, inst): # pylint: disable-msg=R0201 + """Build the data structure to represent details for an instance.""" + return { + "id": inst.get("instance_id", None), + "imageId": inst.get("image_id", None), + "flavorId": inst.get("instacne_type", None), + "hostId": inst.get("node_name", None), + "status": inst.get("state", "pending"), + "addresses": { + "public": [network.get_public_ip_for_instance( + inst.get("instance_id", None))], + "private": [inst.get("private_dns_name", None)]}, + + # implemented only by Rackspace, not AWS + "name": inst.get("name", "Not-Specified"), + + # not supported + "progress": "Not-Supported", + "metadata": { + "Server Label": "Not-Supported", + "Image Version": "Not-Supported"}} + + @webob.dec.wsgify + def launch_server(self, req): + """Launch 
a new instance.""" + data = json.loads(req.body) + inst = self.build_server_instance(data, req.environ['nova.context']) + rpc.cast( + FLAGS.compute_topic, { + "method": "run_instance", + "args": {"instance_id": inst.instance_id}}) + + return json.dumps({"server": self.instance_details(inst)}) + + def build_server_instance(self, env, context): + """Build instance data structure and save it to the data store.""" + reservation = utils.generate_uid('r') + ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) + inst = self.instdir.new() + inst['name'] = env['server']['name'] + inst['image_id'] = env['server']['imageId'] + inst['instance_type'] = env['server']['flavorId'] + inst['user_id'] = context['user'].id + inst['project_id'] = context['project'].id + inst['reservation_id'] = reservation + inst['launch_time'] = ltime + inst['mac_address'] = utils.generate_mac() + address = self.network.allocate_ip( + inst['user_id'], + inst['project_id'], + mac=inst['mac_address']) + inst['private_dns_name'] = str(address) + inst['bridge_name'] = network.BridgedNetwork.get_network_for_project( + inst['user_id'], + inst['project_id'], + 'default')['bridge_name'] + # key_data, key_name, ami_launch_index + # TODO(todd): key data or root password + inst.save() + return inst + + @webob.dec.wsgify + @wsgi.route_args + def delete_server(self, req, route_args): # pylint: disable-msg=R0201 + """Delete an instance.""" + owner_hostname = None + instance = compute.Instance.lookup(route_args['server_id']) + if instance: + owner_hostname = instance["node_name"] + if not owner_hostname: + return webob.exc.HTTPNotFound("Did not find image, or it was " + "not in a running state.") + rpc_transport = "%s:%s" % (FLAGS.compute_topic, owner_hostname) + rpc.cast(rpc_transport, + {"method": "reboot_instance", + "args": {"instance_id": route_args['server_id']}}) + req.status = "202 Accepted" -- cgit From fb382c8e705e1803abb5de77a1fd11e6f913af75 Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> 
Date: Wed, 11 Aug 2010 17:40:28 -0400 Subject: Adapts the run_tests.sh script to allow interactive or automated creation of virtualenv, or to run tests outside of a virtualenv --- run_tests.sh | 63 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 60 insertions(+), 3 deletions(-) diff --git a/run_tests.sh b/run_tests.sh index 85d7c8834..31bfce9fa 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -1,12 +1,69 @@ -#!/bin/bash +#!/bin/bash + +function usage { + echo "Usage: $0 [OPTION]..." + echo "Run Nova's test suite(s)" + echo "" + echo " -V, --virtual-env Always use virtualenv. Install automatically if not present" + echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment" + echo " -h, --help Print this usage message" + echo "" + echo "Note: with no options specified, the script will try to run the tests in a virtual environment," + echo " If no virtualenv is found, the script will ask if you would like to create one. If you " + echo " prefer to run tests NOT in a virtual environment, simply pass the -N option." 
+ exit +} + +function process_options { + array=$1 + elements=${#array[@]} + for (( x=0;x<$elements;x++)); do + process_option ${array[${x}]} + done +} + +function process_option { + option=$1 + case $option in + --help) usage;; + -h) usage;; + -V) let always_venv=1; let never_venv=0;; + --virtual-env) let always_venv=1; let never_venv=0;; + -N) let always_venv=0; let never_venv=1;; + --no-virtual-env) let always_venv=0; let never_venv=1;; + esac +} venv=.nova-venv with_venv=tools/with_venv.sh +always_venv=0 +never_venv=0 +options=("$@") + +process_options $options + +if [ $never_venv -eq 1 ]; then + # Just run the test suites in current environment + python run_tests.py + exit +fi if [ -e ${venv} ]; then ${with_venv} python run_tests.py $@ else - echo "No virtual environment found...creating one" - python tools/install_venv.py + if [ $always_venv -eq 1 ]; then + # Automatically install the virtualenv + python tools/install_venv.py + else + echo -e "No virtual environment found...create one? 
(Y/n) \c" + read use_ve + if [ "x$use_ve" = "xY" ]; then + # Install the virtualenv and run the test suite in it + python tools/install_venv.py + else + python run_tests.py + exit + fi + fi ${with_venv} python run_tests.py $@ fi -- cgit From 6664c960e08e31fa8b464b0ccbbf489da271e033 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 11 Aug 2010 15:29:14 -0700 Subject: fix dhcpbridge issues --- bin/nova-dhcpbridge | 2 +- nova/network/linux_net.py | 8 ++------ nova/network/service.py | 8 ++++---- 3 files changed, 7 insertions(+), 11 deletions(-) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index b1ad1c8fe..f70a4482c 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -69,7 +69,7 @@ def init_leases(interface): """Get the list of hosts for an interface.""" net = model.get_network_by_interface(interface) res = "" - for address in net.address_objs: + for address in net.assigned_objs: res += "%s\n" % linux_net.host_dhcp(address) return res diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 4ebc2097b..15050adaf 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -116,7 +116,7 @@ def _dnsmasq_cmd(net): ' --pid-file=%s' % dhcp_file(net['vlan'], 'pid'), ' --listen-address=%s' % net.dhcp_listen_address, ' --except-interface=lo', - ' --dhcp-range=%s,static,600s' % net.dhcp_range_start, + ' --dhcp-range=%s,static,120s' % net.dhcp_range_start, ' --dhcp-hostsfile=%s' % dhcp_file(net['vlan'], 'conf'), ' --dhcp-script=%s' % bin_file('nova-dhcpbridge'), ' --leasefile-ro'] @@ -153,14 +153,10 @@ def start_dnsmasq(network): # correct dnsmasq process try: os.kill(pid, signal.SIGHUP) + return except Exception as exc: # pylint: disable=W0703 logging.debug("Hupping dnsmasq threw %s", exc) - # otherwise delete the existing leases file and start dnsmasq - lease_file = dhcp_file(network['vlan'], 'leases') - if os.path.exists(lease_file): - os.unlink(lease_file) - # FLAGFILE and DNSMASQ_INTERFACE in env env = {'FLAGFILE': 
FLAGS.dhcpbridge_flagfile, 'DNSMASQ_INTERFACE': network['bridge_name']} diff --git a/nova/network/service.py b/nova/network/service.py index 9c0f5520b..625f20dd4 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -226,13 +226,13 @@ class VlanNetworkService(BaseNetworkService): """Returns an ip to the pool""" return model.get_network_by_address(fixed_ip).deallocate_ip(fixed_ip) - def lease_ip(self, address): + def lease_ip(self, fixed_ip): """Called by bridge when ip is leased""" - return model.get_network_by_address(address).lease_ip(address) + return model.get_network_by_address(fixed_ip).lease_ip(fixed_ip) - def release_ip(self, address): + def release_ip(self, fixed_ip): """Called by bridge when ip is released""" - return model.get_network_by_address(address).release_ip(address) + return model.get_network_by_address(fixed_ip).release_ip(fixed_ip) def restart_nets(self): """Ensure the network for each user is enabled""" -- cgit From f8fc15c645216483ac20280af0e6e6bb92b6be0b Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Thu, 12 Aug 2010 14:30:54 +0200 Subject: Parameterise libvirt URI. 
--- nova/virt/libvirt_conn.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 551ba6e54..c3175b6fe 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -43,6 +43,9 @@ libvirt = None libxml2 = None FLAGS = flags.FLAGS +flags.DEFINE_string('libvirt_uri', + 'qemu:///system', + 'Libvirt connection URI') flags.DEFINE_string('libvirt_xml_template', utils.abspath('compute/libvirt.xml.template'), 'Libvirt XML Template') @@ -72,9 +75,9 @@ class LibvirtConnection(object): 'root', None] if read_only: - self._conn = libvirt.openReadOnly('qemu:///system') + self._conn = libvirt.openReadOnly(FLAGS.libvirt_uri) else: - self._conn = libvirt.openAuth('qemu:///system', auth, 0) + self._conn = libvirt.openAuth(FLAGS.libvirt_uri, auth, 0) def list_instances(self): -- cgit From 0493f4bc5786a4d253e7f73092443117b158071a Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Thu, 12 Aug 2010 14:32:11 +0200 Subject: Move libvirt.xml template into nova/virt --- nova/compute/libvirt.xml.template | 30 ------------------------------ nova/virt/libvirt.xml.template | 30 ++++++++++++++++++++++++++++++ nova/virt/libvirt_conn.py | 2 +- 3 files changed, 31 insertions(+), 31 deletions(-) delete mode 100644 nova/compute/libvirt.xml.template create mode 100644 nova/virt/libvirt.xml.template diff --git a/nova/compute/libvirt.xml.template b/nova/compute/libvirt.xml.template deleted file mode 100644 index 307f9d03a..000000000 --- a/nova/compute/libvirt.xml.template +++ /dev/null @@ -1,30 +0,0 @@ - - %(name)s - - hvm - %(basepath)s/kernel - %(basepath)s/ramdisk - root=/dev/vda1 console=ttyS0 - - - - - %(memory_kb)s - %(vcpus)s - - - - - - - - - - - - - - - - %(nova)s - diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template new file mode 100644 index 000000000..307f9d03a --- /dev/null +++ b/nova/virt/libvirt.xml.template @@ -0,0 +1,30 @@ + + %(name)s + + hvm + %(basepath)s/kernel + 
%(basepath)s/ramdisk + root=/dev/vda1 console=ttyS0 + + + + + %(memory_kb)s + %(vcpus)s + + + + + + + + + + + + + + + + %(nova)s + diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index c3175b6fe..715c4487d 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -50,7 +50,7 @@ flags.DEFINE_string('libvirt_xml_template', utils.abspath('compute/libvirt.xml.template'), 'Libvirt XML Template') flags.DEFINE_string('injected_network_template', - utils.abspath('compute/interfaces.template'), + utils.abspath('virt/interfaces.template'), 'Template file for injected network') flags.DEFINE_string('libvirt_type', -- cgit From 4d7fe5555de3c7e475a436af11559b00d7af5790 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 12 Aug 2010 11:52:32 -0700 Subject: remove syslog-ng workaround --- nova/twistd.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/nova/twistd.py b/nova/twistd.py index c83276daa..8de322aa5 100644 --- a/nova/twistd.py +++ b/nova/twistd.py @@ -241,15 +241,7 @@ def serve(filename): print 'usage: %s [options] [start|stop|restart]' % argv[0] sys.exit(1) - class NoNewlineFormatter(logging.Formatter): - """Strips newlines from default formatter""" - def format(self, record): - """Grabs default formatter's output and strips newlines""" - data = logging.Formatter.format(self, record) - return data.replace("\n", "--") - - # NOTE(vish): syslog-ng doesn't handle newlines from trackbacks very well - formatter = NoNewlineFormatter( + formatter = logging.Formatter( '(%(name)s): %(levelname)s %(message)s') handler = logging.StreamHandler(log.StdioOnnaStick()) handler.setFormatter(formatter) -- cgit From e14d70d7be58ac99f98b66620320c453fa79c8c8 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 12 Aug 2010 12:12:38 -0700 Subject: keep track of leasing state so we can delete ips that didn't ever get leased --- nova/network/model.py | 30 +++++++++++++++++++++++------- 1 file changed, 23 
insertions(+), 7 deletions(-) diff --git a/nova/network/model.py b/nova/network/model.py index ce9345067..49c12e459 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -155,8 +155,10 @@ class Address(datastore.BasicModel): def identifier(self): return self.address + # NOTE(vish): address states allocated, leased, deallocated def default_state(self): - return {'address': self.address} + return {'address': self.address, + 'state': 'none'} @classmethod # pylint: disable=R0913 @@ -170,6 +172,7 @@ class Address(datastore.BasicModel): hostname = "ip-%s" % address.replace('.', '-') addr['hostname'] = hostname addr['network_id'] = network_id + addr['state'] = 'allocated' addr.save() return addr @@ -322,7 +325,13 @@ class BaseNetwork(datastore.BasicModel): def lease_ip(self, ip_str): """Called when DHCP lease is activated""" - logging.debug("Leasing allocated IP %s", ip_str) + if not ip_str in self.assigned: + raise exception.AddressNotAllocated() + address = self.get_address(ip_str) + if address: + logging.debug("Leasing allocated IP %s", ip_str) + address['state'] = 'leased' + address.save() def release_ip(self, ip_str): """Called when DHCP lease expires @@ -330,16 +339,23 @@ class BaseNetwork(datastore.BasicModel): Removes the ip from the assigned list""" if not ip_str in self.assigned: raise exception.AddressNotAllocated() + logging.debug("Releasing IP %s", ip_str) self._rem_host(ip_str) self.deexpress(address=ip_str) - logging.debug("Releasing IP %s", ip_str) def deallocate_ip(self, ip_str): """Deallocates an allocated ip""" - # NOTE(vish): Perhaps we should put the ip into an intermediate - # state, so we know that we are pending waiting for - # dnsmasq to confirm that it has been released. 
- logging.debug("Deallocating allocated IP %s", ip_str) + if not ip_str in self.assigned: + raise exception.AddressNotAllocated() + address = self.get_address(ip_str) + if address: + if address['state'] != 'allocated': + # NOTE(vish): address hasn't been leased, so release it + self.release_ip(ip_str) + else: + logging.debug("Deallocating allocated IP %s", ip_str) + address['state'] == 'deallocated' + address.save() def express(self, address=None): """Set up network. Implemented in subclasses""" -- cgit From 8d4dd0924bfd45b7806e6a29018de45d58ee6339 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 12 Aug 2010 12:44:23 -0700 Subject: rename address stuff to avoid name collision and make the .all() iterator work again --- nova/endpoint/cloud.py | 4 ++-- nova/network/model.py | 26 +++++++++++++------------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index c79e96f5d..27310577f 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -311,7 +311,7 @@ class CloudController(object): def _get_address(self, context, public_ip): # FIXME(vish) this should move into network.py - address = network_model.PublicAddress.lookup(public_ip) + address = network_model.ElasticIp.lookup(public_ip) if address and (context.user.is_admin() or address['project_id'] == context.project.id): return address raise exception.NotFound("Address at ip %s not found" % public_ip) @@ -456,7 +456,7 @@ class CloudController(object): def format_addresses(self, context): addresses = [] - for address in network_model.PublicAddress.all(): + for address in network_model.ElasticIp.all(): # TODO(vish): implement a by_project iterator for addresses if (context.user.is_admin() or address['project_id'] == context.project.id): diff --git a/nova/network/model.py b/nova/network/model.py index 49c12e459..7ae68d8a7 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -143,13 +143,12 @@ class 
Vlan(datastore.BasicModel): network[start + FLAGS.network_size - 1]) -class Address(datastore.BasicModel): +class FixedIp(datastore.BasicModel): """Represents a fixed ip in the datastore""" - override_type = "address" def __init__(self, address): self.address = address - super(Address, self).__init__() + super(FixedIp, self).__init__() @property def identifier(self): @@ -163,7 +162,7 @@ class Address(datastore.BasicModel): @classmethod # pylint: disable=R0913 def create(cls, user_id, project_id, address, mac, hostname, network_id): - """Creates an Address object""" + """Creates an FixedIp object""" addr = cls(address) addr['user_id'] = user_id addr['project_id'] = project_id @@ -178,16 +177,16 @@ class Address(datastore.BasicModel): def save(self): is_new = self.is_new_record() - success = super(Address, self).save() + success = super(FixedIp, self).save() if success and is_new: self.associate_with("network", self['network_id']) def destroy(self): self.unassociate_with("network", self['network_id']) - super(Address, self).destroy() + super(FixedIp, self).destroy() -class PublicAddress(Address): +class ElasticIp(FixedIp): """Represents an elastic ip in the datastore""" override_type = "address" @@ -203,7 +202,7 @@ class PublicAddress(Address): class BaseNetwork(datastore.BasicModel): """Implements basic logic for allocating ips in a network""" override_type = 'network' - address_class = Address + address_class = FixedIp @property def identifier(self): @@ -271,12 +270,12 @@ class BaseNetwork(datastore.BasicModel): # pylint: disable=R0913 def _add_host(self, user_id, project_id, ip_address, mac, hostname): """Add a host to the datastore""" - Address.create(user_id, project_id, ip_address, + self.address_class.create(user_id, project_id, ip_address, mac, hostname, self.identifier) def _rem_host(self, ip_address): """Remove a host from the datastore""" - Address(ip_address).destroy() + self.address_class(ip_address).destroy() @property def assigned(self): @@ -288,6 
+287,7 @@ class BaseNetwork(datastore.BasicModel): """Returns a list of all assigned addresses as objects""" return self.address_class.associated_to('network', self.identifier) + @classmethod def get_address(self, ip_address): """Returns a specific ip as an object""" if ip_address in self.assigned: @@ -478,7 +478,7 @@ DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)] class PublicNetworkController(BaseNetwork): """Handles elastic ips""" override_type = 'network' - address_class = PublicAddress + address_class = ElasticIp def __init__(self, *args, **kwargs): network_id = "public:default" @@ -613,7 +613,7 @@ def get_project_network(project_id, security_group='default'): def get_network_by_address(address): """Gets the network for a given private ip""" - address_record = Address.lookup(address) + address_record = FixedIp.lookup(address) if not address_record: raise exception.AddressNotAllocated() return get_project_network(address_record['project_id']) @@ -629,6 +629,6 @@ def get_network_by_interface(iface, security_group='default'): def get_public_ip_for_instance(instance_id): """Gets the public ip for a given instance""" # FIXME(josh): this should be a lookup - iteration won't scale - for address_record in PublicAddress.all(): + for address_record in ElasticIp.all(): if address_record.get('instance_id', 'available') == instance_id: return address_record['address'] -- cgit From 773390a4daa633b8a54b4fc29600182b6bfb915d Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 12 Aug 2010 13:33:22 -0700 Subject: typo allocated should be relased --- nova/endpoint/cloud.py | 4 ++-- nova/network/model.py | 28 ++++++++++++++-------------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index c79e96f5d..1b07f2adb 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -311,7 +311,7 @@ class CloudController(object): def _get_address(self, context, public_ip): # FIXME(vish) 
this should move into network.py - address = network_model.PublicAddress.lookup(public_ip) + address = network_model.PublicNetworkController.get_address(public_ip) if address and (context.user.is_admin() or address['project_id'] == context.project.id): return address raise exception.NotFound("Address at ip %s not found" % public_ip) @@ -456,7 +456,7 @@ class CloudController(object): def format_addresses(self, context): addresses = [] - for address in network_model.PublicAddress.all(): + for address in network_model.PublicNetworkController.assigned_objs(): # TODO(vish): implement a by_project iterator for addresses if (context.user.is_admin() or address['project_id'] == context.project.id): diff --git a/nova/network/model.py b/nova/network/model.py index 49c12e459..e53693693 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -143,13 +143,12 @@ class Vlan(datastore.BasicModel): network[start + FLAGS.network_size - 1]) -class Address(datastore.BasicModel): +class FixedIp(datastore.BasicModel): """Represents a fixed ip in the datastore""" - override_type = "address" def __init__(self, address): self.address = address - super(Address, self).__init__() + super(FixedIp, self).__init__() @property def identifier(self): @@ -163,7 +162,7 @@ class Address(datastore.BasicModel): @classmethod # pylint: disable=R0913 def create(cls, user_id, project_id, address, mac, hostname, network_id): - """Creates an Address object""" + """Creates an FixedIp object""" addr = cls(address) addr['user_id'] = user_id addr['project_id'] = project_id @@ -178,16 +177,16 @@ class Address(datastore.BasicModel): def save(self): is_new = self.is_new_record() - success = super(Address, self).save() + success = super(FixedIp, self).save() if success and is_new: self.associate_with("network", self['network_id']) def destroy(self): self.unassociate_with("network", self['network_id']) - super(Address, self).destroy() + super(FixedIp, self).destroy() -class PublicAddress(Address): +class 
ElasticIp(FixedIp): """Represents an elastic ip in the datastore""" override_type = "address" @@ -203,7 +202,7 @@ class PublicAddress(Address): class BaseNetwork(datastore.BasicModel): """Implements basic logic for allocating ips in a network""" override_type = 'network' - address_class = Address + address_class = FixedIp @property def identifier(self): @@ -271,12 +270,12 @@ class BaseNetwork(datastore.BasicModel): # pylint: disable=R0913 def _add_host(self, user_id, project_id, ip_address, mac, hostname): """Add a host to the datastore""" - Address.create(user_id, project_id, ip_address, + self.address_class.create(user_id, project_id, ip_address, mac, hostname, self.identifier) def _rem_host(self, ip_address): """Remove a host from the datastore""" - Address(ip_address).destroy() + self.address_class(ip_address).destroy() @property def assigned(self): @@ -288,6 +287,7 @@ class BaseNetwork(datastore.BasicModel): """Returns a list of all assigned addresses as objects""" return self.address_class.associated_to('network', self.identifier) + @classmethod def get_address(self, ip_address): """Returns a specific ip as an object""" if ip_address in self.assigned: @@ -349,7 +349,7 @@ class BaseNetwork(datastore.BasicModel): raise exception.AddressNotAllocated() address = self.get_address(ip_str) if address: - if address['state'] != 'allocated': + if address['state'] != 'leased': # NOTE(vish): address hasn't been leased, so release it self.release_ip(ip_str) else: @@ -478,7 +478,7 @@ DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)] class PublicNetworkController(BaseNetwork): """Handles elastic ips""" override_type = 'network' - address_class = PublicAddress + address_class = ElasticIp def __init__(self, *args, **kwargs): network_id = "public:default" @@ -613,7 +613,7 @@ def get_project_network(project_id, security_group='default'): def get_network_by_address(address): """Gets the network for a given private ip""" - address_record = 
Address.lookup(address) + address_record = FixedIp.lookup(address) if not address_record: raise exception.AddressNotAllocated() return get_project_network(address_record['project_id']) @@ -629,6 +629,6 @@ def get_network_by_interface(iface, security_group='default'): def get_public_ip_for_instance(instance_id): """Gets the public ip for a given instance""" # FIXME(josh): this should be a lookup - iteration won't scale - for address_record in PublicAddress.all(): + for address_record in ElasticIp.all(): if address_record.get('instance_id', 'available') == instance_id: return address_record['address'] -- cgit From ef48a727d1c6b824170995fffa59949960ea5d11 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 12 Aug 2010 13:36:10 -0700 Subject: remove class method --- nova/network/model.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/network/model.py b/nova/network/model.py index e53693693..1a958b564 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -287,7 +287,6 @@ class BaseNetwork(datastore.BasicModel): """Returns a list of all assigned addresses as objects""" return self.address_class.associated_to('network', self.identifier) - @classmethod def get_address(self, ip_address): """Returns a specific ip as an object""" if ip_address in self.assigned: -- cgit From a96b4c1470ee4e73382178206d8728d2a2ba89cf Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 12 Aug 2010 14:18:59 -0700 Subject: renamed missed reference to Address --- nova/endpoint/cloud.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 27310577f..0a15e934c 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -126,7 +126,7 @@ class CloudController(object): else: keys = '' - address_record = network_model.Address(i['private_dns_name']) + address_record = network_model.FixedIp(i['private_dns_name']) if address_record: hostname = address_record['hostname'] else: -- cgit From 
6eba59be8ef6ea47e1d9657fed72fafbc7c9d6ef Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Thu, 12 Aug 2010 23:41:32 +0200 Subject: Make --libvirt_type=uml do the right thing: Sets the correct libvirt URI and use a special template for the XML. --- nova/virt/libvirt.qemu.xml.template | 30 ++++++++++++++++++++++++++++++ nova/virt/libvirt.uml.xml.template | 25 +++++++++++++++++++++++++ nova/virt/libvirt.xml.template | 30 ------------------------------ nova/virt/libvirt_conn.py | 32 +++++++++++++++++++++----------- 4 files changed, 76 insertions(+), 41 deletions(-) create mode 100644 nova/virt/libvirt.qemu.xml.template create mode 100644 nova/virt/libvirt.uml.xml.template delete mode 100644 nova/virt/libvirt.xml.template diff --git a/nova/virt/libvirt.qemu.xml.template b/nova/virt/libvirt.qemu.xml.template new file mode 100644 index 000000000..307f9d03a --- /dev/null +++ b/nova/virt/libvirt.qemu.xml.template @@ -0,0 +1,30 @@ + + %(name)s + + hvm + %(basepath)s/kernel + %(basepath)s/ramdisk + root=/dev/vda1 console=ttyS0 + + + + + %(memory_kb)s + %(vcpus)s + + + + + + + + + + + + + + + + %(nova)s + diff --git a/nova/virt/libvirt.uml.xml.template b/nova/virt/libvirt.uml.xml.template new file mode 100644 index 000000000..0bc1507de --- /dev/null +++ b/nova/virt/libvirt.uml.xml.template @@ -0,0 +1,25 @@ + + %(name)s + %(memory_kb)s + + %(type)suml + /usr/bin/linux + /dev/ubda1 + + + + + + + + + + + + + + + + + %(nova)s + diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template deleted file mode 100644 index 307f9d03a..000000000 --- a/nova/virt/libvirt.xml.template +++ /dev/null @@ -1,30 +0,0 @@ - - %(name)s - - hvm - %(basepath)s/kernel - %(basepath)s/ramdisk - root=/dev/vda1 console=ttyS0 - - - - - %(memory_kb)s - %(vcpus)s - - - - - - - - - - - - - - - - %(nova)s - diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 2a818b40d..e2cdaaf7d 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -43,19 +43,21 @@ 
libvirt = None libxml2 = None FLAGS = flags.FLAGS -flags.DEFINE_string('libvirt_uri', - 'qemu:///system', - 'Libvirt connection URI') flags.DEFINE_string('libvirt_xml_template', - utils.abspath('compute/libvirt.xml.template'), - 'Libvirt XML Template') + utils.abspath('compute/libvirt.qemu.xml.template'), + 'Libvirt XML Template for QEmu/KVM') +flags.DEFINE_string('libvirt_uml_xml_template', + utils.abspath('compute/libvirt.uml.xml.template'), + 'Libvirt XML Template for user-mode-linux') flags.DEFINE_string('injected_network_template', utils.abspath('virt/interfaces.template'), 'Template file for injected network') - flags.DEFINE_string('libvirt_type', 'kvm', - 'Libvirt domain type (kvm, qemu, etc)') + 'Libvirt domain type (valid options are: kvm, qemu, uml)') +flags.DEFINE_string('libvirt_uri', + '', + 'Override the default libvirt URI (which is dependent on libvirt_type)') def get_connection(read_only): # These are loaded late so that there's no need to install these @@ -74,10 +76,19 @@ class LibvirtConnection(object): auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], 'root', None] + + if FLAGS.libvirt_type == 'uml': + uri = FLAGS.libvirt_uri or 'uml:///system' + template_file = FLAGS.libvirt_uml_xml_template + else: + uri = FLAGS.libvirt_uri or 'qemu:///system' + template_file = FLAGS.libvirt_xml_template + self.libvirt_xml = open(template_file).read() + if read_only: - self._conn = libvirt.openReadOnly(FLAGS.libvirt_uri) + self._conn = libvirt.openReadOnly(uri) else: - self._conn = libvirt.openAuth(FLAGS.libvirt_uri, auth, 0) + self._conn = libvirt.openAuth(uri, auth, 0) def list_instances(self): @@ -240,14 +251,13 @@ class LibvirtConnection(object): def toXml(self, instance): # TODO(termie): cache? 
logging.debug("Starting the toXML method") - libvirt_xml = open(FLAGS.libvirt_xml_template).read() xml_info = instance.datamodel.copy() # TODO(joshua): Make this xml express the attached disks as well # TODO(termie): lazy lazy hack because xml is annoying xml_info['nova'] = json.dumps(instance.datamodel.copy()) xml_info['type'] = FLAGS.libvirt_type - libvirt_xml = libvirt_xml % xml_info + libvirt_xml = self.libvirt_xml % xml_info logging.debug("Finished the toXML method") return libvirt_xml -- cgit From 11c47dd12adcbf2a5011510f01081db858b057db Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Thu, 12 Aug 2010 18:36:46 -0400 Subject: Mergeprop cleanup --- nova/endpoint/rackspace/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 nova/endpoint/rackspace/__init__.py diff --git a/nova/endpoint/rackspace/__init__.py b/nova/endpoint/rackspace/__init__.py deleted file mode 100644 index e69de29bb..000000000 -- cgit From 39d12bf518e284183d1debd52fe7081ecf1c633d Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Thu, 12 Aug 2010 18:36:56 -0400 Subject: Mergeprop cleanup --- nova/endpoint/rackspace/__init__.py | 90 ++++++++++++++++++++++++++++++++++++ nova/endpoint/rackspace/rackspace.py | 90 ------------------------------------ 2 files changed, 90 insertions(+), 90 deletions(-) create mode 100644 nova/endpoint/rackspace/__init__.py delete mode 100644 nova/endpoint/rackspace/rackspace.py diff --git a/nova/endpoint/rackspace/__init__.py b/nova/endpoint/rackspace/__init__.py new file mode 100644 index 000000000..f14f6218c --- /dev/null +++ b/nova/endpoint/rackspace/__init__.py @@ -0,0 +1,90 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Rackspace API Endpoint +""" + +import json +import time + +import webob.dec +import webob.exc +import routes + +from nova import flags +from nova import wsgi +from nova.auth import manager +from nova.endpoint.rackspace import controllers + + +FLAGS = flags.FLAGS +flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') + + +class Api(wsgi.Middleware): + """WSGI entry point for all Rackspace API requests.""" + + def __init__(self): + app = AuthMiddleware(ApiRouter()) + super(Api, self).__init__(app) + + +class AuthMiddleware(wsgi.Middleware): + """Authorize the rackspace API request or return an HTTP Forbidden.""" + + #TODO(gundlach): isn't this the old Nova API's auth? Should it be replaced + #with correct RS API auth? + + @webob.dec.wsgify + def __call__(self, req): + context = {} + if "HTTP_X_AUTH_TOKEN" in req.environ: + context['user'] = manager.AuthManager().get_user_from_access_key( + req.environ['HTTP_X_AUTH_TOKEN']) + if context['user']: + context['project'] = manager.AuthManager().get_project( + context['user'].name) + if "user" not in context: + return webob.exc.HTTPForbidden() + req.environ['nova.context'] = context + return self.application + + +class ApiRouter(wsgi.Router): + """ + Routes requests on the Rackspace API to the appropriate controller + and method. 
+ """ + + def __init__(self): + mapper = routes.Mapper() + + mapper.resource("server", "servers") + mapper.resource("image", "images") + mapper.resource("flavor", "flavors") + mapper.resource("sharedipgroup", "sharedipgroups") + + targets = { + 'servers': controllers.ServersController(), + 'images': controllers.ImagesController(), + 'flavors': controllers.FlavorsController(), + 'sharedipgroups': controllers.SharedIpGroupsController() + } + + super(ApiRouter, self).__init__(mapper, targets) diff --git a/nova/endpoint/rackspace/rackspace.py b/nova/endpoint/rackspace/rackspace.py deleted file mode 100644 index f14f6218c..000000000 --- a/nova/endpoint/rackspace/rackspace.py +++ /dev/null @@ -1,90 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Rackspace API Endpoint -""" - -import json -import time - -import webob.dec -import webob.exc -import routes - -from nova import flags -from nova import wsgi -from nova.auth import manager -from nova.endpoint.rackspace import controllers - - -FLAGS = flags.FLAGS -flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') - - -class Api(wsgi.Middleware): - """WSGI entry point for all Rackspace API requests.""" - - def __init__(self): - app = AuthMiddleware(ApiRouter()) - super(Api, self).__init__(app) - - -class AuthMiddleware(wsgi.Middleware): - """Authorize the rackspace API request or return an HTTP Forbidden.""" - - #TODO(gundlach): isn't this the old Nova API's auth? Should it be replaced - #with correct RS API auth? - - @webob.dec.wsgify - def __call__(self, req): - context = {} - if "HTTP_X_AUTH_TOKEN" in req.environ: - context['user'] = manager.AuthManager().get_user_from_access_key( - req.environ['HTTP_X_AUTH_TOKEN']) - if context['user']: - context['project'] = manager.AuthManager().get_project( - context['user'].name) - if "user" not in context: - return webob.exc.HTTPForbidden() - req.environ['nova.context'] = context - return self.application - - -class ApiRouter(wsgi.Router): - """ - Routes requests on the Rackspace API to the appropriate controller - and method. 
- """ - - def __init__(self): - mapper = routes.Mapper() - - mapper.resource("server", "servers") - mapper.resource("image", "images") - mapper.resource("flavor", "flavors") - mapper.resource("sharedipgroup", "sharedipgroups") - - targets = { - 'servers': controllers.ServersController(), - 'images': controllers.ImagesController(), - 'flavors': controllers.FlavorsController(), - 'sharedipgroups': controllers.SharedIpGroupsController() - } - - super(ApiRouter, self).__init__(mapper, targets) -- cgit From 4391b7362eeab2cd976309696be1209ac771ce24 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Thu, 12 Aug 2010 18:41:31 -0400 Subject: Undo the changes to cloud.py that somehow diverged from trunk --- nova/endpoint/cloud.py | 105 ++++++++++++++++++++++++------------------------- 1 file changed, 52 insertions(+), 53 deletions(-) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 878d54a15..ad9188ff3 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -47,6 +47,7 @@ FLAGS = flags.FLAGS flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') + def _gen_key(user_id, key_name): """ Tuck this into AuthManager """ try: @@ -102,15 +103,16 @@ class CloudController(object): result = {} for instance in self.instdir.all: if instance['project_id'] == project_id: - line = '%s slots=%d' % (instance['private_dns_name'], INSTANCE_TYPES[instance['instance_type']]['vcpus']) + line = '%s slots=%d' % (instance['private_dns_name'], + INSTANCE_TYPES[instance['instance_type']]['vcpus']) if instance['key_name'] in result: result[instance['key_name']].append(line) else: result[instance['key_name']] = [line] return result - def get_metadata(self, ip): - i = self.get_instance_by_ip(ip) + def get_metadata(self, ipaddress): + i = self.get_instance_by_ip(ipaddress) if i is None: return None mpi = self._get_mpi_data(i['project_id']) @@ -147,7 +149,7 @@ class CloudController(object): }, 'public-hostname': i.get('dns_name', ''), 'public-ipv4': 
i.get('dns_name', ''), # TODO: switch to IP - 'public-keys' : keys, + 'public-keys': keys, 'ramdisk-id': i.get('ramdisk_id', ''), 'reservation-id': i['reservation_id'], 'security-groups': i.get('groups', ''), @@ -203,26 +205,22 @@ class CloudController(object): 'keyFingerprint': key_pair.fingerprint, }) - return { 'keypairsSet': result } + return {'keypairsSet': result} @rbac.allow('all') def create_key_pair(self, context, key_name, **kwargs): - try: - d = defer.Deferred() - p = context.handler.application.settings.get('pool') - def _complete(kwargs): - if 'exception' in kwargs: - d.errback(kwargs['exception']) - return - d.callback({'keyName': key_name, - 'keyFingerprint': kwargs['fingerprint'], - 'keyMaterial': kwargs['private_key']}) - p.apply_async(_gen_key, [context.user.id, key_name], - callback=_complete) - return d - - except manager.UserError as e: - raise + dcall = defer.Deferred() + pool = context.handler.application.settings.get('pool') + def _complete(kwargs): + if 'exception' in kwargs: + dcall.errback(kwargs['exception']) + return + dcall.callback({'keyName': key_name, + 'keyFingerprint': kwargs['fingerprint'], + 'keyMaterial': kwargs['private_key']}) + pool.apply_async(_gen_key, [context.user.id, key_name], + callback=_complete) + return dcall @rbac.allow('all') def delete_key_pair(self, context, key_name, **kwargs): @@ -232,7 +230,7 @@ class CloudController(object): @rbac.allow('all') def describe_security_groups(self, context, group_names, **kwargs): - groups = { 'securityGroupSet': [] } + groups = {'securityGroupSet': []} # Stubbed for now to unblock other things. 
return groups @@ -251,7 +249,7 @@ class CloudController(object): instance = self._get_instance(context, instance_id[0]) return rpc.call('%s.%s' % (FLAGS.compute_topic, instance['node_name']), {"method": "get_console_output", - "args" : {"instance_id": instance_id[0]}}) + "args": {"instance_id": instance_id[0]}}) def _get_user_id(self, context): if context and context.user: @@ -285,10 +283,10 @@ class CloudController(object): if volume['attach_status'] == 'attached': v['attachmentSet'] = [{'attachTime': volume['attach_time'], 'deleteOnTermination': volume['delete_on_termination'], - 'device' : volume['mountpoint'], - 'instanceId' : volume['instance_id'], - 'status' : 'attached', - 'volume_id' : volume['volume_id']}] + 'device': volume['mountpoint'], + 'instanceId': volume['instance_id'], + 'status': 'attached', + 'volume_id': volume['volume_id']}] else: v['attachmentSet'] = [{}] return v @@ -298,7 +296,7 @@ class CloudController(object): def create_volume(self, context, size, **kwargs): # TODO(vish): refactor this to create the volume object here and tell service to create it result = yield rpc.call(FLAGS.volume_topic, {"method": "create_volume", - "args" : {"size": size, + "args": {"size": size, "user_id": context.user.id, "project_id": context.project.id}}) # NOTE(vish): rpc returned value is in the result key in the dictionary @@ -348,15 +346,15 @@ class CloudController(object): compute_node = instance['node_name'] rpc.cast('%s.%s' % (FLAGS.compute_topic, compute_node), {"method": "attach_volume", - "args" : {"volume_id": volume_id, - "instance_id" : instance_id, - "mountpoint" : device}}) - return defer.succeed({'attachTime' : volume['attach_time'], - 'device' : volume['mountpoint'], - 'instanceId' : instance_id, - 'requestId' : context.request_id, - 'status' : volume['attach_status'], - 'volumeId' : volume_id}) + "args": {"volume_id": volume_id, + "instance_id": instance_id, + "mountpoint": device}}) + return defer.succeed({'attachTime': volume['attach_time'], 
+ 'device': volume['mountpoint'], + 'instanceId': instance_id, + 'requestId': context.request_id, + 'status': volume['attach_status'], + 'volumeId': volume_id}) @rbac.allow('projectmanager', 'sysadmin') @@ -372,18 +370,18 @@ class CloudController(object): instance = self._get_instance(context, instance_id) rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), {"method": "detach_volume", - "args" : {"instance_id": instance_id, + "args": {"instance_id": instance_id, "volume_id": volume_id}}) except exception.NotFound: # If the instance doesn't exist anymore, # then we need to call detach blind volume.finish_detach() - return defer.succeed({'attachTime' : volume['attach_time'], - 'device' : volume['mountpoint'], - 'instanceId' : instance_id, - 'requestId' : context.request_id, - 'status' : volume['attach_status'], - 'volumeId' : volume_id}) + return defer.succeed({'attachTime': volume['attach_time'], + 'device': volume['mountpoint'], + 'instanceId': instance_id, + 'requestId': context.request_id, + 'status': volume['attach_status'], + 'volumeId': volume_id}) def _convert_to_set(self, lst, label): if lst == None or lst == []: @@ -425,7 +423,8 @@ class CloudController(object): i['key_name'] = instance.get('key_name', None) if context.user.is_admin(): i['key_name'] = '%s (%s, %s)' % (i['key_name'], - instance.get('project_id', None), instance.get('node_name','')) + instance.get('project_id', None), + instance.get('node_name', '')) i['product_codes_set'] = self._convert_to_set( instance.get('product_codes', None), 'product_code') i['instance_type'] = instance.get('instance_type', None) @@ -442,7 +441,7 @@ class CloudController(object): reservations[res_id] = r reservations[res_id]['instances_set'].append(i) - instance_response = {'reservationSet' : list(reservations.values()) } + instance_response = {'reservationSet': list(reservations.values())} return instance_response @rbac.allow('all') @@ -457,7 +456,7 @@ class CloudController(object): address['project_id'] 
== context.project.id): address_rv = { 'public_ip': address['address'], - 'instance_id' : address.get('instance_id', 'free') + 'instance_id': address.get('instance_id', 'free') } if context.user.is_admin(): address_rv['instance_id'] = "%s (%s, %s)" % ( @@ -477,7 +476,7 @@ class CloudController(object): "args": {"user_id": context.user.id, "project_id": context.project.id}}) public_ip = alloc_result['result'] - defer.returnValue({'addressSet': [{'publicIp' : public_ip}]}) + defer.returnValue({'addressSet': [{'publicIp': public_ip}]}) @rbac.allow('netadmin') @defer.inlineCallbacks @@ -591,7 +590,7 @@ class CloudController(object): inst.save() rpc.cast(FLAGS.compute_topic, {"method": "run_instance", - "args": {"instance_id" : inst.instance_id}}) + "args": {"instance_id": inst.instance_id}}) logging.debug("Casting to node for %s's instance with IP of %s" % (context.user.name, inst['private_dns_name'])) # TODO: Make Network figure out the network name from ip. @@ -646,7 +645,7 @@ class CloudController(object): instance = self._get_instance(context, i) rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), {"method": "reboot_instance", - "args" : {"instance_id": i}}) + "args": {"instance_id": i}}) return defer.succeed(True) @rbac.allow('projectmanager', 'sysadmin') @@ -656,7 +655,7 @@ class CloudController(object): volume_node = volume['node_name'] rpc.cast('%s.%s' % (FLAGS.volume_topic, volume_node), {"method": "delete_volume", - "args" : {"volume_id": volume_id}}) + "args": {"volume_id": volume_id}}) return defer.succeed(True) @rbac.allow('all') @@ -689,9 +688,9 @@ class CloudController(object): image = images.list(context, image_id)[0] except IndexError: raise exception.ApiError('invalid id: %s' % image_id) - result = { 'image_id': image_id, 'launchPermission': [] } + result = {'image_id': image_id, 'launchPermission': []} if image['isPublic']: - result['launchPermission'].append({ 'group': 'all' }) + result['launchPermission'].append({'group': 'all'}) 
return defer.succeed(result) @rbac.allow('projectmanager', 'sysadmin') -- cgit From a679cab031ec91dd719b9ba887cdae4f595b2ca4 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 12 Aug 2010 21:27:53 -0700 Subject: make rpc.call propogate exception info. Includes tests --- nova/endpoint/cloud.py | 15 +++++------ nova/rpc.py | 38 +++++++++++++++++++++------- nova/tests/rpc_unittest.py | 62 ++++++++++++++++++++++++++++++++++++++++++++++ run_tests.py | 1 + 4 files changed, 98 insertions(+), 18 deletions(-) create mode 100644 nova/tests/rpc_unittest.py diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index ad9188ff3..c32fb1f7f 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -103,7 +103,7 @@ class CloudController(object): result = {} for instance in self.instdir.all: if instance['project_id'] == project_id: - line = '%s slots=%d' % (instance['private_dns_name'], + line = '%s slots=%d' % (instance['private_dns_name'], INSTANCE_TYPES[instance['instance_type']]['vcpus']) if instance['key_name'] in result: result[instance['key_name']].append(line) @@ -300,7 +300,7 @@ class CloudController(object): "user_id": context.user.id, "project_id": context.project.id}}) # NOTE(vish): rpc returned value is in the result key in the dictionary - volume = self._get_volume(context, result['result']) + volume = self._get_volume(context, result) defer.returnValue({'volumeSet': [self.format_volume(context, volume)]}) def _get_address(self, context, public_ip): @@ -423,7 +423,7 @@ class CloudController(object): i['key_name'] = instance.get('key_name', None) if context.user.is_admin(): i['key_name'] = '%s (%s, %s)' % (i['key_name'], - instance.get('project_id', None), + instance.get('project_id', None), instance.get('node_name', '')) i['product_codes_set'] = self._convert_to_set( instance.get('product_codes', None), 'product_code') @@ -471,11 +471,10 @@ class CloudController(object): @defer.inlineCallbacks def allocate_address(self, context, **kwargs): 
network_topic = yield self._get_network_topic(context) - alloc_result = yield rpc.call(network_topic, + public_ip = yield rpc.call(network_topic, {"method": "allocate_elastic_ip", "args": {"user_id": context.user.id, "project_id": context.project.id}}) - public_ip = alloc_result['result'] defer.returnValue({'addressSet': [{'publicIp': public_ip}]}) @rbac.allow('netadmin') @@ -516,11 +515,10 @@ class CloudController(object): """Retrieves the network host for a project""" host = network_service.get_host_for_project(context.project.id) if not host: - result = yield rpc.call(FLAGS.network_topic, + host = yield rpc.call(FLAGS.network_topic, {"method": "set_network_host", "args": {"user_id": context.user.id, "project_id": context.project.id}}) - host = result['result'] defer.returnValue('%s.%s' %(FLAGS.network_topic, host)) @rbac.allow('projectmanager', 'sysadmin') @@ -563,13 +561,12 @@ class CloudController(object): vpn = False if image_id == FLAGS.vpn_image_id: vpn = True - allocate_result = yield rpc.call(network_topic, + allocate_data = yield rpc.call(network_topic, {"method": "allocate_fixed_ip", "args": {"user_id": context.user.id, "project_id": context.project.id, "security_group": security_group, "vpn": vpn}}) - allocate_data = allocate_result['result'] inst = self.instdir.new() inst['image_id'] = image_id inst['kernel_id'] = kernel_id diff --git a/nova/rpc.py b/nova/rpc.py index 2a550c3ae..e06a3e19b 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -40,7 +40,7 @@ FLAGS = flags.FLAGS _log = logging.getLogger('amqplib') -_log.setLevel(logging.WARN) +_log.setLevel(logging.DEBUG) class Connection(connection.BrokerConnection): @@ -141,8 +141,8 @@ class AdapterConsumer(TopicConsumer): node_args = dict((str(k), v) for k, v in args.iteritems()) d = defer.maybeDeferred(node_func, **node_args) if msg_id: - d.addCallback(lambda rval: msg_reply(msg_id, rval)) - d.addErrback(lambda e: msg_reply(msg_id, str(e))) + d.addCallback(lambda rval: msg_reply(msg_id, rval, None)) + 
d.addErrback(lambda e: msg_reply(msg_id, None, e)) return @@ -174,20 +174,37 @@ class DirectPublisher(Publisher): super(DirectPublisher, self).__init__(connection=connection) -def msg_reply(msg_id, reply): +def msg_reply(msg_id, reply=None, failure=None): + if failure: + message = failure.getErrorMessage() + traceback = failure.getTraceback() + logging.error("Returning exception %s to caller", message) + logging.error(traceback) + failure = (failure.type.__name__, str(failure.value), traceback) conn = Connection.instance() publisher = DirectPublisher(connection=conn, msg_id=msg_id) - try: - publisher.send({'result': reply}) - except TypeError: + publisher.send({'result': reply, 'failure': failure}) + except Exception, exc: publisher.send( {'result': dict((k, repr(v)) - for k, v in reply.__dict__.iteritems()) + for k, v in reply.__dict__.iteritems()), + 'failure': failure }) publisher.close() +class RemoteError(exception.Error): + """signifies that a remote class has raised an exception""" + def __init__(self, type, value, traceback): + self.type = type + self.value = value + self.traceback = traceback + super(RemoteError, self).__init__("%s %s\n%s" % (type, + value, + traceback)) + + def call(topic, msg): _log.debug("Making asynchronous call...") msg_id = uuid.uuid4().hex @@ -199,7 +216,10 @@ def call(topic, msg): consumer = DirectConsumer(connection=conn, msg_id=msg_id) def deferred_receive(data, message): message.ack() - d.callback(data) + if data['failure']: + return d.errback(RemoteError(*data['failure'])) + else: + return d.callback(data['result']) consumer.register_callback(deferred_receive) injected = consumer.attach_to_tornado() diff --git a/nova/tests/rpc_unittest.py b/nova/tests/rpc_unittest.py new file mode 100644 index 000000000..9c2e29344 --- /dev/null +++ b/nova/tests/rpc_unittest.py @@ -0,0 +1,62 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National 
Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import logging + +from twisted.internet import defer + +from nova import flags +from nova import rpc +from nova import test + + +FLAGS = flags.FLAGS + + +class RpcTestCase(test.BaseTestCase): + def setUp(self): + super(RpcTestCase, self).setUp() + self.conn = rpc.Connection.instance() + self.receiver = TestReceiver() + self.consumer = rpc.AdapterConsumer(connection=self.conn, + topic='test', + proxy=self.receiver) + + self.injected.append(self.consumer.attach_to_tornado(self.ioloop)) + + def test_call_succeed(self): + value = 42 + result = yield rpc.call('test', {"method": "echo", "args": {"value": value}}) + self.assertEqual(value, result) + + def test_call_exception(self): + value = 42 + self.assertFailure(rpc.call('test', {"method": "fail", "args": {"value": value}}), rpc.RemoteError) + try: + yield rpc.call('test', {"method": "fail", "args": {"value": value}}) + self.fail("should have thrown rpc.RemoteError") + except rpc.RemoteError as exc: + self.assertEqual(int(exc.value), value) + +class TestReceiver(object): + def echo(self, value): + logging.debug("Received %s", value) + return defer.succeed(value) + + def fail(self, value): + raise Exception(value) diff --git a/run_tests.py b/run_tests.py index 7fe6e73ec..d90ac8175 100644 --- a/run_tests.py +++ b/run_tests.py @@ -59,6 +59,7 @@ from nova.tests.model_unittest import * from 
nova.tests.network_unittest import * from nova.tests.objectstore_unittest import * from nova.tests.process_unittest import * +from nova.tests.rpc_unittest import * from nova.tests.validator_unittest import * from nova.tests.volume_unittest import * -- cgit From ea2805d372a0d4a480667058e96288bf15844828 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Fri, 13 Aug 2010 11:51:33 +0100 Subject: Added documentation to the nova.virt interface. --- nova/virt/connection.py | 9 ++++ nova/virt/fake.py | 129 +++++++++++++++++++++++++++++++++++++++++++++- nova/virt/libvirt_conn.py | 20 ------- 3 files changed, 137 insertions(+), 21 deletions(-) diff --git a/nova/virt/connection.py b/nova/virt/connection.py index 004adb19d..90bc7fa0a 100644 --- a/nova/virt/connection.py +++ b/nova/virt/connection.py @@ -27,6 +27,15 @@ FLAGS = flags.FLAGS def get_connection(read_only=False): + """Returns an object representing the connection to a virtualization + platform. This could be nova.virt.fake.FakeConnection in test mode, + a connection to KVM or QEMU via libvirt, or a connection to XenServer + or Xen Cloud Platform via XenAPI. + + Any object returned here must conform to the interface documented by + FakeConnection. + """ + # TODO(termie): maybe lazy load after initial check for permissions # TODO(termie): check whether we can be disconnected t = FLAGS.connection_type diff --git a/nova/virt/fake.py b/nova/virt/fake.py index d9ae5ac96..105837181 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -19,6 +19,7 @@ """ A fake (in-memory) hypervisor+api. Allows nova testing w/o a hypervisor. +This module also documents the semantics of real hypervisor connections. """ import logging @@ -32,6 +33,38 @@ def get_connection(_): class FakeConnection(object): + """ + The interface to this class talks in terms of 'instances' (Amazon EC2 and + internal Nova terminology), by which we mean 'running virtual machine' + (XenAPI terminology) or domain (Xen or libvirt terminology). 
+ + An instance has an ID, which is the identifier chosen by Nova to represent + the instance further up the stack. This is unfortunately also called a + 'name' elsewhere. As far as this layer is concerned, 'instance ID' and + 'instance name' are synonyms. + + Note that the instance ID or name is not human-readable or + customer-controlled -- it's an internal ID chosen by Nova. At the + nova.virt layer, instances do not have human-readable names at all -- such + things are only known higher up the stack. + + Most virtualization platforms will also have their own identity schemes, + to uniquely identify a VM or domain. These IDs must stay internal to the + platform-specific layer, and never escape the connection interface. The + platform-specific layer is responsible for keeping track of which instance + ID maps to which platform-specific ID, and vice versa. + + In contrast, the list_disks and list_interfaces calls may return + platform-specific IDs. These identify a specific virtual disk or specific + virtual network interface, and these IDs are opaque to the rest of Nova. + + Some methods here take an instance of nova.compute.service.Instance. This + is the datastructure used by nova.compute to store details regarding an + instance, and pass them into this layer. This layer is responsible for + translating that generic datastructure into terms that are specific to the + virtualization platform. + """ + def __init__(self): self.instances = {} @@ -42,20 +75,59 @@ class FakeConnection(object): return cls._instance def list_instances(self): + """ + Return the names of all the instances known to the virtualization + layer, as a list. + """ return self.instances.keys() def spawn(self, instance): + """ + Create a new instance/VM/domain on the virtualization platform. + + The given parameter is an instance of nova.compute.service.Instance. + This function should use the data there to guide the creation of + the new instance. 
+ + Once this function successfully completes, the instance should be + running (power_state.RUNNING). + + If this function fails, any partial instance should be completely + cleaned up, and the virtualization platform should be in the state + that it was before this call began. + """ + fake_instance = FakeInstance() self.instances[instance.name] = fake_instance fake_instance._state = power_state.RUNNING def reboot(self, instance): + """ + Reboot the specified instance. + + The given parameter is an instance of nova.compute.service.Instance, + and so the instance is being specified as instance.name. + """ pass - + def destroy(self, instance): + """ + Destroy (shutdown and delete) the specified instance. + + The given parameter is an instance of nova.compute.service.Instance, + and so the instance is being specified as instance.name. + """ del self.instances[instance.name] def get_info(self, instance_id): + """ + Get a block of information about the given instance. This is returned + as a dictionary containing 'state': The power_state of the instance, + 'max_mem': The maximum memory for the instance, in KiB, 'mem': The + current memory the instance has, in KiB, 'num_cpu': The current number + of virtual CPUs the instance has, 'cpu_time': The total CPU time used + by the instance, in nanoseconds. + """ i = self.instances[instance_id] return {'state': i._state, 'max_mem': 0, @@ -64,15 +136,70 @@ class FakeConnection(object): 'cpu_time': 0} def list_disks(self, instance_id): + """ + Return the IDs of all the virtual disks attached to the specified + instance, as a list. These IDs are opaque to the caller (they are + only useful for giving back to this layer as a parameter to + disk_stats). These IDs only need to be unique for a given instance. + + Note that this function takes an instance ID, not a + compute.service.Instance, so that it can be called by compute.monitor. 
+ """ return ['A_DISK'] def list_interfaces(self, instance_id): + """ + Return the IDs of all the virtual network interfaces attached to the + specified instance, as a list. These IDs are opaque to the caller + (they are only useful for giving back to this layer as a parameter to + interface_stats). These IDs only need to be unique for a given + instance. + + Note that this function takes an instance ID, not a + compute.service.Instance, so that it can be called by compute.monitor. + """ return ['A_VIF'] def block_stats(self, instance_id, disk_id): + """ + Return performance counters associated with the given disk_id on the + given instance_id. These are returned as [rd_req, rd_bytes, wr_req, + wr_bytes, errs], where rd indicates read, wr indicates write, req is + the total number of I/O requests made, bytes is the total number of + bytes transferred, and errs is the number of requests held up due to a + full pipeline. + + All counters are long integers. + + This method is optional. On some platforms (e.g. XenAPI) performance + statistics can be retrieved directly in aggregate form, without Nova + having to do the aggregation. On those platforms, this method is + unused. + + Note that this function takes an instance ID, not a + compute.service.Instance, so that it can be called by compute.monitor. + """ return [0L, 0L, 0L, 0L, null] def interface_stats(self, instance_id, iface_id): + """ + Return performance counters associated with the given iface_id on the + given instance_id. These are returned as [rx_bytes, rx_packets, + rx_errs, rx_drop, tx_bytes, tx_packets, tx_errs, tx_drop], where rx + indicates receive, tx indicates transmit, bytes and packets indicate + the total number of bytes or packets transferred, and errs and dropped + is the total number of packets failed / dropped. + + All counters are long integers. + + This method is optional. On some platforms (e.g. 
XenAPI) performance + statistics can be retrieved directly in aggregate form, without Nova + having to do the aggregation. On those platforms, this method is + unused. + + Note that this function takes an instance ID, not a + compute.service.Instance, so that it can be called by compute.monitor. + """ return [0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L] diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 13305be0f..d031a10d8 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -261,12 +261,6 @@ class LibvirtConnection(object): def get_disks(self, instance_id): - """ - Note that this function takes an instance ID, not an Instance, so - that it can be called by monitor. - - Returns a list of all block devices for this domain. - """ domain = self._conn.lookupByName(instance_id) # TODO(devcamcar): Replace libxml2 with etree. xml = domain.XMLDesc(0) @@ -304,12 +298,6 @@ class LibvirtConnection(object): def get_interfaces(self, instance_id): - """ - Note that this function takes an instance ID, not an Instance, so - that it can be called by monitor. - - Returns a list of all network interfaces for this instance. - """ domain = self._conn.lookupByName(instance_id) # TODO(devcamcar): Replace libxml2 with etree. xml = domain.XMLDesc(0) @@ -347,18 +335,10 @@ class LibvirtConnection(object): def block_stats(self, instance_id, disk): - """ - Note that this function takes an instance ID, not an Instance, so - that it can be called by monitor. - """ domain = self._conn.lookupByName(instance_id) return domain.blockStats(disk) def interface_stats(self, instance_id, interface): - """ - Note that this function takes an instance ID, not an Instance, so - that it can be called by monitor. 
- """ domain = self._conn.lookupByName(instance_id) return domain.interfaceStats(interface) -- cgit From 4c39eca0c90fc798e9980b8fe750d66208fecae5 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Fri, 13 Aug 2010 14:33:07 +0100 Subject: Added note regarding dependency upon XenAPI.py. --- doc/source/getting.started.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/getting.started.rst b/doc/source/getting.started.rst index 3eadd0882..f683bb256 100644 --- a/doc/source/getting.started.rst +++ b/doc/source/getting.started.rst @@ -40,6 +40,7 @@ Python libraries we don't vendor * M2Crypto: python library interface for openssl * curl +* XenAPI: Needed only for Xen Cloud Platform or XenServer support. Available from http://wiki.xensource.com/xenwiki/XCP_SDK or http://community.citrix.com/cdn/xs/sdks. Vendored python libaries (don't require any installation) -- cgit From 3d15adb40c5fc569bd29d4779fca792263338e54 Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> Date: Fri, 13 Aug 2010 10:14:34 -0400 Subject: Merge case statement options --- run_tests.sh | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/run_tests.sh b/run_tests.sh index 31bfce9fa..6ea40d95e 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -25,12 +25,9 @@ function process_options { function process_option { option=$1 case $option in - --help) usage;; - -h) usage;; - -V) let always_venv=1; let never_venv=0;; - --virtual-env) let always_venv=1; let never_venv=0;; - -N) let always_venv=0; let never_venv=1;; - --no-virtual-env) let always_venv=0; let never_venv=1;; + -h|--help) usage;; + -V|--virtual-env) let always_venv=1; let never_venv=0;; + -N|--no-virtual-env) let always_venv=0; let never_venv=1;; esac } -- cgit From bfb906cb0235a6e0b037d387aadc4abc2280fea0 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Fri, 13 Aug 2010 11:09:27 -0400 Subject: Support JSON and XML in Serializer --- nova/wsgi.py | 56 ++++++++++++++++++++++++++++++++++++++++++++++---------- 1 file 
changed, 46 insertions(+), 10 deletions(-) diff --git a/nova/wsgi.py b/nova/wsgi.py index 304f7149a..0570e1829 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -218,23 +218,59 @@ class Serializer(object): Serializes a dictionary to a Content Type specified by a WSGI environment. """ - def __init__(self, environ): - """Create a serializer based on the given WSGI environment.""" + def __init__(self, environ, metadata=None): + """ + Create a serializer based on the given WSGI environment. + 'metadata' is an optional dict mapping MIME types to information + needed to serialize a dictionary to that type. + """ self.environ = environ + self.metadata = metadata or {} - def serialize(self, data): + def to_content_type(self, data): """ Serialize a dictionary into a string. The format of the string will be decided based on the Content Type requested in self.environ: by Accept: header, or by URL suffix. """ - req = webob.Request(self.environ) - # TODO(gundlach): do XML correctly and be more robust - if req.accept and 'application/json' in req.accept: + mimetype = 'application/xml' + # TODO(gundlach): determine mimetype from request + + if mimetype == 'application/json': import json return json.dumps(data) + elif mimetype == 'application/xml': + metadata = self.metadata.get('application/xml', {}) + # We expect data to contain a single key which is the XML root. 
+ root_key = data.keys()[0] + from xml.dom import minidom + doc = minidom.Document() + node = self._to_xml_node(doc, metadata, root_key, data[root_key]) + return node.toprettyxml(indent=' ') else: - return '' + repr(data) + \ - '' - - + return repr(data) + + def _to_xml_node(self, doc, metadata, nodename, data): + result = doc.createElement(nodename) + if type(data) is list: + singular = metadata.get('plurals', {}).get(nodename, None) + if singular is None: + if nodename.endswith('s'): + singular = nodename[:-1] + else: + singular = 'item' + for item in data: + node = self._to_xml_node(doc, metadata, singular, item) + result.appendChild(node) + elif type(data) is dict: + attrs = metadata.get('attributes', {}).get(nodename, {}) + for k,v in data.items(): + if k in attrs: + result.setAttribute(k, str(v)) + else: + node = self._to_xml_node(doc, metadata, k, v) + result.appendChild(node) + else: # atom + node = doc.createTextNode(str(data)) + result.appendChild(node) + return result -- cgit From 8bdc9ec6f90341ed1a3890af283addc7c0a053c9 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 13 Aug 2010 12:51:38 -0700 Subject: pep8 and pylint cleanup --- nova/rpc.py | 123 +++++++++++++++++++++++++++++++++------------ nova/tests/rpc_unittest.py | 37 +++++++++++--- 2 files changed, 122 insertions(+), 38 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index e06a3e19b..4ac546c2a 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -21,14 +21,13 @@ AMQP-based RPC. Queues have consumers and publishers. No fan-out support yet. 
""" -from carrot import connection +from carrot import connection as carrot_connection from carrot import messaging import json import logging import sys import uuid from twisted.internet import defer -from twisted.internet import reactor from twisted.internet import task from nova import exception @@ -39,13 +38,15 @@ from nova import flags FLAGS = flags.FLAGS -_log = logging.getLogger('amqplib') -_log.setLevel(logging.DEBUG) +LOG = logging.getLogger('amqplib') +LOG.setLevel(logging.DEBUG) -class Connection(connection.BrokerConnection): +class Connection(carrot_connection.BrokerConnection): + """Connection instance object""" @classmethod def instance(cls): + """Returns the instance""" if not hasattr(cls, '_instance'): params = dict(hostname=FLAGS.rabbit_host, port=FLAGS.rabbit_port, @@ -56,18 +57,33 @@ class Connection(connection.BrokerConnection): if FLAGS.fake_rabbit: params['backend_cls'] = fakerabbit.Backend + # NOTE(vish): magic is fun! + # pylint: disable=W0142 cls._instance = cls(**params) return cls._instance @classmethod def recreate(cls): + """Recreates the connection instance + + This is necessary to recover from some network errors/disconnects""" del cls._instance return cls.instance() + class Consumer(messaging.Consumer): + """Consumer base class + + Contains methods for connecting the fetch method to async loops + """ + def __init__(self, *args, **kwargs): + self.failed_connection = False + super(Consumer, self).__init__(*args, **kwargs) + # TODO(termie): it would be nice to give these some way of automatically # cleaning up after themselves def attach_to_tornado(self, io_inst=None): + """Attach a callback to tornado that fires 10 times a second""" from tornado import ioloop if io_inst is None: io_inst = ioloop.IOLoop.instance() @@ -79,33 +95,44 @@ class Consumer(messaging.Consumer): attachToTornado = attach_to_tornado - def fetch(self, *args, **kwargs): + def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): + """Wraps the parent fetch 
with some logic for failed connections""" # TODO(vish): the logic for failed connections and logging should be # refactored into some sort of connection manager object try: - if getattr(self, 'failed_connection', False): - # attempt to reconnect + if self.failed_connection: + # NOTE(vish): conn is defined in the parent class, we can + # recreate it as long as we create the backend too + # pylint: disable=W0201 self.conn = Connection.recreate() self.backend = self.conn.create_backend() - super(Consumer, self).fetch(*args, **kwargs) - if getattr(self, 'failed_connection', False): + super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks) + if self.failed_connection: logging.error("Reconnected to queue") self.failed_connection = False - except Exception, ex: - if not getattr(self, 'failed_connection', False): + # NOTE(vish): This is catching all errors because we really don't + # exceptions to be logged 10 times a second if some + # persistent failure occurs. + except Exception: # pylint: disable=W0703 + if not self.failed_connection: logging.exception("Failed to fetch message from queue") self.failed_connection = True def attach_to_twisted(self): + """Attach a callback to twisted that fires 10 times a second""" loop = task.LoopingCall(self.fetch, enable_callbacks=True) loop.start(interval=0.1) + class Publisher(messaging.Publisher): + """Publisher base class""" pass class TopicConsumer(Consumer): + """Consumes messages on a specific topic""" exchange_type = "topic" + def __init__(self, connection=None, topic="broadcast"): self.queue = topic self.routing_key = topic @@ -115,14 +142,24 @@ class TopicConsumer(Consumer): class AdapterConsumer(TopicConsumer): + """Calls methods on a proxy object based on method and args""" def __init__(self, connection=None, topic="broadcast", proxy=None): - _log.debug('Initing the Adapter Consumer for %s' % (topic)) + LOG.debug('Initing the Adapter Consumer for %s' % (topic)) self.proxy = proxy - super(AdapterConsumer, 
self).__init__(connection=connection, topic=topic) + super(AdapterConsumer, self).__init__(connection=connection, + topic=topic) @exception.wrap_exception def receive(self, message_data, message): - _log.debug('received %s' % (message_data)) + """Magically looks for a method on the proxy object and calls it + + Message data should be a dictionary with two keys: + method: string representing the method to call + args: dictionary of arg: value + + Example: {'method': 'echo', 'args': {'value': 42}} + """ + LOG.debug('received %s' % (message_data)) msg_id = message_data.pop('_msg_id', None) method = message_data.get('method') @@ -133,12 +170,14 @@ class AdapterConsumer(TopicConsumer): # messages stay in the queue indefinitely, so for now # we just log the message and send an error string # back to the caller - _log.warn('no method for message: %s' % (message_data)) + LOG.warn('no method for message: %s' % (message_data)) msg_reply(msg_id, 'No method for message: %s' % message_data) return node_func = getattr(self.proxy, str(method)) node_args = dict((str(k), v) for k, v in args.iteritems()) + # NOTE(vish): magic is fun! 
+ # pylint: disable=W0142 d = defer.maybeDeferred(node_func, **node_args) if msg_id: d.addCallback(lambda rval: msg_reply(msg_id, rval, None)) @@ -147,7 +186,9 @@ class AdapterConsumer(TopicConsumer): class TopicPublisher(Publisher): + """Publishes messages on a specific topic""" exchange_type = "topic" + def __init__(self, connection=None, topic="broadcast"): self.routing_key = topic self.exchange = FLAGS.control_exchange @@ -156,7 +197,9 @@ class TopicPublisher(Publisher): class DirectConsumer(Consumer): + """Consumes messages directly on a channel specified by msg_id""" exchange_type = "direct" + def __init__(self, connection=None, msg_id=None): self.queue = msg_id self.routing_key = msg_id @@ -166,7 +209,9 @@ class DirectConsumer(Consumer): class DirectPublisher(Publisher): + """Publishes messages directly on a channel specified by msg_id""" exchange_type = "direct" + def __init__(self, connection=None, msg_id=None): self.routing_key = msg_id self.exchange = msg_id @@ -175,51 +220,62 @@ class DirectPublisher(Publisher): def msg_reply(msg_id, reply=None, failure=None): + """Sends a reply or an error on the channel signified by msg_id + + failure should be a twisted failure object""" if failure: message = failure.getErrorMessage() traceback = failure.getTraceback() logging.error("Returning exception %s to caller", message) logging.error(traceback) - failure = (failure.type.__name__, str(failure.value), traceback) + failure = (failure.type.__name__, str(failure.value), traceback) conn = Connection.instance() publisher = DirectPublisher(connection=conn, msg_id=msg_id) try: publisher.send({'result': reply, 'failure': failure}) - except Exception, exc: + except TypeError: publisher.send( {'result': dict((k, repr(v)) for k, v in reply.__dict__.iteritems()), - 'failure': failure - }) + 'failure': failure}) publisher.close() class RemoteError(exception.Error): - """signifies that a remote class has raised an exception""" - def __init__(self, type, value, traceback): - 
self.type = type + """Signifies that a remote class has raised an exception + + Containes a string representation of the type of the original exception, + the value of the original exception, and the traceback. These are + sent to the parent as a joined string so printing the exception + contains all of the relevent info.""" + def __init__(self, exc_type, value, traceback): + self.exc_type = exc_type self.value = value self.traceback = traceback - super(RemoteError, self).__init__("%s %s\n%s" % (type, + super(RemoteError, self).__init__("%s %s\n%s" % (exc_type, value, traceback)) def call(topic, msg): - _log.debug("Making asynchronous call...") + """Sends a message on a topic and wait for a response""" + LOG.debug("Making asynchronous call...") msg_id = uuid.uuid4().hex msg.update({'_msg_id': msg_id}) - _log.debug("MSG_ID is %s" % (msg_id)) + LOG.debug("MSG_ID is %s" % (msg_id)) conn = Connection.instance() d = defer.Deferred() consumer = DirectConsumer(connection=conn, msg_id=msg_id) + def deferred_receive(data, message): + """Acks message and callbacks or errbacks""" message.ack() if data['failure']: return d.errback(RemoteError(*data['failure'])) else: return d.callback(data['result']) + consumer.register_callback(deferred_receive) injected = consumer.attach_to_tornado() @@ -233,7 +289,8 @@ def call(topic, msg): def cast(topic, msg): - _log.debug("Making asynchronous cast...") + """Sends a message on a topic without waiting for a response""" + LOG.debug("Making asynchronous cast...") conn = Connection.instance() publisher = TopicPublisher(connection=conn, topic=topic) publisher.send(msg) @@ -241,16 +298,18 @@ def cast(topic, msg): def generic_response(message_data, message): - _log.debug('response %s', message_data) + """Logs a result and exits""" + LOG.debug('response %s', message_data) message.ack() sys.exit(0) def send_message(topic, message, wait=True): + """Sends a message for testing""" msg_id = uuid.uuid4().hex message.update({'_msg_id': msg_id}) - 
_log.debug('topic is %s', topic) - _log.debug('message %s', message) + LOG.debug('topic is %s', topic) + LOG.debug('message %s', message) if wait: consumer = messaging.Consumer(connection=Connection.instance(), @@ -273,6 +332,8 @@ def send_message(topic, message, wait=True): consumer.wait() -# TODO: Replace with a docstring test if __name__ == "__main__": + # NOTE(vish): you can send messages from the command line using + # topic and a json sting representing a dictionary + # for the method send_message(sys.argv[1], json.loads(sys.argv[2])) diff --git a/nova/tests/rpc_unittest.py b/nova/tests/rpc_unittest.py index 9c2e29344..764a97416 100644 --- a/nova/tests/rpc_unittest.py +++ b/nova/tests/rpc_unittest.py @@ -15,7 +15,9 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. - +""" +Unit Tests for remote procedure calls using queue +""" import logging from twisted.internet import defer @@ -29,7 +31,8 @@ FLAGS = flags.FLAGS class RpcTestCase(test.BaseTestCase): - def setUp(self): + """Test cases for rpc""" + def setUp(self): # pylint: disable=C0103 super(RpcTestCase, self).setUp() self.conn = rpc.Connection.instance() self.receiver = TestReceiver() @@ -40,23 +43,43 @@ class RpcTestCase(test.BaseTestCase): self.injected.append(self.consumer.attach_to_tornado(self.ioloop)) def test_call_succeed(self): + """Get a value through rpc call""" value = 42 - result = yield rpc.call('test', {"method": "echo", "args": {"value": value}}) + result = yield rpc.call('test', {"method": "echo", + "args": {"value": value}}) self.assertEqual(value, result) def test_call_exception(self): + """Test that exception gets passed back properly + + rpc.call returns a RemoteError object. The value of the + exception is converted to a string, so we convert it back + to an int in the test. 
+ """ value = 42 - self.assertFailure(rpc.call('test', {"method": "fail", "args": {"value": value}}), rpc.RemoteError) + self.assertFailure(rpc.call('test', {"method": "fail", + "args": {"value": value}}), + rpc.RemoteError) try: - yield rpc.call('test', {"method": "fail", "args": {"value": value}}) + yield rpc.call('test', {"method": "fail", + "args": {"value": value}}) self.fail("should have thrown rpc.RemoteError") except rpc.RemoteError as exc: self.assertEqual(int(exc.value), value) + class TestReceiver(object): - def echo(self, value): + """Simple Proxy class so the consumer has methods to call + + Uses static methods because we aren't actually storing any state""" + + @staticmethod + def echo(value): + """Simply returns whatever value is sent in""" logging.debug("Received %s", value) return defer.succeed(value) - def fail(self, value): + @staticmethod + def fail(value): + """Raises an exception with the value sent in""" raise Exception(value) -- cgit From d744a5e7bd7aef545def85d54c9e1fc3480c55fc Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 13 Aug 2010 14:09:30 -0700 Subject: Fixes out of order arguments in get_credentials --- bin/nova-manage | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/nova-manage b/bin/nova-manage index 6af092922..071436b13 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -206,7 +206,7 @@ class ProjectCommands(object): def zipfile(self, project_id, user_id, filename='nova.zip'): """Exports credentials for project to a zip file arguments: project_id user_id [filename='nova.zip]""" - zip_file = self.manager.get_credentials(project_id, user_id) + zip_file = self.manager.get_credentials(user_id, project_id) with open(filename, 'w') as f: f.write(zip_file) -- cgit From 8aa4d9c2f9f3f7cadda334a1161d66c2303e2979 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Fri, 13 Aug 2010 23:44:14 +0200 Subject: Remove extra "uml" from os.type. 
--- nova/virt/libvirt.uml.xml.template | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/libvirt.uml.xml.template b/nova/virt/libvirt.uml.xml.template index 0bc1507de..6f4290f98 100644 --- a/nova/virt/libvirt.uml.xml.template +++ b/nova/virt/libvirt.uml.xml.template @@ -2,7 +2,7 @@ %(name)s %(memory_kb)s - %(type)suml + %(type)s /usr/bin/linux /dev/ubda1 -- cgit From 2dd318827965f20d9a64e624e15dc1a1fee7bf5e Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Fri, 13 Aug 2010 23:45:05 +0200 Subject: Refactor LibvirtConnection a little bit for easier testing. --- nova/virt/libvirt_conn.py | 35 ++++++++++++++++++++++++++--------- 1 file changed, 26 insertions(+), 9 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index e2cdaaf7d..97e1b0ab2 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -44,10 +44,10 @@ libxml2 = None FLAGS = flags.FLAGS flags.DEFINE_string('libvirt_xml_template', - utils.abspath('compute/libvirt.qemu.xml.template'), + utils.abspath('virt/libvirt.qemu.xml.template'), 'Libvirt XML Template for QEmu/KVM') flags.DEFINE_string('libvirt_uml_xml_template', - utils.abspath('compute/libvirt.uml.xml.template'), + utils.abspath('virt/libvirt.uml.xml.template'), 'Libvirt XML Template for user-mode-linux') flags.DEFINE_string('injected_network_template', utils.abspath('virt/interfaces.template'), @@ -70,25 +70,42 @@ def get_connection(read_only): libxml2 = __import__('libxml2') return LibvirtConnection(read_only) - class LibvirtConnection(object): def __init__(self, read_only): - auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], - 'root', - None] + self.libvirt_uri, template_file = self.get_uri_and_template() + + self.libvirt_xml = open(template_file).read() + self._wrapped_conn = None + self.read_only = read_only + + + @property + def _conn(self): + if not self._wrapped_conn: + self._wrapped_conn = self._connect(self.libvirt_uri, self.read_only) + return 
self._wrapped_conn + + def get_uri_and_template(self): if FLAGS.libvirt_type == 'uml': uri = FLAGS.libvirt_uri or 'uml:///system' template_file = FLAGS.libvirt_uml_xml_template else: uri = FLAGS.libvirt_uri or 'qemu:///system' template_file = FLAGS.libvirt_xml_template - self.libvirt_xml = open(template_file).read() + return uri, template_file + + + def _connect(self, uri, read_only): + auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], + 'root', + None] if read_only: - self._conn = libvirt.openReadOnly(uri) + return libvirt.openReadOnly(uri) else: - self._conn = libvirt.openAuth(uri, auth, 0) + return libvirt.openAuth(uri, auth, 0) + def list_instances(self): -- cgit From 49a20981634e880fa14420f0b18b3c64b1f6c06f Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Fri, 13 Aug 2010 23:45:26 +0200 Subject: Move interfaces template into virt/, too. --- nova/compute/interfaces.template | 18 ------------------ nova/virt/interfaces.template | 18 ++++++++++++++++++ 2 files changed, 18 insertions(+), 18 deletions(-) delete mode 100644 nova/compute/interfaces.template create mode 100644 nova/virt/interfaces.template diff --git a/nova/compute/interfaces.template b/nova/compute/interfaces.template deleted file mode 100644 index 11df301f6..000000000 --- a/nova/compute/interfaces.template +++ /dev/null @@ -1,18 +0,0 @@ -# This file describes the network interfaces available on your system -# and how to activate them. For more information, see interfaces(5). 
- -# The loopback network interface -auto lo -iface lo inet loopback - -# The primary network interface -auto eth0 -iface eth0 inet static - address %(address)s - netmask %(netmask)s - network %(network)s - broadcast %(broadcast)s - gateway %(gateway)s - dns-nameservers %(dns)s - - diff --git a/nova/virt/interfaces.template b/nova/virt/interfaces.template new file mode 100644 index 000000000..11df301f6 --- /dev/null +++ b/nova/virt/interfaces.template @@ -0,0 +1,18 @@ +# This file describes the network interfaces available on your system +# and how to activate them. For more information, see interfaces(5). + +# The loopback network interface +auto lo +iface lo inet loopback + +# The primary network interface +auto eth0 +iface eth0 inet static + address %(address)s + netmask %(netmask)s + network %(network)s + broadcast %(broadcast)s + gateway %(gateway)s + dns-nameservers %(dns)s + + -- cgit From 7bbf2f7f9f7c7c49287519207e56932e28061514 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Fri, 13 Aug 2010 23:46:44 +0200 Subject: Add a few unit tests for libvirt_conn. --- nova/tests/virt_unittest.py | 69 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) create mode 100644 nova/tests/virt_unittest.py diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py new file mode 100644 index 000000000..2aab16809 --- /dev/null +++ b/nova/tests/virt_unittest.py @@ -0,0 +1,69 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2010 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova import flags +from nova import test +from nova.virt import libvirt_conn + +FLAGS = flags.FLAGS + + +class LibvirtConnTestCase(test.TrialTestCase): + def test_get_uri_and_template(self): + class MockDataModel(object): + def __init__(self): + self.datamodel = { 'name' : 'i-cafebabe', + 'memory_kb' : '1024000', + 'basepath' : '/some/path', + 'bridge_name' : 'br100', + 'mac_address' : '02:12:34:46:56:67', + 'vcpus' : 2 } + + type_uri_map = { 'qemu' : ('qemu:///system', + [lambda s: '' in s, + lambda s: 'type>hvm/usr/bin/kvm' not in s]), + 'kvm' : ('qemu:///system', + [lambda s: '' in s, + lambda s: 'type>hvm/usr/bin/qemu<' not in s]), + 'uml' : ('uml:///system', + [lambda s: '' in s, + lambda s: 'type>uml Date: Sat, 14 Aug 2010 11:46:10 +0100 Subject: Bug #617776: DescribeImagesResponse contains type element, when it should be called imageType Make the objectstore respond with the field 'imageType' as well as 'type'. The former is the correct one, according to the EC2 API specification for the DescribeImages response. The latter is for compatibility with euca2ools and other clients. --- nova/objectstore/handler.py | 18 +++++++++++++++++- nova/objectstore/image.py | 4 ++-- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py index f625a2aa1..dfe1918e3 100644 --- a/nova/objectstore/handler.py +++ b/nova/objectstore/handler.py @@ -269,7 +269,23 @@ class ImagesResource(Resource): images = [i for i in image.Image.all() \ if i.is_authorized(request.context, readonly=True)] - request.write(json.dumps([i.metadata for i in images])) + # Bug #617776: + # We used to have 'type' in the image metadata, but this field + # should be called 'imageType', as per the EC2 specification. + # For compat with old metadata files we copy type to imageType if + # imageType is not present. 
+ # For compat with euca2ools (and any other clients using the + # incorrect name) we copy imageType to type. + # imageType is primary if we end up with both in the metadata file + # (which should never happen). + def decorate(m): + if 'imageType' not in m and 'type' in m: + m[u'imageType'] = m['type'] + elif 'imageType' in m: + m[u'type'] = m['imageType'] + return m + + request.write(json.dumps([decorate(i.metadata) for i in images])) request.finish() return server.NOT_DONE_YET diff --git a/nova/objectstore/image.py b/nova/objectstore/image.py index 860298ba6..861eb364f 100644 --- a/nova/objectstore/image.py +++ b/nova/objectstore/image.py @@ -148,7 +148,7 @@ class Image(object): 'imageOwnerId': 'system', 'isPublic': public, 'architecture': 'x86_64', - 'type': image_type, + 'imageType': image_type, 'state': 'available' } @@ -195,7 +195,7 @@ class Image(object): 'imageOwnerId': context.project.id, 'isPublic': False, # FIXME: grab public from manifest 'architecture': 'x86_64', # FIXME: grab architecture from manifest - 'type' : image_type + 'imageType' : image_type } def write_state(state): -- cgit From b323a5fc6d08b52bde18c64fea70a7b3421cadc3 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Sat, 14 Aug 2010 19:04:19 +0100 Subject: Bug 617913: RunInstances response doesn't meet EC2 specification Fix the RunInstances response to match the EC2 specification. This involved moving the instance details from to . 
--- nova/endpoint/cloud.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index ad9188ff3..eb0c05229 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -392,7 +392,15 @@ class CloudController(object): @rbac.allow('all') def describe_instances(self, context, **kwargs): - return defer.succeed(self._format_instances(context)) + return defer.succeed(self._format_describe_instances(context)) + + def _format_describe_instances(self, context): + return { 'reservationSet': self._format_instances(context) } + + def _format_run_instances(self, context, reservation_id): + i = self._format_instances(context, reservation_id) + assert len(i) == 1 + return i[0] def _format_instances(self, context, reservation_id = None): reservations = {} @@ -441,8 +449,7 @@ class CloudController(object): reservations[res_id] = r reservations[res_id]['instances_set'].append(i) - instance_response = {'reservationSet': list(reservations.values())} - return instance_response + return list(reservations.values()) @rbac.allow('all') def describe_addresses(self, context, **kwargs): @@ -594,7 +601,7 @@ class CloudController(object): logging.debug("Casting to node for %s's instance with IP of %s" % (context.user.name, inst['private_dns_name'])) # TODO: Make Network figure out the network name from ip. - defer.returnValue(self._format_instances(context, reservation_id)) + defer.returnValue(self._format_run_instances(context, reservation_id)) @rbac.allow('projectmanager', 'sysadmin') @defer.inlineCallbacks -- cgit From b50107ec739bc40e29d76ff56587ddbb478bd878 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Sat, 14 Aug 2010 23:23:03 +0100 Subject: Update cloud_unittest to match renamed internal function. 
--- nova/tests/cloud_unittest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py index 40837405c..3501771cc 100644 --- a/nova/tests/cloud_unittest.py +++ b/nova/tests/cloud_unittest.py @@ -132,7 +132,7 @@ class CloudTestCase(test.BaseTestCase): 'state': 0x01, 'user_data': '' } - rv = self.cloud._format_instances(self.context) + rv = self.cloud._format_describe_instances(self.context) self.assert_(len(rv['reservationSet']) == 0) # simulate launch of 5 instances -- cgit From fb6bf337bc2fe702307842b57e33b9f5f9011147 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Sun, 15 Aug 2010 22:48:54 +0100 Subject: Rework virt.xenapi's concurrency model. There were many places where we were inadvertently blocking the reactor thread. The reworking puts all calls to XenAPI on background threads, so that they won't block the reactor thread. Long-lived operations (VM start, reboot, etc) are invoked asynchronously at the XenAPI level (Async.VM.start, etc). These return a XenAPI task. We relinquish the background thread at this point, so as not to hold threads in the pool for too long, and use reactor.callLater to poll the task. This combination of techniques means that we don't block the reactor thread at all, and at the same time we don't hold lots of threads waiting for long-running operations. There is a FIXME in here: get_info does not conform to these new rules. Changes are required in compute.service before we can make get_info non-blocking. --- nova/virt/xenapi.py | 178 +++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 147 insertions(+), 31 deletions(-) diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index 9fe15644f..6b41061c1 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -16,15 +16,33 @@ """ A connection to XenServer or Xen Cloud Platform. 
+ +The concurrency model for this class is as follows: + +All XenAPI calls are on a thread (using t.i.t.deferToThread, or the decorator +deferredToThread). They are remote calls, and so may hang for the usual +reasons. They should not be allowed to block the reactor thread. + +All long-running XenAPI calls (VM.start, VM.reboot, etc) are called async +(using XenAPI.VM.async_start etc). These return a task, which can then be +polled for completion. Polling is handled using reactor.callLater. + +This combination of techniques means that we don't block the reactor thread at +all, and at the same time we don't hold lots of threads waiting for +long-running operations. + +FIXME: get_info currently doesn't conform to these rules, and will block the +reactor thread if the VM.get_by_name_label or VM.get_record calls block. """ import logging import xmlrpclib from twisted.internet import defer +from twisted.internet import reactor from twisted.internet import task +from twisted.internet.threads import deferToThread -from nova import exception from nova import flags from nova import process from nova.auth.manager import AuthManager @@ -43,6 +61,9 @@ flags.DEFINE_string('xenapi_connection_username', flags.DEFINE_string('xenapi_connection_password', None, 'Password for connection to XenServer/Xen Cloud Platform. Used only if connection_type=xenapi.') +flags.DEFINE_float('xenapi_task_poll_interval', + 0.5, + 'The interval used for polling of remote tasks (Async.VM.start, etc). 
Used only if connection_type=xenapi.') def get_connection(_): @@ -61,6 +82,12 @@ def get_connection(_): return XenAPIConnection(url, username, password) +def deferredToThread(f): + def g(*args, **kwargs): + return deferToThread(f, *args, **kwargs) + return g + + class XenAPIConnection(object): def __init__(self, url, user, pw): @@ -72,9 +99,8 @@ class XenAPIConnection(object): for vm in self._conn.xenapi.VM.get_all()] @defer.inlineCallbacks - @exception.wrap_exception def spawn(self, instance): - vm = yield self.lookup(instance.name) + vm = yield self._lookup(instance.name) if vm is not None: raise Exception('Attempted to create non-unique name %s' % instance.name) @@ -93,22 +119,28 @@ class XenAPIConnection(object): user = AuthManager().get_user(instance.datamodel['user_id']) project = AuthManager().get_project(instance.datamodel['project_id']) - vdi_uuid = yield self.fetch_image( + vdi_uuid = yield self._fetch_image( instance.datamodel['image_id'], user, project, True) - kernel = yield self.fetch_image( + kernel = yield self._fetch_image( instance.datamodel['kernel_id'], user, project, False) - ramdisk = yield self.fetch_image( + ramdisk = yield self._fetch_image( instance.datamodel['ramdisk_id'], user, project, False) - vdi_ref = yield self._conn.xenapi.VDI.get_by_uuid(vdi_uuid) + vdi_ref = yield self._call_xenapi('VDI.get_by_uuid', vdi_uuid) - vm_ref = yield self.create_vm(instance, kernel, ramdisk) - yield self.create_vbd(vm_ref, vdi_ref, 0, True) + vm_ref = yield self._create_vm(instance, kernel, ramdisk) + yield self._create_vbd(vm_ref, vdi_ref, 0, True) if network_ref: yield self._create_vif(vm_ref, network_ref, mac_address) - yield self._conn.xenapi.VM.start(vm_ref, False, False) + logging.debug('Starting VM %s...', vm_ref) + yield self._call_xenapi('VM.start', vm_ref, False, False) + logging.info('Spawning VM %s created %s.', instance.name, vm_ref) - def create_vm(self, instance, kernel, ramdisk): + @defer.inlineCallbacks + def _create_vm(self, instance, 
kernel, ramdisk): + """Create a VM record. Returns a Deferred that gives the new + VM reference.""" + mem = str(long(instance.datamodel['memory_kb']) * 1024) vcpus = str(instance.datamodel['vcpus']) rec = { @@ -141,12 +173,16 @@ class XenAPIConnection(object): 'other_config': {}, } logging.debug('Created VM %s...', instance.name) - vm_ref = self._conn.xenapi.VM.create(rec) + vm_ref = yield self._call_xenapi('VM.create', rec) logging.debug('Created VM %s as %s.', instance.name, vm_ref) - return vm_ref + defer.returnValue(vm_ref) - def create_vbd(self, vm_ref, vdi_ref, userdevice, bootable): + @defer.inlineCallbacks + def _create_vbd(self, vm_ref, vdi_ref, userdevice, bootable): + """Create a VBD record. Returns a Deferred that gives the new + VBD reference.""" + vbd_rec = {} vbd_rec['VM'] = vm_ref vbd_rec['VDI'] = vdi_ref @@ -161,13 +197,17 @@ class XenAPIConnection(object): vbd_rec['qos_algorithm_params'] = {} vbd_rec['qos_supported_algorithms'] = [] logging.debug('Creating VBD for VM %s, VDI %s ... ', vm_ref, vdi_ref) - vbd_ref = self._conn.xenapi.VBD.create(vbd_rec) + vbd_ref = yield self._call_xenapi('VBD.create', vbd_rec) logging.debug('Created VBD %s for VM %s, VDI %s.', vbd_ref, vm_ref, vdi_ref) - return vbd_ref + defer.returnValue(vbd_ref) + @defer.inlineCallbacks def _create_vif(self, vm_ref, network_ref, mac_address): + """Create a VIF record. Returns a Deferred that gives the new + VIF reference.""" + vif_rec = {} vif_rec['device'] = '0' vif_rec['network']= network_ref @@ -179,27 +219,31 @@ class XenAPIConnection(object): vif_rec['qos_algorithm_params'] = {} logging.debug('Creating VIF for VM %s, network %s ... 
', vm_ref, network_ref) - vif_ref = self._conn.xenapi.VIF.create(vif_rec) + vif_ref = yield self._call_xenapi('VIF.create', vif_rec) logging.debug('Created VIF %s for VM %s, network %s.', vif_ref, vm_ref, network_ref) - return vif_ref + defer.returnValue(vif_ref) + @defer.inlineCallbacks def _find_network_with_bridge(self, bridge): expr = 'field "bridge" = "%s"' % bridge - networks = self._conn.xenapi.network.get_all_records_where(expr) + networks = yield self._call_xenapi('network.get_all_records_where', + expr) if len(networks) == 1: - return networks.keys()[0] + defer.returnValue(networks.keys()[0]) elif len(networks) > 1: raise Exception('Found non-unique network for bridge %s' % bridge) else: raise Exception('Found no network for bridge %s' % bridge) - def fetch_image(self, image, user, project, use_sr): + @defer.inlineCallbacks + def _fetch_image(self, image, user, project, use_sr): """use_sr: True to put the image as a VDI in an SR, False to place it on dom0's filesystem. The former is for VM disks, the latter for - its kernel and ramdisk (if external kernels are being used).""" + its kernel and ramdisk (if external kernels are being used). 
+ Returns a Deferred that gives the new VDI UUID.""" url = images.image_url(image) access = AuthManager().get_access_key(user, project) @@ -211,23 +255,31 @@ class XenAPIConnection(object): args['password'] = user.secret if use_sr: args['add_partition'] = 'true' - return self._call_plugin('objectstore', fn, args) + task = yield self._async_call_plugin('objectstore', fn, args) + uuid = yield self._wait_for_task(task) + defer.returnValue(uuid) + @defer.inlineCallbacks def reboot(self, instance): - vm = self.lookup(instance.name) + vm = yield self._lookup(instance.name) if vm is None: raise Exception('instance not present %s' % instance.name) - yield self._conn.xenapi.VM.clean_reboot(vm) + task = yield self._call_xenapi('Async.VM.clean_reboot', vm) + yield self._wait_for_task(task) + + @defer.inlineCallbacks def destroy(self, instance): - vm = self.lookup(instance.name) + vm = yield self._lookup(instance.name) if vm is None: raise Exception('instance not present %s' % instance.name) - yield self._conn.xenapi.VM.destroy(vm) + task = yield self._call_xenapi('Async.VM.destroy', vm) + yield self._wait_for_task(task) + def get_info(self, instance_id): - vm = self.lookup(instance_id) + vm = self._lookup_blocking(instance_id) if vm is None: raise Exception('instance not present %s' % instance_id) rec = self._conn.xenapi.VM.get_record(vm) @@ -237,7 +289,13 @@ class XenAPIConnection(object): 'num_cpu': rec['VCPUs_max'], 'cpu_time': 0} - def lookup(self, i): + + @deferredToThread + def _lookup(self, i): + return self._lookup_blocking(i) + + + def _lookup_blocking(self, i): vms = self._conn.xenapi.VM.get_by_name_label(i) n = len(vms) if n == 0: @@ -248,9 +306,55 @@ class XenAPIConnection(object): return vms[0] - def _call_plugin(self, plugin, fn, args): + def _wait_for_task(self, task): + """Return a Deferred that will give the result of the given task. 
+ The task is polled until it completes.""" + d = defer.Deferred() + reactor.callLater(0, self._poll_task, task, d) + return d + + + @deferredToThread + def _poll_task(self, task, deferred): + """Poll the given XenAPI task, and fire the given Deferred if we + get a result.""" + try: + #logging.debug('Polling task %s...', task) + status = self._conn.xenapi.task.get_status(task) + if status == 'pending': + reactor.callLater(FLAGS.xenapi_task_poll_interval, + self._poll_task, task, deferred) + elif status == 'success': + result = self._conn.xenapi.task.get_result(task) + logging.info('Task %s status: success. %s', task, result) + deferred.callback(_parse_xmlrpc_value(result)) + else: + error_info = self._conn.xenapi.task.get_error_info(task) + logging.warn('Task %s status: %s. %s', task, status, + error_info) + deferred.errback(XenAPI.Failure(error_info)) + #logging.debug('Polling task %s done.', task) + except Exception, exn: + logging.warn(exn) + deferred.errback(exn) + + + @deferredToThread + def _call_xenapi(self, method, *args): + """Call the specified XenAPI method on a background thread. Returns + a Deferred for the result.""" + f = self._conn.xenapi + for m in method.split('.'): + f = f.__getattr__(m) + return f(*args) + + + @deferredToThread + def _async_call_plugin(self, plugin, fn, args): + """Call Async.host.call_plugin on a background thread. Returns a + Deferred with the task reference.""" return _unwrap_plugin_exceptions( - self._conn.xenapi.host.call_plugin, + self._conn.xenapi.Async.host.call_plugin, self._get_xenapi_host(), plugin, fn, args) @@ -286,3 +390,15 @@ def _unwrap_plugin_exceptions(func, *args, **kwargs): except xmlrpclib.ProtocolError, exn: logging.debug("Got exception: %s", exn) raise + + +def _parse_xmlrpc_value(val): + """Parse the given value as if it were an XML-RPC value. 
This is + sometimes used as the format for the task.result field.""" + if not val: + return val + x = xmlrpclib.loads( + '' + + val + + '') + return x[0][0] -- cgit From d1185adcf6f060c125274d31cf11a4f750521d24 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Sun, 15 Aug 2010 23:11:52 +0100 Subject: Add documentation to spawn, reboot, and destroy stating that those functions should return Deferreds. Update the fake implementations to do so (the libvirt ones already do, and making the xenapi ones do so is the subject of a current merge request). --- nova/virt/fake.py | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 105837181..155833f3f 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -24,6 +24,8 @@ This module also documents the semantics of real hypervisor connections. import logging +from twisted.internet import defer + from nova.compute import power_state @@ -89,10 +91,13 @@ class FakeConnection(object): This function should use the data there to guide the creation of the new instance. - Once this function successfully completes, the instance should be + The work will be done asynchronously. This function returns a + Deferred that allows the caller to detect when it is complete. + + Once this successfully completes, the instance should be running (power_state.RUNNING). - If this function fails, any partial instance should be completely + If this fails, any partial instance should be completely cleaned up, and the virtualization platform should be in the state that it was before this call began. 
""" @@ -100,6 +105,7 @@ class FakeConnection(object): fake_instance = FakeInstance() self.instances[instance.name] = fake_instance fake_instance._state = power_state.RUNNING + return defer.succeed(None) def reboot(self, instance): """ @@ -107,8 +113,11 @@ class FakeConnection(object): The given parameter is an instance of nova.compute.service.Instance, and so the instance is being specified as instance.name. + + The work will be done asynchronously. This function returns a + Deferred that allows the caller to detect when it is complete. """ - pass + return defer.succeed(None) def destroy(self, instance): """ @@ -116,8 +125,12 @@ class FakeConnection(object): The given parameter is an instance of nova.compute.service.Instance, and so the instance is being specified as instance.name. + + The work will be done asynchronously. This function returns a + Deferred that allows the caller to detect when it is complete. """ del self.instances[instance.name] + return defer.succeed(None) def get_info(self, instance_id): """ -- cgit From d508418214016d5c00aa8d304f9498f5b99a960b Mon Sep 17 00:00:00 2001 From: andy Date: Mon, 16 Aug 2010 14:16:21 +0200 Subject: rather comprehensive style fixes --- nova/adminclient.py | 7 +++++++ nova/auth/fakeldap.py | 1 - nova/auth/ldapdriver.py | 1 + nova/auth/manager.py | 12 +++++++----- nova/auth/rbac.py | 2 ++ nova/auth/signer.py | 10 +++++++--- nova/cloudpipe/api.py | 3 ++- nova/cloudpipe/pipelib.py | 2 +- nova/compute/disk.py | 4 ++++ nova/compute/model.py | 2 ++ nova/compute/monitor.py | 35 +++++++++++++++++++++-------------- nova/compute/service.py | 1 + nova/crypto.py | 8 +++++++- nova/endpoint/admin.py | 4 ++++ nova/endpoint/api.py | 7 +++++-- nova/endpoint/cloud.py | 3 +-- nova/endpoint/images.py | 7 ++++++- nova/exception.py | 8 ++++++++ nova/fakerabbit.py | 5 +++-- nova/flags.py | 40 +++++++++++++++++----------------------- nova/network/exception.py | 12 ++++++------ nova/network/linux_net.py | 6 +++--- nova/network/model.py | 3 ++- 
nova/network/service.py | 16 ++++++++-------- nova/network/vpn.py | 3 +-- nova/objectstore/bucket.py | 1 + nova/objectstore/handler.py | 38 ++++++++++++++++++++++++++------------ nova/objectstore/image.py | 1 + nova/objectstore/stored.py | 4 ++-- nova/process.py | 3 +++ nova/rpc.py | 5 +++-- nova/test.py | 5 ++--- nova/utils.py | 12 +++++++++--- nova/validate.py | 1 + nova/virt/images.py | 8 ++++++-- nova/virt/libvirt_conn.py | 23 +++++------------------ nova/virt/xenapi.py | 39 +++++++++++++++++---------------------- nova/volume/service.py | 4 +++- run_tests.py | 9 ++++----- 39 files changed, 209 insertions(+), 146 deletions(-) diff --git a/nova/adminclient.py b/nova/adminclient.py index 242298a75..0ca32b1e5 100644 --- a/nova/adminclient.py +++ b/nova/adminclient.py @@ -20,6 +20,7 @@ Nova User API client library. """ import base64 + import boto from boto.ec2.regioninfo import RegionInfo @@ -57,6 +58,7 @@ class UserInfo(object): elif name == 'secretkey': self.secretkey = str(value) + class UserRole(object): """ Information about a Nova user's role, as parsed through SAX. @@ -79,6 +81,7 @@ class UserRole(object): else: setattr(self, name, str(value)) + class ProjectInfo(object): """ Information about a Nova project, as parsed through SAX @@ -114,12 +117,14 @@ class ProjectInfo(object): else: setattr(self, name, str(value)) + class ProjectMember(object): """ Information about a Nova project member, as parsed through SAX. 
Fields include: memberId """ + def __init__(self, connection=None): self.connection = connection self.memberId = None @@ -135,6 +140,7 @@ class ProjectMember(object): self.memberId = value else: setattr(self, name, str(value)) + class HostInfo(object): """ @@ -163,6 +169,7 @@ class HostInfo(object): def endElement(self, name, value, connection): setattr(self, name, value) + class NovaAdminClient(object): def __init__(self, clc_ip='127.0.0.1', region='nova', access_key='admin', secret_key='admin', **kwargs): diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py index b420924af..bc744fa01 100644 --- a/nova/auth/fakeldap.py +++ b/nova/auth/fakeldap.py @@ -219,7 +219,6 @@ class FakeLDAP(object): raise NO_SUCH_OBJECT() return objects - @property def __redis_prefix(self): return 'ldap:' diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 453fa196c..6bf7fcd1e 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -30,6 +30,7 @@ import sys from nova import exception from nova import flags + FLAGS = flags.FLAGS flags.DEFINE_string('ldap_url', 'ldap://localhost', 'Point this at your ldap server') diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 064fd78bc..80ee78896 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -37,7 +37,6 @@ from nova.network import vpn FLAGS = flags.FLAGS - flags.DEFINE_list('allowed_roles', ['cloudadmin', 'itsec', 'sysadmin', 'netadmin', 'developer'], 'Allowed roles for project') @@ -52,7 +51,6 @@ flags.DEFINE_list('superuser_roles', ['cloudadmin'], flags.DEFINE_list('global_roles', ['cloudadmin', 'itsec'], 'Roles that apply to all projects') - flags.DEFINE_string('credentials_template', utils.abspath('auth/novarc.template'), 'Template for creating users rc file') @@ -67,15 +65,14 @@ flags.DEFINE_string('credential_cert_file', 'cert.pem', 'Filename of certificate in credentials zip') flags.DEFINE_string('credential_rc_file', 'novarc', 'Filename of rc in credentials zip') - 
flags.DEFINE_string('credential_cert_subject', '/C=US/ST=California/L=MountainView/O=AnsoLabs/' 'OU=NovaDev/CN=%s-%s', 'Subject for certificate for users') - flags.DEFINE_string('auth_driver', 'nova.auth.ldapdriver.FakeLdapDriver', 'Driver that auth manager uses') + class AuthBase(object): """Base class for objects relating to auth @@ -83,6 +80,7 @@ class AuthBase(object): an id member. They may optionally contain methods that delegate to AuthManager, but should not implement logic themselves. """ + @classmethod def safe_id(cls, obj): """Safe get object id @@ -100,6 +98,7 @@ class AuthBase(object): class User(AuthBase): """Object representing a user""" + def __init__(self, id, name, access, secret, admin): AuthBase.__init__(self) self.id = id @@ -161,6 +160,7 @@ class KeyPair(AuthBase): Even though this object is named KeyPair, only the public key and fingerprint is stored. The user's private key is not saved. """ + def __init__(self, id, name, owner_id, public_key, fingerprint): AuthBase.__init__(self) self.id = id @@ -179,6 +179,7 @@ class KeyPair(AuthBase): class Project(AuthBase): """Represents a Project returned from the datastore""" + def __init__(self, id, name, project_manager_id, description, member_ids): AuthBase.__init__(self) self.id = id @@ -227,7 +228,6 @@ class Project(AuthBase): self.member_ids) - class AuthManager(object): """Manager Singleton for dealing with Users, Projects, and Keypairs @@ -239,7 +239,9 @@ class AuthManager(object): AuthManager also manages associated data related to Auth objects that need to be more accessible, such as vpn ips and ports. 
""" + _instance = None + def __new__(cls, *args, **kwargs): """Returns the AuthManager singleton""" if not cls._instance: diff --git a/nova/auth/rbac.py b/nova/auth/rbac.py index 7fab9419f..1446e4e27 100644 --- a/nova/auth/rbac.py +++ b/nova/auth/rbac.py @@ -32,6 +32,7 @@ def allow(*roles): return wrapped_f return wrap + def deny(*roles): def wrap(f): def wrapped_f(self, context, *args, **kwargs): @@ -44,6 +45,7 @@ def deny(*roles): return wrapped_f return wrap + def __matches_role(context, role): if role == 'all': return True diff --git a/nova/auth/signer.py b/nova/auth/signer.py index 634f22f0d..8334806d2 100644 --- a/nova/auth/signer.py +++ b/nova/auth/signer.py @@ -48,11 +48,15 @@ import hashlib import hmac import logging import urllib -import boto # NOTE(vish): for new boto -import boto.utils # NOTE(vish): for old boto + +# NOTE(vish): for new boto +import boto +# NOTE(vish): for old boto +import boto.utils from nova.exception import Error + class Signer(object): """ hacked up code from boto/connection.py """ @@ -77,7 +81,6 @@ class Signer(object): return self._calc_signature_2(params, verb, server_string, path) raise Error('Unknown Signature Version: %s' % self.SignatureVersion) - def _get_utf8_value(self, value): if not isinstance(value, str) and not isinstance(value, unicode): value = str(value) @@ -133,5 +136,6 @@ class Signer(object): logging.debug('base64 encoded digest: %s' % b64) return b64 + if __name__ == '__main__': print Signer('foo').generate({"SignatureMethod": 'HmacSHA256', 'SignatureVersion': '2'}, "get", "server", "/foo") diff --git a/nova/cloudpipe/api.py b/nova/cloudpipe/api.py index 0bffe9aa3..56aa89834 100644 --- a/nova/cloudpipe/api.py +++ b/nova/cloudpipe/api.py @@ -21,9 +21,10 @@ Tornado REST API Request Handlers for CloudPipe """ import logging -import tornado.web import urllib +import tornado.web + from nova import crypto from nova.auth import manager diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py index 
5b0ed3471..2867bcb21 100644 --- a/nova/cloudpipe/pipelib.py +++ b/nova/cloudpipe/pipelib.py @@ -36,11 +36,11 @@ from nova.endpoint import api FLAGS = flags.FLAGS - flags.DEFINE_string('boot_script_template', utils.abspath('cloudpipe/bootscript.sh'), 'Template for script to run on cloudpipe instance boot') + class CloudPipe(object): def __init__(self, cloud_controller): self.controller = cloud_controller diff --git a/nova/compute/disk.py b/nova/compute/disk.py index 1ffcca685..c340c5a79 100644 --- a/nova/compute/disk.py +++ b/nova/compute/disk.py @@ -24,6 +24,7 @@ Includes injection of SSH PGP keys into authorized_keys file. import logging import os import tempfile + from twisted.internet import defer from nova import exception @@ -84,6 +85,7 @@ def partition(infile, outfile, local_bytes=0, local_type='ext2', execute=None): yield execute('dd if=%s of=%s bs=%d seek=%d conv=notrunc,fsync' % (infile, outfile, sector_size, primary_first)) + @defer.inlineCallbacks def inject_data(image, key=None, net=None, partition=None, execute=None): """Injects a ssh key and optionally net data into a disk image. 
@@ -137,6 +139,7 @@ def inject_data(image, key=None, net=None, partition=None, execute=None): # remove loopback yield execute('sudo losetup -d %s' % device) + @defer.inlineCallbacks def _inject_key_into_fs(key, fs, execute=None): sshdir = os.path.join(os.path.join(fs, 'root'), '.ssh') @@ -146,6 +149,7 @@ def _inject_key_into_fs(key, fs, execute=None): keyfile = os.path.join(sshdir, 'authorized_keys') yield execute('sudo tee -a %s' % keyfile, '\n' + key.strip() + '\n') + @defer.inlineCallbacks def _inject_net_into_fs(net, fs, execute=None): netfile = os.path.join(os.path.join(os.path.join( diff --git a/nova/compute/model.py b/nova/compute/model.py index 266a93b9a..84432b55f 100644 --- a/nova/compute/model.py +++ b/nova/compute/model.py @@ -168,6 +168,7 @@ class Instance(datastore.BasicModel): self.unassociate_with("ip", self.state['private_dns_name']) return super(Instance, self).destroy() + class Host(datastore.BasicModel): """A Host is the machine where a Daemon is running.""" @@ -235,6 +236,7 @@ class Daemon(datastore.BasicModel): for x in cls.associated_to("host", hostname): yield x + class SessionToken(datastore.BasicModel): """This is a short-lived auth token that is passed through web requests""" diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py index 19e1a483d..268864900 100644 --- a/nova/compute/monitor.py +++ b/nova/compute/monitor.py @@ -24,14 +24,15 @@ Instance Monitoring: in the object store. 
""" -import boto -import boto.s3 import datetime import logging import os -import rrdtool import sys import time + +import boto +import boto.s3 +import rrdtool from twisted.internet import defer from twisted.internet import task from twisted.application import service @@ -41,13 +42,12 @@ from nova.virt import connection as virt_connection FLAGS = flags.FLAGS -flags.DEFINE_integer( - 'monitoring_instances_delay', 5, 'Sleep time between updates') -flags.DEFINE_integer( - 'monitoring_instances_step', 300, 'Interval of RRD updates') -flags.DEFINE_string( - 'monitoring_rrd_path', '/var/nova/monitor/instances', - 'Location of RRD files') +flags.DEFINE_integer('monitoring_instances_delay', 5, + 'Sleep time between updates') +flags.DEFINE_integer('monitoring_instances_step', 300, + 'Interval of RRD updates') +flags.DEFINE_string('monitoring_rrd_path', '/var/nova/monitor/instances', + 'Location of RRD files') RRD_VALUES = { @@ -61,7 +61,7 @@ RRD_VALUES = { 'RRA:MAX:0.5:6:800', 'RRA:MAX:0.5:24:800', 'RRA:MAX:0.5:288:800', - ], + ], 'net': [ 'DS:rx:COUNTER:600:0:1250000', 'DS:tx:COUNTER:600:0:1250000', @@ -73,7 +73,7 @@ RRD_VALUES = { 'RRA:MAX:0.5:6:800', 'RRA:MAX:0.5:24:800', 'RRA:MAX:0.5:288:800', - ], + ], 'disk': [ 'DS:rd:COUNTER:600:U:U', 'DS:wr:COUNTER:600:U:U', @@ -85,12 +85,13 @@ RRD_VALUES = { 'RRA:MAX:0.5:6:800', 'RRA:MAX:0.5:24:800', 'RRA:MAX:0.5:444:800', - ] -} + ] + } utcnow = datetime.datetime.utcnow + def update_rrd(instance, name, data): """ Updates the specified RRD file. @@ -106,6 +107,7 @@ def update_rrd(instance, name, data): '%d:%s' % (timestamp, data) ) + def init_rrd(instance, name): """ Initializes the specified RRD file. 
@@ -124,6 +126,7 @@ def init_rrd(instance, name): '--start', '0', *RRD_VALUES[name] ) + def graph_cpu(instance, duration): """ @@ -148,6 +151,7 @@ def graph_cpu(instance, duration): store_graph(instance.instance_id, filename) + def graph_net(instance, duration): """ Creates a graph of network usage for the specified instance and duration. @@ -174,6 +178,7 @@ def graph_net(instance, duration): ) store_graph(instance.instance_id, filename) + def graph_disk(instance, duration): """ @@ -202,6 +207,7 @@ def graph_disk(instance, duration): store_graph(instance.instance_id, filename) + def store_graph(instance_id, filename): """ Transmits the specified graph file to internal object store on cloud @@ -387,6 +393,7 @@ class InstanceMonitor(object, service.Service): """ Monitors the running instances of the current machine. """ + def __init__(self): """ Initialize the monitoring loop. diff --git a/nova/compute/service.py b/nova/compute/service.py index 820116453..e59f3fb34 100644 --- a/nova/compute/service.py +++ b/nova/compute/service.py @@ -29,6 +29,7 @@ import json import logging import os import sys + from twisted.internet import defer from twisted.internet import task diff --git a/nova/crypto.py b/nova/crypto.py index cc84f5e45..b05548ea1 100644 --- a/nova/crypto.py +++ b/nova/crypto.py @@ -24,7 +24,6 @@ SSH keypairs and x509 certificates. 
import base64 import hashlib import logging -import M2Crypto import os import shutil import struct @@ -32,6 +31,8 @@ import tempfile import time import utils +import M2Crypto + from nova import exception from nova import flags @@ -42,11 +43,13 @@ flags.DEFINE_string('keys_path', utils.abspath('../keys'), 'Where we keep our ke flags.DEFINE_string('ca_path', utils.abspath('../CA'), 'Where we keep our root CA') flags.DEFINE_boolean('use_intermediate_ca', False, 'Should we use intermediate CAs for each project?') + def ca_path(project_id): if project_id: return "%s/INTER/%s/cacert.pem" % (FLAGS.ca_path, project_id) return "%s/cacert.pem" % (FLAGS.ca_path) + def fetch_ca(project_id=None, chain=True): if not FLAGS.use_intermediate_ca: project_id = None @@ -60,6 +63,7 @@ def fetch_ca(project_id=None, chain=True): buffer += cafile.read() return buffer + def generate_key_pair(bits=1024): # what is the magic 65537? @@ -109,6 +113,7 @@ def generate_x509_cert(subject, bits=1024): shutil.rmtree(tmpdir) return (private_key, csr) + def sign_csr(csr_text, intermediate=None): if not FLAGS.use_intermediate_ca: intermediate = None @@ -122,6 +127,7 @@ def sign_csr(csr_text, intermediate=None): os.chdir(start) return _sign_csr(csr_text, user_ca) + def _sign_csr(csr_text, ca_folder): tmpfolder = tempfile.mkdtemp() csrfile = open("%s/inbound.csr" % (tmpfolder), "w") diff --git a/nova/endpoint/admin.py b/nova/endpoint/admin.py index 4f4824fca..d6f622755 100644 --- a/nova/endpoint/admin.py +++ b/nova/endpoint/admin.py @@ -37,6 +37,7 @@ def user_dict(user, base64_file=None): else: return {} + def project_dict(project): """Convert the project object to a result dict""" if project: @@ -47,6 +48,7 @@ def project_dict(project): else: return {} + def host_dict(host): """Convert a host model object to a result dict""" if host: @@ -54,6 +56,7 @@ def host_dict(host): else: return {} + def admin_only(target): """Decorator for admin-only API calls""" def wrapper(*args, **kwargs): @@ -66,6 +69,7 @@ 
def admin_only(target): return wrapper + class AdminController(object): """ API Controller for users, hosts, nodes, and workers. diff --git a/nova/endpoint/api.py b/nova/endpoint/api.py index 78a18b9ea..40be00bb7 100755 --- a/nova/endpoint/api.py +++ b/nova/endpoint/api.py @@ -25,12 +25,13 @@ import logging import multiprocessing import random import re -import tornado.web -from twisted.internet import defer import urllib # TODO(termie): replace minidom with etree from xml.dom import minidom +import tornado.web +from twisted.internet import defer + from nova import crypto from nova import exception from nova import flags @@ -43,6 +44,7 @@ from nova.endpoint import cloud FLAGS = flags.FLAGS flags.DEFINE_integer('cc_port', 8773, 'cloud controller port') + _log = logging.getLogger("api") _log.setLevel(logging.DEBUG) @@ -227,6 +229,7 @@ class MetadataRequestHandler(tornado.web.RequestHandler): self.print_data(data) self.finish() + class APIRequestHandler(tornado.web.RequestHandler): def get(self, controller_name): self.execute(controller_name) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 5366acec7..a3d6d1aab 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -26,6 +26,7 @@ import base64 import logging import os import time + from twisted.internet import defer from nova import datastore @@ -44,7 +45,6 @@ from nova.volume import service FLAGS = flags.FLAGS - flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') @@ -362,7 +362,6 @@ class CloudController(object): 'status': volume['attach_status'], 'volumeId': volume_id}) - @rbac.allow('projectmanager', 'sysadmin') def detach_volume(self, context, volume_id, **kwargs): volume = self._get_volume(context, volume_id) diff --git a/nova/endpoint/images.py b/nova/endpoint/images.py index fe7cb5d11..2a88d66af 100644 --- a/nova/endpoint/images.py +++ b/nova/endpoint/images.py @@ -21,10 +21,11 @@ Proxy AMI-related calls from the cloud controller, to the running objectstore 
daemon. """ -import boto.s3.connection import json import urllib +import boto.s3.connection + from nova import flags from nova import utils from nova.auth import manager @@ -32,6 +33,7 @@ from nova.auth import manager FLAGS = flags.FLAGS + def modify(context, image_id, operation): conn(context).make_request( method='POST', @@ -53,6 +55,7 @@ def register(context, image_location): return image_id + def list(context, filter_list=[]): """ return a list of all images that a user can see @@ -68,6 +71,7 @@ def list(context, filter_list=[]): return [i for i in result if i['imageId'] in filter_list] return result + def deregister(context, image_id): """ unregister an image """ conn(context).make_request( @@ -75,6 +79,7 @@ def deregister(context, image_id): bucket='_images', query_args=qs({'image_id': image_id})) + def conn(context): access = manager.AuthManager().get_access_key(context.user, context.project) diff --git a/nova/exception.py b/nova/exception.py index 52497a19e..29bcb17f8 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -25,31 +25,39 @@ import logging import sys import traceback + class Error(Exception): def __init__(self, message=None): super(Error, self).__init__(message) + class ApiError(Error): def __init__(self, message='Unknown', code='Unknown'): self.message = message self.code = code super(ApiError, self).__init__('%s: %s'% (code, message)) + class NotFound(Error): pass + class Duplicate(Error): pass + class NotAuthorized(Error): pass + class NotEmpty(Error): pass + class Invalid(Error): pass + def wrap_exception(f): def _wrap(*args, **kw): try: diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index 689194513..068025249 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -16,12 +16,13 @@ # License for the specific language governing permissions and limitations # under the License. -""" Based a bit on the carrot.backeds.queue backend... but a lot better """ +"""Based a bit on the carrot.backeds.queue backend... 
but a lot better.""" -from carrot.backends import base import logging import Queue as queue +from carrot.backends import base + class Message(base.BaseMessage): pass diff --git a/nova/flags.py b/nova/flags.py index b3bdd088f..e3feb252d 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -175,29 +175,25 @@ DEFINE_string('network_topic', 'network', 'the topic network nodes listen on') DEFINE_bool('verbose', False, 'show debug output') DEFINE_boolean('fake_rabbit', False, 'use a fake rabbit') -DEFINE_bool('fake_network', False, 'should we use fake network devices and addresses') +DEFINE_bool('fake_network', False, + 'should we use fake network devices and addresses') DEFINE_string('rabbit_host', 'localhost', 'rabbit host') DEFINE_integer('rabbit_port', 5672, 'rabbit port') DEFINE_string('rabbit_userid', 'guest', 'rabbit userid') DEFINE_string('rabbit_password', 'guest', 'rabbit password') DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host') DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to') -DEFINE_string('ec2_url', - 'http://127.0.0.1:8773/services/Cloud', - 'Url to ec2 api server') - -DEFINE_string('default_image', - 'ami-11111', - 'default image to use, testing only') -DEFINE_string('default_kernel', - 'aki-11111', - 'default kernel to use, testing only') -DEFINE_string('default_ramdisk', - 'ari-11111', - 'default ramdisk to use, testing only') -DEFINE_string('default_instance_type', - 'm1.small', - 'default instance type to use, testing only') +DEFINE_string('ec2_url', 'http://127.0.0.1:8773/services/Cloud', + 'Url to ec2 api server') + +DEFINE_string('default_image', 'ami-11111', + 'default image to use, testing only') +DEFINE_string('default_kernel', 'aki-11111', + 'default kernel to use, testing only') +DEFINE_string('default_ramdisk', 'ari-11111', + 'default ramdisk to use, testing only') +DEFINE_string('default_instance_type', 'm1.small', + 'default instance type to use, testing only') DEFINE_string('vpn_image_id', 
'ami-CLOUDPIPE', 'AMI for cloudpipe vpn server') DEFINE_string('vpn_key_suffix', @@ -207,10 +203,8 @@ DEFINE_string('vpn_key_suffix', DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger') # UNUSED -DEFINE_string('node_availability_zone', - 'nova', - 'availability zone of this node') -DEFINE_string('node_name', - socket.gethostname(), - 'name of this node') +DEFINE_string('node_availability_zone', 'nova', + 'availability zone of this node') +DEFINE_string('node_name', socket.gethostname(), + 'name of this node') diff --git a/nova/network/exception.py b/nova/network/exception.py index 8d7aa1498..2a3f5ec14 100644 --- a/nova/network/exception.py +++ b/nova/network/exception.py @@ -20,29 +20,29 @@ Exceptions for network errors. """ -from nova.exception import Error +from nova import exception -class NoMoreAddresses(Error): +class NoMoreAddresses(exception.Error): """No More Addresses are available in the network""" pass -class AddressNotAllocated(Error): +class AddressNotAllocated(exception.Error): """The specified address has not been allocated""" pass -class AddressAlreadyAssociated(Error): +class AddressAlreadyAssociated(exception.Error): """The specified address has already been associated""" pass -class AddressNotAssociated(Error): +class AddressNotAssociated(exception.Error): """The specified address is not associated""" pass -class NotValidNetworkSize(Error): +class NotValidNetworkSize(exception.Error): """The network size is not valid""" pass diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 4ebc2097b..b5385fcab 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -18,16 +18,16 @@ Implements vlans, bridges, and iptables rules using linux utilities. """ import logging -import signal import os +import signal -# todo(ja): does the definition of network_path belong here? +# TODO(ja): does the definition of network_path belong here? 
from nova import flags from nova import utils -FLAGS = flags.FLAGS +FLAGS = flags.FLAGS flags.DEFINE_string('dhcpbridge_flagfile', '/etc/nova/nova-dhcpbridge.conf', 'location of flagfile for dhcpbridge') diff --git a/nova/network/model.py b/nova/network/model.py index ce9345067..0900e1513 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -20,11 +20,11 @@ Model Classes for network control, including VLANs, DHCP, and IP allocation. """ -import IPy import logging import os import time +import IPy from nova import datastore from nova import exception as nova_exception from nova import flags @@ -53,6 +53,7 @@ flags.DEFINE_integer('cnt_vpn_clients', 5, flags.DEFINE_integer('cloudpipe_start_port', 12000, 'Starting port for mapped CloudPipe external ports') + logging.getLogger().setLevel(logging.DEBUG) diff --git a/nova/network/service.py b/nova/network/service.py index 9c0f5520b..22e84477f 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -21,17 +21,17 @@ Network Hosts are responsible for allocating ips and setting up network """ from nova import datastore +from nova import exception from nova import flags from nova import service from nova import utils from nova.auth import manager -from nova.exception import NotFound from nova.network import exception from nova.network import model from nova.network import vpn -FLAGS = flags.FLAGS +FLAGS = flags.FLAGS flags.DEFINE_string('network_type', 'flat', 'Service Class for Networking') @@ -41,15 +41,15 @@ flags.DEFINE_list('flat_network_ips', ['192.168.0.2', '192.168.0.3', '192.168.0.4'], 'Available ips for simple network') flags.DEFINE_string('flat_network_network', '192.168.0.0', - 'Network for simple network') + 'Network for simple network') flags.DEFINE_string('flat_network_netmask', '255.255.255.0', - 'Netmask for simple network') + 'Netmask for simple network') flags.DEFINE_string('flat_network_gateway', '192.168.0.1', - 'Broadcast for simple network') + 'Broadcast for simple network') 
flags.DEFINE_string('flat_network_broadcast', '192.168.0.255', - 'Broadcast for simple network') + 'Broadcast for simple network') flags.DEFINE_string('flat_network_dns', '8.8.4.4', - 'Dns for simple network') + 'Dns for simple network') def type_to_class(network_type): @@ -58,7 +58,7 @@ def type_to_class(network_type): return FlatNetworkService elif network_type == 'vlan': return VlanNetworkService - raise NotFound("Couldn't find %s network type" % network_type) + raise exception.NotFound("Couldn't find %s network type" % network_type) def setup_compute_network(network_type, user_id, project_id, security_group): diff --git a/nova/network/vpn.py b/nova/network/vpn.py index a0e2a7fa1..cf2579e61 100644 --- a/nova/network/vpn.py +++ b/nova/network/vpn.py @@ -23,9 +23,8 @@ from nova import exception from nova import flags from nova import utils -FLAGS = flags.FLAGS - +FLAGS = flags.FLAGS flags.DEFINE_string('vpn_ip', utils.get_my_ip(), 'Public IP for the cloudpipe VPN servers') flags.DEFINE_integer('vpn_start_port', 1000, diff --git a/nova/objectstore/bucket.py b/nova/objectstore/bucket.py index b42a96233..c2b412dd7 100644 --- a/nova/objectstore/bucket.py +++ b/nova/objectstore/bucket.py @@ -36,6 +36,7 @@ FLAGS = flags.FLAGS flags.DEFINE_string('buckets_path', utils.abspath('../buckets'), 'path to s3 buckets') + class Bucket(object): def __init__(self, name): self.name = name diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py index dfe1918e3..035e342ca 100644 --- a/nova/objectstore/handler.py +++ b/nova/objectstore/handler.py @@ -38,17 +38,19 @@ S3 client with this module:: """ import datetime -import logging import json +import logging import multiprocessing import os -from tornado import escape import urllib -from twisted.application import internet, service -from twisted.web.resource import Resource -from twisted.web import server, static, error - +from tornado import escape +from twisted.application import internet +from twisted.application 
import service +from twisted.web import error +from twisted.web import resource +from twisted.web import server +from twisted.web import static from nova import exception from nova import flags @@ -60,6 +62,7 @@ from nova.objectstore import image FLAGS = flags.FLAGS + def render_xml(request, value): assert isinstance(value, dict) and len(value) == 1 request.setHeader("Content-Type", "application/xml; charset=UTF-8") @@ -72,11 +75,13 @@ def render_xml(request, value): request.write('') request.finish() + def finish(request, content=None): if content: request.write(content) request.finish() + def _render_parts(value, write_cb): if isinstance(value, basestring): write_cb(escape.xhtml_escape(value)) @@ -95,11 +100,13 @@ def _render_parts(value, write_cb): else: raise Exception("Unknown S3 value type %r", value) + def get_argument(request, key, default_value): if key in request.args: return request.args[key][0] return default_value + def get_context(request): try: # Authorization Header format: 'AWS :' @@ -120,13 +127,14 @@ def get_context(request): logging.debug("Authentication Failure: %s" % ex) raise exception.NotAuthorized -class ErrorHandlingResource(Resource): + +class ErrorHandlingResource(resource.Resource): """Maps exceptions to 404 / 401 codes. Won't work for exceptions thrown after NOT_DONE_YET is returned.""" # TODO(unassigned) (calling-all-twisted-experts): This needs to be plugged in to the right place in twisted... 
# This doesn't look like it's the right place (consider exceptions in getChild; or after NOT_DONE_YET is returned def render(self, request): try: - return Resource.render(self, request) + return resource.Resource.render(self, request) except exception.NotFound: request.setResponseCode(404) return '' @@ -134,6 +142,7 @@ class ErrorHandlingResource(Resource): request.setResponseCode(403) return '' + class S3(ErrorHandlingResource): """Implementation of an S3-like storage server based on local files.""" def getChild(self, name, request): @@ -154,9 +163,10 @@ class S3(ErrorHandlingResource): }}) return server.NOT_DONE_YET + class BucketResource(ErrorHandlingResource): def __init__(self, name): - Resource.__init__(self) + resource.Resource.__init__(self) self.name = name def getChild(self, name, request): @@ -206,7 +216,7 @@ class BucketResource(ErrorHandlingResource): class ObjectResource(ErrorHandlingResource): def __init__(self, bucket, name): - Resource.__init__(self) + resource.Resource.__init__(self) self.bucket = bucket self.name = name @@ -245,17 +255,19 @@ class ObjectResource(ErrorHandlingResource): request.setResponseCode(204) return '' + class ImageResource(ErrorHandlingResource): isLeaf = True def __init__(self, name): - Resource.__init__(self) + resource.Resource.__init__(self) self.img = image.Image(name) def render_GET(self, request): return static.File(self.img.image_path, defaultType='application/octet-stream').render_GET(request) -class ImagesResource(Resource): + +class ImagesResource(resource.Resource): def getChild(self, name, request): if name == '': return self @@ -339,11 +351,13 @@ class ImagesResource(Resource): request.setResponseCode(204) return '' + def get_site(): root = S3() site = server.Site(root) return site + def get_application(): factory = get_site() application = service.Application("objectstore") diff --git a/nova/objectstore/image.py b/nova/objectstore/image.py index 861eb364f..fb780a0ec 100644 --- a/nova/objectstore/image.py +++ 
b/nova/objectstore/image.py @@ -42,6 +42,7 @@ FLAGS = flags.FLAGS flags.DEFINE_string('images_path', utils.abspath('../images'), 'path to decrypted images') + class Image(object): def __init__(self, image_id): self.image_id = image_id diff --git a/nova/objectstore/stored.py b/nova/objectstore/stored.py index 81c047b22..9829194cb 100644 --- a/nova/objectstore/stored.py +++ b/nova/objectstore/stored.py @@ -23,7 +23,7 @@ Properties of an object stored within a bucket. import os import nova.crypto -from nova.exception import NotFound, NotAuthorized +from nova import exception class Object(object): @@ -33,7 +33,7 @@ class Object(object): self.key = key self.path = bucket._object_path(key) if not os.path.isfile(self.path): - raise NotFound + raise exception.NotFound def __repr__(self): return "" % (self.bucket, self.key) diff --git a/nova/process.py b/nova/process.py index 2dc56372f..86f29e2c4 100644 --- a/nova/process.py +++ b/nova/process.py @@ -23,6 +23,7 @@ Process pool, still buggy right now. import logging import multiprocessing import StringIO + from twisted.internet import defer from twisted.internet import error from twisted.internet import process @@ -205,6 +206,7 @@ class ProcessPool(object): self._pool.release() return rv + class SharedPool(object): _instance = None def __init__(self): @@ -213,5 +215,6 @@ class SharedPool(object): def __getattr__(self, key): return getattr(self._instance, key) + def simple_execute(cmd, **kwargs): return SharedPool().simple_execute(cmd, **kwargs) diff --git a/nova/rpc.py b/nova/rpc.py index 4ac546c2a..824a66b5b 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -21,12 +21,13 @@ AMQP-based RPC. Queues have consumers and publishers. No fan-out support yet. 
""" -from carrot import connection as carrot_connection -from carrot import messaging import json import logging import sys import uuid + +from carrot import connection as carrot_connection +from carrot import messaging from twisted.internet import defer from twisted.internet import task diff --git a/nova/test.py b/nova/test.py index c7e08734f..c392c8a84 100644 --- a/nova/test.py +++ b/nova/test.py @@ -22,11 +22,11 @@ Allows overriding of flags for use of fakes, and some black magic for inline callbacks. """ -import mox -import stubout import sys import time +import mox +import stubout from tornado import ioloop from twisted.internet import defer from twisted.trial import unittest @@ -91,7 +91,6 @@ class TrialTestCase(unittest.TestCase): setattr(FLAGS, k, v) - class BaseTestCase(TrialTestCase): # TODO(jaypipes): Can this be moved into the TrialTestCase class? """Base test case class for all unit tests.""" diff --git a/nova/utils.py b/nova/utils.py index 63db080f1..e826f9b71 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -20,7 +20,7 @@ System-level utilities and helper functions. 
""" -from datetime import datetime, timedelta +import datetime import inspect import logging import os @@ -32,9 +32,11 @@ import sys from nova import exception from nova import flags + FLAGS = flags.FLAGS TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ" + def import_class(import_str): """Returns a class from a string including module and class""" mod_str, _sep, class_str = import_str.rpartition('.') @@ -44,6 +46,7 @@ def import_class(import_str): except (ImportError, ValueError, AttributeError): raise exception.NotFound('Class %s cannot be found' % class_str) + def fetchfile(url, target): logging.debug("Fetching %s" % url) # c = pycurl.Curl() @@ -55,6 +58,7 @@ def fetchfile(url, target): # fp.close() execute("curl %s -o %s" % (url, target)) + def execute(cmd, input=None, addl_env=None): env = os.environ.copy() if addl_env: @@ -129,10 +133,12 @@ def get_my_ip(): logging.warn("Couldn't get IP, using 127.0.0.1 %s", ex) return "127.0.0.1" + def isotime(at=None): if not at: - at = datetime.utcnow() + at = datetime.datetime.utcnow() return at.strftime(TIME_FORMAT) + def parse_isotime(timestr): - return datetime.strptime(timestr, TIME_FORMAT) + return datetime.datetime.strptime(timestr, TIME_FORMAT) diff --git a/nova/validate.py b/nova/validate.py index a69306fad..21f4ed286 100644 --- a/nova/validate.py +++ b/nova/validate.py @@ -57,6 +57,7 @@ def rangetest(**argchecks): # validate ranges for both+defaults return onCall return onDecorator + def typetest(**argchecks): def onDecorator(func): import sys diff --git a/nova/virt/images.py b/nova/virt/images.py index 1e23c48b9..a3ca72bdd 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -27,11 +27,11 @@ import urlparse from nova import flags from nova import process -from nova.auth import signer from nova.auth import manager +from nova.auth import signer -FLAGS = flags.FLAGS +FLAGS = flags.FLAGS flags.DEFINE_bool('use_s3', True, 'whether to get images from s3 or use local copy') @@ -43,6 +43,7 @@ def fetch(image, path, user, 
project): f = _fetch_local_image return f(image, path, user, project) + def _fetch_s3_image(image, path, user, project): url = image_url(image) @@ -66,13 +67,16 @@ def _fetch_s3_image(image, path, user, project): cmd += ['-o', path] return process.SharedPool().execute(executable=cmd[0], args=cmd[1:]) + def _fetch_local_image(image, path, user, project): source = _image_path('%s/image' % image) return process.simple_execute('cp %s %s' % (source, path)) + def _image_path(path): return os.path.join(FLAGS.images_path, path) + def image_url(image): return "http://%s:%s/_images/%s/image" % (FLAGS.s3_host, FLAGS.s3_port, image) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 97e1b0ab2..d1a4a6b67 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -42,6 +42,7 @@ from nova.virt import images libvirt = None libxml2 = None + FLAGS = flags.FLAGS flags.DEFINE_string('libvirt_xml_template', utils.abspath('virt/libvirt.qemu.xml.template'), @@ -57,7 +58,9 @@ flags.DEFINE_string('libvirt_type', 'Libvirt domain type (valid options are: kvm, qemu, uml)') flags.DEFINE_string('libvirt_uri', '', - 'Override the default libvirt URI (which is dependent on libvirt_type)') + 'Override the default libvirt URI (which is dependent' + ' on libvirt_type)') + def get_connection(read_only): # These are loaded late so that there's no need to install these @@ -70,6 +73,7 @@ def get_connection(read_only): libxml2 = __import__('libxml2') return LibvirtConnection(read_only) + class LibvirtConnection(object): def __init__(self, read_only): self.libvirt_uri, template_file = self.get_uri_and_template() @@ -78,14 +82,12 @@ class LibvirtConnection(object): self._wrapped_conn = None self.read_only = read_only - @property def _conn(self): if not self._wrapped_conn: self._wrapped_conn = self._connect(self.libvirt_uri, self.read_only) return self._wrapped_conn - def get_uri_and_template(self): if FLAGS.libvirt_type == 'uml': uri = FLAGS.libvirt_uri or 
'uml:///system' @@ -95,7 +97,6 @@ class LibvirtConnection(object): template_file = FLAGS.libvirt_xml_template return uri, template_file - def _connect(self, uri, read_only): auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], 'root', @@ -106,13 +107,10 @@ class LibvirtConnection(object): else: return libvirt.openAuth(uri, auth, 0) - - def list_instances(self): return [self._conn.lookupByID(x).name() for x in self._conn.listDomainsID()] - def destroy(self, instance): try: virt_dom = self._conn.lookupByName(instance.name) @@ -141,14 +139,12 @@ class LibvirtConnection(object): timer.start(interval=0.5, now=True) return d - def _cleanup(self, instance): target = os.path.abspath(instance.datamodel['basepath']) logging.info("Deleting instance files at %s", target) if os.path.exists(target): shutil.rmtree(target) - @defer.inlineCallbacks @exception.wrap_exception def reboot(self, instance): @@ -174,7 +170,6 @@ class LibvirtConnection(object): timer.start(interval=0.5, now=True) yield d - @defer.inlineCallbacks @exception.wrap_exception def spawn(self, instance): @@ -205,7 +200,6 @@ class LibvirtConnection(object): timer.start(interval=0.5, now=True) yield local_d - @defer.inlineCallbacks def _create_image(self, instance, libvirt_xml): # syntactic nicety @@ -260,11 +254,9 @@ class LibvirtConnection(object): yield disk.partition( basepath('disk-raw'), basepath('disk'), bytes, execute=execute) - def basepath(self, instance, path=''): return os.path.abspath(os.path.join(instance.datamodel['basepath'], path)) - def toXml(self, instance): # TODO(termie): cache? 
logging.debug("Starting the toXML method") @@ -279,7 +271,6 @@ class LibvirtConnection(object): return libvirt_xml - def get_info(self, instance_id): virt_dom = self._conn.lookupByName(instance_id) (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info() @@ -289,7 +280,6 @@ class LibvirtConnection(object): 'num_cpu': num_cpu, 'cpu_time': cpu_time} - def get_disks(self, instance_id): """ Note that this function takes an instance ID, not an Instance, so @@ -332,7 +322,6 @@ class LibvirtConnection(object): return disks - def get_interfaces(self, instance_id): """ Note that this function takes an instance ID, not an Instance, so @@ -375,7 +364,6 @@ class LibvirtConnection(object): return interfaces - def block_stats(self, instance_id, disk): """ Note that this function takes an instance ID, not an Instance, so @@ -384,7 +372,6 @@ class LibvirtConnection(object): domain = self._conn.lookupByName(instance_id) return domain.blockStats(disk) - def interface_stats(self, instance_id, interface): """ Note that this function takes an instance ID, not an Instance, so diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index 9fe15644f..2f5994983 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -33,16 +33,29 @@ from nova.virt import images XenAPI = None + FLAGS = flags.FLAGS flags.DEFINE_string('xenapi_connection_url', None, - 'URL for connection to XenServer/Xen Cloud Platform. Required if connection_type=xenapi.') + 'URL for connection to XenServer/Xen Cloud Platform.' + ' Required if connection_type=xenapi.') flags.DEFINE_string('xenapi_connection_username', 'root', - 'Username for connection to XenServer/Xen Cloud Platform. Used only if connection_type=xenapi.') + 'Username for connection to XenServer/Xen Cloud Platform.' + ' Used only if connection_type=xenapi.') flags.DEFINE_string('xenapi_connection_password', None, - 'Password for connection to XenServer/Xen Cloud Platform. 
Used only if connection_type=xenapi.') + 'Password for connection to XenServer/Xen Cloud Platform.' + ' Used only if connection_type=xenapi.') + + +XENAPI_POWER_STATE = { + 'Halted' : power_state.SHUTDOWN, + 'Running' : power_state.RUNNING, + 'Paused' : power_state.PAUSED, + 'Suspended': power_state.SHUTDOWN, # FIXME + 'Crashed' : power_state.CRASHED +} def get_connection(_): @@ -62,7 +75,6 @@ def get_connection(_): class XenAPIConnection(object): - def __init__(self, url, user, pw): self._conn = XenAPI.Session(url) self._conn.login_with_password(user, pw) @@ -107,7 +119,6 @@ class XenAPIConnection(object): yield self._create_vif(vm_ref, network_ref, mac_address) yield self._conn.xenapi.VM.start(vm_ref, False, False) - def create_vm(self, instance, kernel, ramdisk): mem = str(long(instance.datamodel['memory_kb']) * 1024) vcpus = str(instance.datamodel['vcpus']) @@ -145,7 +156,6 @@ class XenAPIConnection(object): logging.debug('Created VM %s as %s.', instance.name, vm_ref) return vm_ref - def create_vbd(self, vm_ref, vdi_ref, userdevice, bootable): vbd_rec = {} vbd_rec['VM'] = vm_ref @@ -166,7 +176,6 @@ class XenAPIConnection(object): vdi_ref) return vbd_ref - def _create_vif(self, vm_ref, network_ref, mac_address): vif_rec = {} vif_rec['device'] = '0' @@ -184,7 +193,6 @@ class XenAPIConnection(object): vm_ref, network_ref) return vif_ref - def _find_network_with_bridge(self, bridge): expr = 'field "bridge" = "%s"' % bridge networks = self._conn.xenapi.network.get_all_records_where(expr) @@ -195,7 +203,6 @@ class XenAPIConnection(object): else: raise Exception('Found no network for bridge %s' % bridge) - def fetch_image(self, image, user, project, use_sr): """use_sr: True to put the image as a VDI in an SR, False to place it on dom0's filesystem. 
The former is for VM disks, the latter for @@ -213,7 +220,6 @@ class XenAPIConnection(object): args['add_partition'] = 'true' return self._call_plugin('objectstore', fn, args) - def reboot(self, instance): vm = self.lookup(instance.name) if vm is None: @@ -231,7 +237,7 @@ class XenAPIConnection(object): if vm is None: raise Exception('instance not present %s' % instance_id) rec = self._conn.xenapi.VM.get_record(vm) - return {'state': power_state_from_xenapi[rec['power_state']], + return {'state': XENAPI_POWER_STATE[rec['power_state']], 'max_mem': long(rec['memory_static_max']) >> 10, 'mem': long(rec['memory_dynamic_max']) >> 10, 'num_cpu': rec['VCPUs_max'], @@ -247,26 +253,15 @@ class XenAPIConnection(object): else: return vms[0] - def _call_plugin(self, plugin, fn, args): return _unwrap_plugin_exceptions( self._conn.xenapi.host.call_plugin, self._get_xenapi_host(), plugin, fn, args) - def _get_xenapi_host(self): return self._conn.xenapi.session.get_this_host(self._conn.handle) -power_state_from_xenapi = { - 'Halted' : power_state.SHUTDOWN, - 'Running' : power_state.RUNNING, - 'Paused' : power_state.PAUSED, - 'Suspended': power_state.SHUTDOWN, # FIXME - 'Crashed' : power_state.CRASHED -} - - def _unwrap_plugin_exceptions(func, *args, **kwargs): try: return func(*args, **kwargs) diff --git a/nova/volume/service.py b/nova/volume/service.py index 66163a812..104bafe90 100644 --- a/nova/volume/service.py +++ b/nova/volume/service.py @@ -65,6 +65,7 @@ flags.DEFINE_boolean('fake_storage', False, class NoMoreBlades(exception.Error): pass + def get_volume(volume_id): """ Returns a redis-backed volume object """ volume_class = Volume @@ -75,6 +76,7 @@ def get_volume(volume_id): return vol raise exception.Error("Volume does not exist") + class VolumeService(service.Service): """ There is one VolumeNode running on each host. 
@@ -142,6 +144,7 @@ class VolumeService(service.Service): "sudo vgcreate %s %s" % (FLAGS.volume_group, FLAGS.storage_dev)) + class Volume(datastore.BasicModel): def __init__(self, volume_id=None): @@ -297,7 +300,6 @@ class Volume(datastore.BasicModel): self['blade_id']), error_ok=1) - class FakeVolume(Volume): def _create_lv(self): pass diff --git a/run_tests.py b/run_tests.py index d90ac8175..77aa9088a 100644 --- a/run_tests.py +++ b/run_tests.py @@ -38,11 +38,11 @@ Due to our use of multiprocessing it we frequently get some ignorable 'Interrupted system call' exceptions after test completion. """ + import __main__ import os import sys - from twisted.scripts import trial as trial_script from nova import datastore @@ -65,13 +65,12 @@ from nova.tests.volume_unittest import * FLAGS = flags.FLAGS - flags.DEFINE_bool('flush_db', True, 'Flush the database before running fake tests') - flags.DEFINE_string('tests_stderr', 'run_tests.err.log', - 'Path to where to pipe STDERR during test runs. ' - 'Default = "run_tests.err.log"') + 'Path to where to pipe STDERR during test runs.' 
+ ' Default = "run_tests.err.log"') + if __name__ == '__main__': OptionsClass = twistd.WrapTwistedOptions(trial_script.Options) -- cgit From 5c4a806c852a1c7180bc1c7e2ea8f065198e36d2 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Mon, 16 Aug 2010 10:57:42 -0400 Subject: PEP8 and name corrections --- bin/nova-rsapi | 2 +- nova/endpoint/aws/__init__.py | 4 ++-- nova/endpoint/rackspace/__init__.py | 10 +++++----- nova/endpoint/rackspace/controllers/base.py | 4 ++-- nova/wsgi.py | 4 ++-- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/bin/nova-rsapi b/bin/nova-rsapi index 3fc61860e..a35936eff 100755 --- a/bin/nova-rsapi +++ b/bin/nova-rsapi @@ -32,4 +32,4 @@ flags.DEFINE_integer('cc_port', 8773, 'cloud controller port') if __name__ == '__main__': utils.default_flagfile() - wsgi.run_server(nova.endpoint.ApiVersionRouter(), FLAGS.cc_port) + wsgi.run_server(nova.endpoint.APIVersionRouter(), FLAGS.cc_port) diff --git a/nova/endpoint/aws/__init__.py b/nova/endpoint/aws/__init__.py index f49270a30..4507cae62 100644 --- a/nova/endpoint/aws/__init__.py +++ b/nova/endpoint/aws/__init__.py @@ -4,7 +4,7 @@ import webob.dec from nova import wsgi # TODO(gundlach): temp -class Api(wsgi.Router): +class API(wsgi.Router): """WSGI entry point for all AWS API requests.""" def __init__(self): @@ -14,7 +14,7 @@ class Api(wsgi.Router): targets = {"dummy": self.dummy } - super(Api, self).__init__(mapper, targets) + super(API, self).__init__(mapper, targets) @webob.dec.wsgify def dummy(self, req): diff --git a/nova/endpoint/rackspace/__init__.py b/nova/endpoint/rackspace/__init__.py index f14f6218c..162b35caa 100644 --- a/nova/endpoint/rackspace/__init__.py +++ b/nova/endpoint/rackspace/__init__.py @@ -37,12 +37,12 @@ FLAGS = flags.FLAGS flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') -class Api(wsgi.Middleware): +class API(wsgi.Middleware): """WSGI entry point for all Rackspace API requests.""" def __init__(self): - app = 
AuthMiddleware(ApiRouter()) - super(Api, self).__init__(app) + app = AuthMiddleware(APIRouter()) + super(API, self).__init__(app) class AuthMiddleware(wsgi.Middleware): @@ -66,7 +66,7 @@ class AuthMiddleware(wsgi.Middleware): return self.application -class ApiRouter(wsgi.Router): +class APIRouter(wsgi.Router): """ Routes requests on the Rackspace API to the appropriate controller and method. @@ -87,4 +87,4 @@ class ApiRouter(wsgi.Router): 'sharedipgroups': controllers.SharedIpGroupsController() } - super(ApiRouter, self).__init__(mapper, targets) + super(APIRouter, self).__init__(mapper, targets) diff --git a/nova/endpoint/rackspace/controllers/base.py b/nova/endpoint/rackspace/controllers/base.py index 3ada53fd4..8cd44f62e 100644 --- a/nova/endpoint/rackspace/controllers/base.py +++ b/nova/endpoint/rackspace/controllers/base.py @@ -1,6 +1,6 @@ -from nova.wsgi import WSGIController +from nova import wsgi -class BaseController(WSGIController): +class BaseController(wsgi.Controller): @classmethod def render(cls, instance): if isinstance(instance, list): diff --git a/nova/wsgi.py b/nova/wsgi.py index 0570e1829..52e155101 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -146,7 +146,7 @@ class Router(object): Each route in `mapper` must specify a 'controller' string, which is a key into the 'targets' dictionary whose value is a WSGI app to - run. If routing to a WSGIController, you'll want to specify + run. If routing to a wsgi.Controller, you'll want to specify 'action' as well so the controller knows what method to call on itself. @@ -195,7 +195,7 @@ class Router(object): return app -class WSGIController(object): +class Controller(object): """ WSGI app that reads routing information supplied by RoutesMiddleware and calls the requested action method on itself. 
-- cgit From f78a8936b1a401f07fc0a09d4bd150d2793e436e Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Mon, 16 Aug 2010 13:22:41 -0400 Subject: All controller actions receive a 'req' parameter containing the webob Request. --- nova/endpoint/__init__.py | 10 +++--- nova/endpoint/aws/__init__.py | 6 ++-- nova/endpoint/rackspace/__init__.py | 23 ++++++------ nova/endpoint/rackspace/controllers/servers.py | 2 +- nova/wsgi.py | 48 ++++++++++++-------------- 5 files changed, 41 insertions(+), 48 deletions(-) diff --git a/nova/endpoint/__init__.py b/nova/endpoint/__init__.py index 065f45848..9aae933af 100644 --- a/nova/endpoint/__init__.py +++ b/nova/endpoint/__init__.py @@ -36,16 +36,16 @@ import routes from nova.endpoint import rackspace from nova.endpoint import aws -class ApiVersionRouter(wsgi.Router): +class APIVersionRouter(wsgi.Router): """Routes top-level requests to the appropriate API.""" def __init__(self): mapper = routes.Mapper() - mapper.connect(None, "/v1.0/{path_info:.*}", controller="rs") - mapper.connect(None, "/ec2/{path_info:.*}", controller="ec2") + rsapi = rackspace.API() + mapper.connect(None, "/v1.0/{path_info:.*}", controller=rsapi) - targets = {"rs": rackspace.Api(), "ec2": aws.Api()} + mapper.connect(None, "/ec2/{path_info:.*}", controller=aws.API()) - super(ApiVersionRouter, self).__init__(mapper, targets) + super(APIVersionRouter, self).__init__(mapper) diff --git a/nova/endpoint/aws/__init__.py b/nova/endpoint/aws/__init__.py index 4507cae62..55cbb8fd3 100644 --- a/nova/endpoint/aws/__init__.py +++ b/nova/endpoint/aws/__init__.py @@ -10,11 +10,9 @@ class API(wsgi.Router): def __init__(self): mapper = routes.Mapper() - mapper.connect(None, "{all:.*}", controller="dummy") + mapper.connect(None, "{all:.*}", controller=self.dummy) - targets = {"dummy": self.dummy } - - super(API, self).__init__(mapper, targets) + super(API, self).__init__(mapper) @webob.dec.wsgify def dummy(self, req): diff --git a/nova/endpoint/rackspace/__init__.py 
b/nova/endpoint/rackspace/__init__.py index 162b35caa..78b9c9429 100644 --- a/nova/endpoint/rackspace/__init__.py +++ b/nova/endpoint/rackspace/__init__.py @@ -75,16 +75,13 @@ class APIRouter(wsgi.Router): def __init__(self): mapper = routes.Mapper() - mapper.resource("server", "servers") - mapper.resource("image", "images") - mapper.resource("flavor", "flavors") - mapper.resource("sharedipgroup", "sharedipgroups") - - targets = { - 'servers': controllers.ServersController(), - 'images': controllers.ImagesController(), - 'flavors': controllers.FlavorsController(), - 'sharedipgroups': controllers.SharedIpGroupsController() - } - - super(APIRouter, self).__init__(mapper, targets) + mapper.resource("server", "servers", + controller=controllers.ServersController()) + mapper.resource("image", "images", + controller=controllers.ImagesController()) + mapper.resource("flavor", "flavors", + controller=controllers.FlavorsController()) + mapper.resource("sharedipgroup", "sharedipgroups", + controller=controllers.SharedIpGroupsController()) + + super(APIRouter, self).__init__(mapper) diff --git a/nova/endpoint/rackspace/controllers/servers.py b/nova/endpoint/rackspace/controllers/servers.py index db02e058d..2f8e662d6 100644 --- a/nova/endpoint/rackspace/controllers/servers.py +++ b/nova/endpoint/rackspace/controllers/servers.py @@ -5,7 +5,7 @@ from nova.endpoint.rackspace.controllers.base import BaseController class ServersController(BaseController): entity_name = 'servers' - def index(cls): + def index(self, **kwargs): return [instance_details(inst) for inst in compute.InstanceDirectory().all] def show(self, **kwargs): diff --git a/nova/wsgi.py b/nova/wsgi.py index 52e155101..a0a175dc7 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -140,34 +140,31 @@ class Router(object): WSGI middleware that maps incoming requests to WSGI apps. """ - def __init__(self, mapper, targets): + def __init__(self, mapper): """ Create a router for the given routes.Mapper. 
- Each route in `mapper` must specify a 'controller' string, which is - a key into the 'targets' dictionary whose value is a WSGI app to - run. If routing to a wsgi.Controller, you'll want to specify - 'action' as well so the controller knows what method to call on - itself. + Each route in `mapper` must specify a 'controller', which is a + WSGI app to call. You'll probably want to specify an 'action' as + well and have your controller be a wsgi.Controller, who will route + the request to the action method. Examples: mapper = routes.Mapper() - targets = { "servers": ServerController(), "blog": BlogWsgiApp() } + sc = ServerController() # Explicit mapping of one route to a controller+action - mapper.connect(None, "/svrlist", controller="servers", action="list") + mapper.connect(None, "/svrlist", controller=sc, action="list") - # Controller string is implicitly equal to 2nd param here, and - # actions are all implicitly defined - mapper.resource("server", "servers") + # Actions are all implicitly defined + mapper.resource("server", "servers", controller=sc) # Pointing to an arbitrary WSGI app. You can specify the # {path_info:.*} parameter so the target app can be handed just that # section of the URL. - mapper.connect(None, "/v1.0/{path_info:.*}", controller="blog") + mapper.connect(None, "/v1.0/{path_info:.*}", controller=BlogApp()) """ self.map = mapper - self.targets = targets self._router = routes.middleware.RoutesMiddleware(self._dispatch, self.map) @@ -186,31 +183,32 @@ class Router(object): and putting the information into req.environ. Either returns 404 or the routed WSGI app's response. 
""" - if req.environ['routes.route'] is None: - return webob.exc.HTTPNotFound() match = req.environ['wsgiorg.routing_args'][1] - app_name = match['controller'] - - app = self.targets[app_name] + if not match: + return webob.exc.HTTPNotFound() + app = match['controller'] return app class Controller(object): """ WSGI app that reads routing information supplied by RoutesMiddleware - and calls the requested action method on itself. + and calls the requested action method upon itself. All action methods + must, in addition to their normal parameters, accept a 'req' argument + which is the incoming webob.Request. """ @webob.dec.wsgify def __call__(self, req): """ - Call the method on self specified in req.environ by RoutesMiddleware. + Call the method specified in req.environ by RoutesMiddleware. """ - routes_dict = req.environ['wsgiorg.routing_args'][1] - action = routes_dict['action'] + arg_dict = req.environ['wsgiorg.routing_args'][1] + action = arg_dict['action'] method = getattr(self, action) - del routes_dict['controller'] - del routes_dict['action'] - return method(**routes_dict) + del arg_dict['controller'] + del arg_dict['action'] + arg_dict['req'] = req + return method(**arg_dict) class Serializer(object): -- cgit From 31c08591793311606551bf0e6bfc14b155b491a6 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Tue, 17 Aug 2010 16:46:19 +0200 Subject: Use the argument handler specified by twistd, if any. 
--- nova/flags.py | 3 +++ nova/server.py | 6 +++++- nova/twistd.py | 12 +++++++++++- 3 files changed, 19 insertions(+), 2 deletions(-) diff --git a/nova/flags.py b/nova/flags.py index e3feb252d..e0181102e 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -141,6 +141,7 @@ def _wrapper(func): return _wrapped +DEFINE = _wrapper(gflags.DEFINE) DEFINE_string = _wrapper(gflags.DEFINE_string) DEFINE_integer = _wrapper(gflags.DEFINE_integer) DEFINE_bool = _wrapper(gflags.DEFINE_bool) @@ -152,6 +153,8 @@ DEFINE_spaceseplist = _wrapper(gflags.DEFINE_spaceseplist) DEFINE_multistring = _wrapper(gflags.DEFINE_multistring) DEFINE_multi_int = _wrapper(gflags.DEFINE_multi_int) +ArgumentSerializer = gflags.ArgumentSerializer + def DECLARE(name, module_string, flag_values=FLAGS): if module_string not in sys.modules: diff --git a/nova/server.py b/nova/server.py index 96550f078..c6b60e090 100644 --- a/nova/server.py +++ b/nova/server.py @@ -44,6 +44,8 @@ flags.DEFINE_bool('use_syslog', True, 'output to syslog when daemonizing') flags.DEFINE_string('logfile', None, 'log file to output to') flags.DEFINE_string('pidfile', None, 'pid file to output to') flags.DEFINE_string('working_directory', './', 'working directory...') +flags.DEFINE_integer('uid', os.getuid(), 'uid under which to run') +flags.DEFINE_integer('gid', os.getgid(), 'gid under which to run') def stop(pidfile): @@ -135,6 +137,8 @@ def daemonize(args, name, main): threaded=False), stdin=stdin, stdout=stdout, - stderr=stderr + stderr=stderr, + uid=FLAGS.uid, + gid=FLAGS.gid ): main(args) diff --git a/nova/twistd.py b/nova/twistd.py index 8de322aa5..a72cc85e6 100644 --- a/nova/twistd.py +++ b/nova/twistd.py @@ -48,6 +48,13 @@ class TwistdServerOptions(ServerOptions): def parseArgs(self, *args): return +class FlagParser(object): + def __init__(self, parser): + self.parser = parser + + def Parse(self, s): + return self.parser(s) + def WrapTwistedOptions(wrapped): class TwistedOptionsToFlags(wrapped): @@ -79,7 +86,10 @@ def 
WrapTwistedOptions(wrapped): reflect.accumulateClassList(self.__class__, 'optParameters', twistd_params) for param in twistd_params: key = param[0].replace('-', '_') - flags.DEFINE_string(key, param[2], str(param[-1])) + if len(param) > 4: + flags.DEFINE(FlagParser(param[4]), key, param[2], str(param[3]), serializer=flags.ArgumentSerializer()) + else: + flags.DEFINE_string(key, param[2], str(param[3])) def _absorbHandlers(self): twistd_handlers = {} -- cgit From 9878a6b8b4691e206dc5d35c39313880db34f229 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Tue, 17 Aug 2010 11:03:15 -0400 Subject: Simpler installation, and, can run install_venv from anywhere instead of just from checkout root --- tools/install_venv.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/tools/install_venv.py b/tools/install_venv.py index e1a270638..4e775eb33 100644 --- a/tools/install_venv.py +++ b/tools/install_venv.py @@ -38,15 +38,16 @@ def die(message, *args): def run_command(cmd, redirect_output=True, error_ok=False): - """Runs a command in an out-of-process shell, returning the - output of that command + """ + Runs a command in an out-of-process shell, returning the + output of that command. Working directory is ROOT. """ if redirect_output: stdout = subprocess.PIPE else: stdout = None - proc = subprocess.Popen(cmd, stdout=stdout) + proc = subprocess.Popen(cmd, cwd=ROOT, stdout=stdout) output = proc.communicate()[0] if not error_ok and proc.returncode != 0: die('Command "%s" failed.\n%s', ' '.join(cmd), output) @@ -94,6 +95,12 @@ def install_dependencies(venv=VENV): redirect_output=False) + # Tell the virtual env how to "import nova" + pathfile=os.path.join(venv, "lib", "python2.6", "site-packages", "nova.pth") + f=open(pathfile, 'w') + f.write("%s\n" % ROOT) + + def print_help(): help = """ Nova development environment setup is complete. 
-- cgit From f92851ba8ffcb530f6f3c4ea354dd89d29146f6c Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Tue, 17 Aug 2010 13:03:38 -0400 Subject: Remove duplicate definition of flag --- nova/endpoint/rackspace/__init__.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/nova/endpoint/rackspace/__init__.py b/nova/endpoint/rackspace/__init__.py index 78b9c9429..ac53ee10b 100644 --- a/nova/endpoint/rackspace/__init__.py +++ b/nova/endpoint/rackspace/__init__.py @@ -33,10 +33,6 @@ from nova.auth import manager from nova.endpoint.rackspace import controllers -FLAGS = flags.FLAGS -flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') - - class API(wsgi.Middleware): """WSGI entry point for all Rackspace API requests.""" -- cgit From e8be36d7a7be2ebbf5493766ce909d7913bf61e0 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Tue, 17 Aug 2010 13:23:20 -0400 Subject: Move eventlet-using class out of endpoint/__init__.py into its own submodule, so that twisted-related code using endpoint.[other stuff] wouldn't run eventlet and make unit tests throw crazy errors about eventlet 0.9.10 not playing nicely with twisted. --- bin/nova-rsapi | 5 ++--- nova/endpoint/__init__.py | 51 ----------------------------------------------- nova/endpoint/newapi.py | 51 +++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 53 insertions(+), 54 deletions(-) create mode 100644 nova/endpoint/newapi.py diff --git a/bin/nova-rsapi b/bin/nova-rsapi index a35936eff..e2722422e 100755 --- a/bin/nova-rsapi +++ b/bin/nova-rsapi @@ -21,15 +21,14 @@ Daemon for the Rackspace API endpoint. 
""" -import nova.endpoint - from nova import flags from nova import utils from nova import wsgi +from nova.endpoint import newapi FLAGS = flags.FLAGS flags.DEFINE_integer('cc_port', 8773, 'cloud controller port') if __name__ == '__main__': utils.default_flagfile() - wsgi.run_server(nova.endpoint.APIVersionRouter(), FLAGS.cc_port) + wsgi.run_server(newapi.APIVersionRouter(), FLAGS.cc_port) diff --git a/nova/endpoint/__init__.py b/nova/endpoint/__init__.py index 9aae933af..e69de29bb 100644 --- a/nova/endpoint/__init__.py +++ b/nova/endpoint/__init__.py @@ -1,51 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -:mod:`nova.endpoint` -- Main NOVA Api endpoints -===================================================== - -.. automodule:: nova.endpoint - :platform: Unix - :synopsis: REST APIs for all nova functions -.. moduleauthor:: Jesse Andrews -.. moduleauthor:: Devin Carlen -.. moduleauthor:: Vishvananda Ishaya -.. moduleauthor:: Joshua McKenty -.. moduleauthor:: Manish Singh -.. 
moduleauthor:: Andy Smith -""" - -from nova import wsgi -import routes -from nova.endpoint import rackspace -from nova.endpoint import aws - -class APIVersionRouter(wsgi.Router): - """Routes top-level requests to the appropriate API.""" - - def __init__(self): - mapper = routes.Mapper() - - rsapi = rackspace.API() - mapper.connect(None, "/v1.0/{path_info:.*}", controller=rsapi) - - mapper.connect(None, "/ec2/{path_info:.*}", controller=aws.API()) - - super(APIVersionRouter, self).__init__(mapper) - diff --git a/nova/endpoint/newapi.py b/nova/endpoint/newapi.py new file mode 100644 index 000000000..9aae933af --- /dev/null +++ b/nova/endpoint/newapi.py @@ -0,0 +1,51 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +:mod:`nova.endpoint` -- Main NOVA Api endpoints +===================================================== + +.. automodule:: nova.endpoint + :platform: Unix + :synopsis: REST APIs for all nova functions +.. moduleauthor:: Jesse Andrews +.. moduleauthor:: Devin Carlen +.. moduleauthor:: Vishvananda Ishaya +.. moduleauthor:: Joshua McKenty +.. moduleauthor:: Manish Singh +.. 
moduleauthor:: Andy Smith +""" + +from nova import wsgi +import routes +from nova.endpoint import rackspace +from nova.endpoint import aws + +class APIVersionRouter(wsgi.Router): + """Routes top-level requests to the appropriate API.""" + + def __init__(self): + mapper = routes.Mapper() + + rsapi = rackspace.API() + mapper.connect(None, "/v1.0/{path_info:.*}", controller=rsapi) + + mapper.connect(None, "/ec2/{path_info:.*}", controller=aws.API()) + + super(APIVersionRouter, self).__init__(mapper) + -- cgit From 200daa3e5d5571add6c2937cf847641d065e87b8 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Wed, 18 Aug 2010 00:05:06 +0200 Subject: Stylistic improvements. --- nova/flags.py | 2 -- nova/twistd.py | 6 +++++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/nova/flags.py b/nova/flags.py index e0181102e..6f9f906dd 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -153,8 +153,6 @@ DEFINE_spaceseplist = _wrapper(gflags.DEFINE_spaceseplist) DEFINE_multistring = _wrapper(gflags.DEFINE_multistring) DEFINE_multi_int = _wrapper(gflags.DEFINE_multi_int) -ArgumentSerializer = gflags.ArgumentSerializer - def DECLARE(name, module_string, flag_values=FLAGS): if module_string not in sys.modules: diff --git a/nova/twistd.py b/nova/twistd.py index a72cc85e6..9511c231c 100644 --- a/nova/twistd.py +++ b/nova/twistd.py @@ -21,6 +21,7 @@ Twisted daemon helpers, specifically to parse out gFlags from twisted flags, manage pid files and support syslogging. 
""" +import gflags import logging import os import signal @@ -48,6 +49,7 @@ class TwistdServerOptions(ServerOptions): def parseArgs(self, *args): return + class FlagParser(object): def __init__(self, parser): self.parser = parser @@ -87,7 +89,9 @@ def WrapTwistedOptions(wrapped): for param in twistd_params: key = param[0].replace('-', '_') if len(param) > 4: - flags.DEFINE(FlagParser(param[4]), key, param[2], str(param[3]), serializer=flags.ArgumentSerializer()) + flags.DEFINE(FlagParser(param[4]), + key, param[2], str(param[3]), + serializer=gflags.ArgumentSerializer()) else: flags.DEFINE_string(key, param[2], str(param[3])) -- cgit From 1e403e56dc1147ce3feea1b8931948bc35f23a44 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Tue, 17 Aug 2010 16:43:37 -0700 Subject: In an effort to keep new and old API code separate, I've created a nova.api to put all new API code under. This means nova.endpoint only contains the old Tornado implementation. I also cleaned up a few pep8 and other style nits in the new API code. 
--- bin/nova-api-new | 34 +++++++++ bin/nova-rsapi | 34 --------- nova/api/__init__.py | 38 ++++++++++ nova/api/ec2/__init__.py | 42 +++++++++++ nova/api/rackspace/__init__.py | 81 +++++++++++++++++++++ nova/api/rackspace/controllers/__init__.py | 0 nova/api/rackspace/controllers/base.py | 30 ++++++++ nova/api/rackspace/controllers/flavors.py | 18 +++++ nova/api/rackspace/controllers/images.py | 18 +++++ nova/api/rackspace/controllers/servers.py | 83 ++++++++++++++++++++++ nova/api/rackspace/controllers/sharedipgroups.py | 18 +++++ nova/endpoint/aws/__init__.py | 22 ------ nova/endpoint/newapi.py | 51 ------------- nova/endpoint/rackspace/__init__.py | 83 ---------------------- nova/endpoint/rackspace/controllers/__init__.py | 5 -- nova/endpoint/rackspace/controllers/base.py | 9 --- nova/endpoint/rackspace/controllers/flavors.py | 1 - nova/endpoint/rackspace/controllers/images.py | 1 - nova/endpoint/rackspace/controllers/servers.py | 63 ---------------- .../rackspace/controllers/sharedipgroups.py | 1 - 20 files changed, 362 insertions(+), 270 deletions(-) create mode 100755 bin/nova-api-new delete mode 100755 bin/nova-rsapi create mode 100644 nova/api/__init__.py create mode 100644 nova/api/ec2/__init__.py create mode 100644 nova/api/rackspace/__init__.py create mode 100644 nova/api/rackspace/controllers/__init__.py create mode 100644 nova/api/rackspace/controllers/base.py create mode 100644 nova/api/rackspace/controllers/flavors.py create mode 100644 nova/api/rackspace/controllers/images.py create mode 100644 nova/api/rackspace/controllers/servers.py create mode 100644 nova/api/rackspace/controllers/sharedipgroups.py delete mode 100644 nova/endpoint/aws/__init__.py delete mode 100644 nova/endpoint/newapi.py delete mode 100644 nova/endpoint/rackspace/__init__.py delete mode 100644 nova/endpoint/rackspace/controllers/__init__.py delete mode 100644 nova/endpoint/rackspace/controllers/base.py delete mode 100644 nova/endpoint/rackspace/controllers/flavors.py delete 
mode 100644 nova/endpoint/rackspace/controllers/images.py delete mode 100644 nova/endpoint/rackspace/controllers/servers.py delete mode 100644 nova/endpoint/rackspace/controllers/sharedipgroups.py diff --git a/bin/nova-api-new b/bin/nova-api-new new file mode 100755 index 000000000..fda42339c --- /dev/null +++ b/bin/nova-api-new @@ -0,0 +1,34 @@ +#!/usr/bin/env python +# pylint: disable-msg=C0103 +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Nova API daemon. +""" + +from nova import api +from nova import flags +from nova import utils +from nova import wsgi + +FLAGS = flags.FLAGS +flags.DEFINE_integer('api_port', 8773, 'API port') + +if __name__ == '__main__': + utils.default_flagfile() + wsgi.run_server(api.API(), FLAGS.api_port) diff --git a/bin/nova-rsapi b/bin/nova-rsapi deleted file mode 100755 index e2722422e..000000000 --- a/bin/nova-rsapi +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env python -# pylint: disable-msg=C0103 -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" - Daemon for the Rackspace API endpoint. -""" - -from nova import flags -from nova import utils -from nova import wsgi -from nova.endpoint import newapi - -FLAGS = flags.FLAGS -flags.DEFINE_integer('cc_port', 8773, 'cloud controller port') - -if __name__ == '__main__': - utils.default_flagfile() - wsgi.run_server(newapi.APIVersionRouter(), FLAGS.cc_port) diff --git a/nova/api/__init__.py b/nova/api/__init__.py new file mode 100644 index 000000000..a6bb93348 --- /dev/null +++ b/nova/api/__init__.py @@ -0,0 +1,38 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Root WSGI middleware for all API controllers. 
+""" + +import routes + +from nova import wsgi +from nova.api import ec2 +from nova.api import rackspace + + +class API(wsgi.Router): + """Routes top-level requests to the appropriate controller.""" + + def __init__(self): + mapper = routes.Mapper() + mapper.connect(None, "/v1.0/{path_info:.*}", + controller=rackspace.API()) + mapper.connect(None, "/ec2/{path_info:.*}", controller=ec2.API()) + super(API, self).__init__(mapper) diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py new file mode 100644 index 000000000..6eec0abf7 --- /dev/null +++ b/nova/api/ec2/__init__.py @@ -0,0 +1,42 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +WSGI middleware for EC2 API controllers. 
+""" + +import routes +import webob.dec + +from nova import wsgi + + +class API(wsgi.Router): + """Routes EC2 requests to the appropriate controller.""" + + def __init__(self): + mapper = routes.Mapper() + mapper.connect(None, "{all:.*}", controller=self.dummy) + super(API, self).__init__(mapper) + + @staticmethod + @webob.dec.wsgify + def dummy(req): + """Temporary dummy controller.""" + msg = "dummy response -- please hook up __init__() to cloud.py instead" + return repr({'dummy': msg, + 'kwargs': repr(req.environ['wsgiorg.routing_args'][1])}) diff --git a/nova/api/rackspace/__init__.py b/nova/api/rackspace/__init__.py new file mode 100644 index 000000000..662cbe495 --- /dev/null +++ b/nova/api/rackspace/__init__.py @@ -0,0 +1,81 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +WSGI middleware for Rackspace API controllers. 
+""" + +import json +import time + +import routes +import webob.dec +import webob.exc + +from nova import flags +from nova import wsgi +from nova.api.rackspace.controllers import flavors +from nova.api.rackspace.controllers import images +from nova.api.rackspace.controllers import servers +from nova.api.rackspace.controllers import sharedipgroups +from nova.auth import manager + + +class API(wsgi.Middleware): + """WSGI entry point for all Rackspace API requests.""" + + def __init__(self): + app = AuthMiddleware(APIRouter()) + super(API, self).__init__(app) + + +class AuthMiddleware(wsgi.Middleware): + """Authorize the rackspace API request or return an HTTP Forbidden.""" + + #TODO(gundlach): isn't this the old Nova API's auth? Should it be replaced + #with correct RS API auth? + + @webob.dec.wsgify + def __call__(self, req): + context = {} + if "HTTP_X_AUTH_TOKEN" in req.environ: + context['user'] = manager.AuthManager().get_user_from_access_key( + req.environ['HTTP_X_AUTH_TOKEN']) + if context['user']: + context['project'] = manager.AuthManager().get_project( + context['user'].name) + if "user" not in context: + return webob.exc.HTTPForbidden() + req.environ['nova.context'] = context + return self.application + + +class APIRouter(wsgi.Router): + """ + Routes requests on the Rackspace API to the appropriate controller + and method. 
+ """ + + def __init__(self): + mapper = routes.Mapper() + mapper.resource("server", "servers", controller=servers.Controller()) + mapper.resource("image", "images", controller=images.Controller()) + mapper.resource("flavor", "flavors", controller=flavors.Controller()) + mapper.resource("sharedipgroup", "sharedipgroups", + controller=sharedipgroups.Controller()) + super(APIRouter, self).__init__(mapper) diff --git a/nova/api/rackspace/controllers/__init__.py b/nova/api/rackspace/controllers/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/nova/api/rackspace/controllers/base.py b/nova/api/rackspace/controllers/base.py new file mode 100644 index 000000000..dd2c6543c --- /dev/null +++ b/nova/api/rackspace/controllers/base.py @@ -0,0 +1,30 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova import wsgi + + +class Controller(wsgi.Controller): + """TODO(eday): Base controller for all rackspace controllers. What is this + for? Is this just Rackspace specific? 
""" + + @classmethod + def render(cls, instance): + if isinstance(instance, list): + return {cls.entity_name: cls.render(instance)} + else: + return {"TODO": "TODO"} diff --git a/nova/api/rackspace/controllers/flavors.py b/nova/api/rackspace/controllers/flavors.py new file mode 100644 index 000000000..986f11434 --- /dev/null +++ b/nova/api/rackspace/controllers/flavors.py @@ -0,0 +1,18 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +class Controller(object): pass diff --git a/nova/api/rackspace/controllers/images.py b/nova/api/rackspace/controllers/images.py new file mode 100644 index 000000000..986f11434 --- /dev/null +++ b/nova/api/rackspace/controllers/images.py @@ -0,0 +1,18 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +class Controller(object): pass diff --git a/nova/api/rackspace/controllers/servers.py b/nova/api/rackspace/controllers/servers.py new file mode 100644 index 000000000..1911d5abf --- /dev/null +++ b/nova/api/rackspace/controllers/servers.py @@ -0,0 +1,83 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova import rpc +from nova.compute import model as compute +from nova.api.rackspace.controllers import base + + +class Controller(base.Controller): + entity_name = 'servers' + + def index(self, **kwargs): + instances = [] + for inst in compute.InstanceDirectory().all: + instances.append(instance_details(inst)) + + def show(self, **kwargs): + instance_id = kwargs['id'] + return compute.InstanceDirectory().get(instance_id) + + def delete(self, **kwargs): + instance_id = kwargs['id'] + instance = compute.InstanceDirectory().get(instance_id) + if not instance: + raise ServerNotFound("The requested server was not found") + instance.destroy() + return True + + def create(self, **kwargs): + inst = self.build_server_instance(kwargs['server']) + rpc.cast( + FLAGS.compute_topic, { + "method": "run_instance", + "args": {"instance_id": inst.instance_id}}) + + def update(self, **kwargs): + instance_id = kwargs['id'] + instance = compute.InstanceDirectory().get(instance_id) + if not instance: + raise ServerNotFound("The requested server was not found") + 
instance.update(kwargs['server']) + instance.save() + + def build_server_instance(self, env): + """Build instance data structure and save it to the data store.""" + reservation = utils.generate_uid('r') + ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) + inst = self.instdir.new() + inst['name'] = env['server']['name'] + inst['image_id'] = env['server']['imageId'] + inst['instance_type'] = env['server']['flavorId'] + inst['user_id'] = env['user']['id'] + inst['project_id'] = env['project']['id'] + inst['reservation_id'] = reservation + inst['launch_time'] = ltime + inst['mac_address'] = utils.generate_mac() + address = self.network.allocate_ip( + inst['user_id'], + inst['project_id'], + mac=inst['mac_address']) + inst['private_dns_name'] = str(address) + inst['bridge_name'] = network.BridgedNetwork.get_network_for_project( + inst['user_id'], + inst['project_id'], + 'default')['bridge_name'] + # key_data, key_name, ami_launch_index + # TODO(todd): key data or root password + inst.save() + return inst diff --git a/nova/api/rackspace/controllers/sharedipgroups.py b/nova/api/rackspace/controllers/sharedipgroups.py new file mode 100644 index 000000000..986f11434 --- /dev/null +++ b/nova/api/rackspace/controllers/sharedipgroups.py @@ -0,0 +1,18 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +class Controller(object): pass diff --git a/nova/endpoint/aws/__init__.py b/nova/endpoint/aws/__init__.py deleted file mode 100644 index 55cbb8fd3..000000000 --- a/nova/endpoint/aws/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -import routes -import webob.dec - -from nova import wsgi - -# TODO(gundlach): temp -class API(wsgi.Router): - """WSGI entry point for all AWS API requests.""" - - def __init__(self): - mapper = routes.Mapper() - - mapper.connect(None, "{all:.*}", controller=self.dummy) - - super(API, self).__init__(mapper) - - @webob.dec.wsgify - def dummy(self, req): - #TODO(gundlach) - msg = "dummy response -- please hook up __init__() to cloud.py instead" - return repr({ 'dummy': msg, - 'kwargs': repr(req.environ['wsgiorg.routing_args'][1]) }) diff --git a/nova/endpoint/newapi.py b/nova/endpoint/newapi.py deleted file mode 100644 index 9aae933af..000000000 --- a/nova/endpoint/newapi.py +++ /dev/null @@ -1,51 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -:mod:`nova.endpoint` -- Main NOVA Api endpoints -===================================================== - -.. automodule:: nova.endpoint - :platform: Unix - :synopsis: REST APIs for all nova functions -.. moduleauthor:: Jesse Andrews -.. moduleauthor:: Devin Carlen -.. 
moduleauthor:: Vishvananda Ishaya -.. moduleauthor:: Joshua McKenty -.. moduleauthor:: Manish Singh -.. moduleauthor:: Andy Smith -""" - -from nova import wsgi -import routes -from nova.endpoint import rackspace -from nova.endpoint import aws - -class APIVersionRouter(wsgi.Router): - """Routes top-level requests to the appropriate API.""" - - def __init__(self): - mapper = routes.Mapper() - - rsapi = rackspace.API() - mapper.connect(None, "/v1.0/{path_info:.*}", controller=rsapi) - - mapper.connect(None, "/ec2/{path_info:.*}", controller=aws.API()) - - super(APIVersionRouter, self).__init__(mapper) - diff --git a/nova/endpoint/rackspace/__init__.py b/nova/endpoint/rackspace/__init__.py deleted file mode 100644 index ac53ee10b..000000000 --- a/nova/endpoint/rackspace/__init__.py +++ /dev/null @@ -1,83 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Rackspace API Endpoint -""" - -import json -import time - -import webob.dec -import webob.exc -import routes - -from nova import flags -from nova import wsgi -from nova.auth import manager -from nova.endpoint.rackspace import controllers - - -class API(wsgi.Middleware): - """WSGI entry point for all Rackspace API requests.""" - - def __init__(self): - app = AuthMiddleware(APIRouter()) - super(API, self).__init__(app) - - -class AuthMiddleware(wsgi.Middleware): - """Authorize the rackspace API request or return an HTTP Forbidden.""" - - #TODO(gundlach): isn't this the old Nova API's auth? Should it be replaced - #with correct RS API auth? - - @webob.dec.wsgify - def __call__(self, req): - context = {} - if "HTTP_X_AUTH_TOKEN" in req.environ: - context['user'] = manager.AuthManager().get_user_from_access_key( - req.environ['HTTP_X_AUTH_TOKEN']) - if context['user']: - context['project'] = manager.AuthManager().get_project( - context['user'].name) - if "user" not in context: - return webob.exc.HTTPForbidden() - req.environ['nova.context'] = context - return self.application - - -class APIRouter(wsgi.Router): - """ - Routes requests on the Rackspace API to the appropriate controller - and method. 
- """ - - def __init__(self): - mapper = routes.Mapper() - - mapper.resource("server", "servers", - controller=controllers.ServersController()) - mapper.resource("image", "images", - controller=controllers.ImagesController()) - mapper.resource("flavor", "flavors", - controller=controllers.FlavorsController()) - mapper.resource("sharedipgroup", "sharedipgroups", - controller=controllers.SharedIpGroupsController()) - - super(APIRouter, self).__init__(mapper) diff --git a/nova/endpoint/rackspace/controllers/__init__.py b/nova/endpoint/rackspace/controllers/__init__.py deleted file mode 100644 index 052b6f365..000000000 --- a/nova/endpoint/rackspace/controllers/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from nova.endpoint.rackspace.controllers.images import ImagesController -from nova.endpoint.rackspace.controllers.flavors import FlavorsController -from nova.endpoint.rackspace.controllers.servers import ServersController -from nova.endpoint.rackspace.controllers.sharedipgroups import \ - SharedIpGroupsController diff --git a/nova/endpoint/rackspace/controllers/base.py b/nova/endpoint/rackspace/controllers/base.py deleted file mode 100644 index 8cd44f62e..000000000 --- a/nova/endpoint/rackspace/controllers/base.py +++ /dev/null @@ -1,9 +0,0 @@ -from nova import wsgi - -class BaseController(wsgi.Controller): - @classmethod - def render(cls, instance): - if isinstance(instance, list): - return { cls.entity_name : cls.render(instance) } - else: - return { "TODO": "TODO" } diff --git a/nova/endpoint/rackspace/controllers/flavors.py b/nova/endpoint/rackspace/controllers/flavors.py deleted file mode 100644 index f256cc852..000000000 --- a/nova/endpoint/rackspace/controllers/flavors.py +++ /dev/null @@ -1 +0,0 @@ -class FlavorsController(object): pass diff --git a/nova/endpoint/rackspace/controllers/images.py b/nova/endpoint/rackspace/controllers/images.py deleted file mode 100644 index ae2a08849..000000000 --- a/nova/endpoint/rackspace/controllers/images.py +++ /dev/null @@ 
-1 +0,0 @@ -class ImagesController(object): pass diff --git a/nova/endpoint/rackspace/controllers/servers.py b/nova/endpoint/rackspace/controllers/servers.py deleted file mode 100644 index 2f8e662d6..000000000 --- a/nova/endpoint/rackspace/controllers/servers.py +++ /dev/null @@ -1,63 +0,0 @@ -from nova import rpc -from nova.compute import model as compute -from nova.endpoint.rackspace.controllers.base import BaseController - -class ServersController(BaseController): - entity_name = 'servers' - - def index(self, **kwargs): - return [instance_details(inst) for inst in compute.InstanceDirectory().all] - - def show(self, **kwargs): - instance_id = kwargs['id'] - return compute.InstanceDirectory().get(instance_id) - - def delete(self, **kwargs): - instance_id = kwargs['id'] - instance = compute.InstanceDirectory().get(instance_id) - if not instance: - raise ServerNotFound("The requested server was not found") - instance.destroy() - return True - - def create(self, **kwargs): - inst = self.build_server_instance(kwargs['server']) - rpc.cast( - FLAGS.compute_topic, { - "method": "run_instance", - "args": {"instance_id": inst.instance_id}}) - - def update(self, **kwargs): - instance_id = kwargs['id'] - instance = compute.InstanceDirectory().get(instance_id) - if not instance: - raise ServerNotFound("The requested server was not found") - instance.update(kwargs['server']) - instance.save() - - def build_server_instance(self, env): - """Build instance data structure and save it to the data store.""" - reservation = utils.generate_uid('r') - ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) - inst = self.instdir.new() - inst['name'] = env['server']['name'] - inst['image_id'] = env['server']['imageId'] - inst['instance_type'] = env['server']['flavorId'] - inst['user_id'] = env['user']['id'] - inst['project_id'] = env['project']['id'] - inst['reservation_id'] = reservation - inst['launch_time'] = ltime - inst['mac_address'] = utils.generate_mac() - address = 
self.network.allocate_ip( - inst['user_id'], - inst['project_id'], - mac=inst['mac_address']) - inst['private_dns_name'] = str(address) - inst['bridge_name'] = network.BridgedNetwork.get_network_for_project( - inst['user_id'], - inst['project_id'], - 'default')['bridge_name'] - # key_data, key_name, ami_launch_index - # TODO(todd): key data or root password - inst.save() - return inst diff --git a/nova/endpoint/rackspace/controllers/sharedipgroups.py b/nova/endpoint/rackspace/controllers/sharedipgroups.py deleted file mode 100644 index 9d346d623..000000000 --- a/nova/endpoint/rackspace/controllers/sharedipgroups.py +++ /dev/null @@ -1 +0,0 @@ -class SharedIpGroupsController(object): pass -- cgit From 67ea462eadcc02ca2f8244062c786bd98871e9e8 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Tue, 17 Aug 2010 23:46:16 -0700 Subject: Added unittests for wsgi and api. --- nova/api/__init__.py | 5 +- nova/api/test.py | 70 +++++++++++++++++++++++++++ nova/wsgi.py | 17 ++++--- nova/wsgi_test.py | 133 +++++++++++++++++++++++++++++++++++++++++++++++++++ pylintrc | 14 ++++-- 5 files changed, 224 insertions(+), 15 deletions(-) create mode 100644 nova/api/test.py create mode 100644 nova/wsgi_test.py diff --git a/nova/api/__init__.py b/nova/api/__init__.py index a6bb93348..b9b9e3988 100644 --- a/nova/api/__init__.py +++ b/nova/api/__init__.py @@ -32,7 +32,6 @@ class API(wsgi.Router): def __init__(self): mapper = routes.Mapper() - mapper.connect(None, "/v1.0/{path_info:.*}", - controller=rackspace.API()) - mapper.connect(None, "/ec2/{path_info:.*}", controller=ec2.API()) + mapper.connect("/v1.0/{path_info:.*}", controller=rackspace.API()) + mapper.connect("/ec2/{path_info:.*}", controller=ec2.API()) super(API, self).__init__(mapper) diff --git a/nova/api/test.py b/nova/api/test.py new file mode 100644 index 000000000..09f79c02e --- /dev/null +++ b/nova/api/test.py @@ -0,0 +1,70 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Test for the root WSGI middleware for all API controllers. +""" + +import unittest + +import stubout + +from nova import api +from nova import wsgi_test + + +class Test(unittest.TestCase): + + def setUp(self): # pylint: disable-msg=C0103 + self.called = False + self.stubs = stubout.StubOutForTesting() + + def tearDown(self): # pylint: disable-msg=C0103 + self.stubs.UnsetAll() + + def test_rackspace(self): + self.stubs.Set(api.rackspace, 'API', get_api_stub(self)) + api.API()(wsgi_test.get_environ({'PATH_INFO': '/v1.0/cloud'}), + wsgi_test.start_response) + self.assertTrue(self.called) + + def test_ec2(self): + self.stubs.Set(api.ec2, 'API', get_api_stub(self)) + api.API()(wsgi_test.get_environ({'PATH_INFO': '/ec2/cloud'}), + wsgi_test.start_response) + self.assertTrue(self.called) + + def test_not_found(self): + self.stubs.Set(api.ec2, 'API', get_api_stub(self)) + self.stubs.Set(api.rackspace, 'API', get_api_stub(self)) + api.API()(wsgi_test.get_environ({'PATH_INFO': '/'}), + wsgi_test.start_response) + self.assertFalse(self.called) + + +def get_api_stub(test_object): + """Get a stub class that verifies next part of the request.""" + + class APIStub(object): + """Class to verify request and mark it was called.""" + test = test_object + + def __call__(self, environ, start_response): + self.test.assertEqual(environ['PATH_INFO'], '/cloud') + self.test.called = True + + return APIStub diff --git a/nova/wsgi.py 
b/nova/wsgi.py index a0a175dc7..baf6cccd9 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -83,7 +83,7 @@ class Application(object): raise NotImplementedError("You must implement __call__") -class Middleware(Application): # pylint: disable=W0223 +class Middleware(Application): """ Base WSGI middleware wrapper. These classes require an application to be initialized that will be called next. By default the middleware will @@ -91,11 +91,11 @@ class Middleware(Application): # pylint: disable=W0223 behavior. """ - def __init__(self, application): # pylint: disable=W0231 + def __init__(self, application): # pylint: disable-msg=W0231 self.application = application @webob.dec.wsgify - def __call__(self, req): + def __call__(self, req): # pylint: disable-msg=W0221 """Override to implement middleware behavior.""" return self.application @@ -113,7 +113,7 @@ class Debug(Middleware): resp = req.get_response(self.application) print ("*" * 40) + " RESPONSE HEADERS" - for (key, value) in resp.headers: + for (key, value) in resp.headers.iteritems(): print key, "=", value print @@ -127,7 +127,7 @@ class Debug(Middleware): Iterator that prints the contents of a wrapper string iterator when iterated. """ - print ("*" * 40) + "BODY" + print ("*" * 40) + " BODY" for part in app_iter: sys.stdout.write(part) sys.stdout.flush() @@ -176,8 +176,9 @@ class Router(object): """ return self._router + @staticmethod @webob.dec.wsgify - def _dispatch(self, req): + def _dispatch(req): """ Called by self._router after matching the incoming request to a route and putting the information into req.environ. Either returns 404 @@ -197,6 +198,7 @@ class Controller(object): must, in addition to their normal parameters, accept a 'req' argument which is the incoming webob.Request. 
""" + @webob.dec.wsgify def __call__(self, req): """ @@ -249,6 +251,7 @@ class Serializer(object): return repr(data) def _to_xml_node(self, doc, metadata, nodename, data): + """Recursive method to convert data members to XML nodes.""" result = doc.createElement(nodename) if type(data) is list: singular = metadata.get('plurals', {}).get(nodename, None) @@ -262,7 +265,7 @@ class Serializer(object): result.appendChild(node) elif type(data) is dict: attrs = metadata.get('attributes', {}).get(nodename, {}) - for k,v in data.items(): + for k, v in data.items(): if k in attrs: result.setAttribute(k, str(v)) else: diff --git a/nova/wsgi_test.py b/nova/wsgi_test.py new file mode 100644 index 000000000..02bf067d6 --- /dev/null +++ b/nova/wsgi_test.py @@ -0,0 +1,133 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Test WSGI basics and provide some helper functions for other WSGI tests. 
+""" + +import unittest + +import routes + +from nova import wsgi + + +class Test(unittest.TestCase): + + def setUp(self): # pylint: disable-msg=C0103 + self.called = False + + def test_debug(self): + + class Application(wsgi.Application): + """Dummy application to test debug.""" + test = self + + def __call__(self, environ, test_start_response): + test_start_response("200", [("X-Test", "checking")]) + self.test.called = True + return ['Test response'] + + app = wsgi.Debug(Application())(get_environ(), start_response) + self.assertTrue(self.called) + for _ in app: + pass + + def test_router(self): + + class Application(wsgi.Application): + """Test application to call from router.""" + test = self + + def __call__(self, environ, test_start_response): + test_start_response("200", []) + self.test.called = True + return [] + + class Router(wsgi.Router): + """Test router.""" + + def __init__(self): + mapper = routes.Mapper() + mapper.connect("/test", controller=Application()) + super(Router, self).__init__(mapper) + + Router()(get_environ({'PATH_INFO': '/test'}), start_response) + self.assertTrue(self.called) + self.called = False + Router()(get_environ({'PATH_INFO': '/bad'}), start_response) + self.assertFalse(self.called) + + def test_controller(self): + + class Controller(wsgi.Controller): + """Test controller to call from router.""" + test = self + + def show(self, **kwargs): + """Mark that this has been called.""" + self.test.called = True + self.test.assertEqual(kwargs['id'], '123') + return "Test" + + class Router(wsgi.Router): + """Test router.""" + + def __init__(self): + mapper = routes.Mapper() + mapper.resource("test", "tests", controller=Controller()) + super(Router, self).__init__(mapper) + + Router()(get_environ({'PATH_INFO': '/tests/123'}), start_response) + self.assertTrue(self.called) + self.called = False + Router()(get_environ({'PATH_INFO': '/test/123'}), start_response) + self.assertFalse(self.called) + + def test_serializer(self): + # TODO(eday): 
Placeholder for serializer testing. + pass + + +def get_environ(overwrite={}): # pylint: disable-msg=W0102 + """Get a WSGI environment, overwriting any entries given.""" + environ = {'SERVER_PROTOCOL': 'HTTP/1.1', + 'GATEWAY_INTERFACE': 'CGI/1.1', + 'wsgi.version': (1, 0), + 'SERVER_PORT': '443', + 'SERVER_NAME': '127.0.0.1', + 'REMOTE_ADDR': '127.0.0.1', + 'wsgi.run_once': False, + 'wsgi.errors': None, + 'wsgi.multiprocess': False, + 'SCRIPT_NAME': '', + 'wsgi.url_scheme': 'https', + 'wsgi.input': None, + 'REQUEST_METHOD': 'GET', + 'PATH_INFO': '/', + 'CONTENT_TYPE': 'text/plain', + 'wsgi.multithread': True, + 'QUERY_STRING': '', + 'eventlet.input': None} + return dict(environ, **overwrite) + + +def start_response(_status, _headers): + """Dummy start_response to use with WSGI tests.""" + pass diff --git a/pylintrc b/pylintrc index 6c799c7ea..36cc337e5 100644 --- a/pylintrc +++ b/pylintrc @@ -1,9 +1,7 @@ [Messages Control] -disable=C0103 -# TODOs in code comments are fine... -disable=W0511 -# *args and **kwargs are fine -disable=W0142 +# W0511: TODOs in code comments are fine. +# W0142: *args and **kwargs are fine. +disable-msg=W0511,W0142 [Basic] # Variables can be 1 to 31 characters long, with @@ -14,6 +12,12 @@ variable-rgx=[a-z_][a-z0-9_]{0,30}$ # and be lowecased with underscores method-rgx=[a-z_][a-z0-9_]{2,50}$ +# Module names matching nova-* are ok (files in bin/) +module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(nova-[a-z0-9_]+))$ + +# Don't require docstrings on tests. 
+no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$ + [Design] max-public-methods=100 min-public-methods=0 -- cgit From 23e9600fc69541e132f36e27296104442df7ba41 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 18 Aug 2010 10:09:11 -0400 Subject: Fix pep8 violation --- tools/install_venv.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/install_venv.py b/tools/install_venv.py index 4e775eb33..f8c47ff04 100644 --- a/tools/install_venv.py +++ b/tools/install_venv.py @@ -96,8 +96,8 @@ def install_dependencies(venv=VENV): # Tell the virtual env how to "import nova" - pathfile=os.path.join(venv, "lib", "python2.6", "site-packages", "nova.pth") - f=open(pathfile, 'w') + pthfile = os.path.join(venv, "lib", "python2.6", "site-packages", "nova.pth") + f=open(pthfile, 'w') f.write("%s\n" % ROOT) -- cgit From 738bcb7d381a67b0884d861c7ad48fa08e37106a Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 18 Aug 2010 10:39:59 -0400 Subject: Newest pylint supports 'disable=', not 'disable-msg=' --- bin/nova-rsapi | 2 +- nova/test.py | 10 +++++----- nova/tests/objectstore_unittest.py | 16 ++++++++-------- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/bin/nova-rsapi b/bin/nova-rsapi index e2722422e..9ad6f9e94 100755 --- a/bin/nova-rsapi +++ b/bin/nova-rsapi @@ -1,5 +1,5 @@ #!/usr/bin/env python -# pylint: disable-msg=C0103 +# pylint: disable=C0103 # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the diff --git a/nova/test.py b/nova/test.py index c392c8a84..a75e0de1a 100644 --- a/nova/test.py +++ b/nova/test.py @@ -53,7 +53,7 @@ def skip_if_fake(func): class TrialTestCase(unittest.TestCase): """Test case base class for all unit tests""" - def setUp(self): # pylint: disable-msg=C0103 + def setUp(self): # pylint: disable=C0103 """Run before each test method to initialize test environment""" super(TrialTestCase, self).setUp() @@ -63,7 +63,7 @@ class 
TrialTestCase(unittest.TestCase): self.stubs = stubout.StubOutForTesting() self.flag_overrides = {} - def tearDown(self): # pylint: disable-msg=C0103 + def tearDown(self): # pylint: disable=C0103 """Runs after each test method to finalize/tear down test environment""" super(TrialTestCase, self).tearDown() self.reset_flags() @@ -94,7 +94,7 @@ class TrialTestCase(unittest.TestCase): class BaseTestCase(TrialTestCase): # TODO(jaypipes): Can this be moved into the TrialTestCase class? """Base test case class for all unit tests.""" - def setUp(self): # pylint: disable-msg=C0103 + def setUp(self): # pylint: disable=C0103 """Run before each test method to initialize test environment""" super(BaseTestCase, self).setUp() # TODO(termie): we could possibly keep a more global registry of @@ -106,7 +106,7 @@ class BaseTestCase(TrialTestCase): self._done_waiting = False self._timed_out = False - def tearDown(self):# pylint: disable-msg=C0103 + def tearDown(self):# pylint: disable=C0103 """Runs after each test method to finalize/tear down test environment""" super(BaseTestCase, self).tearDown() for x in self.injected: @@ -137,7 +137,7 @@ class BaseTestCase(TrialTestCase): if self._waiting: try: self.ioloop.remove_timeout(self._waiting) - except Exception: # pylint: disable-msg=W0703 + except Exception: # pylint: disable=W0703 # TODO(jaypipes): This produces a pylint warning. Should # we really be catching Exception and then passing here? 
pass diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py index dece4b5d5..5b956fccf 100644 --- a/nova/tests/objectstore_unittest.py +++ b/nova/tests/objectstore_unittest.py @@ -56,7 +56,7 @@ os.makedirs(os.path.join(OSS_TEMPDIR, 'buckets')) class ObjectStoreTestCase(test.BaseTestCase): """Test objectstore API directly.""" - def setUp(self): # pylint: disable-msg=C0103 + def setUp(self): # pylint: disable=C0103 """Setup users and projects.""" super(ObjectStoreTestCase, self).setUp() self.flags(buckets_path=os.path.join(OSS_TEMPDIR, 'buckets'), @@ -78,7 +78,7 @@ class ObjectStoreTestCase(test.BaseTestCase): self.context = Context() - def tearDown(self): # pylint: disable-msg=C0103 + def tearDown(self): # pylint: disable=C0103 """Tear down users and projects.""" self.auth_manager.delete_project('proj1') self.auth_manager.delete_project('proj2') @@ -168,7 +168,7 @@ class ObjectStoreTestCase(test.BaseTestCase): class TestHTTPChannel(http.HTTPChannel): """Dummy site required for twisted.web""" - def checkPersistence(self, _, __): # pylint: disable-msg=C0103 + def checkPersistence(self, _, __): # pylint: disable=C0103 """Otherwise we end up with an unclean reactor.""" return False @@ -181,7 +181,7 @@ class TestSite(server.Site): class S3APITestCase(test.TrialTestCase): """Test objectstore through S3 API.""" - def setUp(self): # pylint: disable-msg=C0103 + def setUp(self): # pylint: disable=C0103 """Setup users, projects, and start a test server.""" super(S3APITestCase, self).setUp() @@ -198,7 +198,7 @@ class S3APITestCase(test.TrialTestCase): root = S3() self.site = TestSite(root) - # pylint: disable-msg=E1101 + # pylint: disable=E1101 self.listening_port = reactor.listenTCP(0, self.site, interface='127.0.0.1') # pylint: enable-msg=E1101 @@ -221,11 +221,11 @@ class S3APITestCase(test.TrialTestCase): self.conn.get_http_connection = get_http_connection - def _ensure_no_buckets(self, buckets): # pylint: disable-msg=C0111 + def 
_ensure_no_buckets(self, buckets): # pylint: disable=C0111 self.assertEquals(len(buckets), 0, "Bucket list was not empty") return True - def _ensure_one_bucket(self, buckets, name): # pylint: disable-msg=C0111 + def _ensure_one_bucket(self, buckets, name): # pylint: disable=C0111 self.assertEquals(len(buckets), 1, "Bucket list didn't have exactly one element in it") self.assertEquals(buckets[0].name, name, "Wrong name") @@ -296,7 +296,7 @@ class S3APITestCase(test.TrialTestCase): deferred.addCallback(self._ensure_no_buckets) return deferred - def tearDown(self): # pylint: disable-msg=C0103 + def tearDown(self): # pylint: disable=C0103 """Tear down auth and test server.""" self.auth_manager.delete_user('admin') self.auth_manager.delete_project('admin') -- cgit From 24a6fd40f657896fb20249392be6ed41c30ca679 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 18 Aug 2010 11:19:40 -0400 Subject: Image API work --- nova/endpoint/newapi.py | 4 --- nova/endpoint/rackspace/controllers/base.py | 9 +++++ nova/endpoint/rackspace/controllers/images.py | 48 ++++++++++++++++++++++++++- 3 files changed, 56 insertions(+), 5 deletions(-) diff --git a/nova/endpoint/newapi.py b/nova/endpoint/newapi.py index 9aae933af..7836be582 100644 --- a/nova/endpoint/newapi.py +++ b/nova/endpoint/newapi.py @@ -41,11 +41,7 @@ class APIVersionRouter(wsgi.Router): def __init__(self): mapper = routes.Mapper() - rsapi = rackspace.API() mapper.connect(None, "/v1.0/{path_info:.*}", controller=rsapi) - mapper.connect(None, "/ec2/{path_info:.*}", controller=aws.API()) - super(APIVersionRouter, self).__init__(mapper) - diff --git a/nova/endpoint/rackspace/controllers/base.py b/nova/endpoint/rackspace/controllers/base.py index 8cd44f62e..88922280b 100644 --- a/nova/endpoint/rackspace/controllers/base.py +++ b/nova/endpoint/rackspace/controllers/base.py @@ -7,3 +7,12 @@ class BaseController(wsgi.Controller): return { cls.entity_name : cls.render(instance) } else: return { "TODO": "TODO" } + + def 
serialize(self, data, request): + """ + Serialize the given dict to the response type requested in request. + Uses self._serialization_metadata if it exists, which is a dict mapping + MIME types to information needed to serialize to that type. + """ + _metadata = getattr(type(self), "_serialization_metadata", {}) + return Serializer(request.environ, _metadata).to_content_type(data) diff --git a/nova/endpoint/rackspace/controllers/images.py b/nova/endpoint/rackspace/controllers/images.py index ae2a08849..197d8375c 100644 --- a/nova/endpoint/rackspace/controllers/images.py +++ b/nova/endpoint/rackspace/controllers/images.py @@ -1 +1,47 @@ -class ImagesController(object): pass +from nova.endpoint.rackspace.controllers.base import BaseController +from nova.endpoint import images +from webob import exc + +#TODO(gundlach): Serialize return values +class ImagesController(BaseController): + + _serialization_metadata = { + 'application/xml': { + "attributes": { + "image": [ "id", "name", "updated", "created", "status", + "serverId", "progress" ] + } + } + } + + def index(self, req): + context = req.environ['nova.api_request_context'] + return images.list(context) + + def show(self, req, id): + context = req.environ['nova.api_request_context'] + return images.list(context, filter_list=[id]) + + def delete(self, req, id): + context = req.environ['nova.api_request_context'] + # TODO(gundlach): make sure it's an image they may delete? + return images.deregister(context, id) + + def create(self, **kwargs): + # TODO(gundlach): no idea how to hook this up. code below + # is from servers.py. + inst = self.build_server_instance(kwargs['server']) + rpc.cast( + FLAGS.compute_topic, { + "method": "run_instance", + "args": {"instance_id": inst.instance_id}}) + + def update(self, **kwargs): + # TODO (gundlach): no idea how to hook this up. code below + # is from servers.py. 
+ instance_id = kwargs['id'] + instance = compute.InstanceDirectory().get(instance_id) + if not instance: + raise ServerNotFound("The requested server was not found") + instance.update(kwargs['server']) + instance.save() -- cgit From 43d2310f87a2f78f342b171de403f3db74a98295 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Wed, 18 Aug 2010 08:39:28 -0700 Subject: Fixed typo. --- nova/api/rackspace/controllers/servers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/rackspace/controllers/servers.py b/nova/api/rackspace/controllers/servers.py index 1911d5abf..1d0221ea8 100644 --- a/nova/api/rackspace/controllers/servers.py +++ b/nova/api/rackspace/controllers/servers.py @@ -24,7 +24,7 @@ class Controller(base.Controller): entity_name = 'servers' def index(self, **kwargs): - instanmces = [] + instances = [] for inst in compute.InstanceDirectory().all: instances.append(instance_details(inst)) -- cgit From b380e4a93f6d8ebc772c3989d27f9549b730eee5 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 18 Aug 2010 11:44:24 -0400 Subject: Changed our minds: keep pylint equal to Ubuntu Lucid version, and use disable-msg throughout. 
--- bin/nova-rsapi | 2 +- nova/network/linux_net.py | 4 ++-- nova/network/model.py | 8 ++++---- nova/network/service.py | 2 +- nova/network/vpn.py | 2 +- nova/rpc.py | 8 ++++---- nova/test.py | 10 +++++----- nova/tests/network_unittest.py | 4 ++-- nova/tests/objectstore_unittest.py | 16 ++++++++-------- nova/tests/rpc_unittest.py | 2 +- nova/wsgi.py | 4 ++-- tools/pip-requires | 2 +- 12 files changed, 32 insertions(+), 32 deletions(-) diff --git a/bin/nova-rsapi b/bin/nova-rsapi index 9ad6f9e94..e2722422e 100755 --- a/bin/nova-rsapi +++ b/bin/nova-rsapi @@ -1,5 +1,5 @@ #!/usr/bin/env python -# pylint: disable=C0103 +# pylint: disable-msg=C0103 # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index a5014b2cb..9e5aabd97 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -154,7 +154,7 @@ def start_dnsmasq(network): try: os.kill(pid, signal.SIGHUP) return - except Exception as exc: # pylint: disable=W0703 + except Exception as exc: # pylint: disable-msg=W0703 logging.debug("Hupping dnsmasq threw %s", exc) # FLAGFILE and DNSMASQ_INTERFACE in env @@ -170,7 +170,7 @@ def stop_dnsmasq(network): if pid: try: os.kill(pid, signal.SIGTERM) - except Exception as exc: # pylint: disable=W0703 + except Exception as exc: # pylint: disable-msg=W0703 logging.debug("Killing dnsmasq threw %s", exc) diff --git a/nova/network/model.py b/nova/network/model.py index d3a6a6552..6e4fcc47e 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -59,7 +59,7 @@ logging.getLogger().setLevel(logging.DEBUG) class Vlan(datastore.BasicModel): """Tracks vlans assigned to project it the datastore""" - def __init__(self, project, vlan): # pylint: disable=W0231 + def __init__(self, project, vlan): # pylint: disable-msg=W0231 """ Since we don't want to try and find a vlan by its identifier, but by a project id, we don't call super-init. 
@@ -161,7 +161,7 @@ class FixedIp(datastore.BasicModel): 'state': 'none'} @classmethod - # pylint: disable=R0913 + # pylint: disable-msg=R0913 def create(cls, user_id, project_id, address, mac, hostname, network_id): """Creates an FixedIp object""" addr = cls(address) @@ -215,7 +215,7 @@ class BaseNetwork(datastore.BasicModel): return {'network_id': self.network_id, 'network_str': self.network_str} @classmethod - # pylint: disable=R0913 + # pylint: disable-msg=R0913 def create(cls, user_id, project_id, security_group, vlan, network_str): """Create a BaseNetwork object""" network_id = "%s:%s" % (project_id, security_group) @@ -268,7 +268,7 @@ class BaseNetwork(datastore.BasicModel): """Returns the project associated with this network""" return manager.AuthManager().get_project(self['project_id']) - # pylint: disable=R0913 + # pylint: disable-msg=R0913 def _add_host(self, user_id, project_id, ip_address, mac, hostname): """Add a host to the datastore""" self.address_class.create(user_id, project_id, ip_address, diff --git a/nova/network/service.py b/nova/network/service.py index da102a056..d3aa1c46f 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -195,7 +195,7 @@ class VlanNetworkService(BaseNetworkService): # simplified and improved. Also there it may be useful # to support vlans separately from dhcp, instead of having # both of them together in this class. 
- # pylint: disable=W0221 + # pylint: disable-msg=W0221 def allocate_fixed_ip(self, user_id, project_id, diff --git a/nova/network/vpn.py b/nova/network/vpn.py index cf2579e61..85366ed89 100644 --- a/nova/network/vpn.py +++ b/nova/network/vpn.py @@ -105,7 +105,7 @@ class NetworkData(datastore.BasicModel): return datastore.Redis.instance().scard('ip:%s:ports' % vpn_ip) @property - def ip(self): # pylint: disable=C0103 + def ip(self): # pylint: disable-msg=C0103 """The ip assigned to the project""" return self['ip'] diff --git a/nova/rpc.py b/nova/rpc.py index 824a66b5b..84a9b5590 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -59,7 +59,7 @@ class Connection(carrot_connection.BrokerConnection): params['backend_cls'] = fakerabbit.Backend # NOTE(vish): magic is fun! - # pylint: disable=W0142 + # pylint: disable-msg=W0142 cls._instance = cls(**params) return cls._instance @@ -104,7 +104,7 @@ class Consumer(messaging.Consumer): if self.failed_connection: # NOTE(vish): conn is defined in the parent class, we can # recreate it as long as we create the backend too - # pylint: disable=W0201 + # pylint: disable-msg=W0201 self.conn = Connection.recreate() self.backend = self.conn.create_backend() super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks) @@ -114,7 +114,7 @@ class Consumer(messaging.Consumer): # NOTE(vish): This is catching all errors because we really don't # exceptions to be logged 10 times a second if some # persistent failure occurs. - except Exception: # pylint: disable=W0703 + except Exception: # pylint: disable-msg=W0703 if not self.failed_connection: logging.exception("Failed to fetch message from queue") self.failed_connection = True @@ -178,7 +178,7 @@ class AdapterConsumer(TopicConsumer): node_func = getattr(self.proxy, str(method)) node_args = dict((str(k), v) for k, v in args.iteritems()) # NOTE(vish): magic is fun! 
- # pylint: disable=W0142 + # pylint: disable-msg=W0142 d = defer.maybeDeferred(node_func, **node_args) if msg_id: d.addCallback(lambda rval: msg_reply(msg_id, rval, None)) diff --git a/nova/test.py b/nova/test.py index a75e0de1a..c392c8a84 100644 --- a/nova/test.py +++ b/nova/test.py @@ -53,7 +53,7 @@ def skip_if_fake(func): class TrialTestCase(unittest.TestCase): """Test case base class for all unit tests""" - def setUp(self): # pylint: disable=C0103 + def setUp(self): # pylint: disable-msg=C0103 """Run before each test method to initialize test environment""" super(TrialTestCase, self).setUp() @@ -63,7 +63,7 @@ class TrialTestCase(unittest.TestCase): self.stubs = stubout.StubOutForTesting() self.flag_overrides = {} - def tearDown(self): # pylint: disable=C0103 + def tearDown(self): # pylint: disable-msg=C0103 """Runs after each test method to finalize/tear down test environment""" super(TrialTestCase, self).tearDown() self.reset_flags() @@ -94,7 +94,7 @@ class TrialTestCase(unittest.TestCase): class BaseTestCase(TrialTestCase): # TODO(jaypipes): Can this be moved into the TrialTestCase class? 
"""Base test case class for all unit tests.""" - def setUp(self): # pylint: disable=C0103 + def setUp(self): # pylint: disable-msg=C0103 """Run before each test method to initialize test environment""" super(BaseTestCase, self).setUp() # TODO(termie): we could possibly keep a more global registry of @@ -106,7 +106,7 @@ class BaseTestCase(TrialTestCase): self._done_waiting = False self._timed_out = False - def tearDown(self):# pylint: disable=C0103 + def tearDown(self):# pylint: disable-msg=C0103 """Runs after each test method to finalize/tear down test environment""" super(BaseTestCase, self).tearDown() for x in self.injected: @@ -137,7 +137,7 @@ class BaseTestCase(TrialTestCase): if self._waiting: try: self.ioloop.remove_timeout(self._waiting) - except Exception: # pylint: disable=W0703 + except Exception: # pylint: disable-msg=W0703 # TODO(jaypipes): This produces a pylint warning. Should # we really be catching Exception and then passing here? pass diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index 039509809..993bfacc2 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -36,7 +36,7 @@ FLAGS = flags.FLAGS class NetworkTestCase(test.TrialTestCase): """Test cases for network code""" - def setUp(self): # pylint: disable=C0103 + def setUp(self): # pylint: disable-msg=C0103 super(NetworkTestCase, self).setUp() # NOTE(vish): if you change these flags, make sure to change the # flags in the corresponding section in nova-dhcpbridge @@ -60,7 +60,7 @@ class NetworkTestCase(test.TrialTestCase): vpn.NetworkData.create(self.projects[i].id) self.service = service.VlanNetworkService() - def tearDown(self): # pylint: disable=C0103 + def tearDown(self): # pylint: disable-msg=C0103 super(NetworkTestCase, self).tearDown() for project in self.projects: self.manager.delete_project(project) diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py index 5b956fccf..dece4b5d5 100644 --- 
a/nova/tests/objectstore_unittest.py +++ b/nova/tests/objectstore_unittest.py @@ -56,7 +56,7 @@ os.makedirs(os.path.join(OSS_TEMPDIR, 'buckets')) class ObjectStoreTestCase(test.BaseTestCase): """Test objectstore API directly.""" - def setUp(self): # pylint: disable=C0103 + def setUp(self): # pylint: disable-msg=C0103 """Setup users and projects.""" super(ObjectStoreTestCase, self).setUp() self.flags(buckets_path=os.path.join(OSS_TEMPDIR, 'buckets'), @@ -78,7 +78,7 @@ class ObjectStoreTestCase(test.BaseTestCase): self.context = Context() - def tearDown(self): # pylint: disable=C0103 + def tearDown(self): # pylint: disable-msg=C0103 """Tear down users and projects.""" self.auth_manager.delete_project('proj1') self.auth_manager.delete_project('proj2') @@ -168,7 +168,7 @@ class ObjectStoreTestCase(test.BaseTestCase): class TestHTTPChannel(http.HTTPChannel): """Dummy site required for twisted.web""" - def checkPersistence(self, _, __): # pylint: disable=C0103 + def checkPersistence(self, _, __): # pylint: disable-msg=C0103 """Otherwise we end up with an unclean reactor.""" return False @@ -181,7 +181,7 @@ class TestSite(server.Site): class S3APITestCase(test.TrialTestCase): """Test objectstore through S3 API.""" - def setUp(self): # pylint: disable=C0103 + def setUp(self): # pylint: disable-msg=C0103 """Setup users, projects, and start a test server.""" super(S3APITestCase, self).setUp() @@ -198,7 +198,7 @@ class S3APITestCase(test.TrialTestCase): root = S3() self.site = TestSite(root) - # pylint: disable=E1101 + # pylint: disable-msg=E1101 self.listening_port = reactor.listenTCP(0, self.site, interface='127.0.0.1') # pylint: enable-msg=E1101 @@ -221,11 +221,11 @@ class S3APITestCase(test.TrialTestCase): self.conn.get_http_connection = get_http_connection - def _ensure_no_buckets(self, buckets): # pylint: disable=C0111 + def _ensure_no_buckets(self, buckets): # pylint: disable-msg=C0111 self.assertEquals(len(buckets), 0, "Bucket list was not empty") return True - def 
_ensure_one_bucket(self, buckets, name): # pylint: disable=C0111 + def _ensure_one_bucket(self, buckets, name): # pylint: disable-msg=C0111 self.assertEquals(len(buckets), 1, "Bucket list didn't have exactly one element in it") self.assertEquals(buckets[0].name, name, "Wrong name") @@ -296,7 +296,7 @@ class S3APITestCase(test.TrialTestCase): deferred.addCallback(self._ensure_no_buckets) return deferred - def tearDown(self): # pylint: disable=C0103 + def tearDown(self): # pylint: disable-msg=C0103 """Tear down auth and test server.""" self.auth_manager.delete_user('admin') self.auth_manager.delete_project('admin') diff --git a/nova/tests/rpc_unittest.py b/nova/tests/rpc_unittest.py index 764a97416..e12a28fbc 100644 --- a/nova/tests/rpc_unittest.py +++ b/nova/tests/rpc_unittest.py @@ -32,7 +32,7 @@ FLAGS = flags.FLAGS class RpcTestCase(test.BaseTestCase): """Test cases for rpc""" - def setUp(self): # pylint: disable=C0103 + def setUp(self): # pylint: disable-msg=C0103 super(RpcTestCase, self).setUp() self.conn = rpc.Connection.instance() self.receiver = TestReceiver() diff --git a/nova/wsgi.py b/nova/wsgi.py index a0a175dc7..fd87afe6e 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -83,7 +83,7 @@ class Application(object): raise NotImplementedError("You must implement __call__") -class Middleware(Application): # pylint: disable=W0223 +class Middleware(Application): # pylint: disable-msg=W0223 """ Base WSGI middleware wrapper. These classes require an application to be initialized that will be called next. By default the middleware will @@ -91,7 +91,7 @@ class Middleware(Application): # pylint: disable=W0223 behavior. 
""" - def __init__(self, application): # pylint: disable=W0231 + def __init__(self, application): # pylint: disable-msg=W0231 self.application = application @webob.dec.wsgify diff --git a/tools/pip-requires b/tools/pip-requires index 28af7bcb9..13e8e5f45 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -1,5 +1,5 @@ pep8==0.5.0 -pylint==0.21.1 +pylint==0.19 IPy==0.70 M2Crypto==0.20.2 amqplib==0.6.1 -- cgit From ad3bda4b1a81ee60230869a3d207141f7315a3ca Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 18 Aug 2010 11:53:41 -0400 Subject: pep8 typo --- tools/install_venv.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/install_venv.py b/tools/install_venv.py index f8c47ff04..e108c29a1 100644 --- a/tools/install_venv.py +++ b/tools/install_venv.py @@ -97,7 +97,7 @@ def install_dependencies(venv=VENV): # Tell the virtual env how to "import nova" pthfile = os.path.join(venv, "lib", "python2.6", "site-packages", "nova.pth") - f=open(pthfile, 'w') + f = open(pthfile, 'w') f.write("%s\n" % ROOT) -- cgit From 4e5e72da2e3242026d757c8d5143e16f9d00cb6a Mon Sep 17 00:00:00 2001 From: Eric Day Date: Wed, 18 Aug 2010 08:56:33 -0700 Subject: Removed the 'controllers' directory under 'rackspace' due to full class name redundancy. 
--- nova/api/rackspace/__init__.py | 8 +-- nova/api/rackspace/base.py | 30 +++++++++ nova/api/rackspace/controllers/__init__.py | 0 nova/api/rackspace/controllers/base.py | 30 --------- nova/api/rackspace/controllers/flavors.py | 18 ----- nova/api/rackspace/controllers/images.py | 18 ----- nova/api/rackspace/controllers/servers.py | 83 ------------------------ nova/api/rackspace/controllers/sharedipgroups.py | 18 ----- nova/api/rackspace/flavors.py | 18 +++++ nova/api/rackspace/images.py | 18 +++++ nova/api/rackspace/servers.py | 83 ++++++++++++++++++++++++ nova/api/rackspace/sharedipgroups.py | 18 +++++ 12 files changed, 171 insertions(+), 171 deletions(-) create mode 100644 nova/api/rackspace/base.py delete mode 100644 nova/api/rackspace/controllers/__init__.py delete mode 100644 nova/api/rackspace/controllers/base.py delete mode 100644 nova/api/rackspace/controllers/flavors.py delete mode 100644 nova/api/rackspace/controllers/images.py delete mode 100644 nova/api/rackspace/controllers/servers.py delete mode 100644 nova/api/rackspace/controllers/sharedipgroups.py create mode 100644 nova/api/rackspace/flavors.py create mode 100644 nova/api/rackspace/images.py create mode 100644 nova/api/rackspace/servers.py create mode 100644 nova/api/rackspace/sharedipgroups.py diff --git a/nova/api/rackspace/__init__.py b/nova/api/rackspace/__init__.py index 662cbe495..27e78f801 100644 --- a/nova/api/rackspace/__init__.py +++ b/nova/api/rackspace/__init__.py @@ -29,10 +29,10 @@ import webob.exc from nova import flags from nova import wsgi -from nova.api.rackspace.controllers import flavors -from nova.api.rackspace.controllers import images -from nova.api.rackspace.controllers import servers -from nova.api.rackspace.controllers import sharedipgroups +from nova.api.rackspace import flavors +from nova.api.rackspace import images +from nova.api.rackspace import servers +from nova.api.rackspace import sharedipgroups from nova.auth import manager diff --git 
a/nova/api/rackspace/base.py b/nova/api/rackspace/base.py new file mode 100644 index 000000000..dd2c6543c --- /dev/null +++ b/nova/api/rackspace/base.py @@ -0,0 +1,30 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova import wsgi + + +class Controller(wsgi.Controller): + """TODO(eday): Base controller for all rackspace controllers. What is this + for? Is this just Rackspace specific? """ + + @classmethod + def render(cls, instance): + if isinstance(instance, list): + return {cls.entity_name: cls.render(instance)} + else: + return {"TODO": "TODO"} diff --git a/nova/api/rackspace/controllers/__init__.py b/nova/api/rackspace/controllers/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/nova/api/rackspace/controllers/base.py b/nova/api/rackspace/controllers/base.py deleted file mode 100644 index dd2c6543c..000000000 --- a/nova/api/rackspace/controllers/base.py +++ /dev/null @@ -1,30 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from nova import wsgi - - -class Controller(wsgi.Controller): - """TODO(eday): Base controller for all rackspace controllers. What is this - for? Is this just Rackspace specific? """ - - @classmethod - def render(cls, instance): - if isinstance(instance, list): - return {cls.entity_name: cls.render(instance)} - else: - return {"TODO": "TODO"} diff --git a/nova/api/rackspace/controllers/flavors.py b/nova/api/rackspace/controllers/flavors.py deleted file mode 100644 index 986f11434..000000000 --- a/nova/api/rackspace/controllers/flavors.py +++ /dev/null @@ -1,18 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -class Controller(object): pass diff --git a/nova/api/rackspace/controllers/images.py b/nova/api/rackspace/controllers/images.py deleted file mode 100644 index 986f11434..000000000 --- a/nova/api/rackspace/controllers/images.py +++ /dev/null @@ -1,18 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. 
-# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -class Controller(object): pass diff --git a/nova/api/rackspace/controllers/servers.py b/nova/api/rackspace/controllers/servers.py deleted file mode 100644 index 1d0221ea8..000000000 --- a/nova/api/rackspace/controllers/servers.py +++ /dev/null @@ -1,83 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from nova import rpc -from nova.compute import model as compute -from nova.api.rackspace.controllers import base - - -class Controller(base.Controller): - entity_name = 'servers' - - def index(self, **kwargs): - instances = [] - for inst in compute.InstanceDirectory().all: - instances.append(instance_details(inst)) - - def show(self, **kwargs): - instance_id = kwargs['id'] - return compute.InstanceDirectory().get(instance_id) - - def delete(self, **kwargs): - instance_id = kwargs['id'] - instance = compute.InstanceDirectory().get(instance_id) - if not instance: - raise ServerNotFound("The requested server was not found") - instance.destroy() - return True - - def create(self, **kwargs): - inst = self.build_server_instance(kwargs['server']) - rpc.cast( - FLAGS.compute_topic, { - "method": "run_instance", - "args": {"instance_id": inst.instance_id}}) - - def update(self, **kwargs): - instance_id = kwargs['id'] - instance = compute.InstanceDirectory().get(instance_id) - if not instance: - raise ServerNotFound("The requested server was not found") - instance.update(kwargs['server']) - instance.save() - - def build_server_instance(self, env): - """Build instance data structure and save it to the data store.""" - reservation = utils.generate_uid('r') - ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) - inst = self.instdir.new() - inst['name'] = env['server']['name'] - inst['image_id'] = env['server']['imageId'] - inst['instance_type'] = env['server']['flavorId'] - inst['user_id'] = env['user']['id'] - inst['project_id'] = env['project']['id'] - inst['reservation_id'] = reservation - inst['launch_time'] = ltime - inst['mac_address'] = utils.generate_mac() - address = self.network.allocate_ip( - inst['user_id'], - inst['project_id'], - mac=inst['mac_address']) - inst['private_dns_name'] = str(address) - inst['bridge_name'] = network.BridgedNetwork.get_network_for_project( - inst['user_id'], - inst['project_id'], - 'default')['bridge_name'] - # key_data, 
key_name, ami_launch_index - # TODO(todd): key data or root password - inst.save() - return inst diff --git a/nova/api/rackspace/controllers/sharedipgroups.py b/nova/api/rackspace/controllers/sharedipgroups.py deleted file mode 100644 index 986f11434..000000000 --- a/nova/api/rackspace/controllers/sharedipgroups.py +++ /dev/null @@ -1,18 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -class Controller(object): pass diff --git a/nova/api/rackspace/flavors.py b/nova/api/rackspace/flavors.py new file mode 100644 index 000000000..986f11434 --- /dev/null +++ b/nova/api/rackspace/flavors.py @@ -0,0 +1,18 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +class Controller(object): pass diff --git a/nova/api/rackspace/images.py b/nova/api/rackspace/images.py new file mode 100644 index 000000000..986f11434 --- /dev/null +++ b/nova/api/rackspace/images.py @@ -0,0 +1,18 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +class Controller(object): pass diff --git a/nova/api/rackspace/servers.py b/nova/api/rackspace/servers.py new file mode 100644 index 000000000..25d1fe9c8 --- /dev/null +++ b/nova/api/rackspace/servers.py @@ -0,0 +1,83 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from nova import rpc +from nova.compute import model as compute +from nova.api.rackspace import base + + +class Controller(base.Controller): + entity_name = 'servers' + + def index(self, **kwargs): + instances = [] + for inst in compute.InstanceDirectory().all: + instances.append(instance_details(inst)) + + def show(self, **kwargs): + instance_id = kwargs['id'] + return compute.InstanceDirectory().get(instance_id) + + def delete(self, **kwargs): + instance_id = kwargs['id'] + instance = compute.InstanceDirectory().get(instance_id) + if not instance: + raise ServerNotFound("The requested server was not found") + instance.destroy() + return True + + def create(self, **kwargs): + inst = self.build_server_instance(kwargs['server']) + rpc.cast( + FLAGS.compute_topic, { + "method": "run_instance", + "args": {"instance_id": inst.instance_id}}) + + def update(self, **kwargs): + instance_id = kwargs['id'] + instance = compute.InstanceDirectory().get(instance_id) + if not instance: + raise ServerNotFound("The requested server was not found") + instance.update(kwargs['server']) + instance.save() + + def build_server_instance(self, env): + """Build instance data structure and save it to the data store.""" + reservation = utils.generate_uid('r') + ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) + inst = self.instdir.new() + inst['name'] = env['server']['name'] + inst['image_id'] = env['server']['imageId'] + inst['instance_type'] = env['server']['flavorId'] + inst['user_id'] = env['user']['id'] + inst['project_id'] = env['project']['id'] + inst['reservation_id'] = reservation + inst['launch_time'] = ltime + inst['mac_address'] = utils.generate_mac() + address = self.network.allocate_ip( + inst['user_id'], + inst['project_id'], + mac=inst['mac_address']) + inst['private_dns_name'] = str(address) + inst['bridge_name'] = network.BridgedNetwork.get_network_for_project( + inst['user_id'], + inst['project_id'], + 'default')['bridge_name'] + # key_data, key_name, 
ami_launch_index + # TODO(todd): key data or root password + inst.save() + return inst diff --git a/nova/api/rackspace/sharedipgroups.py b/nova/api/rackspace/sharedipgroups.py new file mode 100644 index 000000000..986f11434 --- /dev/null +++ b/nova/api/rackspace/sharedipgroups.py @@ -0,0 +1,18 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +class Controller(object): pass -- cgit From 7cd16b5754a38257d6b492bc29e6f99f2537f11a Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 18 Aug 2010 12:09:29 -0400 Subject: Missed one --- pylintrc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pylintrc b/pylintrc index 6c799c7ea..943eeac36 100644 --- a/pylintrc +++ b/pylintrc @@ -1,9 +1,9 @@ [Messages Control] -disable=C0103 +disable-msg=C0103 # TODOs in code comments are fine... 
-disable=W0511 +disable-msg=W0511 # *args and **kwargs are fine -disable=W0142 +disable-msg=W0142 [Basic] # Variables can be 1 to 31 characters long, with -- cgit From 7e403e381612e5678aa8f2b9e714d472ba4b3ef0 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Wed, 18 Aug 2010 22:19:39 +0100 Subject: Fix to better reflect (my believed intent) as to the meaning of error_ok (ignore stderr vs accept failure) --- nova/volume/service.py | 10 +++++----- tools/install_venv.py | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/nova/volume/service.py b/nova/volume/service.py index bf803eaf6..be62f621d 100644 --- a/nova/volume/service.py +++ b/nova/volume/service.py @@ -249,14 +249,14 @@ class Volume(datastore.BasicModel): "sudo lvcreate -L %s -n %s %s" % (sizestr, self['volume_id'], FLAGS.volume_group), - check_exit_code=True) + terminate_on_stderr=False) @defer.inlineCallbacks def _delete_lv(self): yield process.simple_execute( "sudo lvremove -f %s/%s" % (FLAGS.volume_group, self['volume_id']), - check_exit_code=True) + terminate_on_stderr=False) @property def __devices_key(self): @@ -285,7 +285,7 @@ class Volume(datastore.BasicModel): FLAGS.aoe_eth_dev, FLAGS.volume_group, self['volume_id']), - check_exit_code=True) + terminate_on_stderr=False) @defer.inlineCallbacks def _remove_export(self): @@ -299,11 +299,11 @@ class Volume(datastore.BasicModel): yield process.simple_execute( "sudo vblade-persist stop %s %s" % (self['shelf_id'], self['blade_id']), - check_exit_code=True) + terminate_on_stderr=False) yield process.simple_execute( "sudo vblade-persist destroy %s %s" % (self['shelf_id'], self['blade_id']), - check_exit_code=True) + terminate_on_stderr=False) class FakeVolume(Volume): diff --git a/tools/install_venv.py b/tools/install_venv.py index a9154fc33..1f0fa3cc7 100644 --- a/tools/install_venv.py +++ b/tools/install_venv.py @@ -37,7 +37,7 @@ def die(message, *args): sys.exit(1) -def run_command(cmd, redirect_output=True, 
check_exit_code=False): +def run_command(cmd, redirect_output=True, check_exit_code=True): """ Runs a command in an out-of-process shell, returning the output of that command. Working directory is ROOT. -- cgit From e5a448a616173cd391aaf458f5e0e5ff94a42c89 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Wed, 18 Aug 2010 22:33:11 +0100 Subject: Fix unit test bug this uncovered: don't release_ip that we haven't got from issue_ip --- nova/tests/network_unittest.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index 993bfacc2..34b68f1ed 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -166,7 +166,6 @@ class NetworkTestCase(test.TrialTestCase): release_ip(mac3, address3, hostname, net.bridge_name) net = model.get_project_network(self.projects[0].id, "default") self.service.deallocate_fixed_ip(firstaddress) - release_ip(mac, firstaddress, hostname, net.bridge_name) def test_vpn_ip_and_port_looks_valid(self): """Ensure the vpn ip and port are reasonable""" -- cgit From 02592d584cc21e536574d20b01d8dbf82474bcd3 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Wed, 18 Aug 2010 15:00:20 -0700 Subject: Updated the tests to use webob, removed the 'called' thing and just use return values instead. --- nova/api/test.py | 43 ++++++++++++---------------- nova/wsgi_test.py | 83 +++++++++++++++---------------------------------------- 2 files changed, 40 insertions(+), 86 deletions(-) diff --git a/nova/api/test.py b/nova/api/test.py index 09f79c02e..51b114b8e 100644 --- a/nova/api/test.py +++ b/nova/api/test.py @@ -22,49 +22,40 @@ Test for the root WSGI middleware for all API controllers. 
import unittest import stubout +import webob +import webob.dec from nova import api -from nova import wsgi_test class Test(unittest.TestCase): def setUp(self): # pylint: disable-msg=C0103 - self.called = False self.stubs = stubout.StubOutForTesting() def tearDown(self): # pylint: disable-msg=C0103 self.stubs.UnsetAll() def test_rackspace(self): - self.stubs.Set(api.rackspace, 'API', get_api_stub(self)) - api.API()(wsgi_test.get_environ({'PATH_INFO': '/v1.0/cloud'}), - wsgi_test.start_response) - self.assertTrue(self.called) + self.stubs.Set(api.rackspace, 'API', APIStub) + result = webob.Request.blank('/v1.0/cloud').get_response(api.API()) + self.assertEqual(result.body, "/cloud") def test_ec2(self): - self.stubs.Set(api.ec2, 'API', get_api_stub(self)) - api.API()(wsgi_test.get_environ({'PATH_INFO': '/ec2/cloud'}), - wsgi_test.start_response) - self.assertTrue(self.called) + self.stubs.Set(api.ec2, 'API', APIStub) + result = webob.Request.blank('/ec2/cloud').get_response(api.API()) + self.assertEqual(result.body, "/cloud") def test_not_found(self): - self.stubs.Set(api.ec2, 'API', get_api_stub(self)) - self.stubs.Set(api.rackspace, 'API', get_api_stub(self)) - api.API()(wsgi_test.get_environ({'PATH_INFO': '/'}), - wsgi_test.start_response) - self.assertFalse(self.called) + self.stubs.Set(api.ec2, 'API', APIStub) + self.stubs.Set(api.rackspace, 'API', APIStub) + result = webob.Request.blank('/test/cloud').get_response(api.API()) + self.assertNotEqual(result.body, "/cloud") -def get_api_stub(test_object): - """Get a stub class that verifies next part of the request.""" +class APIStub(object): + """Class to verify request and mark it was called.""" - class APIStub(object): - """Class to verify request and mark it was called.""" - test = test_object - - def __call__(self, environ, start_response): - self.test.assertEqual(environ['PATH_INFO'], '/cloud') - self.test.called = True - - return APIStub + @webob.dec.wsgify + def __call__(self, req): + return req.path_info 
diff --git a/nova/wsgi_test.py b/nova/wsgi_test.py index 02bf067d6..786dc1bce 100644 --- a/nova/wsgi_test.py +++ b/nova/wsgi_test.py @@ -24,41 +24,34 @@ Test WSGI basics and provide some helper functions for other WSGI tests. import unittest import routes +import webob from nova import wsgi class Test(unittest.TestCase): - def setUp(self): # pylint: disable-msg=C0103 - self.called = False - def test_debug(self): class Application(wsgi.Application): """Dummy application to test debug.""" - test = self - def __call__(self, environ, test_start_response): - test_start_response("200", [("X-Test", "checking")]) - self.test.called = True - return ['Test response'] + def __call__(self, environ, start_response): + start_response("200", [("X-Test", "checking")]) + return ['Test result'] - app = wsgi.Debug(Application())(get_environ(), start_response) - self.assertTrue(self.called) - for _ in app: - pass + application = wsgi.Debug(Application()) + result = webob.Request.blank('/').get_response(application) + self.assertEqual(result.body, "Test result") def test_router(self): class Application(wsgi.Application): """Test application to call from router.""" - test = self - def __call__(self, environ, test_start_response): - test_start_response("200", []) - self.test.called = True - return [] + def __call__(self, environ, start_response): + start_response("200", []) + return ['Router result'] class Router(wsgi.Router): """Test router.""" @@ -68,11 +61,10 @@ class Test(unittest.TestCase): mapper.connect("/test", controller=Application()) super(Router, self).__init__(mapper) - Router()(get_environ({'PATH_INFO': '/test'}), start_response) - self.assertTrue(self.called) - self.called = False - Router()(get_environ({'PATH_INFO': '/bad'}), start_response) - self.assertFalse(self.called) + result = webob.Request.blank('/test').get_response(Router()) + self.assertEqual(result.body, "Router result") + result = webob.Request.blank('/bad').get_response(Router()) + 
self.assertNotEqual(result.body, "Router result") def test_controller(self): @@ -80,11 +72,11 @@ class Test(unittest.TestCase): """Test controller to call from router.""" test = self - def show(self, **kwargs): - """Mark that this has been called.""" - self.test.called = True - self.test.assertEqual(kwargs['id'], '123') - return "Test" + def show(self, req, id): # pylint: disable-msg=W0622,C0103 + """Default action called for requests with an ID.""" + self.test.assertEqual(req.path_info, '/tests/123') + self.test.assertEqual(id, '123') + return id class Router(wsgi.Router): """Test router.""" @@ -94,40 +86,11 @@ class Test(unittest.TestCase): mapper.resource("test", "tests", controller=Controller()) super(Router, self).__init__(mapper) - Router()(get_environ({'PATH_INFO': '/tests/123'}), start_response) - self.assertTrue(self.called) - self.called = False - Router()(get_environ({'PATH_INFO': '/test/123'}), start_response) - self.assertFalse(self.called) + result = webob.Request.blank('/tests/123').get_response(Router()) + self.assertEqual(result.body, "123") + result = webob.Request.blank('/test/123').get_response(Router()) + self.assertNotEqual(result.body, "123") def test_serializer(self): # TODO(eday): Placeholder for serializer testing. 
pass - - -def get_environ(overwrite={}): # pylint: disable-msg=W0102 - """Get a WSGI environment, overwriting any entries given.""" - environ = {'SERVER_PROTOCOL': 'HTTP/1.1', - 'GATEWAY_INTERFACE': 'CGI/1.1', - 'wsgi.version': (1, 0), - 'SERVER_PORT': '443', - 'SERVER_NAME': '127.0.0.1', - 'REMOTE_ADDR': '127.0.0.1', - 'wsgi.run_once': False, - 'wsgi.errors': None, - 'wsgi.multiprocess': False, - 'SCRIPT_NAME': '', - 'wsgi.url_scheme': 'https', - 'wsgi.input': None, - 'REQUEST_METHOD': 'GET', - 'PATH_INFO': '/', - 'CONTENT_TYPE': 'text/plain', - 'wsgi.multithread': True, - 'QUERY_STRING': '', - 'eventlet.input': None} - return dict(environ, **overwrite) - - -def start_response(_status, _headers): - """Dummy start_response to use with WSGI tests.""" - pass -- cgit From bde9618560665392b00dd320b22804020d411b8a Mon Sep 17 00:00:00 2001 From: Eric Day Date: Wed, 18 Aug 2010 17:38:00 -0700 Subject: Added '-' as possible charater in module rgx. --- pylintrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pylintrc b/pylintrc index 36cc337e5..334d49f8e 100644 --- a/pylintrc +++ b/pylintrc @@ -13,7 +13,7 @@ variable-rgx=[a-z_][a-z0-9_]{0,30}$ method-rgx=[a-z_][a-z0-9_]{2,50}$ # Module names matching nova-* are ok (files in bin/) -module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(nova-[a-z0-9_]+))$ +module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(nova-[a-z0-9_-]+))$ # Don't require docstrings on tests. no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$ -- cgit From b8747fb38eb1234744cdda85cb20bd27cd7fa9e8 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Wed, 18 Aug 2010 17:39:12 -0700 Subject: More bin/ pep8/pylint cleanup. 
--- bin/nova-compute | 2 +- bin/nova-dhcpbridge | 14 +++++++------- bin/nova-import-canonical-imagestore | 4 ++-- bin/nova-instancemonitor | 5 +++-- bin/nova-manage | 6 +++--- bin/nova-network | 1 + bin/nova-objectstore | 2 +- bin/nova-volume | 2 +- 8 files changed, 19 insertions(+), 17 deletions(-) diff --git a/bin/nova-compute b/bin/nova-compute index e0c12354f..ed9a55565 100755 --- a/bin/nova-compute +++ b/bin/nova-compute @@ -29,4 +29,4 @@ if __name__ == '__main__': twistd.serve(__file__) if __name__ == '__builtin__': - application = service.ComputeService.create() + application = service.ComputeService.create() # pylint: disable-msg=C0103 diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index f70a4482c..1f2ed4f89 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -40,29 +40,29 @@ from nova.network import service FLAGS = flags.FLAGS -def add_lease(_mac, ip, _hostname, _interface): +def add_lease(_mac, ip_address, _hostname, _interface): """Set the IP that was assigned by the DHCP server.""" if FLAGS.fake_rabbit: - service.VlanNetworkService().lease_ip(ip) + service.VlanNetworkService().lease_ip(ip_address) else: rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name), {"method": "lease_ip", - "args": {"fixed_ip": ip}}) + "args": {"fixed_ip": ip_address}}) -def old_lease(_mac, _ip, _hostname, _interface): +def old_lease(_mac, _ip_address, _hostname, _interface): """Do nothing, just an old lease update.""" logging.debug("Adopted old lease or got a change of mac/hostname") -def del_lease(_mac, ip, _hostname, _interface): +def del_lease(_mac, ip_address, _hostname, _interface): """Called when a lease expires.""" if FLAGS.fake_rabbit: - service.VlanNetworkService().release_ip(ip) + service.VlanNetworkService().release_ip(ip_address) else: rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name), {"method": "release_ip", - "args": {"fixed_ip": ip}}) + "args": {"fixed_ip": ip_address}}) def init_leases(interface): diff --git 
a/bin/nova-import-canonical-imagestore b/bin/nova-import-canonical-imagestore index 5165109b2..e6931d9db 100755 --- a/bin/nova-import-canonical-imagestore +++ b/bin/nova-import-canonical-imagestore @@ -35,12 +35,12 @@ from nova.objectstore import image FLAGS = flags.FLAGS -api_url = 'https://imagestore.canonical.com/api/dashboard' +API_URL = 'https://imagestore.canonical.com/api/dashboard' def get_images(): """Get a list of the images from the imagestore URL.""" - images = json.load(urllib2.urlopen(api_url))['images'] + images = json.load(urllib2.urlopen(API_URL))['images'] images = [img for img in images if img['title'].find('amd64') > -1] return images diff --git a/bin/nova-instancemonitor b/bin/nova-instancemonitor index 911fb6f42..fbac58889 100755 --- a/bin/nova-instancemonitor +++ b/bin/nova-instancemonitor @@ -35,9 +35,10 @@ if __name__ == '__main__': if __name__ == '__builtin__': logging.warn('Starting instance monitor') - m = monitor.InstanceMonitor() + # pylint: disable-msg=C0103 + monitor = monitor.InstanceMonitor() # This is the parent service that twistd will be looking for when it # parses this file, return it so that we can get it into globals below application = service.Application('nova-instancemonitor') - m.setServiceParent(application) + monitor.setServiceParent(application) diff --git a/bin/nova-manage b/bin/nova-manage index 071436b13..33141a49e 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -211,7 +211,7 @@ class ProjectCommands(object): f.write(zip_file) -categories = [ +CATEGORIES = [ ('user', UserCommands), ('project', ProjectCommands), ('role', RoleCommands), @@ -258,11 +258,11 @@ def main(): if len(argv) < 1: print script_name + " category action []" print "Available categories:" - for k, _ in categories: + for k, _ in CATEGORIES: print "\t%s" % k sys.exit(2) category = argv.pop(0) - matches = lazy_match(category, categories) + matches = lazy_match(category, CATEGORIES) # instantiate the command group object category, fn = 
matches[0] command_object = fn() diff --git a/bin/nova-network b/bin/nova-network index ba9063f56..5753aafbe 100755 --- a/bin/nova-network +++ b/bin/nova-network @@ -33,4 +33,5 @@ if __name__ == '__main__': twistd.serve(__file__) if __name__ == '__builtin__': + # pylint: disable-msg=C0103 application = service.type_to_class(FLAGS.network_type).create() diff --git a/bin/nova-objectstore b/bin/nova-objectstore index 02f2bcb48..afcf13e24 100755 --- a/bin/nova-objectstore +++ b/bin/nova-objectstore @@ -35,4 +35,4 @@ if __name__ == '__main__': if __name__ == '__builtin__': utils.default_flagfile() - application = handler.get_application() + application = handler.get_application() # pylint: disable-msg=C0103 diff --git a/bin/nova-volume b/bin/nova-volume index f7a8fad37..8ef006ebc 100755 --- a/bin/nova-volume +++ b/bin/nova-volume @@ -29,4 +29,4 @@ if __name__ == '__main__': twistd.serve(__file__) if __name__ == '__builtin__': - application = service.VolumeService.create() + application = service.VolumeService.create() # pylint: disable-msg=C0103 -- cgit From 47e98cdae2a6233cb475c34207758a29c0ef7a4c Mon Sep 17 00:00:00 2001 From: Eric Day Date: Wed, 18 Aug 2010 18:25:16 -0700 Subject: Removed old cloud_topic queue setup, it is no longer used. 
--- bin/nova-api | 8 -------- nova/endpoint/cloud.py | 1 - nova/flags.py | 1 - nova/tests/cloud_unittest.py | 4 ---- 4 files changed, 14 deletions(-) diff --git a/bin/nova-api b/bin/nova-api index 13baf22a7..a3ad5a0e1 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -26,7 +26,6 @@ from tornado import httpserver from tornado import ioloop from nova import flags -from nova import rpc from nova import server from nova import utils from nova.endpoint import admin @@ -43,14 +42,7 @@ def main(_argv): 'Admin': admin.AdminController()} _app = api.APIServerApplication(controllers) - conn = rpc.Connection.instance() - consumer = rpc.AdapterConsumer(connection=conn, - topic=FLAGS.cloud_topic, - proxy=controllers['Cloud']) - io_inst = ioloop.IOLoop.instance() - _injected = consumer.attach_to_tornado(io_inst) - http_server = httpserver.HTTPServer(_app) http_server.listen(FLAGS.cc_port) logging.debug('Started HTTP server on %s', FLAGS.cc_port) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 30634429d..8e2beb1e3 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -45,7 +45,6 @@ from nova.volume import service FLAGS = flags.FLAGS -flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') def _gen_key(user_id, key_name): diff --git a/nova/flags.py b/nova/flags.py index e3feb252d..f46017f77 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -168,7 +168,6 @@ def DECLARE(name, module_string, flag_values=FLAGS): DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake') DEFINE_integer('s3_port', 3333, 's3 port') DEFINE_string('s3_host', '127.0.0.1', 's3 host') -#DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on') DEFINE_string('volume_topic', 'volume', 'the topic volume nodes listen on') DEFINE_string('network_topic', 'network', 'the topic network nodes listen on') diff --git a/nova/tests/cloud_unittest.py 
b/nova/tests/cloud_unittest.py index 3501771cc..900ff5a97 100644 --- a/nova/tests/cloud_unittest.py +++ b/nova/tests/cloud_unittest.py @@ -47,10 +47,6 @@ class CloudTestCase(test.BaseTestCase): # set up our cloud self.cloud = cloud.CloudController() - self.cloud_consumer = rpc.AdapterConsumer(connection=self.conn, - topic=FLAGS.cloud_topic, - proxy=self.cloud) - self.injected.append(self.cloud_consumer.attach_to_tornado(self.ioloop)) # set up a service self.compute = service.ComputeService() -- cgit From 24c7080249113fc6c87a58d97405f5d32c6db5e2 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Wed, 18 Aug 2010 18:38:34 -0700 Subject: More pylintrc updates. --- pylintrc | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/pylintrc b/pylintrc index 334d49f8e..6702ca895 100644 --- a/pylintrc +++ b/pylintrc @@ -4,10 +4,12 @@ disable-msg=W0511,W0142 [Basic] -# Variables can be 1 to 31 characters long, with -# lowercase and underscores +# Variable names can be 1 to 31 characters long, with lowercase and underscores variable-rgx=[a-z_][a-z0-9_]{0,30}$ +# Argument names can be 2 to 31 characters long, with lowercase and underscores +argument-rgx=[a-z_][a-z0-9_]{1,30}$ + # Method names should be at least 3 characters long # and be lowecased with underscores method-rgx=[a-z_][a-z0-9_]{2,50}$ @@ -21,3 +23,4 @@ no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$ [Design] max-public-methods=100 min-public-methods=0 +max-args=6 -- cgit From 59c43ba5b8213e39f726acbe2b137998cae39a26 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Wed, 18 Aug 2010 22:14:34 -0700 Subject: Cleaned up pep8/pylint style issues in nova/auth. There are still a few pylint warnings in manager.py, but the patch is already fairly large. 
--- nova/auth/fakeldap.py | 37 +++++++++++++----------- nova/auth/ldapdriver.py | 62 ++++++++++++++++++++-------------------- nova/auth/manager.py | 76 ++++++++++++++++++++++++++----------------------- nova/auth/rbac.py | 38 +++++++++++++++++-------- nova/auth/signer.py | 51 ++++++++++++++++++++------------- 5 files changed, 149 insertions(+), 115 deletions(-) diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py index bc744fa01..bfc3433c5 100644 --- a/nova/auth/fakeldap.py +++ b/nova/auth/fakeldap.py @@ -30,20 +30,23 @@ from nova import datastore SCOPE_BASE = 0 SCOPE_ONELEVEL = 1 # not implemented -SCOPE_SUBTREE = 2 +SCOPE_SUBTREE = 2 MOD_ADD = 0 MOD_DELETE = 1 -class NO_SUCH_OBJECT(Exception): +class NO_SUCH_OBJECT(Exception): # pylint: disable-msg=C0103 + """Duplicate exception class from real LDAP module.""" pass -class OBJECT_CLASS_VIOLATION(Exception): +class OBJECT_CLASS_VIOLATION(Exception): # pylint: disable-msg=C0103 + """Duplicate exception class from real LDAP module.""" pass -def initialize(uri): +def initialize(_uri): + """Opens a fake connection with an LDAP server.""" return FakeLDAP() @@ -68,7 +71,7 @@ def _match_query(query, attrs): # cut off the ! 
and the nested parentheses return not _match_query(query[2:-1], attrs) - (k, sep, v) = inner.partition('=') + (k, _sep, v) = inner.partition('=') return _match(k, v, attrs) @@ -85,20 +88,20 @@ def _paren_groups(source): if source[pos] == ')': count -= 1 if count == 0: - result.append(source[start:pos+1]) + result.append(source[start:pos + 1]) return result -def _match(k, v, attrs): +def _match(key, value, attrs): """Match a given key and value against an attribute list.""" - if k not in attrs: + if key not in attrs: return False - if k != "objectclass": - return v in attrs[k] + if key != "objectclass": + return value in attrs[key] # it is an objectclass check, so check subclasses - values = _subs(v) - for value in values: - if value in attrs[k]: + values = _subs(value) + for v in values: + if v in attrs[key]: return True return False @@ -145,6 +148,7 @@ def _to_json(unencoded): class FakeLDAP(object): #TODO(vish): refactor this class to use a wrapper instead of accessing # redis directly + """Fake LDAP connection.""" def simple_bind_s(self, dn, password): """This method is ignored, but provided for compatibility.""" @@ -207,6 +211,7 @@ class FakeLDAP(object): # get the attributes from redis attrs = redis.hgetall(key) # turn the values from redis into lists + # pylint: disable-msg=E1103 attrs = dict([(k, _from_json(v)) for k, v in attrs.iteritems()]) # filter the objects by query @@ -215,12 +220,12 @@ class FakeLDAP(object): attrs = dict([(k, v) for k, v in attrs.iteritems() if not fields or k in fields]) objects.append((key[len(self.__redis_prefix):], attrs)) + # pylint: enable-msg=E1103 if objects == []: raise NO_SUCH_OBJECT() return objects @property - def __redis_prefix(self): + def __redis_prefix(self): # pylint: disable-msg=R0201 + """Get the prefix to use for all redis keys.""" return 'ldap:' - - diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 6bf7fcd1e..74ba011b5 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -34,7 
+34,7 @@ from nova import flags FLAGS = flags.FLAGS flags.DEFINE_string('ldap_url', 'ldap://localhost', 'Point this at your ldap server') -flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password') +flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password') flags.DEFINE_string('ldap_user_dn', 'cn=Manager,dc=example,dc=com', 'DN of admin user') flags.DEFINE_string('ldap_user_unit', 'Users', 'OID for Users') @@ -63,14 +63,18 @@ flags.DEFINE_string('ldap_developer', # to define a set interface for AuthDrivers. I'm delaying # creating this now because I'm expecting an auth refactor # in which we may want to change the interface a bit more. + + class LdapDriver(object): """Ldap Auth driver Defines enter and exit and therefore supports the with/as syntax. """ + def __init__(self): """Imports the LDAP module""" self.ldap = __import__('ldap') + self.conn = None def __enter__(self): """Creates the connection to LDAP""" @@ -78,7 +82,7 @@ class LdapDriver(object): self.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) return self - def __exit__(self, type, value, traceback): + def __exit__(self, exc_type, exc_value, traceback): """Destroys the connection to LDAP""" self.conn.unbind_s() return False @@ -123,11 +127,11 @@ class LdapDriver(object): def get_projects(self, uid=None): """Retrieve list of projects""" - filter = '(objectclass=novaProject)' + pattern = '(objectclass=novaProject)' if uid: - filter = "(&%s(member=%s))" % (filter, self.__uid_to_dn(uid)) + pattern = "(&%s(member=%s))" % (pattern, self.__uid_to_dn(uid)) attrs = self.__find_objects(FLAGS.ldap_project_subtree, - filter) + pattern) return [self.__to_project(attr) for attr in attrs] def create_user(self, name, access_key, secret_key, is_admin): @@ -194,8 +198,7 @@ class LdapDriver(object): ('cn', [name]), ('description', [description]), ('projectManager', [manager_dn]), - ('member', members) - ] + ('member', members)] self.conn.add_s('cn=%s,%s' % (name, FLAGS.ldap_project_subtree), 
attr) return self.__to_project(dict(attr)) @@ -287,7 +290,6 @@ class LdapDriver(object): def __key_pair_exists(self, uid, key_name): """Check if key pair exists""" - return self.get_user(uid) != None return self.get_key_pair(uid, key_name) != None def __project_exists(self, project_id): @@ -310,7 +312,7 @@ class LdapDriver(object): except self.ldap.NO_SUCH_OBJECT: return [] # just return the DNs - return [dn for dn, attributes in res] + return [dn for dn, _attributes in res] def __find_objects(self, dn, query=None, scope=None): """Find objects by query""" @@ -346,7 +348,8 @@ class LdapDriver(object): for key in keys: self.delete_key_pair(uid, key['name']) - def __role_to_dn(self, role, project_id=None): + @staticmethod + def __role_to_dn(role, project_id=None): """Convert role to corresponding dn""" if project_id == None: return FLAGS.__getitem__("ldap_%s" % role).value @@ -356,7 +359,7 @@ class LdapDriver(object): FLAGS.ldap_project_subtree) def __create_group(self, group_dn, name, uid, - description, member_uids = None): + description, member_uids=None): """Create a group""" if self.__group_exists(group_dn): raise exception.Duplicate("Group can't be created because " @@ -375,8 +378,7 @@ class LdapDriver(object): ('objectclass', ['groupOfNames']), ('cn', [name]), ('description', [description]), - ('member', members) - ] + ('member', members)] self.conn.add_s(group_dn, attr) def __is_in_group(self, uid, group_dn): @@ -402,9 +404,7 @@ class LdapDriver(object): if self.__is_in_group(uid, group_dn): raise exception.Duplicate("User %s is already a member of " "the group %s" % (uid, group_dn)) - attr = [ - (self.ldap.MOD_ADD, 'member', self.__uid_to_dn(uid)) - ] + attr = [(self.ldap.MOD_ADD, 'member', self.__uid_to_dn(uid))] self.conn.modify_s(group_dn, attr) def __remove_from_group(self, uid, group_dn): @@ -432,7 +432,7 @@ class LdapDriver(object): self.conn.modify_s(group_dn, attr) except self.ldap.OBJECT_CLASS_VIOLATION: logging.debug("Attempted to remove the last 
member of a group. " - "Deleting the group at %s instead." % group_dn ) + "Deleting the group at %s instead.", group_dn) self.__delete_group(group_dn) def __remove_from_all(self, uid): @@ -440,7 +440,6 @@ class LdapDriver(object): if not self.__user_exists(uid): raise exception.NotFound("User %s can't be removed from all " "because the user doesn't exist" % (uid,)) - dn = self.__uid_to_dn(uid) role_dns = self.__find_group_dns_with_member( FLAGS.role_project_subtree, uid) for role_dn in role_dns: @@ -448,7 +447,7 @@ class LdapDriver(object): project_dns = self.__find_group_dns_with_member( FLAGS.ldap_project_subtree, uid) for project_dn in project_dns: - self.__safe_remove_from_group(uid, role_dn) + self.__safe_remove_from_group(uid, project_dn) def __delete_group(self, group_dn): """Delete Group""" @@ -461,7 +460,8 @@ class LdapDriver(object): for role_dn in self.__find_role_dns(project_dn): self.__delete_group(role_dn) - def __to_user(self, attr): + @staticmethod + def __to_user(attr): """Convert ldap attributes to User object""" if attr == None: return None @@ -470,10 +470,10 @@ class LdapDriver(object): 'name': attr['cn'][0], 'access': attr['accessKey'][0], 'secret': attr['secretKey'][0], - 'admin': (attr['isAdmin'][0] == 'TRUE') - } + 'admin': (attr['isAdmin'][0] == 'TRUE')} - def __to_key_pair(self, owner, attr): + @staticmethod + def __to_key_pair(owner, attr): """Convert ldap attributes to KeyPair object""" if attr == None: return None @@ -482,8 +482,7 @@ class LdapDriver(object): 'name': attr['cn'][0], 'owner_id': owner, 'public_key': attr['sshPublicKey'][0], - 'fingerprint': attr['keyFingerprint'][0], - } + 'fingerprint': attr['keyFingerprint'][0]} def __to_project(self, attr): """Convert ldap attributes to Project object""" @@ -495,21 +494,22 @@ class LdapDriver(object): 'name': attr['cn'][0], 'project_manager_id': self.__dn_to_uid(attr['projectManager'][0]), 'description': attr.get('description', [None])[0], - 'member_ids': [self.__dn_to_uid(x) for x in 
member_dns] - } + 'member_ids': [self.__dn_to_uid(x) for x in member_dns]} - def __dn_to_uid(self, dn): + @staticmethod + def __dn_to_uid(dn): """Convert user dn to uid""" return dn.split(',')[0].split('=')[1] - def __uid_to_dn(self, dn): + @staticmethod + def __uid_to_dn(dn): """Convert uid to dn""" return 'uid=%s,%s' % (dn, FLAGS.ldap_user_subtree) class FakeLdapDriver(LdapDriver): """Fake Ldap Auth driver""" - def __init__(self): + + def __init__(self): # pylint: disable-msg=W0231 __import__('nova.auth.fakeldap') self.ldap = sys.modules['nova.auth.fakeldap'] - diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 80ee78896..284b29502 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -23,7 +23,7 @@ Nova authentication management import logging import os import shutil -import string +import string # pylint: disable-msg=W0402 import tempfile import uuid import zipfile @@ -194,12 +194,12 @@ class Project(AuthBase): @property def vpn_ip(self): - ip, port = AuthManager().get_project_vpn_data(self) + ip, _port = AuthManager().get_project_vpn_data(self) return ip @property def vpn_port(self): - ip, port = AuthManager().get_project_vpn_data(self) + _ip, port = AuthManager().get_project_vpn_data(self) return port def has_manager(self, user): @@ -221,11 +221,9 @@ class Project(AuthBase): return AuthManager().get_credentials(user, self) def __repr__(self): - return "Project('%s', '%s', '%s', '%s', %s)" % (self.id, - self.name, - self.project_manager_id, - self.description, - self.member_ids) + return "Project('%s', '%s', '%s', '%s', %s)" % \ + (self.id, self.name, self.project_manager_id, self.description, + self.member_ids) class AuthManager(object): @@ -297,7 +295,7 @@ class AuthManager(object): @return: User and project that the request represents. 
""" # TODO(vish): check for valid timestamp - (access_key, sep, project_id) = access.partition(':') + (access_key, _sep, project_id) = access.partition(':') logging.info('Looking up user: %r', access_key) user = self.get_user_from_access_key(access_key) @@ -320,7 +318,8 @@ class AuthManager(object): raise exception.NotFound('User %s is not a member of project %s' % (user.id, project.id)) if check_type == 's3': - expected_signature = signer.Signer(user.secret.encode()).s3_authorization(headers, verb, path) + sign = signer.Signer(user.secret.encode()) + expected_signature = sign.s3_authorization(headers, verb, path) logging.debug('user.secret: %s', user.secret) logging.debug('expected_signature: %s', expected_signature) logging.debug('signature: %s', signature) @@ -465,7 +464,8 @@ class AuthManager(object): with self.driver() as drv: drv.remove_role(User.safe_id(user), role, Project.safe_id(project)) - def get_roles(self, project_roles=True): + @staticmethod + def get_roles(project_roles=True): """Get list of allowed roles""" if project_roles: return list(set(FLAGS.allowed_roles) - set(FLAGS.global_roles)) @@ -518,10 +518,10 @@ class AuthManager(object): if member_users: member_users = [User.safe_id(u) for u in member_users] with self.driver() as drv: - project_dict = drv.create_project(name, - User.safe_id(manager_user), - description, - member_users) + project_dict = drv.create_project(name, + User.safe_id(manager_user), + description, + member_users) if project_dict: return Project(**project_dict) @@ -549,7 +549,8 @@ class AuthManager(object): return drv.remove_from_project(User.safe_id(user), Project.safe_id(project)) - def get_project_vpn_data(self, project): + @staticmethod + def get_project_vpn_data(project): """Gets vpn ip and port for project @type project: Project or project_id @@ -613,8 +614,10 @@ class AuthManager(object): @rtype: User @return: The new user. 
""" - if access == None: access = str(uuid.uuid4()) - if secret == None: secret = str(uuid.uuid4()) + if access == None: + access = str(uuid.uuid4()) + if secret == None: + secret = str(uuid.uuid4()) with self.driver() as drv: user_dict = drv.create_user(name, access, secret, admin) if user_dict: @@ -656,10 +659,10 @@ class AuthManager(object): def create_key_pair(self, user, key_name, public_key, fingerprint): """Creates a key pair for user""" with self.driver() as drv: - kp_dict = drv.create_key_pair(User.safe_id(user), - key_name, - public_key, - fingerprint) + kp_dict = drv.create_key_pair(User.safe_id(user), + key_name, + public_key, + fingerprint) if kp_dict: return KeyPair(**kp_dict) @@ -702,7 +705,7 @@ class AuthManager(object): network_data = vpn.NetworkData.lookup(pid) if network_data: - configfile = open(FLAGS.vpn_client_template,"r") + configfile = open(FLAGS.vpn_client_template, "r") s = string.Template(configfile.read()) configfile.close() config = s.substitute(keyfile=FLAGS.credential_key_file, @@ -717,10 +720,10 @@ class AuthManager(object): zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(user.id)) zippy.close() with open(zf, 'rb') as f: - buffer = f.read() + read_buffer = f.read() shutil.rmtree(tmpdir) - return buffer + return read_buffer def get_environment_rc(self, user, project=None): """Get credential zip for user in project""" @@ -731,18 +734,18 @@ class AuthManager(object): pid = Project.safe_id(project) return self.__generate_rc(user.access, user.secret, pid) - def __generate_rc(self, access, secret, pid): + @staticmethod + def __generate_rc(access, secret, pid): """Generate rc file for user""" rc = open(FLAGS.credentials_template).read() - rc = rc % { 'access': access, - 'project': pid, - 'secret': secret, - 'ec2': FLAGS.ec2_url, - 's3': 'http://%s:%s' % (FLAGS.s3_host, FLAGS.s3_port), - 'nova': FLAGS.ca_file, - 'cert': FLAGS.credential_cert_file, - 'key': FLAGS.credential_key_file, - } + rc = rc % {'access': access, + 'project': pid, + 
'secret': secret, + 'ec2': FLAGS.ec2_url, + 's3': 'http://%s:%s' % (FLAGS.s3_host, FLAGS.s3_port), + 'nova': FLAGS.ca_file, + 'cert': FLAGS.credential_cert_file, + 'key': FLAGS.credential_key_file} return rc def _generate_x509_cert(self, uid, pid): @@ -753,6 +756,7 @@ class AuthManager(object): signed_cert = crypto.sign_csr(csr, pid) return (private_key, signed_cert) - def __cert_subject(self, uid): + @staticmethod + def __cert_subject(uid): """Helper to generate cert subject""" return FLAGS.credential_cert_subject % (uid, utils.isotime()) diff --git a/nova/auth/rbac.py b/nova/auth/rbac.py index 1446e4e27..d157f44b3 100644 --- a/nova/auth/rbac.py +++ b/nova/auth/rbac.py @@ -16,40 +16,54 @@ # License for the specific language governing permissions and limitations # under the License. +"""Role-based access control decorators to use fpr wrapping other +methods with.""" + from nova import exception -from nova.auth import manager def allow(*roles): - def wrap(f): - def wrapped_f(self, context, *args, **kwargs): + """Allow the given roles access the wrapped function.""" + + def wrap(func): # pylint: disable-msg=C0111 + + def wrapped_func(self, context, *args, + **kwargs): # pylint: disable-msg=C0111 if context.user.is_superuser(): - return f(self, context, *args, **kwargs) + return func(self, context, *args, **kwargs) for role in roles: if __matches_role(context, role): - return f(self, context, *args, **kwargs) + return func(self, context, *args, **kwargs) raise exception.NotAuthorized() - return wrapped_f + + return wrapped_func + return wrap def deny(*roles): - def wrap(f): - def wrapped_f(self, context, *args, **kwargs): + """Deny the given roles access the wrapped function.""" + + def wrap(func): # pylint: disable-msg=C0111 + + def wrapped_func(self, context, *args, + **kwargs): # pylint: disable-msg=C0111 if context.user.is_superuser(): - return f(self, context, *args, **kwargs) + return func(self, context, *args, **kwargs) for role in roles: if 
__matches_role(context, role): raise exception.NotAuthorized() - return f(self, context, *args, **kwargs) - return wrapped_f + return func(self, context, *args, **kwargs) + + return wrapped_func + return wrap def __matches_role(context, role): + """Check if a role is allowed.""" if role == 'all': return True if role == 'none': return False return context.project.has_role(context.user.id, role) - diff --git a/nova/auth/signer.py b/nova/auth/signer.py index 8334806d2..f7d29f534 100644 --- a/nova/auth/signer.py +++ b/nova/auth/signer.py @@ -50,15 +50,15 @@ import logging import urllib # NOTE(vish): for new boto -import boto +import boto # NOTE(vish): for old boto -import boto.utils +import boto.utils from nova.exception import Error class Signer(object): - """ hacked up code from boto/connection.py """ + """Hacked up code from boto/connection.py""" def __init__(self, secret_key): self.hmac = hmac.new(secret_key, digestmod=hashlib.sha1) @@ -66,22 +66,27 @@ class Signer(object): self.hmac_256 = hmac.new(secret_key, digestmod=hashlib.sha256) def s3_authorization(self, headers, verb, path): + """Generate S3 authorization string.""" c_string = boto.utils.canonical_string(verb, path, headers) - hmac = self.hmac.copy() - hmac.update(c_string) - b64_hmac = base64.encodestring(hmac.digest()).strip() + hmac_copy = self.hmac.copy() + hmac_copy.update(c_string) + b64_hmac = base64.encodestring(hmac_copy.digest()).strip() return b64_hmac def generate(self, params, verb, server_string, path): + """Generate auth string according to what SignatureVersion is given.""" if params['SignatureVersion'] == '0': return self._calc_signature_0(params) if params['SignatureVersion'] == '1': return self._calc_signature_1(params) if params['SignatureVersion'] == '2': return self._calc_signature_2(params, verb, server_string, path) - raise Error('Unknown Signature Version: %s' % self.SignatureVersion) + raise Error('Unknown Signature Version: %s' % + params['SignatureVersion']) - def 
_get_utf8_value(self, value): + @staticmethod + def _get_utf8_value(value): + """Get the UTF8-encoded version of a value.""" if not isinstance(value, str) and not isinstance(value, unicode): value = str(value) if isinstance(value, unicode): @@ -90,10 +95,11 @@ class Signer(object): return value def _calc_signature_0(self, params): + """Generate AWS signature version 0 string.""" s = params['Action'] + params['Timestamp'] self.hmac.update(s) keys = params.keys() - keys.sort(cmp = lambda x, y: cmp(x.lower(), y.lower())) + keys.sort(cmp=lambda x, y: cmp(x.lower(), y.lower())) pairs = [] for key in keys: val = self._get_utf8_value(params[key]) @@ -101,8 +107,9 @@ class Signer(object): return base64.b64encode(self.hmac.digest()) def _calc_signature_1(self, params): + """Generate AWS signature version 1 string.""" keys = params.keys() - keys.sort(cmp = lambda x, y: cmp(x.lower(), y.lower())) + keys.sort(cmp=lambda x, y: cmp(x.lower(), y.lower())) pairs = [] for key in keys: self.hmac.update(key) @@ -112,30 +119,34 @@ class Signer(object): return base64.b64encode(self.hmac.digest()) def _calc_signature_2(self, params, verb, server_string, path): + """Generate AWS signature version 2 string.""" logging.debug('using _calc_signature_2') string_to_sign = '%s\n%s\n%s\n' % (verb, server_string, path) if self.hmac_256: - hmac = self.hmac_256 + current_hmac = self.hmac_256 params['SignatureMethod'] = 'HmacSHA256' else: - hmac = self.hmac + current_hmac = self.hmac params['SignatureMethod'] = 'HmacSHA1' keys = params.keys() keys.sort() pairs = [] for key in keys: val = self._get_utf8_value(params[key]) - pairs.append(urllib.quote(key, safe='') + '=' + urllib.quote(val, safe='-_~')) + val = urllib.quote(val, safe='-_~') + pairs.append(urllib.quote(key, safe='') + '=' + val) qs = '&'.join(pairs) - logging.debug('query string: %s' % qs) + logging.debug('query string: %s', qs) string_to_sign += qs - logging.debug('string_to_sign: %s' % string_to_sign) - hmac.update(string_to_sign) - 
b64 = base64.b64encode(hmac.digest()) - logging.debug('len(b64)=%d' % len(b64)) - logging.debug('base64 encoded digest: %s' % b64) + logging.debug('string_to_sign: %s', string_to_sign) + current_hmac.update(string_to_sign) + b64 = base64.b64encode(current_hmac.digest()) + logging.debug('len(b64)=%d', len(b64)) + logging.debug('base64 encoded digest: %s', b64) return b64 if __name__ == '__main__': - print Signer('foo').generate({"SignatureMethod": 'HmacSHA256', 'SignatureVersion': '2'}, "get", "server", "/foo") + print Signer('foo').generate({'SignatureMethod': 'HmacSHA256', + 'SignatureVersion': '2'}, + 'get', 'server', '/foo') -- cgit From 567aa0ac862f0cb18786f20d949ab75bd800c3c7 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Thu, 19 Aug 2010 15:05:13 +0100 Subject: Remove whitespace to match style guide. --- nova/virt/xenapi.py | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index aed4c4fb5..f0bbbbe1f 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -149,7 +149,6 @@ class XenAPIConnection(object): yield self._call_xenapi('VM.start', vm_ref, False, False) logging.info('Spawning VM %s created %s.', instance.name, vm_ref) - @defer.inlineCallbacks def _create_vm(self, instance, kernel, ramdisk): """Create a VM record. Returns a Deferred that gives the new @@ -191,7 +190,6 @@ class XenAPIConnection(object): logging.debug('Created VM %s as %s.', instance.name, vm_ref) defer.returnValue(vm_ref) - @defer.inlineCallbacks def _create_vbd(self, vm_ref, vdi_ref, userdevice, bootable): """Create a VBD record. Returns a Deferred that gives the new @@ -216,7 +214,6 @@ class XenAPIConnection(object): vdi_ref) defer.returnValue(vbd_ref) - @defer.inlineCallbacks def _create_vif(self, vm_ref, network_ref, mac_address): """Create a VIF record. 
Returns a Deferred that gives the new @@ -238,7 +235,6 @@ class XenAPIConnection(object): vm_ref, network_ref) defer.returnValue(vif_ref) - @defer.inlineCallbacks def _find_network_with_bridge(self, bridge): expr = 'field "bridge" = "%s"' % bridge @@ -251,7 +247,6 @@ class XenAPIConnection(object): else: raise Exception('Found no network for bridge %s' % bridge) - @defer.inlineCallbacks def _fetch_image(self, image, user, project, use_sr): """use_sr: True to put the image as a VDI in an SR, False to place @@ -273,7 +268,6 @@ class XenAPIConnection(object): uuid = yield self._wait_for_task(task) defer.returnValue(uuid) - @defer.inlineCallbacks def reboot(self, instance): vm = yield self._lookup(instance.name) @@ -282,7 +276,6 @@ class XenAPIConnection(object): task = yield self._call_xenapi('Async.VM.clean_reboot', vm) yield self._wait_for_task(task) - @defer.inlineCallbacks def destroy(self, instance): vm = yield self._lookup(instance.name) @@ -291,7 +284,6 @@ class XenAPIConnection(object): task = yield self._call_xenapi('Async.VM.destroy', vm) yield self._wait_for_task(task) - def get_info(self, instance_id): vm = self._lookup_blocking(instance_id) if vm is None: @@ -303,12 +295,10 @@ class XenAPIConnection(object): 'num_cpu': rec['VCPUs_max'], 'cpu_time': 0} - @deferredToThread def _lookup(self, i): return self._lookup_blocking(i) - def _lookup_blocking(self, i): vms = self._conn.xenapi.VM.get_by_name_label(i) n = len(vms) @@ -319,7 +309,6 @@ class XenAPIConnection(object): else: return vms[0] - def _wait_for_task(self, task): """Return a Deferred that will give the result of the given task. 
The task is polled until it completes.""" @@ -327,7 +316,6 @@ class XenAPIConnection(object): reactor.callLater(0, self._poll_task, task, d) return d - @deferredToThread def _poll_task(self, task, deferred): """Poll the given XenAPI task, and fire the given Deferred if we @@ -352,7 +340,6 @@ class XenAPIConnection(object): logging.warn(exn) deferred.errback(exn) - @deferredToThread def _call_xenapi(self, method, *args): """Call the specified XenAPI method on a background thread. Returns @@ -362,7 +349,6 @@ class XenAPIConnection(object): f = f.__getattr__(m) return f(*args) - @deferredToThread def _async_call_plugin(self, plugin, fn, args): """Call Async.host.call_plugin on a background thread. Returns a @@ -371,7 +357,6 @@ class XenAPIConnection(object): self._conn.xenapi.Async.host.call_plugin, self._get_xenapi_host(), plugin, fn, args) - def _get_xenapi_host(self): return self._conn.xenapi.session.get_this_host(self._conn.handle) -- cgit From 4a23d5d9091823e9b4dc364383a14b566af80cd6 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Thu, 19 Aug 2010 15:12:46 +0100 Subject: Move deferredToThread into utils, as suggested by termie. 
--- nova/utils.py | 8 ++++++++ nova/virt/xenapi.py | 18 ++++++------------ 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/nova/utils.py b/nova/utils.py index e826f9b71..b0d07af79 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -29,6 +29,8 @@ import subprocess import socket import sys +from twisted.internet.threads import deferToThread + from nova import exception from nova import flags @@ -142,3 +144,9 @@ def isotime(at=None): def parse_isotime(timestr): return datetime.datetime.strptime(timestr, TIME_FORMAT) + + +def deferredToThread(f): + def g(*args, **kwargs): + return deferToThread(f, *args, **kwargs) + return g diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index f0bbbbe1f..b44ac383a 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -19,7 +19,7 @@ A connection to XenServer or Xen Cloud Platform. The concurrency model for this class is as follows: -All XenAPI calls are on a thread (using t.i.t.deferToThread, or the decorator +All XenAPI calls are on a thread (using t.i.t.deferToThread, via the decorator deferredToThread). They are remote calls, and so may hang for the usual reasons. They should not be allowed to block the reactor thread. 
@@ -41,10 +41,10 @@ import xmlrpclib from twisted.internet import defer from twisted.internet import reactor from twisted.internet import task -from twisted.internet.threads import deferToThread from nova import flags from nova import process +from nova import utils from nova.auth.manager import AuthManager from nova.compute import power_state from nova.virt import images @@ -97,12 +97,6 @@ def get_connection(_): return XenAPIConnection(url, username, password) -def deferredToThread(f): - def g(*args, **kwargs): - return deferToThread(f, *args, **kwargs) - return g - - class XenAPIConnection(object): def __init__(self, url, user, pw): self._conn = XenAPI.Session(url) @@ -295,7 +289,7 @@ class XenAPIConnection(object): 'num_cpu': rec['VCPUs_max'], 'cpu_time': 0} - @deferredToThread + @utils.deferredToThread def _lookup(self, i): return self._lookup_blocking(i) @@ -316,7 +310,7 @@ class XenAPIConnection(object): reactor.callLater(0, self._poll_task, task, d) return d - @deferredToThread + @utils.deferredToThread def _poll_task(self, task, deferred): """Poll the given XenAPI task, and fire the given Deferred if we get a result.""" @@ -340,7 +334,7 @@ class XenAPIConnection(object): logging.warn(exn) deferred.errback(exn) - @deferredToThread + @utils.deferredToThread def _call_xenapi(self, method, *args): """Call the specified XenAPI method on a background thread. Returns a Deferred for the result.""" @@ -349,7 +343,7 @@ class XenAPIConnection(object): f = f.__getattr__(m) return f(*args) - @deferredToThread + @utils.deferredToThread def _async_call_plugin(self, plugin, fn, args): """Call Async.host.call_plugin on a background thread. 
Returns a Deferred with the task reference.""" -- cgit From b651008e7e4f60f2ccb07497c27d866814156209 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Thu, 19 Aug 2010 16:05:27 -0400 Subject: Complete the Image API against a LocalImageService until Glance's API exists (at which point we'll make a GlanceImageService and make the choice of ImageService plugin configurable.) --- nova/api/rackspace/images.py | 83 +++++++++++++++++++++++++++++-------------- nova/api/rackspace/notes.txt | 23 ++++++++++++ nova/api/services/__init__.py | 0 nova/api/services/image.py | 72 +++++++++++++++++++++++++++++++++++++ 4 files changed, 151 insertions(+), 27 deletions(-) create mode 100644 nova/api/rackspace/notes.txt create mode 100644 nova/api/services/__init__.py create mode 100644 nova/api/services/image.py diff --git a/nova/api/rackspace/images.py b/nova/api/rackspace/images.py index 57c03894a..e29f737a5 100644 --- a/nova/api/rackspace/images.py +++ b/nova/api/rackspace/images.py @@ -15,12 +15,13 @@ # License for the specific language governing permissions and limitations # under the License. -from nova.endpoint.rackspace.controllers.base import BaseController -from nova.endpoint import images +from nova import datastore +from nova.api.rackspace import base +from nova.api.services.image import ImageService from webob import exc #TODO(gundlach): Serialize return values -class Controller(BaseController): +class Controller(base.Controller): _serialization_metadata = { 'application/xml': { @@ -31,34 +32,62 @@ class Controller(BaseController): } } + def __init__(self): + self._svc = ImageService.load() + self._id_xlator = RackspaceApiImageIdTranslator() + + def _to_rs_id(self, image_id): + """ + Convert an image id from the format of our ImageService strategy + to the Rackspace API format (an int). 
+ """ + strategy = self._svc.__class__.__name__ + return self._id_xlator.to_rs_id(strategy, image_id) + def index(self, req): - context = req.environ['nova.api_request_context'] - return images.list(context) + """Return all public images.""" + data = self._svc.list() + for img in data: + img['id'] = self._to_rs_id(img['id']) + return dict(images=result) def show(self, req, id): - context = req.environ['nova.api_request_context'] - return images.list(context, filter_list=[id]) + """Return data about the given image id.""" + img = self._svc.show(id) + img['id'] = self._to_rs_id(img['id']) + return dict(image=img) def delete(self, req, id): - context = req.environ['nova.api_request_context'] - # TODO(gundlach): make sure it's an image they may delete? - return images.deregister(context, id) + # Only public images are supported for now. + raise exc.HTTPNotFound() + + def create(self, req): + # Only public images are supported for now, so a request to + # make a backup of a server cannot be supproted. + raise exc.HTTPNotFound() + + def update(self, req, id): + # Users may not modify public images, and that's all that + # we support for now. + raise exc.HTTPNotFound() + + +class RackspaceApiImageIdTranslator(object): + """ + Converts Rackspace API image ids to and from the id format for a given + strategy. + """ - def create(self, **kwargs): - # TODO(gundlach): no idea how to hook this up. code below - # is from servers.py. - inst = self.build_server_instance(kwargs['server']) - rpc.cast( - FLAGS.compute_topic, { - "method": "run_instance", - "args": {"instance_id": inst.instance_id}}) + def __init__(self): + self._store = datastore.Redis.instance() - def update(self, **kwargs): - # TODO (gundlach): no idea how to hook this up. code below - # is from servers.py. 
- instance_id = kwargs['id'] - instance = compute.InstanceDirectory().get(instance_id) - if not instance: - raise ServerNotFound("The requested server was not found") - instance.update(kwargs['server']) - instance.save() + def to_rs_id(self, strategy_name, opaque_id): + """Convert an id from a strategy-specific one to a Rackspace one.""" + key = "rsapi.idstrategies.image.%s" % strategy_name + result = self._store.hget(key, str(opaque_id)) + if result: # we have a mapping from opaque to RS for this strategy + return int(result) + else: + nextid = self._store.incr("%s.lastid" % key) + self._store.hsetnx(key, str(opaque_id), nextid) + return nextid diff --git a/nova/api/rackspace/notes.txt b/nova/api/rackspace/notes.txt new file mode 100644 index 000000000..e133bf5ea --- /dev/null +++ b/nova/api/rackspace/notes.txt @@ -0,0 +1,23 @@ +We will need: + +ImageService +a service that can do crud on image information. not user-specific. opaque +image ids. + +GlanceImageService(ImageService): +image ids are URIs. + +LocalImageService(ImageService): +image ids are random strings. + +RackspaceAPITranslationStore: +translates RS server/images/flavor/etc ids into formats required +by a given ImageService strategy. + +api.rackspace.images.Controller: +uses an ImageService strategy behind the scenes to do its fetching; it just +converts int image id into a strategy-specific image id. + +who maintains the mapping from user to [images he owns]? nobody, because +we have no way of enforcing access to his images, without kryptex which +won't be in Austin. 
diff --git a/nova/api/services/__init__.py b/nova/api/services/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/nova/api/services/image.py b/nova/api/services/image.py new file mode 100644 index 000000000..c5ea15ba1 --- /dev/null +++ b/nova/api/services/image.py @@ -0,0 +1,72 @@ +import cPickle as pickle +import os.path +import string + +class ImageService(object): + """Provides storage and retrieval of disk image objects.""" + + @staticmethod + def load(): + """Factory method to return image service.""" + #TODO(gundlach): read from config. + class_ = LocalImageService + return class_() + + def index(self): + """ + Return a list of image data dicts. Each dict will contain an + id key whose value is an opaque image id. + """ + + def show(self, id): + """ + Returns a dict containing image data for the given opaque image id. + """ + + +class GlanceImageService(ImageService): + """Provides storage and retrieval of disk image objects within Glance.""" + # TODO(gundlach): once Glance has an API, build this. + pass + + +class LocalImageService(ImageService): + """Image service storing images to local disk.""" + + def __init__(self): + self._path = "/tmp/nova/images" + try: + os.makedirs(self._path) + except OSError: # exists + pass + + def _path_to(self, image_id=''): + return os.path.join(self._path, image_id) + + def _ids(self): + """The list of all image ids.""" + return os.path.listdir(self._path) + + def index(self): + return [ self.show(id) for id in self._ids() ] + + def show(self, id): + return pickle.load(open(self._path_to(id))) + + def create(self, data): + """ + Store the image data and return the new image id. + """ + id = ''.join(random.choice(string.letters) for _ in range(20)) + self.update(id, data) + return id + + def update(self, image_id, data): + """Replace the contents of the given image with the new data.""" + pickle.dump(data, open(self._path_to(image_id), 'w')) + + def delete(self, image_id): + """ + Delete the given image. 
Raises OSError if the image does not exist. + """ + os.unlink(self._path_to(image_id)) -- cgit From a39a155342ad5aa9d8c7b115fb6fe7498ef00f23 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 20 Aug 2010 10:08:05 -0700 Subject: small fixes to network --- nova/network/service.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/nova/network/service.py b/nova/network/service.py index d3aa1c46f..3dba0a9ef 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -26,7 +26,7 @@ from nova import flags from nova import service from nova import utils from nova.auth import manager -from nova.network import exception +from nova.network import exception as network_exception from nova.network import model from nova.network import vpn @@ -64,8 +64,7 @@ def type_to_class(network_type): def setup_compute_network(network_type, user_id, project_id, security_group): """Sets up the network on a compute host""" srv = type_to_class(network_type) - srv.setup_compute_network(network_type, - user_id, + srv.setup_compute_network(user_id, project_id, security_group) @@ -170,7 +169,7 @@ class FlatNetworkService(BaseNetworkService): redis.sadd('ips', fixed_ip) fixed_ip = redis.spop('ips') if not fixed_ip: - raise exception.NoMoreAddresses() + raise network_exception.NoMoreAddresses() # TODO(vish): some sort of dns handling for hostname should # probably be done here. 
return {'inject_network': True, -- cgit From 70112ea9941b92aa98e32c0c37f0208877953557 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 20 Aug 2010 13:05:46 -0700 Subject: fix concurrency issue with multiple instances getting the same ip --- nova/network/model.py | 39 +++++++++++++++++++++------------------ 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/nova/network/model.py b/nova/network/model.py index 6e4fcc47e..6c12836b7 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -231,7 +231,8 @@ class BaseNetwork(datastore.BasicModel): self.network_id = network_id self.network_str = network_str super(BaseNetwork, self).__init__() - self.save() + if self.is_new_record(): + self._create_assigned_set() @property def network(self): @@ -278,6 +279,16 @@ class BaseNetwork(datastore.BasicModel): """Remove a host from the datastore""" self.address_class(ip_address).destroy() + def _create_assigned_set(self): + for idx in range(self.num_bottom_reserved_ips, + len(self.network) - self.num_top_reserved_ips): + redis = datastore.Redis.instance() + redis.sadd(self._available_key, str(self.network[idx])) + + @property + def _available_key(self): + return 'available:%s' % self.identifier + @property def assigned(self): """Returns a list of all assigned addresses""" @@ -294,15 +305,6 @@ class BaseNetwork(datastore.BasicModel): return self.address_class(ip_address) return None - @property - def available(self): - """Returns a list of all available addresses in the network""" - for idx in range(self.num_bottom_reserved_ips, - len(self.network) - self.num_top_reserved_ips): - address = str(self.network[idx]) - if not address in self.assigned: - yield address - @property def num_bottom_reserved_ips(self): """Returns number of ips reserved at the bottom of the range""" @@ -315,13 +317,14 @@ class BaseNetwork(datastore.BasicModel): def allocate_ip(self, user_id, project_id, mac, hostname=None): """Allocates an ip to a mac address""" - for address in 
self.available: - logging.debug("Allocating IP %s to %s", address, project_id) - self._add_host(user_id, project_id, address, mac, hostname) - self.express(address=address) - return address - raise exception.NoMoreAddresses("Project %s with network %s" % - (project_id, str(self.network))) + address = datastore.Redis.instance().spop(self._available_key) + if not address: + raise exception.NoMoreAddresses("Project %s with network %s" % + (project_id, str(self.network))) + logging.debug("Allocating IP %s to %s", address, project_id) + self._add_host(user_id, project_id, address, mac, hostname) + self.express(address=address) + return address def lease_ip(self, ip_str): """Called when DHCP lease is activated""" @@ -342,6 +345,7 @@ class BaseNetwork(datastore.BasicModel): logging.debug("Releasing IP %s", ip_str) self._rem_host(ip_str) self.deexpress(address=ip_str) + datastore.Redis.instance().sadd(self._available_key, ip_str) def deallocate_ip(self, ip_str): """Deallocates an allocated ip""" @@ -400,7 +404,6 @@ class BridgedNetwork(BaseNetwork): def __init__(self, *args, **kwargs): super(BridgedNetwork, self).__init__(*args, **kwargs) self['bridge_dev'] = FLAGS.bridge_dev - self.save() def express(self, address=None): super(BridgedNetwork, self).express(address=address) -- cgit From d38f21e0fb382bd8f01cfbc79cb34ea8710cd639 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Mon, 23 Aug 2010 10:27:59 -0400 Subject: License --- nova/api/services/image.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/nova/api/services/image.py b/nova/api/services/image.py index c5ea15ba1..bda50fc66 100644 --- a/nova/api/services/image.py +++ b/nova/api/services/image.py @@ -1,3 +1,20 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + import cPickle as pickle import os.path import string -- cgit From e3727d6d88a0631d3b896c4fcdcfec05510dad36 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Mon, 23 Aug 2010 12:07:20 -0400 Subject: Support opaque id to rs int id as well --- nova/api/rackspace/images.py | 42 ++++++++++++++++++++++++++++++++---------- nova/api/services/image.py | 8 ++++---- 2 files changed, 36 insertions(+), 14 deletions(-) diff --git a/nova/api/rackspace/images.py b/nova/api/rackspace/images.py index e29f737a5..c9cc8e85d 100644 --- a/nova/api/rackspace/images.py +++ b/nova/api/rackspace/images.py @@ -44,18 +44,24 @@ class Controller(base.Controller): strategy = self._svc.__class__.__name__ return self._id_xlator.to_rs_id(strategy, image_id) + def _from_rs_id(self, rs_image_id): + """ + Convert an image id from the Rackspace API format (an int) to the + format of our ImageService strategy. 
+ """ + strategy = self._svc.__class__.__name__ + return self._id_xlator.from_rs_id(strategy, rs_image_id) + def index(self, req): """Return all public images.""" - data = self._svc.list() - for img in data: - img['id'] = self._to_rs_id(img['id']) - return dict(images=result) + data = dict((self._to_rs_id(id), val) + for id, val in self._svc.index().iteritems()) + return dict(images=data) def show(self, req, id): """Return data about the given image id.""" - img = self._svc.show(id) - img['id'] = self._to_rs_id(img['id']) - return dict(image=img) + opaque_id = self._from_rs_id(id) + return dict(image=self._svc.show(opaque_id)) def delete(self, req, id): # Only public images are supported for now. @@ -80,14 +86,30 @@ class RackspaceApiImageIdTranslator(object): def __init__(self): self._store = datastore.Redis.instance() + self._key_template = "rsapi.idstrategies.image.%s.%s" def to_rs_id(self, strategy_name, opaque_id): """Convert an id from a strategy-specific one to a Rackspace one.""" - key = "rsapi.idstrategies.image.%s" % strategy_name + key = self._key_template % (strategy_name, "fwd") result = self._store.hget(key, str(opaque_id)) if result: # we have a mapping from opaque to RS for this strategy return int(result) else: + # Store the mapping. nextid = self._store.incr("%s.lastid" % key) - self._store.hsetnx(key, str(opaque_id), nextid) - return nextid + if self._store.hsetnx(key, str(opaque_id), nextid): + # If someone else didn't beat us to it, store the reverse + # mapping as well. + key = self._key_template % (strategy_name, "rev") + self._store.hset(key, nextid, str(opaque_id)) + return nextid + else: + # Someone beat us to it; use their number instead, and + # discard nextid (which is OK -- we don't require that + # every int id be used.) 
+ return int(self._store.hget(key, str(opaque_id))) + + def from_rs_id(self, strategy_name, rs_id): + """Convert a Rackspace id to a strategy-specific one.""" + key = self._key_template % (strategy_name, "rev") + return self._store.hget(key, rs_id) diff --git a/nova/api/services/image.py b/nova/api/services/image.py index bda50fc66..11e19804a 100644 --- a/nova/api/services/image.py +++ b/nova/api/services/image.py @@ -17,6 +17,7 @@ import cPickle as pickle import os.path +import random import string class ImageService(object): @@ -31,8 +32,7 @@ class ImageService(object): def index(self): """ - Return a list of image data dicts. Each dict will contain an - id key whose value is an opaque image id. + Return a dict from opaque image id to image data. """ def show(self, id): @@ -62,10 +62,10 @@ class LocalImageService(ImageService): def _ids(self): """The list of all image ids.""" - return os.path.listdir(self._path) + return os.listdir(self._path) def index(self): - return [ self.show(id) for id in self._ids() ] + return dict((id, self.show(id)) for id in self._ids()) def show(self, id): return pickle.load(open(self._path_to(id))) -- cgit From 030d01fd10f7f65cdafbea49e04f3b6b147a7348 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Mon, 23 Aug 2010 12:46:29 -0400 Subject: Serialize properly --- nova/api/rackspace/base.py | 3 ++- nova/api/rackspace/images.py | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/nova/api/rackspace/base.py b/nova/api/rackspace/base.py index c85fd7b8e..b995d9acc 100644 --- a/nova/api/rackspace/base.py +++ b/nova/api/rackspace/base.py @@ -36,4 +36,5 @@ class Controller(wsgi.Controller): MIME types to information needed to serialize to that type. 
""" _metadata = getattr(type(self), "_serialization_metadata", {}) - return Serializer(request.environ, _metadata).to_content_type(data) + serializer = wsgi.Serializer(request.environ, _metadata) + return serializer.to_content_type(data) diff --git a/nova/api/rackspace/images.py b/nova/api/rackspace/images.py index c9cc8e85d..62e0b24c5 100644 --- a/nova/api/rackspace/images.py +++ b/nova/api/rackspace/images.py @@ -56,12 +56,12 @@ class Controller(base.Controller): """Return all public images.""" data = dict((self._to_rs_id(id), val) for id, val in self._svc.index().iteritems()) - return dict(images=data) + return self.serialize(dict(images=data), req) def show(self, req, id): """Return data about the given image id.""" opaque_id = self._from_rs_id(id) - return dict(image=self._svc.show(opaque_id)) + return self.serialize(dict(image=self._svc.show(opaque_id)), req) def delete(self, req, id): # Only public images are supported for now. -- cgit From a50a200bc2547439a3da17e695224d3d434e14dd Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Mon, 23 Aug 2010 12:55:57 -0400 Subject: Move serialize() to wsgi.Controller so __call__ can serialize() action return values if they are dicts. --- nova/api/rackspace/base.py | 10 ---------- nova/wsgi.py | 16 ++++++++++++++-- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/nova/api/rackspace/base.py b/nova/api/rackspace/base.py index b995d9acc..51841925e 100644 --- a/nova/api/rackspace/base.py +++ b/nova/api/rackspace/base.py @@ -28,13 +28,3 @@ class Controller(wsgi.Controller): return {cls.entity_name: cls.render(instance)} else: return { "TODO": "TODO" } - - def serialize(self, data, request): - """ - Serialize the given dict to the response type requested in request. - Uses self._serialization_metadata if it exists, which is a dict mapping - MIME types to information needed to serialize to that type. 
- """ - _metadata = getattr(type(self), "_serialization_metadata", {}) - serializer = wsgi.Serializer(request.environ, _metadata) - return serializer.to_content_type(data) diff --git a/nova/wsgi.py b/nova/wsgi.py index baf6cccd9..d52bf855d 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -196,7 +196,8 @@ class Controller(object): WSGI app that reads routing information supplied by RoutesMiddleware and calls the requested action method upon itself. All action methods must, in addition to their normal parameters, accept a 'req' argument - which is the incoming webob.Request. + which is the incoming webob.Request. They raise a webob.exc exception, + or return a dict which will be serialized by requested content type. """ @webob.dec.wsgify @@ -210,7 +211,18 @@ class Controller(object): del arg_dict['controller'] del arg_dict['action'] arg_dict['req'] = req - return method(**arg_dict) + result = method(**arg_dict) + return self._serialize(result, req) if type(result) is dict else result + + def _serialize(self, data, request): + """ + Serialize the given dict to the response type requested in request. + Uses self._serialization_metadata if it exists, which is a dict mapping + MIME types to information needed to serialize to that type. 
+ """ + _metadata = getattr(type(self), "_serialization_metadata", {}) + serializer = wsgi.Serializer(request.environ, _metadata) + return serializer.to_content_type(data) class Serializer(object): -- cgit From f5c03fdd78a3bb8233e465c7624ed1fdb8f400fe Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Mon, 23 Aug 2010 13:06:40 -0400 Subject: Don't serialize in Controller subclass now that wsgi.Controller handles it for us --- nova/api/rackspace/images.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/nova/api/rackspace/images.py b/nova/api/rackspace/images.py index 62e0b24c5..070100143 100644 --- a/nova/api/rackspace/images.py +++ b/nova/api/rackspace/images.py @@ -20,7 +20,6 @@ from nova.api.rackspace import base from nova.api.services.image import ImageService from webob import exc -#TODO(gundlach): Serialize return values class Controller(base.Controller): _serialization_metadata = { @@ -56,12 +55,12 @@ class Controller(base.Controller): """Return all public images.""" data = dict((self._to_rs_id(id), val) for id, val in self._svc.index().iteritems()) - return self.serialize(dict(images=data), req) + return dict(images=data) def show(self, req, id): """Return data about the given image id.""" opaque_id = self._from_rs_id(id) - return self.serialize(dict(image=self._svc.show(opaque_id)), req) + return dict(image=self._svc.show(opaque_id)) def delete(self, req, id): # Only public images are supported for now. -- cgit From c49c725e43cfbc9d90b5e9ebbf93a32e71c7e6a9 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Mon, 23 Aug 2010 13:07:43 -0400 Subject: Typo --- nova/wsgi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/wsgi.py b/nova/wsgi.py index d52bf855d..096d5843f 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -221,7 +221,7 @@ class Controller(object): MIME types to information needed to serialize to that type. 
""" _metadata = getattr(type(self), "_serialization_metadata", {}) - serializer = wsgi.Serializer(request.environ, _metadata) + serializer = Serializer(request.environ, _metadata) return serializer.to_content_type(data) -- cgit From 35a08780c41ece1b47b2ded98c061b103a400fea Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Mon, 23 Aug 2010 13:26:10 -0400 Subject: Get the output formatting correct. --- nova/api/rackspace/images.py | 9 ++++++--- nova/api/services/image.py | 3 ++- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/nova/api/rackspace/images.py b/nova/api/rackspace/images.py index 070100143..7d32fa099 100644 --- a/nova/api/rackspace/images.py +++ b/nova/api/rackspace/images.py @@ -53,14 +53,17 @@ class Controller(base.Controller): def index(self, req): """Return all public images.""" - data = dict((self._to_rs_id(id), val) - for id, val in self._svc.index().iteritems()) + data = self._svc.index() + for img in data: + img['id'] = self._to_rs_id(img['id']) return dict(images=data) def show(self, req, id): """Return data about the given image id.""" opaque_id = self._from_rs_id(id) - return dict(image=self._svc.show(opaque_id)) + img = self._svc.show(opaque_id) + img['id'] = id + return dict(image=img) def delete(self, req, id): # Only public images are supported for now. diff --git a/nova/api/services/image.py b/nova/api/services/image.py index 11e19804a..1a7a258b7 100644 --- a/nova/api/services/image.py +++ b/nova/api/services/image.py @@ -65,7 +65,7 @@ class LocalImageService(ImageService): return os.listdir(self._path) def index(self): - return dict((id, self.show(id)) for id in self._ids()) + return [ self.show(id) for id in self._ids() ] def show(self, id): return pickle.load(open(self._path_to(id))) @@ -75,6 +75,7 @@ class LocalImageService(ImageService): Store the image data and return the new image id. 
""" id = ''.join(random.choice(string.letters) for _ in range(20)) + data['id'] = id self.update(id, data) return id -- cgit From 41e2e91ccfb1409f1ea47d92a9d15f47ab37e65d Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Mon, 23 Aug 2010 16:43:25 -0400 Subject: Merge fail --- nova/api/rackspace/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/rackspace/base.py b/nova/api/rackspace/base.py index 51841925e..dd2c6543c 100644 --- a/nova/api/rackspace/base.py +++ b/nova/api/rackspace/base.py @@ -27,4 +27,4 @@ class Controller(wsgi.Controller): if isinstance(instance, list): return {cls.entity_name: cls.render(instance)} else: - return { "TODO": "TODO" } + return {"TODO": "TODO"} -- cgit From d94eec3d2995c97c38006e4d6177740375860f8f Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Tue, 24 Aug 2010 11:19:51 -0400 Subject: Style fixes --- nova/api/rackspace/images.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/nova/api/rackspace/images.py b/nova/api/rackspace/images.py index 7d32fa099..36a26688c 100644 --- a/nova/api/rackspace/images.py +++ b/nova/api/rackspace/images.py @@ -17,7 +17,7 @@ from nova import datastore from nova.api.rackspace import base -from nova.api.services.image import ImageService +from nova.api.services import image from webob import exc class Controller(base.Controller): @@ -32,28 +32,28 @@ class Controller(base.Controller): } def __init__(self): - self._svc = ImageService.load() - self._id_xlator = RackspaceApiImageIdTranslator() + self._service = image.ImageService.load() + self._id_translator = RackspaceAPIImageIdTranslator() def _to_rs_id(self, image_id): """ Convert an image id from the format of our ImageService strategy to the Rackspace API format (an int). 
""" - strategy = self._svc.__class__.__name__ - return self._id_xlator.to_rs_id(strategy, image_id) + strategy = self._service.__class__.__name__ + return self._id_translator.to_rs_id(strategy, image_id) def _from_rs_id(self, rs_image_id): """ Convert an image id from the Rackspace API format (an int) to the format of our ImageService strategy. """ - strategy = self._svc.__class__.__name__ - return self._id_xlator.from_rs_id(strategy, rs_image_id) + strategy = self._service.__class__.__name__ + return self._id_translator.from_rs_id(strategy, rs_image_id) def index(self, req): """Return all public images.""" - data = self._svc.index() + data = self._service.index() for img in data: img['id'] = self._to_rs_id(img['id']) return dict(images=data) @@ -61,7 +61,7 @@ class Controller(base.Controller): def show(self, req, id): """Return data about the given image id.""" opaque_id = self._from_rs_id(id) - img = self._svc.show(opaque_id) + img = self._service.show(opaque_id) img['id'] = id return dict(image=img) @@ -80,7 +80,7 @@ class Controller(base.Controller): raise exc.HTTPNotFound() -class RackspaceApiImageIdTranslator(object): +class RackspaceAPIImageIdTranslator(object): """ Converts Rackspace API image ids to and from the id format for a given strategy. 
-- cgit From 4d1b2539d2d2f39ca53e9383e317af76dbc71905 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Tue, 24 Aug 2010 13:37:18 -0400 Subject: OK, break out ternary operator (good to know that it slowed you down to read it) --- nova/wsgi.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/nova/wsgi.py b/nova/wsgi.py index 096d5843f..bec0a7b1c 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -212,7 +212,10 @@ class Controller(object): del arg_dict['action'] arg_dict['req'] = req result = method(**arg_dict) - return self._serialize(result, req) if type(result) is dict else result + if type(result) is dict: + return self._serialize(result, req) + else: + return result def _serialize(self, data, request): """ -- cgit From 09bc71460b976f28c7bc6a507006d6c7c12c5824 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Tue, 24 Aug 2010 16:16:41 -0400 Subject: Move imageservice to its own directory --- nova/api/rackspace/images.py | 4 +- nova/api/services/image.py | 90 -------------------------------------------- nova/image/service.py | 90 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 92 insertions(+), 92 deletions(-) delete mode 100644 nova/api/services/image.py create mode 100644 nova/image/service.py diff --git a/nova/api/rackspace/images.py b/nova/api/rackspace/images.py index 36a26688c..b7668a1e1 100644 --- a/nova/api/rackspace/images.py +++ b/nova/api/rackspace/images.py @@ -17,7 +17,7 @@ from nova import datastore from nova.api.rackspace import base -from nova.api.services import image +from nova import image from webob import exc class Controller(base.Controller): @@ -32,7 +32,7 @@ class Controller(base.Controller): } def __init__(self): - self._service = image.ImageService.load() + self._service = image.service.ImageService.load() self._id_translator = RackspaceAPIImageIdTranslator() def _to_rs_id(self, image_id): diff --git a/nova/api/services/image.py b/nova/api/services/image.py deleted file mode 100644 index 
1a7a258b7..000000000 --- a/nova/api/services/image.py +++ /dev/null @@ -1,90 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import cPickle as pickle -import os.path -import random -import string - -class ImageService(object): - """Provides storage and retrieval of disk image objects.""" - - @staticmethod - def load(): - """Factory method to return image service.""" - #TODO(gundlach): read from config. - class_ = LocalImageService - return class_() - - def index(self): - """ - Return a dict from opaque image id to image data. - """ - - def show(self, id): - """ - Returns a dict containing image data for the given opaque image id. - """ - - -class GlanceImageService(ImageService): - """Provides storage and retrieval of disk image objects within Glance.""" - # TODO(gundlach): once Glance has an API, build this. 
- pass - - -class LocalImageService(ImageService): - """Image service storing images to local disk.""" - - def __init__(self): - self._path = "/tmp/nova/images" - try: - os.makedirs(self._path) - except OSError: # exists - pass - - def _path_to(self, image_id=''): - return os.path.join(self._path, image_id) - - def _ids(self): - """The list of all image ids.""" - return os.listdir(self._path) - - def index(self): - return [ self.show(id) for id in self._ids() ] - - def show(self, id): - return pickle.load(open(self._path_to(id))) - - def create(self, data): - """ - Store the image data and return the new image id. - """ - id = ''.join(random.choice(string.letters) for _ in range(20)) - data['id'] = id - self.update(id, data) - return id - - def update(self, image_id, data): - """Replace the contents of the given image with the new data.""" - pickle.dump(data, open(self._path_to(image_id), 'w')) - - def delete(self, image_id): - """ - Delete the given image. Raises OSError if the image does not exist. - """ - os.unlink(self._path_to(image_id)) diff --git a/nova/image/service.py b/nova/image/service.py new file mode 100644 index 000000000..1a7a258b7 --- /dev/null +++ b/nova/image/service.py @@ -0,0 +1,90 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import cPickle as pickle +import os.path +import random +import string + +class ImageService(object): + """Provides storage and retrieval of disk image objects.""" + + @staticmethod + def load(): + """Factory method to return image service.""" + #TODO(gundlach): read from config. + class_ = LocalImageService + return class_() + + def index(self): + """ + Return a dict from opaque image id to image data. + """ + + def show(self, id): + """ + Returns a dict containing image data for the given opaque image id. + """ + + +class GlanceImageService(ImageService): + """Provides storage and retrieval of disk image objects within Glance.""" + # TODO(gundlach): once Glance has an API, build this. + pass + + +class LocalImageService(ImageService): + """Image service storing images to local disk.""" + + def __init__(self): + self._path = "/tmp/nova/images" + try: + os.makedirs(self._path) + except OSError: # exists + pass + + def _path_to(self, image_id=''): + return os.path.join(self._path, image_id) + + def _ids(self): + """The list of all image ids.""" + return os.listdir(self._path) + + def index(self): + return [ self.show(id) for id in self._ids() ] + + def show(self, id): + return pickle.load(open(self._path_to(id))) + + def create(self, data): + """ + Store the image data and return the new image id. + """ + id = ''.join(random.choice(string.letters) for _ in range(20)) + data['id'] = id + self.update(id, data) + return id + + def update(self, image_id, data): + """Replace the contents of the given image with the new data.""" + pickle.dump(data, open(self._path_to(image_id), 'w')) + + def delete(self, image_id): + """ + Delete the given image. Raises OSError if the image does not exist. 
+ """ + os.unlink(self._path_to(image_id)) -- cgit From 5f832cd5ea9fb858f5e8b09992cbd47d8d16f665 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Tue, 24 Aug 2010 16:17:06 -0400 Subject: Delete unused directory --- nova/api/services/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 nova/api/services/__init__.py diff --git a/nova/api/services/__init__.py b/nova/api/services/__init__.py deleted file mode 100644 index e69de29bb..000000000 -- cgit From 96ae5e2776218adfee2cb22a4c0d7358a498a451 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Tue, 24 Aug 2010 16:24:24 -0400 Subject: pep8 --- nova/api/rackspace/images.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/rackspace/images.py b/nova/api/rackspace/images.py index b7668a1e1..370980fe9 100644 --- a/nova/api/rackspace/images.py +++ b/nova/api/rackspace/images.py @@ -16,8 +16,8 @@ # under the License. from nova import datastore -from nova.api.rackspace import base from nova import image +from nova.api.rackspace import base from webob import exc class Controller(base.Controller): -- cgit