From f5e19272844f2f0d2c72bf55a2bdf533f40d1ea5 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Thu, 22 Jul 2010 12:28:47 -0700 Subject: Check exit codes when spawning processes by default --- bin/nova-manage | 2 +- nova/objectstore/image.py | 6 +++--- nova/utils.py | 11 ++++++++--- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index 56f89ce30..61ac86db6 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -56,7 +56,7 @@ class VpnCommands(object): vpn = self.__vpn_for(project.id) if vpn: - out, err = utils.execute("ping -c1 -w1 %s > /dev/null; echo $?" % vpn['private_dns_name']) + out, err = utils.execute("ping -c1 -w1 %s > /dev/null; echo $?" % vpn['private_dns_name'], check_exit_code=False) if out.strip() == '0': net = 'up' else: diff --git a/nova/objectstore/image.py b/nova/objectstore/image.py index bea2e9637..b98de276c 100644 --- a/nova/objectstore/image.py +++ b/nova/objectstore/image.py @@ -227,13 +227,13 @@ class Image(object): @staticmethod def decrypt_image(encrypted_filename, encrypted_key, encrypted_iv, cloud_private_key, decrypted_filename): - key, err = utils.execute('openssl rsautl -decrypt -inkey %s' % cloud_private_key, encrypted_key) + key, err = utils.execute('openssl rsautl -decrypt -inkey %s' % cloud_private_key, encrypted_key, check_exit_code=False) if err: raise exception.Error("Failed to decrypt private key: %s" % err) - iv, err = utils.execute('openssl rsautl -decrypt -inkey %s' % cloud_private_key, encrypted_iv) + iv, err = utils.execute('openssl rsautl -decrypt -inkey %s' % cloud_private_key, encrypted_iv, check_exit_code=False) if err: raise exception.Error("Failed to decrypt initialization vector: %s" % err) - out, err = utils.execute('openssl enc -d -aes-128-cbc -in %s -K %s -iv %s -out %s' % (encrypted_filename, key, iv, decrypted_filename)) + out, err = utils.execute('openssl enc -d -aes-128-cbc -in %s -K %s -iv %s -out %s' % (encrypted_filename, key, iv, 
decrypted_filename), check_exit_code=False) if err: raise exception.Error("Failed to decrypt image file %s : %s" % (encrypted_filename, err)) diff --git a/nova/utils.py b/nova/utils.py index 9ecceafe0..d01c33042 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -45,7 +45,7 @@ def fetchfile(url, target): # fp.close() execute("curl %s -o %s" % (url, target)) -def execute(cmd, input=None, addl_env=None): +def execute(cmd, input=None, addl_env=None, check_exit_code=True): env = os.environ.copy() if addl_env: env.update(addl_env) @@ -59,6 +59,8 @@ def execute(cmd, input=None, addl_env=None): obj.stdin.close() if obj.returncode: logging.debug("Result was %s" % (obj.returncode)) + if check_exit_code and obj.returncode <> 0: + raise Exception("Unexpected exit code: %s. result=%s" % (obj.returncode, result)) return result @@ -84,9 +86,12 @@ def debug(arg): return arg -def runthis(prompt, cmd): +def runthis(prompt, cmd, check_exit_code = True): logging.debug("Running %s" % (cmd)) - logging.debug(prompt % (subprocess.call(cmd.split(" ")))) + exit_code = subprocess.call(cmd.split(" ")) + logging.debug(prompt % (exit_code)) + if check_exit_code and exit_code <> 0: + raise Exception("Unexpected exit code: %s from cmd: %s" % (exit_code, cmd)) def generate_uid(topic, size=8): -- cgit From 93aee19fa2f24c4f9c1fd59c0666e024c6891565 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Thu, 29 Jul 2010 14:48:10 -0700 Subject: Added --fail argument to curl invocations, so that HTTP request fails get surfaced as non-zero exit codes --- bin/nova-import-canonical-imagestore | 6 +++--- nova/cloudpipe/bootscript.sh | 4 ++-- nova/utils.py | 2 +- nova/virt/images.py | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/bin/nova-import-canonical-imagestore b/bin/nova-import-canonical-imagestore index 2e79f09b7..8106cc5ca 100755 --- a/bin/nova-import-canonical-imagestore +++ b/bin/nova-import-canonical-imagestore @@ -59,21 +59,21 @@ def download(img): for f in img['files']: 
if f['kind'] == 'kernel': dest = os.path.join(tempdir, 'kernel') - subprocess.call(['curl', f['url'], '-o', dest]) + subprocess.call(['curl', '--fail', f['url'], '-o', dest]) kernel_id = image.Image.add(dest, description='kernel/' + img['title'], kernel=True) for f in img['files']: if f['kind'] == 'ramdisk': dest = os.path.join(tempdir, 'ramdisk') - subprocess.call(['curl', f['url'], '-o', dest]) + subprocess.call(['curl', '--fail', f['url'], '-o', dest]) ramdisk_id = image.Image.add(dest, description='ramdisk/' + img['title'], ramdisk=True) for f in img['files']: if f['kind'] == 'image': dest = os.path.join(tempdir, 'image') - subprocess.call(['curl', f['url'], '-o', dest]) + subprocess.call(['curl', '--fail', f['url'], '-o', dest]) ramdisk_id = image.Image.add(dest, description=img['title'], kernel=kernel_id, ramdisk=ramdisk_id) diff --git a/nova/cloudpipe/bootscript.sh b/nova/cloudpipe/bootscript.sh index 82ec2012a..30d9ad102 100755 --- a/nova/cloudpipe/bootscript.sh +++ b/nova/cloudpipe/bootscript.sh @@ -44,8 +44,8 @@ CSRTEXT=$(python -c "import urllib; print urllib.quote('''$CSRTEXT''')") # SIGN the csr and save as server.crt # CURL fetch to the supervisor, POSTing the CSR text, saving the result as the CRT file -curl $SUPERVISOR -d "cert=$CSRTEXT" > /etc/openvpn/server.crt -curl $SUPERVISOR/getca/ > /etc/openvpn/ca.crt +curl --fail $SUPERVISOR -d "cert=$CSRTEXT" > /etc/openvpn/server.crt +curl --fail $SUPERVISOR/getca/ > /etc/openvpn/ca.crt # Customize the server.conf.template cd /etc/openvpn diff --git a/nova/utils.py b/nova/utils.py index fd30f1f2d..74c7c021c 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -53,7 +53,7 @@ def fetchfile(url, target): # c.perform() # c.close() # fp.close() - execute("curl %s -o %s" % (url, target)) + execute("curl --fail %s -o %s" % (url, target)) def execute(cmd, input=None, addl_env=None, check_exit_code=True): env = os.environ.copy() diff --git a/nova/virt/images.py b/nova/virt/images.py index 92210e242..75fd1625c 100644 
--- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -54,7 +54,7 @@ def _fetch_s3_image(image, path, user): auth = signer.Signer(user.secret.encode()).s3_authorization(headers, 'GET', uri) headers['Authorization'] = 'AWS %s:%s' % (user.access, auth) - cmd = ['/usr/bin/curl', '--silent', url] + cmd = ['/usr/bin/curl', '--fail', '--silent', url] for (k,v) in headers.iteritems(): cmd += ['-H', '%s: %s' % (k,v)] -- cgit From 3897047a2c0f8906c99418ddad6e2c68f0dec5c7 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Fri, 30 Jul 2010 12:05:32 -0700 Subject: Added exit code checking to process.py (twisted process utils). A bit of class refactoring to make it work & cleaner. Also added some more instructive messages to install_venv.py, because otherwise people that don't know what they're doing will install the wrong pip... i.e. I did :-) --- nova/process.py | 90 ++++++++++++++++-------------------------- nova/tests/process_unittest.py | 2 +- nova/virt/libvirt_conn.py | 2 +- tools/install_venv.py | 15 ++++--- 4 files changed, 44 insertions(+), 65 deletions(-) diff --git a/nova/process.py b/nova/process.py index 2dc56372f..24ea3eb7f 100644 --- a/nova/process.py +++ b/nova/process.py @@ -54,19 +54,20 @@ class UnexpectedErrorOutput(IOError): IOError.__init__(self, "got stdout: %r\nstderr: %r" % (stdout, stderr)) -# NOTE(termie): this too -class _BackRelay(protocol.ProcessProtocol): +# This is based on _BackRelay from twister.internal.utils, but modified to capture +# both stdout and stderr without odd stderr handling, and also to handle stdin +class BackRelayWithInput(protocol.ProcessProtocol): """ Trivial protocol for communicating with a process and turning its output into the result of a L{Deferred}. @ivar deferred: A L{Deferred} which will be called back with all of stdout - and, if C{errortoo} is true, all of stderr as well (mixed together in - one string). 
If C{errortoo} is false and any bytes are received over - stderr, this will fire with an L{_UnexpectedErrorOutput} instance and - the attribute will be set to C{None}. + and all of stderr as well (as a tuple). C{terminate_on_stderr} is true + and any bytes are received over stderr, this will fire with an + L{_UnexpectedErrorOutput} instance and the attribute will be set to + C{None}. - @ivar onProcessEnded: If C{errortoo} is false and bytes are received over + @ivar onProcessEnded: If C{terminate_on_stderr} is false and bytes are received over stderr, this attribute will refer to a L{Deferred} which will be called back when the process ends. This C{Deferred} is also associated with the L{_UnexpectedErrorOutput} which C{deferred} fires with earlier in @@ -74,52 +75,43 @@ class _BackRelay(protocol.ProcessProtocol): ended, in addition to knowing when bytes have been received via stderr. """ - def __init__(self, deferred, errortoo=0): + def __init__(self, deferred, startedDeferred=None, terminate_on_stderr=False, + check_exit_code=True, input=None): self.deferred = deferred - self.s = StringIO.StringIO() - if errortoo: - self.errReceived = self.errReceivedIsGood - else: - self.errReceived = self.errReceivedIsBad - - def errReceivedIsBad(self, text): - if self.deferred is not None: + self.stdout = StringIO.StringIO() + self.stderr = StringIO.StringIO() + self.startedDeferred = startedDeferred + self.terminate_on_stderr = terminate_on_stderr + self.check_exit_code = check_exit_code + self.input = input + + def errReceived(self, text): + self.sterr.write(text) + if self.terminate_on_stderr and (self.deferred is not None): self.onProcessEnded = defer.Deferred() - err = UnexpectedErrorOutput(text, self.onProcessEnded) - self.deferred.errback(failure.Failure(err)) + self.deferred.errback(UnexpectedErrorOutput(stdout=self.stdout.getvalue(), stderr=self.stderr.getvalue())) self.deferred = None self.transport.loseConnection() - def errReceivedIsGood(self, text): - 
self.s.write(text) + def errReceived(self, text): + self.stderr.write(text) def outReceived(self, text): - self.s.write(text) + self.stdout.write(text) def processEnded(self, reason): if self.deferred is not None: - self.deferred.callback(self.s.getvalue()) + stdout, stderr = self.stdout.getvalue(), self.stderr.getvalue() + try: + if self.check_exit_code: + reason.trap(error.ProcessDone) + self.deferred.callback((stdout, stderr)) + except: + self.deferred.errback(UnexpectedErrorOutput(stdout, stderr)) elif self.onProcessEnded is not None: self.onProcessEnded.errback(reason) -class BackRelayWithInput(_BackRelay): - def __init__(self, deferred, startedDeferred=None, error_ok=0, - input=None): - # Twisted doesn't use new-style classes in most places :( - _BackRelay.__init__(self, deferred, errortoo=error_ok) - self.error_ok = error_ok - self.input = input - self.stderr = StringIO.StringIO() - self.startedDeferred = startedDeferred - - def errReceivedIsBad(self, text): - self.stderr.write(text) - self.transport.loseConnection() - - def errReceivedIsGood(self, text): - self.stderr.write(text) - def connectionMade(self): if self.startedDeferred: self.startedDeferred.callback(self) @@ -127,31 +119,15 @@ class BackRelayWithInput(_BackRelay): self.transport.write(self.input) self.transport.closeStdin() - def processEnded(self, reason): - if self.deferred is not None: - stdout, stderr = self.s.getvalue(), self.stderr.getvalue() - try: - # NOTE(termie): current behavior means if error_ok is True - # we won't throw an error even if the process - # exited with a non-0 status, so you can't be - # okay with stderr output and not with bad exit - # codes. 
- if not self.error_ok: - reason.trap(error.ProcessDone) - self.deferred.callback((stdout, stderr)) - except: - self.deferred.errback(UnexpectedErrorOutput(stdout, stderr)) - - def getProcessOutput(executable, args=None, env=None, path=None, reactor=None, - error_ok=0, input=None, startedDeferred=None): + check_exit_code=True, input=None, startedDeferred=None): if reactor is None: from twisted.internet import reactor args = args and args or () env = env and env and {} d = defer.Deferred() p = BackRelayWithInput( - d, startedDeferred=startedDeferred, error_ok=error_ok, input=input) + d, startedDeferred=startedDeferred, check_exit_code=check_exit_code, input=input) # NOTE(vish): commands come in as unicode, but self.executes needs # strings or process.spawn raises a deprecation warning executable = str(executable) diff --git a/nova/tests/process_unittest.py b/nova/tests/process_unittest.py index 75187e1fc..25c60c616 100644 --- a/nova/tests/process_unittest.py +++ b/nova/tests/process_unittest.py @@ -48,7 +48,7 @@ class ProcessTestCase(test.TrialTestCase): def test_execute_stderr(self): pool = process.ProcessPool(2) - d = pool.simple_execute('cat BAD_FILE', error_ok=1) + d = pool.simple_execute('cat BAD_FILE', check_exit_code=False) def _check(rv): self.assertEqual(rv[0], '') self.assert_('No such file' in rv[1]) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index c545e4190..6cb9acb29 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -197,7 +197,7 @@ class LibvirtConnection(object): execute = lambda cmd, input=None: \ process.simple_execute(cmd=cmd, input=input, - error_ok=1) + check_exit_code=True) key = data['key_data'] net = None diff --git a/tools/install_venv.py b/tools/install_venv.py index 0b35fc8e9..b9eac70e6 100644 --- a/tools/install_venv.py +++ b/tools/install_venv.py @@ -18,7 +18,7 @@ def die(message, *args): sys.exit(1) -def run_command(cmd, redirect_output=True, error_ok=False): +def run_command(cmd, 
redirect_output=True, check_exit_code=True): # Useful for debugging: #print >>sys.stderr, ' '.join(cmd) if redirect_output: @@ -28,23 +28,26 @@ def run_command(cmd, redirect_output=True, error_ok=False): proc = subprocess.Popen(cmd, stdout=stdout) output = proc.communicate()[0] - if not error_ok and proc.returncode != 0: + if check_exit_code and proc.returncode != 0: die('Command "%s" failed.\n%s', ' '.join(cmd), output) return output def check_dependencies(): """Make sure pip and virtualenv are on the path.""" + # Perl also has a pip program. Hopefully the user has installed the right one! print 'Checking for pip...', - if not run_command(['which', 'pip']).strip(): + if not run_command(['which', 'pip'], check_exit_code=False).strip(): die('ERROR: pip not found.\n\nNova development requires pip,' - ' please install it using your favorite package management tool') + ' please install it using your favorite package management tool ' + ' (e.g. "sudo apt-get install python-pip")') print 'done.' print 'Checking for virtualenv...', - if not run_command(['which', 'virtualenv']).strip(): + if not run_command(['which', 'virtualenv'], check_exit_code=False).strip(): die('ERROR: virtualenv not found.\n\nNova development requires virtualenv,' - ' please install it using your favorite package management tool') + ' please install it using your favorite package management tool ' + ' (e.g. "sudo easy_install virtualenv")') print 'done.' 
-- cgit From 0ee7d2f74a959bcf1cf611f63842302866774475 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Sun, 8 Aug 2010 12:57:33 -0700 Subject: Greater compliance with pep8/pylint style checks --- bin/nova-manage | 4 +- nova/objectstore/image.py | 15 +++++-- nova/process.py | 108 +++++++++++++++++++++++----------------------- nova/utils.py | 12 +++--- nova/virt/libvirt_conn.py | 4 +- tools/install_venv.py | 2 +- 6 files changed, 80 insertions(+), 65 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index 1f7f808f1..36dc1dde9 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -56,7 +56,9 @@ class VpnCommands(object): vpn = self.__vpn_for(project.id) if vpn: - out, err = utils.execute("ping -c1 -w1 %s > /dev/null; echo $?" % vpn['private_dns_name'], check_exit_code=False) + out, err = utils.execute( + "ping -c1 -w1 %s > /dev/null; echo $?" + % vpn['private_dns_name'], check_exit_code=False) if out.strip() == '0': net = 'up' else: diff --git a/nova/objectstore/image.py b/nova/objectstore/image.py index b98de276c..5dbf37133 100644 --- a/nova/objectstore/image.py +++ b/nova/objectstore/image.py @@ -227,13 +227,22 @@ class Image(object): @staticmethod def decrypt_image(encrypted_filename, encrypted_key, encrypted_iv, cloud_private_key, decrypted_filename): - key, err = utils.execute('openssl rsautl -decrypt -inkey %s' % cloud_private_key, encrypted_key, check_exit_code=False) + key, err = utils.execute( + 'openssl rsautl -decrypt -inkey %s' % cloud_private_key, + process_input=encrypted_key, + check_exit_code=False) if err: raise exception.Error("Failed to decrypt private key: %s" % err) - iv, err = utils.execute('openssl rsautl -decrypt -inkey %s' % cloud_private_key, encrypted_iv, check_exit_code=False) + iv, err = utils.execute( + 'openssl rsautl -decrypt -inkey %s' % cloud_private_key, + process_input=encrypted_iv, + check_exit_code=False) if err: raise exception.Error("Failed to decrypt initialization vector: %s" % err) - out, err = 
utils.execute('openssl enc -d -aes-128-cbc -in %s -K %s -iv %s -out %s' % (encrypted_filename, key, iv, decrypted_filename), check_exit_code=False) + _out, err = utils.execute( + 'openssl enc -d -aes-128-cbc -in %s -K %s -iv %s -out %s' + % (encrypted_filename, key, iv, decrypted_filename), + check_exit_code=False) if err: raise exception.Error("Failed to decrypt image file %s : %s" % (encrypted_filename, err)) diff --git a/nova/process.py b/nova/process.py index 9e9de2ee8..37ab538ee 100644 --- a/nova/process.py +++ b/nova/process.py @@ -2,6 +2,7 @@ # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. +# Copyright 2010 FathomDB Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -20,16 +21,11 @@ Process pool, still buggy right now. """ -import logging -import multiprocessing import StringIO from twisted.internet import defer from twisted.internet import error -from twisted.internet import process from twisted.internet import protocol from twisted.internet import reactor -from twisted.internet import threads -from twisted.python import failure from nova import flags @@ -54,8 +50,9 @@ class UnexpectedErrorOutput(IOError): IOError.__init__(self, "got stdout: %r\nstderr: %r" % (stdout, stderr)) -# This is based on _BackRelay from twister.internal.utils, but modified to capture -# both stdout and stderr without odd stderr handling, and also to handle stdin +# This is based on _BackRelay from twister.internal.utils, but modified to +# capture both stdout and stderr, without odd stderr handling, and also to +# handle stdin class BackRelayWithInput(protocol.ProcessProtocol): """ Trivial protocol for communicating with a process and turning its output @@ -67,35 +64,37 @@ class BackRelayWithInput(protocol.ProcessProtocol): L{_UnexpectedErrorOutput} instance and the attribute will be set to C{None}. 
- @ivar onProcessEnded: If C{terminate_on_stderr} is false and bytes are received over - stderr, this attribute will refer to a L{Deferred} which will be called - back when the process ends. This C{Deferred} is also associated with - the L{_UnexpectedErrorOutput} which C{deferred} fires with earlier in - this case so that users can determine when the process has actually - ended, in addition to knowing when bytes have been received via stderr. + @ivar onProcessEnded: If C{terminate_on_stderr} is false and bytes are + received over stderr, this attribute will refer to a L{Deferred} which + will be called back when the process ends. This C{Deferred} is also + associated with the L{_UnexpectedErrorOutput} which C{deferred} fires + with earlier in this case so that users can determine when the process + has actually ended, in addition to knowing when bytes have been received + via stderr. """ - def __init__(self, deferred, startedDeferred=None, terminate_on_stderr=False, - check_exit_code=True, input=None): + def __init__(self, deferred, started_deferred=None, + terminate_on_stderr=False, check_exit_code=True, + process_input=None): self.deferred = deferred self.stdout = StringIO.StringIO() self.stderr = StringIO.StringIO() - self.startedDeferred = startedDeferred + self.started_deferred = started_deferred self.terminate_on_stderr = terminate_on_stderr self.check_exit_code = check_exit_code - self.input = input + self.process_input = process_input + self.on_process_ended = None def errReceived(self, text): - self.sterr.write(text) + self.stderr.write(text) if self.terminate_on_stderr and (self.deferred is not None): - self.onProcessEnded = defer.Deferred() - self.deferred.errback(UnexpectedErrorOutput(stdout=self.stdout.getvalue(), stderr=self.stderr.getvalue())) + self.on_process_ended = defer.Deferred() + self.deferred.errback(UnexpectedErrorOutput( + stdout=self.stdout.getvalue(), + stderr=self.stderr.getvalue())) self.deferred = None self.transport.loseConnection() 
- def errReceived(self, text): - self.stderr.write(text) - def outReceived(self, text): self.stdout.write(text) @@ -107,37 +106,40 @@ class BackRelayWithInput(protocol.ProcessProtocol): reason.trap(error.ProcessDone) self.deferred.callback((stdout, stderr)) except: - # This logic is a little suspicious to me (justinsb)... - # If the callback throws an exception, then errback will be called also. - # However, this is what the unit tests test for... + # NOTE(justinsb): This logic is a little suspicious to me... + # If the callback throws an exception, then errback will be + # called also. However, this is what the unit tests test for... self.deferred.errback(UnexpectedErrorOutput(stdout, stderr)) - elif self.onProcessEnded is not None: - self.onProcessEnded.errback(reason) + elif self.on_process_ended is not None: + self.on_process_ended.errback(reason) def connectionMade(self): - if self.startedDeferred: - self.startedDeferred.callback(self) - if self.input: - self.transport.write(self.input) + if self.started_deferred: + self.started_deferred.callback(self) + if self.process_input: + self.transport.write(self.process_input) self.transport.closeStdin() -def getProcessOutput(executable, args=None, env=None, path=None, reactor=None, - check_exit_code=True, input=None, startedDeferred=None): - if reactor is None: - from twisted.internet import reactor +def get_process_output(executable, args=None, env=None, path=None, + process_reactor=None, check_exit_code=True, + process_input=None, started_deferred=None): + if process_reactor is None: + process_reactor = reactor args = args and args or () env = env and env and {} - d = defer.Deferred() - p = BackRelayWithInput( - d, startedDeferred=startedDeferred, check_exit_code=check_exit_code, input=input) + deferred = defer.Deferred() + process_handler = BackRelayWithInput( + deferred, started_deferred=started_deferred, + check_exit_code=check_exit_code, process_input=process_input) # NOTE(vish): commands come in as unicode, 
but self.executes needs # strings or process.spawn raises a deprecation warning executable = str(executable) if not args is None: args = [str(x) for x in args] - reactor.spawnProcess(p, executable, (executable,)+tuple(args), env, path) - return d + process_reactor.spawnProcess( process_handler, executable, + (executable,)+tuple(args), env, path) + return deferred class ProcessPool(object): @@ -163,26 +165,26 @@ class ProcessPool(object): return self.execute(executable, args, **kw) def execute(self, *args, **kw): - d = self._pool.acquire() + deferred = self._pool.acquire() - def _associateProcess(proto): - d.process = proto.transport + def _associate_process(proto): + deferred.process = proto.transport return proto.transport started = defer.Deferred() - started.addCallback(_associateProcess) - kw.setdefault('startedDeferred', started) + started.addCallback(_associate_process) + kw.setdefault('started_deferred', started) - d.process = None - d.started = started + deferred.process = None + deferred.started = started - d.addCallback(lambda _: getProcessOutput(*args, **kw)) - d.addBoth(self._release) - return d + deferred.addCallback(lambda _: get_process_output(*args, **kw)) + deferred.addBoth(self._release) + return deferred - def _release(self, rv=None): + def _release(self, retval=None): self._pool.release() - return rv + return retval class SharedPool(object): _instance = None diff --git a/nova/utils.py b/nova/utils.py index 74c7c021c..1acc205b5 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -55,22 +55,23 @@ def fetchfile(url, target): # fp.close() execute("curl --fail %s -o %s" % (url, target)) -def execute(cmd, input=None, addl_env=None, check_exit_code=True): +def execute(cmd, process_input=None, addl_env=None, check_exit_code=True): env = os.environ.copy() if addl_env: env.update(addl_env) obj = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) result = None - if input != None: - result = 
obj.communicate(input) + if process_input != None: + result = obj.communicate(process_input) else: result = obj.communicate() obj.stdin.close() if obj.returncode: logging.debug("Result was %s" % (obj.returncode)) if check_exit_code and obj.returncode <> 0: - raise Exception("Unexpected exit code: %s. result=%s" % (obj.returncode, result)) + raise Exception( "Unexpected exit code: %s. result=%s" + % (obj.returncode, result)) return result @@ -101,7 +102,8 @@ def runthis(prompt, cmd, check_exit_code = True): exit_code = subprocess.call(cmd.split(" ")) logging.debug(prompt % (exit_code)) if check_exit_code and exit_code <> 0: - raise Exception("Unexpected exit code: %s from cmd: %s" % (exit_code, cmd)) + raise Exception( "Unexpected exit code: %s from cmd: %s" + % (exit_code, cmd)) def generate_uid(topic, size=8): diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 6cb9acb29..e36bfc7f5 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -194,9 +194,9 @@ class LibvirtConnection(object): if not os.path.exists(basepath('ramdisk')): yield images.fetch(data['ramdisk_id'], basepath('ramdisk'), user) - execute = lambda cmd, input=None: \ + execute = lambda cmd, process_input=None: \ process.simple_execute(cmd=cmd, - input=input, + process_input=process_input, check_exit_code=True) key = data['key_data'] diff --git a/tools/install_venv.py b/tools/install_venv.py index b9eac70e6..3cff8051d 100644 --- a/tools/install_venv.py +++ b/tools/install_venv.py @@ -35,7 +35,7 @@ def run_command(cmd, redirect_output=True, check_exit_code=True): def check_dependencies(): """Make sure pip and virtualenv are on the path.""" - # Perl also has a pip program. Hopefully the user has installed the right one! + # Perl also has a pip program. Hopefully the user has installed the right one! 
print 'Checking for pip...', if not run_command(['which', 'pip'], check_exit_code=False).strip(): die('ERROR: pip not found.\n\nNova development requires pip,' -- cgit From 993563b6cc9db9f24480678cf8b2d0750aee7a92 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Sun, 8 Aug 2010 13:05:24 -0700 Subject: Used new (clearer) flag names when calling processes --- nova/process.py | 10 +++++++--- nova/volume/service.py | 3 ++- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/nova/process.py b/nova/process.py index 37ab538ee..d36de0410 100644 --- a/nova/process.py +++ b/nova/process.py @@ -123,15 +123,19 @@ class BackRelayWithInput(protocol.ProcessProtocol): def get_process_output(executable, args=None, env=None, path=None, process_reactor=None, check_exit_code=True, - process_input=None, started_deferred=None): + process_input=None, started_deferred=None, + terminate_on_stderr=False): if process_reactor is None: process_reactor = reactor args = args and args or () env = env and env and {} deferred = defer.Deferred() process_handler = BackRelayWithInput( - deferred, started_deferred=started_deferred, - check_exit_code=check_exit_code, process_input=process_input) + deferred, + started_deferred=started_deferred, + check_exit_code=check_exit_code, + process_input=process_input, + terminate_on_stderr=terminate_on_stderr) # NOTE(vish): commands come in as unicode, but self.executes needs # strings or process.spawn raises a deprecation warning executable = str(executable) diff --git a/nova/volume/service.py b/nova/volume/service.py index e12f675a7..98cd0d3bf 100644 --- a/nova/volume/service.py +++ b/nova/volume/service.py @@ -149,7 +149,8 @@ class VolumeService(service.Service): return yield process.simple_execute("sudo vblade-persist auto all") # NOTE(vish): this command sometimes sends output to stderr for warnings - yield process.simple_execute("sudo vblade-persist start all", error_ok=1) + yield process.simple_execute("sudo vblade-persist start all", + 
terminate_on_stderr=False) @defer.inlineCallbacks def _init_volume_group(self): -- cgit From abfd82d89653482e21e1139fb8ce8bf89c2b4d2c Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> Date: Tue, 10 Aug 2010 09:54:13 -0400 Subject: pylint fixes for nova/virt/connection.py --- nova/virt/connection.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/nova/virt/connection.py b/nova/virt/connection.py index 004adb19d..bb54f5db7 100644 --- a/nova/virt/connection.py +++ b/nova/virt/connection.py @@ -17,6 +17,11 @@ # License for the specific language governing permissions and limitations # under the License. +"""Abstraction of the underlying virtualization API""" + +import logging +import sys + from nova import flags from nova.virt import fake from nova.virt import libvirt_conn @@ -27,6 +32,11 @@ FLAGS = flags.FLAGS def get_connection(read_only=False): + """Returns a connection to the underlying virtualization API + + The read_only parameter is passed through to the underlying API's + get_connection() method if applicable + """ # TODO(termie): maybe lazy load after initial check for permissions # TODO(termie): check whether we can be disconnected t = FLAGS.connection_type -- cgit From 33576003f68498371f0761aaa3ca5c9a08d1c452 Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> Date: Tue, 10 Aug 2010 09:55:00 -0400 Subject: Pylint fixes for /nova/tests/api_unittest.py --- nova/tests/api_unittest.py | 83 ++++++++++++++++++++++++++++++++-------------- 1 file changed, 58 insertions(+), 25 deletions(-) diff --git a/nova/tests/api_unittest.py b/nova/tests/api_unittest.py index 9d072866c..462d1b295 100644 --- a/nova/tests/api_unittest.py +++ b/nova/tests/api_unittest.py @@ -16,6 +16,8 @@ # License for the specific language governing permissions and limitations # under the License. 
+"""Unit tests for the API endpoint""" + import boto from boto.ec2 import regioninfo import httplib @@ -38,7 +40,15 @@ FLAGS = flags.FLAGS # circuit boto calls and feed them into our tornado handlers, # it's pretty damn circuitous so apologies if you have to fix # a bug in it -def boto_to_tornado(method, path, headers, data, host, connection=None): +# NOTE(jaypipes) The pylint disables here are for R0913 (too many args) which +# isn't controllable since boto's HTTPRequest needs that many +# args, and for the version-differentiated import of tornado's +# httputil. +# NOTE(jaypipes): The disable-msg=E1101 and E1103 below is because pylint is +# unable to introspect the deferred's return value properly + +def boto_to_tornado(method, path, headers, data, # pylint: disable-msg=R0913 + host, connection=None): """ translate boto requests into tornado requests connection should be a FakeTornadoHttpConnection instance @@ -46,7 +56,7 @@ def boto_to_tornado(method, path, headers, data, host, connection=None): try: headers = httpserver.HTTPHeaders() except AttributeError: - from tornado import httputil + from tornado import httputil # pylint: disable-msg=E0611 headers = httputil.HTTPHeaders() for k, v in headers.iteritems(): headers[k] = v @@ -61,57 +71,64 @@ def boto_to_tornado(method, path, headers, data, host, connection=None): return req -def raw_to_httpresponse(s): - """ translate a raw tornado http response into an httplib.HTTPResponse """ - sock = FakeHttplibSocket(s) +def raw_to_httpresponse(response_string): + """translate a raw tornado http response into an httplib.HTTPResponse""" + sock = FakeHttplibSocket(response_string) resp = httplib.HTTPResponse(sock) resp.begin() return resp class FakeHttplibSocket(object): - """ a fake socket implementation for httplib.HTTPResponse, trivial """ - def __init__(self, s): - self.fp = StringIO.StringIO(s) + """a fake socket implementation for httplib.HTTPResponse, trivial""" + def __init__(self, response_string): + self._buffer = 
StringIO.StringIO(response_string) - def makefile(self, mode, other): - return self.fp + def makefile(self, _mode, _other): + """Returns the socket's internal buffer""" + return self._buffer class FakeTornadoStream(object): - """ a fake stream to satisfy tornado's assumptions, trivial """ - def set_close_callback(self, f): + """a fake stream to satisfy tornado's assumptions, trivial""" + def set_close_callback(self, _func): + """Dummy callback for stream""" pass class FakeTornadoConnection(object): - """ a fake connection object for tornado to pass to its handlers + """A fake connection object for tornado to pass to its handlers web requests are expected to write to this as they get data and call finish when they are done with the request, we buffer the writes and kick off a callback when it is done so that we can feed the result back into boto. """ - def __init__(self, d): - self.d = d + def __init__(self, deferred): + self._deferred = deferred self._buffer = StringIO.StringIO() def write(self, chunk): + """Writes a chunk of data to the internal buffer""" self._buffer.write(chunk) def finish(self): - s = self._buffer.getvalue() - self.d.callback(s) + """Finalizes the connection and returns the buffered data via the + deferred callback. 
+ """ + data = self._buffer.getvalue() + self._deferred.callback(data) xheaders = None @property - def stream(self): + def stream(self): # pylint: disable-msg=R0201 + """Required property for interfacing with tornado""" return FakeTornadoStream() class FakeHttplibConnection(object): - """ a fake httplib.HTTPConnection for boto to use + """A fake httplib.HTTPConnection for boto to use requests made via this connection actually get translated and routed into our tornado app, we then wait for the response and turn it back into @@ -123,7 +140,9 @@ class FakeHttplibConnection(object): self.deferred = defer.Deferred() def request(self, method, path, data, headers): - req = boto_to_tornado + """Creates a connection to a fake tornado and sets + up a deferred request with the supplied data and + headers""" conn = FakeTornadoConnection(self.deferred) request = boto_to_tornado(connection=conn, method=method, @@ -131,12 +150,16 @@ class FakeHttplibConnection(object): headers=headers, data=data, host=self.host) - handler = self.app(request) + self.app(request) self.deferred.addCallback(raw_to_httpresponse) def getresponse(self): + """A bit of deferred magic for catching the response + from the previously deferred request""" @defer.inlineCallbacks def _waiter(): + """Callback that simply yields the deferred's + return value.""" result = yield self.deferred defer.returnValue(result) d = _waiter() @@ -144,14 +167,16 @@ class FakeHttplibConnection(object): # this deferred has already been called by the time # we get here, we are going to cheat and return # the result of the callback - return d.result + return d.result # pylint: disable-msg=E1101 def close(self): + """Required for compatibility with boto/tornado""" pass class ApiEc2TestCase(test.BaseTestCase): - def setUp(self): + """Unit test for the cloud controller on an EC2 API""" + def setUp(self): # pylint: disable-msg=C0103,C0111 super(ApiEc2TestCase, self).setUp() self.manager = manager.AuthManager() @@ -171,12 +196,16 @@ 
class ApiEc2TestCase(test.BaseTestCase): self.mox.StubOutWithMock(self.ec2, 'new_http_connection') def expect_http(self, host=None, is_secure=False): + """Returns a new EC2 connection""" http = FakeHttplibConnection( self.app, '%s:%d' % (self.host, FLAGS.cc_port), False) + # pylint: disable-msg=E1103 self.ec2.new_http_connection(host, is_secure).AndReturn(http) return http def test_describe_instances(self): + """Test that, after creating a user and a project, the describe + instances call to the API works properly""" self.expect_http() self.mox.ReplayAll() user = self.manager.create_user('fake', 'fake', 'fake') @@ -187,14 +216,18 @@ class ApiEc2TestCase(test.BaseTestCase): def test_get_all_key_pairs(self): + """Test that, after creating a user and project and generating + a key pair, that the API call to list key pairs works properly""" self.expect_http() self.mox.ReplayAll() - keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd") for x in range(random.randint(4, 8))) + keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \ + for x in range(random.randint(4, 8))) user = self.manager.create_user('fake', 'fake', 'fake') project = self.manager.create_project('fake', 'fake', 'fake') self.manager.generate_key_pair(user.id, keyname) rv = self.ec2.get_all_key_pairs() - self.assertTrue(filter(lambda k: k.name == keyname, rv)) + results = [k for k in rv if k.name == keyname] + self.assertEquals(len(results), 1) self.manager.delete_project(project) self.manager.delete_user(user) -- cgit From 1d414f6bddbae33bcbec799e29ab904d86811869 Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> Date: Tue, 10 Aug 2010 14:20:15 -0400 Subject: pylint fixes for nova/objectstore/handler.py --- nova/objectstore/handler.py | 133 +++++++++++++++++++++++++++++++++----------- 1 file changed, 100 insertions(+), 33 deletions(-) diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py index f625a2aa1..cb38c89f2 100644 --- a/nova/objectstore/handler.py +++ 
b/nova/objectstore/handler.py @@ -19,6 +19,10 @@ # License for the specific language governing permissions and limitations # under the License. +# Disabling pylint's R0201 (Method could be a function) because +# the API of render_GET, render_PUT, etc is Twisted's, not ours. +# pylint: disable-msg=R0201 + """ Implementation of an S3-like storage server based on local files. @@ -61,6 +65,7 @@ from nova.objectstore import image FLAGS = flags.FLAGS def render_xml(request, value): + """Writes value as XML string to request""" assert isinstance(value, dict) and len(value) == 1 request.setHeader("Content-Type", "application/xml; charset=UTF-8") @@ -73,11 +78,13 @@ def render_xml(request, value): request.finish() def finish(request, content=None): + """Finalizer method for request""" if content: request.write(content) request.finish() def _render_parts(value, write_cb): + """Helper method to render different Python objects to XML""" if isinstance(value, basestring): write_cb(escape.xhtml_escape(value)) elif isinstance(value, int) or isinstance(value, long): @@ -96,35 +103,47 @@ def _render_parts(value, write_cb): raise Exception("Unknown S3 value type %r", value) def get_argument(request, key, default_value): + """Returns the request's value at key, or default_value + if not found + """ if key in request.args: return request.args[key][0] return default_value def get_context(request): + """Returns the supplied request's context object""" try: # Authorization Header format: 'AWS :' authorization_header = request.getHeader('Authorization') if not authorization_header: raise exception.NotAuthorized - access, sep, secret = authorization_header.split(' ')[1].rpartition(':') - (user, project) = manager.AuthManager().authenticate(access, - secret, - {}, - request.method, - request.getRequestHostname(), - request.uri, - headers=request.getAllHeaders(), - check_type='s3') + auth_header_value = authorization_header.split(' ')[1] + access, _ignored, secret = 
auth_header_value.rpartition(':') + am = manager.AuthManager() + (user, project) = am.authenticate(access, + secret, + {}, + request.method, + request.getRequestHostname(), + request.uri, + headers=request.getAllHeaders(), + check_type='s3') return api.APIRequestContext(None, user, project) except exception.Error as ex: - logging.debug("Authentication Failure: %s" % ex) + logging.debug("Authentication Failure: %s", ex) raise exception.NotAuthorized class ErrorHandlingResource(Resource): - """Maps exceptions to 404 / 401 codes. Won't work for exceptions thrown after NOT_DONE_YET is returned.""" - # TODO(unassigned) (calling-all-twisted-experts): This needs to be plugged in to the right place in twisted... - # This doesn't look like it's the right place (consider exceptions in getChild; or after NOT_DONE_YET is returned + """Maps exceptions to 404 / 401 codes. Won't work for + exceptions thrown after NOT_DONE_YET is returned. + """ + # TODO(unassigned) (calling-all-twisted-experts): This needs to be + # plugged in to the right place in twisted... 
+ # This doesn't look like it's the right place + # (consider exceptions in getChild; or after + # NOT_DONE_YET is returned def render(self, request): + """Renders the response as XML""" try: return Resource.render(self, request) except exception.NotFound: @@ -136,7 +155,11 @@ class ErrorHandlingResource(Resource): class S3(ErrorHandlingResource): """Implementation of an S3-like storage server based on local files.""" - def getChild(self, name, request): + def __init__(self): + ErrorHandlingResource.__init__(self) + + def getChild(self, name, request): # pylint: disable-msg=C0103 + """Returns either the image or bucket resource""" request.context = get_context(request) if name == '': return self @@ -146,8 +169,10 @@ class S3(ErrorHandlingResource): return BucketResource(name) def render_GET(self, request): + """Renders the GET request for a list of buckets as XML""" logging.debug('List of buckets requested') - buckets = [b for b in bucket.Bucket.all() if b.is_authorized(request.context)] + buckets = [b for b in bucket.Bucket.all() \ + if b.is_authorized(request.context)] render_xml(request, {"ListAllMyBucketsResult": { "Buckets": {"Bucket": [b.metadata for b in buckets]}, @@ -155,22 +180,27 @@ class S3(ErrorHandlingResource): return server.NOT_DONE_YET class BucketResource(ErrorHandlingResource): + """A web resource containing an S3-like bucket""" def __init__(self, name): - Resource.__init__(self) + ErrorHandlingResource.__init__(self) self.name = name def getChild(self, name, request): + """Returns the bucket resource itself, or the object resource + the bucket contains if a name is supplied + """ if name == '': return self else: return ObjectResource(bucket.Bucket(self.name), name) def render_GET(self, request): - logging.debug("List keys for bucket %s" % (self.name)) + "Returns the keys for the bucket resource""" + logging.debug("List keys for bucket %s", self.name) try: bucket_object = bucket.Bucket(self.name) - except exception.NotFound, e: + except 
exception.NotFound: return error.NoResource(message="No such bucket").render(request) if not bucket_object.is_authorized(request.context): @@ -181,19 +211,26 @@ class BucketResource(ErrorHandlingResource): max_keys = int(get_argument(request, "max-keys", 1000)) terse = int(get_argument(request, "terse", 0)) - results = bucket_object.list_keys(prefix=prefix, marker=marker, max_keys=max_keys, terse=terse) + results = bucket_object.list_keys(prefix=prefix, + marker=marker, + max_keys=max_keys, + terse=terse) render_xml(request, {"ListBucketResult": results}) return server.NOT_DONE_YET def render_PUT(self, request): - logging.debug("Creating bucket %s" % (self.name)) - logging.debug("calling bucket.Bucket.create(%r, %r)" % (self.name, request.context)) + "Creates the bucket resource""" + logging.debug("Creating bucket %s", self.name) + logging.debug("calling bucket.Bucket.create(%r, %r)", + self.name, + request.context) bucket.Bucket.create(self.name, request.context) request.finish() return server.NOT_DONE_YET def render_DELETE(self, request): - logging.debug("Deleting bucket %s" % (self.name)) + """Deletes the bucket resource""" + logging.debug("Deleting bucket %s", self.name) bucket_object = bucket.Bucket(self.name) if not bucket_object.is_authorized(request.context): @@ -205,25 +242,37 @@ class BucketResource(ErrorHandlingResource): class ObjectResource(ErrorHandlingResource): - def __init__(self, bucket, name): - Resource.__init__(self) - self.bucket = bucket + """The resource returned from a bucket""" + def __init__(self, bucket_name, name): + ErrorHandlingResource.__init__(self) + self.bucket = bucket_name self.name = name def render_GET(self, request): - logging.debug("Getting object: %s / %s" % (self.bucket.name, self.name)) + """Returns the object + + Raises NotAuthorized if user in request context is not + authorized to delete the object. 
+ """ + logging.debug("Getting object: %s / %s", self.bucket.name, self.name) if not self.bucket.is_authorized(request.context): raise exception.NotAuthorized obj = self.bucket[urllib.unquote(self.name)] request.setHeader("Content-Type", "application/unknown") - request.setHeader("Last-Modified", datetime.datetime.utcfromtimestamp(obj.mtime)) + request.setHeader("Last-Modified", + datetime.datetime.utcfromtimestamp(obj.mtime)) request.setHeader("Etag", '"' + obj.md5 + '"') return static.File(obj.path).render_GET(request) def render_PUT(self, request): - logging.debug("Putting object: %s / %s" % (self.bucket.name, self.name)) + """Modifies/inserts the object and returns a result code + + Raises NotAuthorized if user in request context is not + authorized to delete the object. + """ + logging.debug("Putting object: %s / %s", self.bucket.name, self.name) if not self.bucket.is_authorized(request.context): raise exception.NotAuthorized @@ -236,7 +285,15 @@ class ObjectResource(ErrorHandlingResource): return server.NOT_DONE_YET def render_DELETE(self, request): - logging.debug("Deleting object: %s / %s" % (self.bucket.name, self.name)) + """Deletes the object and returns a result code + + Raises NotAuthorized if user in request context is not + authorized to delete the object. 
+ """ + + logging.debug("Deleting object: %s / %s", + self.bucket.name, + self.name) if not self.bucket.is_authorized(request.context): raise exception.NotAuthorized @@ -246,17 +303,23 @@ class ObjectResource(ErrorHandlingResource): return '' class ImageResource(ErrorHandlingResource): + """A web resource representing a single image""" isLeaf = True def __init__(self, name): - Resource.__init__(self) + ErrorHandlingResource.__init__(self) self.img = image.Image(name) def render_GET(self, request): - return static.File(self.img.image_path, defaultType='application/octet-stream').render_GET(request) + """Returns the image file""" + return static.File(self.img.image_path, + defaultType='application/octet-stream' + ).render_GET(request) class ImagesResource(Resource): - def getChild(self, name, request): + """A web resource representing a list of images""" + def getChild(self, name, _request): + """Returns itself or an ImageResource if no name given""" if name == '': return self else: @@ -285,7 +348,6 @@ class ImagesResource(Resource): raise exception.NotAuthorized bucket_object = bucket.Bucket(image_location.split("/")[0]) - manifest = image_location[len(image_location.split('/')[0])+1:] if not bucket_object.is_authorized(request.context): raise exception.NotAuthorized @@ -324,13 +386,18 @@ class ImagesResource(Resource): return '' def get_site(): + """Support for WSGI-like interfaces""" root = S3() site = server.Site(root) return site def get_application(): + """Support WSGI-like interfaces""" factory = get_site() application = service.Application("objectstore") + # Disabled because of lack of proper introspection in Twisted + # or possibly different versions of twisted? 
+ # pylint: disable-msg=E1101 objectStoreService = internet.TCPServer(FLAGS.s3_port, factory) objectStoreService.setServiceParent(application) return application -- cgit From 049b89babe10068d3976f3f3a99b7dce120e2962 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Tue, 10 Aug 2010 18:17:44 -0400 Subject: work on a router that works with wsgi and non-wsgi routing --- nova/endpoint/rackspace.py | 27 ++++++++-------- nova/wsgi.py | 76 ++++++++++++++++++++++++++++++++++++++++++++++ tools/pip-requires | 3 ++ 3 files changed, 94 insertions(+), 12 deletions(-) diff --git a/nova/endpoint/rackspace.py b/nova/endpoint/rackspace.py index 75b828e91..b4e6cd823 100644 --- a/nova/endpoint/rackspace.py +++ b/nova/endpoint/rackspace.py @@ -45,18 +45,20 @@ class API(wsgi.Middleware): def __init__(self): super(API, self).__init__(Router(webob.exc.HTTPNotFound())) - def __call__(self, environ, start_response): + @webob.dec.wsgify + def __call__(self, req): + return self.application context = {} - if "HTTP_X_AUTH_TOKEN" in environ: + if "HTTP_X_AUTH_TOKEN" in req.environ: context['user'] = manager.AuthManager().get_user_from_access_key( - environ['HTTP_X_AUTH_TOKEN']) + req.environ['HTTP_X_AUTH_TOKEN']) if context['user']: context['project'] = manager.AuthManager().get_project( context['user'].name) if "user" not in context: - return webob.exc.HTTPForbidden()(environ, start_response) + return webob.exc.HTTPForbidden() environ['nova.context'] = context - return self.application(environ, start_response) + return self.application class Router(wsgi.Router): @@ -64,13 +66,14 @@ class Router(wsgi.Router): def _build_map(self): """Build routing map for authentication and cloud.""" - self._connect("/v1.0", controller=AuthenticationAPI()) - cloud = CloudServerAPI() - self._connect("/servers", controller=cloud.launch_server, - conditions={"method": ["POST"]}) - self._connect("/servers/{server_id}", controller=cloud.delete_server, - conditions={'method': ["DELETE"]}) - 
self._connect("/servers", controller=cloud) + self.map.resource("server", "servers", controller=CloudServerAPI()) + #self._connect("/v1.0", controller=AuthenticationAPI()) + #cloud = CloudServerAPI() + #self._connect("/servers", controller=cloud.launch_server, + # conditions={"method": ["POST"]}) + #self._connect("/servers/{server_id}", controller=cloud.delete_server, + # conditions={'method': ["DELETE"]}) + #self._connect("/servers", controller=cloud) class AuthenticationAPI(wsgi.Application): diff --git a/nova/wsgi.py b/nova/wsgi.py index 4fd6e59e3..271648105 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -40,6 +40,7 @@ def run_server(application, port): eventlet.wsgi.server(sock, application) +# TODO(gundlach): I think we should toss this class, now that it has no purpose. class Application(object): """Base WSGI application wrapper. Subclasses need to implement __call__.""" @@ -140,6 +141,81 @@ class ParsedRoutes(Middleware): app = environ['wsgiorg.routing_args'][1]['controller'] return app(environ, start_response) +class MichaelRouter(object): + """ + My attempt at a routing class. Just override __init__ to call + super, then set up routes in self.map. + """ + + def __init__(self): + self.map = routes.Mapper() + self._router = routes.middleware.RoutesMiddleware(self._proceed, self.map) + + @webob.dec.wsgify + def __call__(self, req): + """ + Route the incoming request to a controller based on self.map. + If no match, return a 404. + """ + return self._router + + @webob.dec.wsgify + def _proceed(self, req): + """ + Called by self._router after matching the incoming request to a route + and putting the information into req.environ. 
+ """ + if req.environ['routes.route'] is None: + return webob.exc.HTTPNotFound() + match = environ['wsgiorg.routing_args'][1] + if match.get('_is_wsgi', False): + wsgiapp = match['controller'] + return req.get_response(wsgiapp) + else: + # TODO(gundlach): doubt this is the right way -- and it really + # feels like this code should exist somewhere already on the + # internet + controller, action = match['controller'], match['action'] + delete match['controller'] + delete match['action'] + return _as_response(getattr(controller, action)(**match)) + + controller = environ['wsgiorg.routing_args'][1]['controller'] + self._dispatch(controller) + + def _as_response(self, result): + """ + When routing to a non-wsgi controller+action, its result will + be passed here before returning up the WSGI chain to be converted + into a webob.Response + + + + + +class ApiVersionRouter(MichaelRouter): + + def __init__(self): + super(ApiVersionRouter, self).__init__(self) + + self.map.connect(None, "/v1.0/{path_info:.*}", controller=RsApiRouter()) + self.map.connect(None, "/ec2/{path_info:.*}", controller=Ec2ApiRouter()) + +class RsApiRouter(MichaelRouter): + def __init__(self): + super(RsApiRouter, self).__init__(self) + + self.map.resource("server", "servers", controller=CloudServersServerApi()) + self.map.resource("image", "images", controller=CloudServersImageApi()) + self.map.resource("flavor", "flavors", controller=CloudServersFlavorApi()) + self.map.resource("sharedipgroup", "sharedipgroups", + controller=CloudServersSharedIpGroupApi()) + +class Ec2ApiRouter(object): + def __getattr__(self, key): + return lambda *x: {'dummy response': 'i am a dummy response'} +CloudServersServerApi = CloudServersImageApi = CloudServersFlavorApi = \ + CloudServersSharedIpGroupApi = Ec2ApiRouter class Router(Middleware): # pylint: disable-msg=R0921 """Wrapper to help setup routes.middleware.RoutesMiddleware.""" diff --git a/tools/pip-requires b/tools/pip-requires index 4eb47ca2b..2317907d1 100644 
--- a/tools/pip-requires +++ b/tools/pip-requires @@ -4,11 +4,14 @@ amqplib==0.6.1 anyjson==0.2.4 boto==2.0b1 carrot==0.10.5 +eventlet==0.9.10 lockfile==0.8 python-daemon==1.5.5 python-gflags==1.3 redis==2.0.0 +routes==1.12.3 tornado==1.0 +webob==0.9.8 wsgiref==0.1.2 zope.interface==3.6.1 mox==0.5.0 -- cgit From 1637c33927672a6edc9ad7a994787669ea47f602 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 11 Aug 2010 09:46:08 -0400 Subject: Serializing in middleware after all... by tying to the router. maybe a good idea? --- nova/wsgi.py | 113 +++++++++++++++++++++++++++++++++++++---------------------- 1 file changed, 72 insertions(+), 41 deletions(-) diff --git a/nova/wsgi.py b/nova/wsgi.py index 271648105..c511a3f06 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -141,15 +141,24 @@ class ParsedRoutes(Middleware): app = environ['wsgiorg.routing_args'][1]['controller'] return app(environ, start_response) -class MichaelRouter(object): +class MichaelRouterMiddleware(object): """ - My attempt at a routing class. Just override __init__ to call - super, then set up routes in self.map. + Router that maps incoming requests to WSGI apps or to standard + controllers+actions. The response will be a WSGI response; standard + controllers+actions will by default have their results serialized + to the requested Content Type, or you can subclass and override + _to_webob_response to customize this. """ - def __init__(self): - self.map = routes.Mapper() - self._router = routes.middleware.RoutesMiddleware(self._proceed, self.map) + def __init__(self, map): + """ + Create a router for the given routes.Mapper. It may contain standard + routes (i.e. specifying controllers and actions), or may route to a + WSGI app by instead specifying a wsgi_app=SomeApp() parameter in + map.connect(). 
+ """ + self.map = map + self._router = routes.middleware.RoutesMiddleware(self.__proceed, self.map) @webob.dec.wsgify def __call__(self, req): @@ -160,62 +169,84 @@ class MichaelRouter(object): return self._router @webob.dec.wsgify - def _proceed(self, req): - """ - Called by self._router after matching the incoming request to a route - and putting the information into req.environ. - """ + @staticmethod + def __proceed(req): + # Called by self._router after matching the incoming request to a route + # and putting the information into req.environ. Either returns 404, the + # routed WSGI app, or _to_webob_response(the action result). + if req.environ['routes.route'] is None: return webob.exc.HTTPNotFound() match = environ['wsgiorg.routing_args'][1] - if match.get('_is_wsgi', False): - wsgiapp = match['controller'] - return req.get_response(wsgiapp) + if 'wsgi_app' in match: + return match['wsgi_app'] else: - # TODO(gundlach): doubt this is the right way -- and it really - # feels like this code should exist somewhere already on the - # internet + kwargs = match.copy() controller, action = match['controller'], match['action'] - delete match['controller'] - delete match['action'] - return _as_response(getattr(controller, action)(**match)) + delete kwargs['controller'] + delete kwargs['action'] + return _to_webob_response(req, getattr(controller, action)(**kwargs)) - controller = environ['wsgiorg.routing_args'][1]['controller'] - self._dispatch(controller) - - def _as_response(self, result): + def _to_webob_response(self, req, result): + """ + When routing to a non-WSGI controller+action, the webob.Request and the + action's result will be passed here to be converted into a + webob.Response before returning up the WSGI chain. By default it + serializes to the requested Content Type. 
""" - When routing to a non-wsgi controller+action, its result will - be passed here before returning up the WSGI chain to be converted - into a webob.Response + return Serializer(req).serialize(result) +class Serializer(object): + """ + Serializes a dictionary to a Content Type specified by a WSGI environment. + """ + def __init__(self, environ): + """Create a serializer based on the given WSGI environment.""" + self.environ = environ + def serialize(self, data): + req = webob.Request(environ) + # TODO(gundlach): temp + if 'applicatio/json' in req.accept): + import json + return json.dumps(result) + else: + return '' + repr(data) + '' -class ApiVersionRouter(MichaelRouter): +class ApiVersionRouter(MichaelRouterMiddleware): def __init__(self): - super(ApiVersionRouter, self).__init__(self) + map = routes.Mapper() - self.map.connect(None, "/v1.0/{path_info:.*}", controller=RsApiRouter()) - self.map.connect(None, "/ec2/{path_info:.*}", controller=Ec2ApiRouter()) + map.connect(None, "/v1.0/{path_info:.*}", wsgi_app=RsApiRouter()) + map.connect(None, "/ec2/{path_info:.*}", wsgi_app=Ec2ApiRouter()) -class RsApiRouter(MichaelRouter): + super(ApiVersionRouter, self).__init__(self, map) + +class RsApiRouter(MichaelRouterMiddleware): def __init__(self): - super(RsApiRouter, self).__init__(self) + map = routes.Mapper() + + map.resource("server", "servers", controller=ServerController()) + map.resource("image", "images", controller=ImageController()) + map.resource("flavor", "flavors", controller=FlavorController()) + map.resource("sharedipgroup", "sharedipgroups", + controller=SharedIpGroupController()) - self.map.resource("server", "servers", controller=CloudServersServerApi()) - self.map.resource("image", "images", controller=CloudServersImageApi()) - self.map.resource("flavor", "flavors", controller=CloudServersFlavorApi()) - self.map.resource("sharedipgroup", "sharedipgroups", - controller=CloudServersSharedIpGroupApi()) + super(RsApiRouter, self).__init__(self, map) 
class Ec2ApiRouter(object): + @webob.dec.wsgify + def __call__(self, req): + return 'dummy response' + +class ServerController(object): def __getattr__(self, key): - return lambda *x: {'dummy response': 'i am a dummy response'} -CloudServersServerApi = CloudServersImageApi = CloudServersFlavorApi = \ - CloudServersSharedIpGroupApi = Ec2ApiRouter + return {'dummy': 'dummy response'} +ImageController = FlavorController = SharedIpGroupController = ServerController + class Router(Middleware): # pylint: disable-msg=R0921 """Wrapper to help setup routes.middleware.RoutesMiddleware.""" -- cgit From a0fb0fdf1e899488f0717bea6ee2cad58120070b Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 11 Aug 2010 14:46:43 -0400 Subject: Working router that can target WSGI middleware or a standard controller+action --- nova/wsgi.py | 205 ++++++++++++++++++++++++++++------------------------------- 1 file changed, 98 insertions(+), 107 deletions(-) diff --git a/nova/wsgi.py b/nova/wsgi.py index c511a3f06..81890499e 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -29,6 +29,8 @@ import eventlet.wsgi eventlet.patcher.monkey_patch(all=False, socket=True) import routes import routes.middleware +import webob.dec +import webob.exc logging.getLogger("routes.middleware").addHandler(logging.StreamHandler()) @@ -89,75 +91,80 @@ class Middleware(Application): # pylint: disable-msg=W0223 class Debug(Middleware): - """Helper class that can be insertd into any WSGI application chain + """Helper class that can be inserted into any WSGI application chain to get information about the request and response.""" - def __call__(self, environ, start_response): - for key, value in environ.items(): + @webob.dec.wsgify + def __call__(self, req): + print ("*" * 40) + " REQUEST ENVIRON" + for key, value in req.environ.items(): print key, "=", value print - wrapper = debug_start_response(start_response) - return debug_print_body(self.application(environ, wrapper)) - - -def 
debug_start_response(start_response): - """Wrap the start_response to capture when called.""" + resp = req.get_response(self.application) - def wrapper(status, headers, exc_info=None): - """Print out all headers when start_response is called.""" - print status - for (key, value) in headers: + print ("*" * 40) + " RESPONSE HEADERS" + for (key, value) in resp.headers: print key, "=", value print - start_response(status, headers, exc_info) - return wrapper + resp.app_iter = self.print_generator(resp.app_iter) + return resp -def debug_print_body(body): - """Print the body of the response as it is sent back.""" + @staticmethod + def print_generator(app_iter): + """ + Iterator that prints the contents of a wrapper string iterator + when iterated. + """ + print ("*" * 40) + "BODY" + for part in app_iter: + sys.stdout.write(part) + sys.stdout.flush() + yield part + print - class Wrapper(object): - """Iterate through all the body parts and print before returning.""" - def __iter__(self): - for part in body: - sys.stdout.write(part) - sys.stdout.flush() - yield part - print +class Router(object): + """ + WSGI middleware that maps incoming requests to targets. + + Non-WSGI-app targets have their results converted to a WSGI response + automatically -- by default, they are serialized according to the Content + Type from the request. This behavior can be changed by overriding + _to_webob_response(). + """ + + def __init__(self, map, targets): + """ + Create a router for the given routes.Mapper `map`. - return Wrapper() + Each route in `map` must contain either + - a 'wsgi_app' string or + - a 'controller' string and an 'action' string. + 'wsgi_app' is a key into the `target` dictionary whose value + is a WSGI app. 'controller' is a key into `target' whose value is + a class instance containing the method specified by 'action'. 
-class ParsedRoutes(Middleware): - """Processed parsed routes from routes.middleware.RoutesMiddleware - and call either the controller if found or the default application - otherwise.""" + Examples: + map = routes.Mapper() + targets = { "servers": ServerController(), "blog": BlogWsgiApp() } - def __call__(self, environ, start_response): - if environ['routes.route'] is None: - return self.application(environ, start_response) - app = environ['wsgiorg.routing_args'][1]['controller'] - return app(environ, start_response) + # Explicit mapping of one route to a controller+action + map.connect(None, "/serverlist", controller="servers", action="list") -class MichaelRouterMiddleware(object): - """ - Router that maps incoming requests to WSGI apps or to standard - controllers+actions. The response will be a WSGI response; standard - controllers+actions will by default have their results serialized - to the requested Content Type, or you can subclass and override - _to_webob_response to customize this. - """ - - def __init__(self, map): - """ - Create a router for the given routes.Mapper. It may contain standard - routes (i.e. specifying controllers and actions), or may route to a - WSGI app by instead specifying a wsgi_app=SomeApp() parameter in - map.connect(). + # Controller string is implicitly equal to 2nd param here, and + # actions are all implicitly defined + map.resource("server", "servers") + + # Pointing to a WSGI app. You'll need to specify the {path_info:.*} + # parameter so the target app can work with just his section of the + # URL. 
+ map.connect(None, "/v1.0/{path_info:.*}", wsgi_app="blog") """ self.map = map + self.targets = targets self._router = routes.middleware.RoutesMiddleware(self.__proceed, self.map) @webob.dec.wsgify @@ -169,23 +176,28 @@ class MichaelRouterMiddleware(object): return self._router @webob.dec.wsgify - @staticmethod - def __proceed(req): + def __proceed(self, req): # Called by self._router after matching the incoming request to a route # and putting the information into req.environ. Either returns 404, the # routed WSGI app, or _to_webob_response(the action result). if req.environ['routes.route'] is None: return webob.exc.HTTPNotFound() - match = environ['wsgiorg.routing_args'][1] + match = req.environ['wsgiorg.routing_args'][1] if 'wsgi_app' in match: - return match['wsgi_app'] + app_name = match['wsgi_app'] + app = self.targets[app_name] + return app else: kwargs = match.copy() - controller, action = match['controller'], match['action'] - delete kwargs['controller'] - delete kwargs['action'] - return _to_webob_response(req, getattr(controller, action)(**kwargs)) + controller_name, action = match['controller'], match['action'] + del kwargs['controller'] + del kwargs['action'] + + controller = self.targets[controller_name] + method = getattr(controller, action) + result = method(**kwargs) + return self._to_webob_response(req, result) def _to_webob_response(self, req, result): """ @@ -194,7 +206,8 @@ class MichaelRouterMiddleware(object): webob.Response before returning up the WSGI chain. By default it serializes to the requested Content Type. 
""" - return Serializer(req).serialize(result) + return Serializer(req.environ).serialize(result) + class Serializer(object): """ @@ -206,75 +219,53 @@ class Serializer(object): self.environ = environ def serialize(self, data): - req = webob.Request(environ) + req = webob.Request(self.environ) # TODO(gundlach): temp - if 'applicatio/json' in req.accept): + if req.accept and 'application/json' in req.accept: import json - return json.dumps(result) + return json.dumps(data) else: return '' + repr(data) + '' -class ApiVersionRouter(MichaelRouterMiddleware): +class ApiVersionRouter(Router): def __init__(self): map = routes.Mapper() - map.connect(None, "/v1.0/{path_info:.*}", wsgi_app=RsApiRouter()) - map.connect(None, "/ec2/{path_info:.*}", wsgi_app=Ec2ApiRouter()) + map.connect(None, "/v1.0/{path_info:.*}", wsgi_app="rs") + map.connect(None, "/ec2/{path_info:.*}", wsgi_app="ec2") + + targets = { "rs": RsApiRouter(), "ec2": Ec2ApiRouter() } - super(ApiVersionRouter, self).__init__(self, map) + super(ApiVersionRouter, self).__init__(map, targets) -class RsApiRouter(MichaelRouterMiddleware): +class RsApiRouter(Router): def __init__(self): map = routes.Mapper() - map.resource("server", "servers", controller=ServerController()) - map.resource("image", "images", controller=ImageController()) - map.resource("flavor", "flavors", controller=FlavorController()) - map.resource("sharedipgroup", "sharedipgroups", - controller=SharedIpGroupController()) + map.resource("server", "servers") + map.resource("image", "images") + map.resource("flavor", "flavors") + map.resource("sharedipgroup", "sharedipgroups") - super(RsApiRouter, self).__init__(self, map) + targets = { + 'servers': ServerController(), + 'images': ImageController(), + 'flavors': FlavorController(), + 'sharedipgroups': SharedIpGroupController() + } + super(RsApiRouter, self).__init__(map, targets) + +# TODO(gundlach): temp class Ec2ApiRouter(object): @webob.dec.wsgify def __call__(self, req): return 'dummy response' - 
+# TODO(gundlach): temp class ServerController(object): def __getattr__(self, key): - return {'dummy': 'dummy response'} + return lambda **args: {key: 'dummy response for %s' % repr(args)} +# TODO(gundlach): temp ImageController = FlavorController = SharedIpGroupController = ServerController - - -class Router(Middleware): # pylint: disable-msg=R0921 - """Wrapper to help setup routes.middleware.RoutesMiddleware.""" - - def __init__(self, application): - self.map = routes.Mapper() - self._build_map() - application = ParsedRoutes(application) - application = routes.middleware.RoutesMiddleware(application, self.map) - super(Router, self).__init__(application) - - def __call__(self, environ, start_response): - return self.application(environ, start_response) - - def _build_map(self): - """Method to create new connections for the routing map.""" - raise NotImplementedError("You must implement _build_map") - - def _connect(self, *args, **kwargs): - """Wrapper for the map.connect method.""" - self.map.connect(*args, **kwargs) - - -def route_args(application): - """Decorator to make grabbing routing args more convenient.""" - - def wrapper(self, req): - """Call application with req and parsed routing args from.""" - return application(self, req, req.environ['wsgiorg.routing_args'][1]) - - return wrapper -- cgit From 2e753b033dae6270674c0397be8e01bd2ff47980 Mon Sep 17 00:00:00 2001 From: Matthew Dietz Date: Wed, 11 Aug 2010 15:27:27 -0500 Subject: Prototype implementation of Servers controller --- nova/endpoint/aws/cloud.py | 729 +++++++++++++++++++++ nova/endpoint/aws/images.py | 95 +++ nova/endpoint/cloud.py | 729 --------------------- nova/endpoint/images.py | 95 --- nova/endpoint/rackspace.py | 186 ------ nova/endpoint/rackspace/controllers/base.py | 9 + nova/endpoint/rackspace/controllers/flavors.py | 0 nova/endpoint/rackspace/controllers/images.py | 0 nova/endpoint/rackspace/controllers/servers.py | 72 ++ .../rackspace/controllers/shared_ip_groups.py | 0 
nova/endpoint/rackspace/rackspace.py | 183 ++++++ 11 files changed, 1088 insertions(+), 1010 deletions(-) create mode 100644 nova/endpoint/aws/cloud.py create mode 100644 nova/endpoint/aws/images.py delete mode 100644 nova/endpoint/cloud.py delete mode 100644 nova/endpoint/images.py delete mode 100644 nova/endpoint/rackspace.py create mode 100644 nova/endpoint/rackspace/controllers/base.py create mode 100644 nova/endpoint/rackspace/controllers/flavors.py create mode 100644 nova/endpoint/rackspace/controllers/images.py create mode 100644 nova/endpoint/rackspace/controllers/servers.py create mode 100644 nova/endpoint/rackspace/controllers/shared_ip_groups.py create mode 100644 nova/endpoint/rackspace/rackspace.py diff --git a/nova/endpoint/aws/cloud.py b/nova/endpoint/aws/cloud.py new file mode 100644 index 000000000..878d54a15 --- /dev/null +++ b/nova/endpoint/aws/cloud.py @@ -0,0 +1,729 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Cloud Controller: Implementation of EC2 REST API calls, which are +dispatched to other nodes via AMQP RPC. State is via distributed +datastore. 
+""" + +import base64 +import logging +import os +import time +from twisted.internet import defer + +from nova import datastore +from nova import exception +from nova import flags +from nova import rpc +from nova import utils +from nova.auth import rbac +from nova.auth import manager +from nova.compute import model +from nova.compute.instance_types import INSTANCE_TYPES +from nova.endpoint import images +from nova.network import service as network_service +from nova.network import model as network_model +from nova.volume import service + + +FLAGS = flags.FLAGS + +flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') + +def _gen_key(user_id, key_name): + """ Tuck this into AuthManager """ + try: + mgr = manager.AuthManager() + private_key, fingerprint = mgr.generate_key_pair(user_id, key_name) + except Exception as ex: + return {'exception': ex} + return {'private_key': private_key, 'fingerprint': fingerprint} + + +class CloudController(object): + """ CloudController provides the critical dispatch between + inbound API calls through the endpoint and messages + sent to the other nodes. +""" + def __init__(self): + self.instdir = model.InstanceDirectory() + self.setup() + + @property + def instances(self): + """ All instances in the system, as dicts """ + return self.instdir.all + + @property + def volumes(self): + """ returns a list of all volumes """ + for volume_id in datastore.Redis.instance().smembers("volumes"): + volume = service.get_volume(volume_id) + yield volume + + def __str__(self): + return 'CloudController' + + def setup(self): + """ Ensure the keychains and folders exist. 
""" + # Create keys folder, if it doesn't exist + if not os.path.exists(FLAGS.keys_path): + os.makedirs(os.path.abspath(FLAGS.keys_path)) + # Gen root CA, if we don't have one + root_ca_path = os.path.join(FLAGS.ca_path, FLAGS.ca_file) + if not os.path.exists(root_ca_path): + start = os.getcwd() + os.chdir(FLAGS.ca_path) + utils.runthis("Generating root CA: %s", "sh genrootca.sh") + os.chdir(start) + # TODO: Do this with M2Crypto instead + + def get_instance_by_ip(self, ip): + return self.instdir.by_ip(ip) + + def _get_mpi_data(self, project_id): + result = {} + for instance in self.instdir.all: + if instance['project_id'] == project_id: + line = '%s slots=%d' % (instance['private_dns_name'], INSTANCE_TYPES[instance['instance_type']]['vcpus']) + if instance['key_name'] in result: + result[instance['key_name']].append(line) + else: + result[instance['key_name']] = [line] + return result + + def get_metadata(self, ip): + i = self.get_instance_by_ip(ip) + if i is None: + return None + mpi = self._get_mpi_data(i['project_id']) + if i['key_name']: + keys = { + '0': { + '_name': i['key_name'], + 'openssh-key': i['key_data'] + } + } + else: + keys = '' + data = { + 'user-data': base64.b64decode(i['user_data']), + 'meta-data': { + 'ami-id': i['image_id'], + 'ami-launch-index': i['ami_launch_index'], + 'ami-manifest-path': 'FIXME', # image property + 'block-device-mapping': { # TODO: replace with real data + 'ami': 'sda1', + 'ephemeral0': 'sda2', + 'root': '/dev/sda1', + 'swap': 'sda3' + }, + 'hostname': i['private_dns_name'], # is this public sometimes? 
+ 'instance-action': 'none', + 'instance-id': i['instance_id'], + 'instance-type': i.get('instance_type', ''), + 'local-hostname': i['private_dns_name'], + 'local-ipv4': i['private_dns_name'], # TODO: switch to IP + 'kernel-id': i.get('kernel_id', ''), + 'placement': { + 'availaibility-zone': i.get('availability_zone', 'nova'), + }, + 'public-hostname': i.get('dns_name', ''), + 'public-ipv4': i.get('dns_name', ''), # TODO: switch to IP + 'public-keys' : keys, + 'ramdisk-id': i.get('ramdisk_id', ''), + 'reservation-id': i['reservation_id'], + 'security-groups': i.get('groups', ''), + 'mpi': mpi + } + } + if False: # TODO: store ancestor ids + data['ancestor-ami-ids'] = [] + if i.get('product_codes', None): + data['product-codes'] = i['product_codes'] + return data + + @rbac.allow('all') + def describe_availability_zones(self, context, **kwargs): + return {'availabilityZoneInfo': [{'zoneName': 'nova', + 'zoneState': 'available'}]} + + @rbac.allow('all') + def describe_regions(self, context, region_name=None, **kwargs): + # TODO(vish): region_name is an array. 
Support filtering + return {'regionInfo': [{'regionName': 'nova', + 'regionUrl': FLAGS.ec2_url}]} + + @rbac.allow('all') + def describe_snapshots(self, + context, + snapshot_id=None, + owner=None, + restorable_by=None, + **kwargs): + return {'snapshotSet': [{'snapshotId': 'fixme', + 'volumeId': 'fixme', + 'status': 'fixme', + 'startTime': 'fixme', + 'progress': 'fixme', + 'ownerId': 'fixme', + 'volumeSize': 0, + 'description': 'fixme'}]} + + @rbac.allow('all') + def describe_key_pairs(self, context, key_name=None, **kwargs): + key_pairs = context.user.get_key_pairs() + if not key_name is None: + key_pairs = [x for x in key_pairs if x.name in key_name] + + result = [] + for key_pair in key_pairs: + # filter out the vpn keys + suffix = FLAGS.vpn_key_suffix + if context.user.is_admin() or not key_pair.name.endswith(suffix): + result.append({ + 'keyName': key_pair.name, + 'keyFingerprint': key_pair.fingerprint, + }) + + return { 'keypairsSet': result } + + @rbac.allow('all') + def create_key_pair(self, context, key_name, **kwargs): + try: + d = defer.Deferred() + p = context.handler.application.settings.get('pool') + def _complete(kwargs): + if 'exception' in kwargs: + d.errback(kwargs['exception']) + return + d.callback({'keyName': key_name, + 'keyFingerprint': kwargs['fingerprint'], + 'keyMaterial': kwargs['private_key']}) + p.apply_async(_gen_key, [context.user.id, key_name], + callback=_complete) + return d + + except manager.UserError as e: + raise + + @rbac.allow('all') + def delete_key_pair(self, context, key_name, **kwargs): + context.user.delete_key_pair(key_name) + # aws returns true even if the key doesn't exist + return True + + @rbac.allow('all') + def describe_security_groups(self, context, group_names, **kwargs): + groups = { 'securityGroupSet': [] } + + # Stubbed for now to unblock other things. 
+ return groups + + @rbac.allow('netadmin') + def create_security_group(self, context, group_name, **kwargs): + return True + + @rbac.allow('netadmin') + def delete_security_group(self, context, group_name, **kwargs): + return True + + @rbac.allow('projectmanager', 'sysadmin') + def get_console_output(self, context, instance_id, **kwargs): + # instance_id is passed in as a list of instances + instance = self._get_instance(context, instance_id[0]) + return rpc.call('%s.%s' % (FLAGS.compute_topic, instance['node_name']), + {"method": "get_console_output", + "args" : {"instance_id": instance_id[0]}}) + + def _get_user_id(self, context): + if context and context.user: + return context.user.id + else: + return None + + @rbac.allow('projectmanager', 'sysadmin') + def describe_volumes(self, context, **kwargs): + volumes = [] + for volume in self.volumes: + if context.user.is_admin() or volume['project_id'] == context.project.id: + v = self.format_volume(context, volume) + volumes.append(v) + return defer.succeed({'volumeSet': volumes}) + + def format_volume(self, context, volume): + v = {} + v['volumeId'] = volume['volume_id'] + v['status'] = volume['status'] + v['size'] = volume['size'] + v['availabilityZone'] = volume['availability_zone'] + v['createTime'] = volume['create_time'] + if context.user.is_admin(): + v['status'] = '%s (%s, %s, %s, %s)' % ( + volume.get('status', None), + volume.get('user_id', None), + volume.get('node_name', None), + volume.get('instance_id', ''), + volume.get('mountpoint', '')) + if volume['attach_status'] == 'attached': + v['attachmentSet'] = [{'attachTime': volume['attach_time'], + 'deleteOnTermination': volume['delete_on_termination'], + 'device' : volume['mountpoint'], + 'instanceId' : volume['instance_id'], + 'status' : 'attached', + 'volume_id' : volume['volume_id']}] + else: + v['attachmentSet'] = [{}] + return v + + @rbac.allow('projectmanager', 'sysadmin') + @defer.inlineCallbacks + def create_volume(self, context, size, **kwargs): 
+ # TODO(vish): refactor this to create the volume object here and tell service to create it + result = yield rpc.call(FLAGS.volume_topic, {"method": "create_volume", + "args" : {"size": size, + "user_id": context.user.id, + "project_id": context.project.id}}) + # NOTE(vish): rpc returned value is in the result key in the dictionary + volume = self._get_volume(context, result['result']) + defer.returnValue({'volumeSet': [self.format_volume(context, volume)]}) + + def _get_address(self, context, public_ip): + # FIXME(vish) this should move into network.py + address = network_model.PublicAddress.lookup(public_ip) + if address and (context.user.is_admin() or address['project_id'] == context.project.id): + return address + raise exception.NotFound("Address at ip %s not found" % public_ip) + + def _get_image(self, context, image_id): + """passes in context because + objectstore does its own authorization""" + result = images.list(context, [image_id]) + if not result: + raise exception.NotFound('Image %s could not be found' % image_id) + image = result[0] + return image + + def _get_instance(self, context, instance_id): + for instance in self.instdir.all: + if instance['instance_id'] == instance_id: + if context.user.is_admin() or instance['project_id'] == context.project.id: + return instance + raise exception.NotFound('Instance %s could not be found' % instance_id) + + def _get_volume(self, context, volume_id): + volume = service.get_volume(volume_id) + if context.user.is_admin() or volume['project_id'] == context.project.id: + return volume + raise exception.NotFound('Volume %s could not be found' % volume_id) + + @rbac.allow('projectmanager', 'sysadmin') + def attach_volume(self, context, volume_id, instance_id, device, **kwargs): + volume = self._get_volume(context, volume_id) + if volume['status'] == "attached": + raise exception.ApiError("Volume is already attached") + # TODO(vish): looping through all volumes is slow. 
We should probably maintain an index + for vol in self.volumes: + if vol['instance_id'] == instance_id and vol['mountpoint'] == device: + raise exception.ApiError("Volume %s is already attached to %s" % (vol['volume_id'], vol['mountpoint'])) + volume.start_attach(instance_id, device) + instance = self._get_instance(context, instance_id) + compute_node = instance['node_name'] + rpc.cast('%s.%s' % (FLAGS.compute_topic, compute_node), + {"method": "attach_volume", + "args" : {"volume_id": volume_id, + "instance_id" : instance_id, + "mountpoint" : device}}) + return defer.succeed({'attachTime' : volume['attach_time'], + 'device' : volume['mountpoint'], + 'instanceId' : instance_id, + 'requestId' : context.request_id, + 'status' : volume['attach_status'], + 'volumeId' : volume_id}) + + + @rbac.allow('projectmanager', 'sysadmin') + def detach_volume(self, context, volume_id, **kwargs): + volume = self._get_volume(context, volume_id) + instance_id = volume.get('instance_id', None) + if not instance_id: + raise exception.Error("Volume isn't attached to anything!") + if volume['status'] == "available": + raise exception.Error("Volume is already detached") + try: + volume.start_detach() + instance = self._get_instance(context, instance_id) + rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), + {"method": "detach_volume", + "args" : {"instance_id": instance_id, + "volume_id": volume_id}}) + except exception.NotFound: + # If the instance doesn't exist anymore, + # then we need to call detach blind + volume.finish_detach() + return defer.succeed({'attachTime' : volume['attach_time'], + 'device' : volume['mountpoint'], + 'instanceId' : instance_id, + 'requestId' : context.request_id, + 'status' : volume['attach_status'], + 'volumeId' : volume_id}) + + def _convert_to_set(self, lst, label): + if lst == None or lst == []: + return None + if not isinstance(lst, list): + lst = [lst] + return [{label: x} for x in lst] + + @rbac.allow('all') + def 
describe_instances(self, context, **kwargs): + return defer.succeed(self._format_instances(context)) + + def _format_instances(self, context, reservation_id = None): + reservations = {} + if context.user.is_admin(): + instgenerator = self.instdir.all + else: + instgenerator = self.instdir.by_project(context.project.id) + for instance in instgenerator: + res_id = instance.get('reservation_id', 'Unknown') + if reservation_id != None and reservation_id != res_id: + continue + if not context.user.is_admin(): + if instance['image_id'] == FLAGS.vpn_image_id: + continue + i = {} + i['instance_id'] = instance.get('instance_id', None) + i['image_id'] = instance.get('image_id', None) + i['instance_state'] = { + 'code': instance.get('state', 0), + 'name': instance.get('state_description', 'pending') + } + i['public_dns_name'] = network_model.get_public_ip_for_instance( + i['instance_id']) + i['private_dns_name'] = instance.get('private_dns_name', None) + if not i['public_dns_name']: + i['public_dns_name'] = i['private_dns_name'] + i['dns_name'] = instance.get('dns_name', None) + i['key_name'] = instance.get('key_name', None) + if context.user.is_admin(): + i['key_name'] = '%s (%s, %s)' % (i['key_name'], + instance.get('project_id', None), instance.get('node_name','')) + i['product_codes_set'] = self._convert_to_set( + instance.get('product_codes', None), 'product_code') + i['instance_type'] = instance.get('instance_type', None) + i['launch_time'] = instance.get('launch_time', None) + i['ami_launch_index'] = instance.get('ami_launch_index', + None) + if not reservations.has_key(res_id): + r = {} + r['reservation_id'] = res_id + r['owner_id'] = instance.get('project_id', None) + r['group_set'] = self._convert_to_set( + instance.get('groups', None), 'group_id') + r['instances_set'] = [] + reservations[res_id] = r + reservations[res_id]['instances_set'].append(i) + + instance_response = {'reservationSet' : list(reservations.values()) } + return instance_response + + 
@rbac.allow('all') + def describe_addresses(self, context, **kwargs): + return self.format_addresses(context) + + def format_addresses(self, context): + addresses = [] + for address in network_model.PublicAddress.all(): + # TODO(vish): implement a by_project iterator for addresses + if (context.user.is_admin() or + address['project_id'] == context.project.id): + address_rv = { + 'public_ip': address['address'], + 'instance_id' : address.get('instance_id', 'free') + } + if context.user.is_admin(): + address_rv['instance_id'] = "%s (%s, %s)" % ( + address['instance_id'], + address['user_id'], + address['project_id'], + ) + addresses.append(address_rv) + return {'addressesSet': addresses} + + @rbac.allow('netadmin') + @defer.inlineCallbacks + def allocate_address(self, context, **kwargs): + network_topic = yield self._get_network_topic(context) + alloc_result = yield rpc.call(network_topic, + {"method": "allocate_elastic_ip", + "args": {"user_id": context.user.id, + "project_id": context.project.id}}) + public_ip = alloc_result['result'] + defer.returnValue({'addressSet': [{'publicIp' : public_ip}]}) + + @rbac.allow('netadmin') + @defer.inlineCallbacks + def release_address(self, context, public_ip, **kwargs): + # NOTE(vish): Should we make sure this works? 
+ network_topic = yield self._get_network_topic(context) + rpc.cast(network_topic, + {"method": "deallocate_elastic_ip", + "args": {"elastic_ip": public_ip}}) + defer.returnValue({'releaseResponse': ["Address released."]}) + + @rbac.allow('netadmin') + @defer.inlineCallbacks + def associate_address(self, context, instance_id, public_ip, **kwargs): + instance = self._get_instance(context, instance_id) + address = self._get_address(context, public_ip) + network_topic = yield self._get_network_topic(context) + rpc.cast(network_topic, + {"method": "associate_elastic_ip", + "args": {"elastic_ip": address['address'], + "fixed_ip": instance['private_dns_name'], + "instance_id": instance['instance_id']}}) + defer.returnValue({'associateResponse': ["Address associated."]}) + + @rbac.allow('netadmin') + @defer.inlineCallbacks + def disassociate_address(self, context, public_ip, **kwargs): + address = self._get_address(context, public_ip) + network_topic = yield self._get_network_topic(context) + rpc.cast(network_topic, + {"method": "disassociate_elastic_ip", + "args": {"elastic_ip": address['address']}}) + defer.returnValue({'disassociateResponse': ["Address disassociated."]}) + + @defer.inlineCallbacks + def _get_network_topic(self, context): + """Retrieves the network host for a project""" + host = network_service.get_host_for_project(context.project.id) + if not host: + result = yield rpc.call(FLAGS.network_topic, + {"method": "set_network_host", + "args": {"user_id": context.user.id, + "project_id": context.project.id}}) + host = result['result'] + defer.returnValue('%s.%s' %(FLAGS.network_topic, host)) + + @rbac.allow('projectmanager', 'sysadmin') + @defer.inlineCallbacks + def run_instances(self, context, **kwargs): + # make sure user can access the image + # vpn image is private so it doesn't show up on lists + if kwargs['image_id'] != FLAGS.vpn_image_id: + image = self._get_image(context, kwargs['image_id']) + + # FIXME(ja): if image is cloudpipe, this breaks + + # 
get defaults from imagestore + image_id = image['imageId'] + kernel_id = image.get('kernelId', FLAGS.default_kernel) + ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk) + + # API parameters overrides of defaults + kernel_id = kwargs.get('kernel_id', kernel_id) + ramdisk_id = kwargs.get('ramdisk_id', ramdisk_id) + + # make sure we have access to kernel and ramdisk + self._get_image(context, kernel_id) + self._get_image(context, ramdisk_id) + + logging.debug("Going to run instances...") + reservation_id = utils.generate_uid('r') + launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) + key_data = None + if kwargs.has_key('key_name'): + key_pair = context.user.get_key_pair(kwargs['key_name']) + if not key_pair: + raise exception.ApiError('Key Pair %s not found' % + kwargs['key_name']) + key_data = key_pair.public_key + network_topic = yield self._get_network_topic(context) + # TODO: Get the real security group of launch in here + security_group = "default" + for num in range(int(kwargs['max_count'])): + vpn = False + if image_id == FLAGS.vpn_image_id: + vpn = True + allocate_result = yield rpc.call(network_topic, + {"method": "allocate_fixed_ip", + "args": {"user_id": context.user.id, + "project_id": context.project.id, + "security_group": security_group, + "vpn": vpn}}) + allocate_data = allocate_result['result'] + inst = self.instdir.new() + inst['image_id'] = image_id + inst['kernel_id'] = kernel_id + inst['ramdisk_id'] = ramdisk_id + inst['user_data'] = kwargs.get('user_data', '') + inst['instance_type'] = kwargs.get('instance_type', 'm1.small') + inst['reservation_id'] = reservation_id + inst['launch_time'] = launch_time + inst['key_data'] = key_data or '' + inst['key_name'] = kwargs.get('key_name', '') + inst['user_id'] = context.user.id + inst['project_id'] = context.project.id + inst['ami_launch_index'] = num + inst['security_group'] = security_group + for (key, value) in allocate_data.iteritems(): + inst[key] = value + + inst.save() + 
rpc.cast(FLAGS.compute_topic, + {"method": "run_instance", + "args": {"instance_id" : inst.instance_id}}) + logging.debug("Casting to node for %s's instance with IP of %s" % + (context.user.name, inst['private_dns_name'])) + # TODO: Make Network figure out the network name from ip. + defer.returnValue(self._format_instances(context, reservation_id)) + + @rbac.allow('projectmanager', 'sysadmin') + @defer.inlineCallbacks + def terminate_instances(self, context, instance_id, **kwargs): + logging.debug("Going to start terminating instances") + network_topic = yield self._get_network_topic(context) + for i in instance_id: + logging.debug("Going to try and terminate %s" % i) + try: + instance = self._get_instance(context, i) + except exception.NotFound: + logging.warning("Instance %s was not found during terminate" + % i) + continue + elastic_ip = network_model.get_public_ip_for_instance(i) + if elastic_ip: + logging.debug("Disassociating address %s" % elastic_ip) + # NOTE(vish): Right now we don't really care if the ip is + # disassociated. We may need to worry about + # checking this later. Perhaps in the scheduler? + rpc.cast(network_topic, + {"method": "disassociate_elastic_ip", + "args": {"elastic_ip": elastic_ip}}) + + fixed_ip = instance.get('private_dns_name', None) + if fixed_ip: + logging.debug("Deallocating address %s" % fixed_ip) + # NOTE(vish): Right now we don't really care if the ip is + # actually removed. We may need to worry about + # checking this later. Perhaps in the scheduler? 
+ rpc.cast(network_topic, + {"method": "deallocate_fixed_ip", + "args": {"fixed_ip": fixed_ip}}) + + if instance.get('node_name', 'unassigned') != 'unassigned': + # NOTE(joshua?): It's also internal default + rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), + {"method": "terminate_instance", + "args": {"instance_id": i}}) + else: + instance.destroy() + defer.returnValue(True) + + @rbac.allow('projectmanager', 'sysadmin') + def reboot_instances(self, context, instance_id, **kwargs): + """instance_id is a list of instance ids""" + for i in instance_id: + instance = self._get_instance(context, i) + rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), + {"method": "reboot_instance", + "args" : {"instance_id": i}}) + return defer.succeed(True) + + @rbac.allow('projectmanager', 'sysadmin') + def delete_volume(self, context, volume_id, **kwargs): + # TODO: return error if not authorized + volume = self._get_volume(context, volume_id) + volume_node = volume['node_name'] + rpc.cast('%s.%s' % (FLAGS.volume_topic, volume_node), + {"method": "delete_volume", + "args" : {"volume_id": volume_id}}) + return defer.succeed(True) + + @rbac.allow('all') + def describe_images(self, context, image_id=None, **kwargs): + # The objectstore does its own authorization for describe + imageSet = images.list(context, image_id) + return defer.succeed({'imagesSet': imageSet}) + + @rbac.allow('projectmanager', 'sysadmin') + def deregister_image(self, context, image_id, **kwargs): + # FIXME: should the objectstore be doing these authorization checks? + images.deregister(context, image_id) + return defer.succeed({'imageId': image_id}) + + @rbac.allow('projectmanager', 'sysadmin') + def register_image(self, context, image_location=None, **kwargs): + # FIXME: should the objectstore be doing these authorization checks? 
+ if image_location is None and kwargs.has_key('name'): + image_location = kwargs['name'] + image_id = images.register(context, image_location) + logging.debug("Registered %s as %s" % (image_location, image_id)) + + return defer.succeed({'imageId': image_id}) + + @rbac.allow('all') + def describe_image_attribute(self, context, image_id, attribute, **kwargs): + if attribute != 'launchPermission': + raise exception.ApiError('attribute not supported: %s' % attribute) + try: + image = images.list(context, image_id)[0] + except IndexError: + raise exception.ApiError('invalid id: %s' % image_id) + result = { 'image_id': image_id, 'launchPermission': [] } + if image['isPublic']: + result['launchPermission'].append({ 'group': 'all' }) + return defer.succeed(result) + + @rbac.allow('projectmanager', 'sysadmin') + def modify_image_attribute(self, context, image_id, attribute, operation_type, **kwargs): + # TODO(devcamcar): Support users and groups other than 'all'. + if attribute != 'launchPermission': + raise exception.ApiError('attribute not supported: %s' % attribute) + if not 'user_group' in kwargs: + raise exception.ApiError('user or group not specified') + if len(kwargs['user_group']) != 1 and kwargs['user_group'][0] != 'all': + raise exception.ApiError('only group "all" is supported') + if not operation_type in ['add', 'remove']: + raise exception.ApiError('operation_type must be add or remove') + result = images.modify(context, image_id, operation_type) + return defer.succeed(result) + + def update_state(self, topic, value): + """ accepts status reports from the queue and consolidates them """ + # TODO(jmc): if an instance has disappeared from + # the node, call instance_death + if topic == "instances": + return defer.succeed(True) + aggregate_state = getattr(self, topic) + node_name = value.keys()[0] + items = value[node_name] + + logging.debug("Updating %s state for %s" % (topic, node_name)) + + for item_id in items.keys(): + if (aggregate_state.has_key('pending') 
and + aggregate_state['pending'].has_key(item_id)): + del aggregate_state['pending'][item_id] + aggregate_state[node_name] = items + + return defer.succeed(True) diff --git a/nova/endpoint/aws/images.py b/nova/endpoint/aws/images.py new file mode 100644 index 000000000..fe7cb5d11 --- /dev/null +++ b/nova/endpoint/aws/images.py @@ -0,0 +1,95 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Proxy AMI-related calls from the cloud controller, to the running +objectstore daemon. 
+""" + +import boto.s3.connection +import json +import urllib + +from nova import flags +from nova import utils +from nova.auth import manager + + +FLAGS = flags.FLAGS + +def modify(context, image_id, operation): + conn(context).make_request( + method='POST', + bucket='_images', + query_args=qs({'image_id': image_id, 'operation': operation})) + + return True + + +def register(context, image_location): + """ rpc call to register a new image based from a manifest """ + + image_id = utils.generate_uid('ami') + conn(context).make_request( + method='PUT', + bucket='_images', + query_args=qs({'image_location': image_location, + 'image_id': image_id})) + + return image_id + +def list(context, filter_list=[]): + """ return a list of all images that a user can see + + optionally filtered by a list of image_id """ + + # FIXME: send along the list of only_images to check for + response = conn(context).make_request( + method='GET', + bucket='_images') + + result = json.loads(response.read()) + if not filter_list is None: + return [i for i in result if i['imageId'] in filter_list] + return result + +def deregister(context, image_id): + """ unregister an image """ + conn(context).make_request( + method='DELETE', + bucket='_images', + query_args=qs({'image_id': image_id})) + +def conn(context): + access = manager.AuthManager().get_access_key(context.user, + context.project) + secret = str(context.user.secret) + calling = boto.s3.connection.OrdinaryCallingFormat() + return boto.s3.connection.S3Connection(aws_access_key_id=access, + aws_secret_access_key=secret, + is_secure=False, + calling_format=calling, + port=FLAGS.s3_port, + host=FLAGS.s3_host) + + +def qs(params): + pairs = [] + for key in params.keys(): + pairs.append(key + '=' + urllib.quote(params[key])) + return '&'.join(pairs) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py deleted file mode 100644 index 878d54a15..000000000 --- a/nova/endpoint/cloud.py +++ /dev/null @@ -1,729 +0,0 @@ -# vim: tabstop=4 
shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Cloud Controller: Implementation of EC2 REST API calls, which are -dispatched to other nodes via AMQP RPC. State is via distributed -datastore. -""" - -import base64 -import logging -import os -import time -from twisted.internet import defer - -from nova import datastore -from nova import exception -from nova import flags -from nova import rpc -from nova import utils -from nova.auth import rbac -from nova.auth import manager -from nova.compute import model -from nova.compute.instance_types import INSTANCE_TYPES -from nova.endpoint import images -from nova.network import service as network_service -from nova.network import model as network_model -from nova.volume import service - - -FLAGS = flags.FLAGS - -flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') - -def _gen_key(user_id, key_name): - """ Tuck this into AuthManager """ - try: - mgr = manager.AuthManager() - private_key, fingerprint = mgr.generate_key_pair(user_id, key_name) - except Exception as ex: - return {'exception': ex} - return {'private_key': private_key, 'fingerprint': fingerprint} - - -class CloudController(object): - """ CloudController provides the critical dispatch between - inbound API calls through the endpoint and messages - sent to 
the other nodes. -""" - def __init__(self): - self.instdir = model.InstanceDirectory() - self.setup() - - @property - def instances(self): - """ All instances in the system, as dicts """ - return self.instdir.all - - @property - def volumes(self): - """ returns a list of all volumes """ - for volume_id in datastore.Redis.instance().smembers("volumes"): - volume = service.get_volume(volume_id) - yield volume - - def __str__(self): - return 'CloudController' - - def setup(self): - """ Ensure the keychains and folders exist. """ - # Create keys folder, if it doesn't exist - if not os.path.exists(FLAGS.keys_path): - os.makedirs(os.path.abspath(FLAGS.keys_path)) - # Gen root CA, if we don't have one - root_ca_path = os.path.join(FLAGS.ca_path, FLAGS.ca_file) - if not os.path.exists(root_ca_path): - start = os.getcwd() - os.chdir(FLAGS.ca_path) - utils.runthis("Generating root CA: %s", "sh genrootca.sh") - os.chdir(start) - # TODO: Do this with M2Crypto instead - - def get_instance_by_ip(self, ip): - return self.instdir.by_ip(ip) - - def _get_mpi_data(self, project_id): - result = {} - for instance in self.instdir.all: - if instance['project_id'] == project_id: - line = '%s slots=%d' % (instance['private_dns_name'], INSTANCE_TYPES[instance['instance_type']]['vcpus']) - if instance['key_name'] in result: - result[instance['key_name']].append(line) - else: - result[instance['key_name']] = [line] - return result - - def get_metadata(self, ip): - i = self.get_instance_by_ip(ip) - if i is None: - return None - mpi = self._get_mpi_data(i['project_id']) - if i['key_name']: - keys = { - '0': { - '_name': i['key_name'], - 'openssh-key': i['key_data'] - } - } - else: - keys = '' - data = { - 'user-data': base64.b64decode(i['user_data']), - 'meta-data': { - 'ami-id': i['image_id'], - 'ami-launch-index': i['ami_launch_index'], - 'ami-manifest-path': 'FIXME', # image property - 'block-device-mapping': { # TODO: replace with real data - 'ami': 'sda1', - 'ephemeral0': 'sda2', - 'root': 
'/dev/sda1', - 'swap': 'sda3' - }, - 'hostname': i['private_dns_name'], # is this public sometimes? - 'instance-action': 'none', - 'instance-id': i['instance_id'], - 'instance-type': i.get('instance_type', ''), - 'local-hostname': i['private_dns_name'], - 'local-ipv4': i['private_dns_name'], # TODO: switch to IP - 'kernel-id': i.get('kernel_id', ''), - 'placement': { - 'availaibility-zone': i.get('availability_zone', 'nova'), - }, - 'public-hostname': i.get('dns_name', ''), - 'public-ipv4': i.get('dns_name', ''), # TODO: switch to IP - 'public-keys' : keys, - 'ramdisk-id': i.get('ramdisk_id', ''), - 'reservation-id': i['reservation_id'], - 'security-groups': i.get('groups', ''), - 'mpi': mpi - } - } - if False: # TODO: store ancestor ids - data['ancestor-ami-ids'] = [] - if i.get('product_codes', None): - data['product-codes'] = i['product_codes'] - return data - - @rbac.allow('all') - def describe_availability_zones(self, context, **kwargs): - return {'availabilityZoneInfo': [{'zoneName': 'nova', - 'zoneState': 'available'}]} - - @rbac.allow('all') - def describe_regions(self, context, region_name=None, **kwargs): - # TODO(vish): region_name is an array. 
Support filtering - return {'regionInfo': [{'regionName': 'nova', - 'regionUrl': FLAGS.ec2_url}]} - - @rbac.allow('all') - def describe_snapshots(self, - context, - snapshot_id=None, - owner=None, - restorable_by=None, - **kwargs): - return {'snapshotSet': [{'snapshotId': 'fixme', - 'volumeId': 'fixme', - 'status': 'fixme', - 'startTime': 'fixme', - 'progress': 'fixme', - 'ownerId': 'fixme', - 'volumeSize': 0, - 'description': 'fixme'}]} - - @rbac.allow('all') - def describe_key_pairs(self, context, key_name=None, **kwargs): - key_pairs = context.user.get_key_pairs() - if not key_name is None: - key_pairs = [x for x in key_pairs if x.name in key_name] - - result = [] - for key_pair in key_pairs: - # filter out the vpn keys - suffix = FLAGS.vpn_key_suffix - if context.user.is_admin() or not key_pair.name.endswith(suffix): - result.append({ - 'keyName': key_pair.name, - 'keyFingerprint': key_pair.fingerprint, - }) - - return { 'keypairsSet': result } - - @rbac.allow('all') - def create_key_pair(self, context, key_name, **kwargs): - try: - d = defer.Deferred() - p = context.handler.application.settings.get('pool') - def _complete(kwargs): - if 'exception' in kwargs: - d.errback(kwargs['exception']) - return - d.callback({'keyName': key_name, - 'keyFingerprint': kwargs['fingerprint'], - 'keyMaterial': kwargs['private_key']}) - p.apply_async(_gen_key, [context.user.id, key_name], - callback=_complete) - return d - - except manager.UserError as e: - raise - - @rbac.allow('all') - def delete_key_pair(self, context, key_name, **kwargs): - context.user.delete_key_pair(key_name) - # aws returns true even if the key doens't exist - return True - - @rbac.allow('all') - def describe_security_groups(self, context, group_names, **kwargs): - groups = { 'securityGroupSet': [] } - - # Stubbed for now to unblock other things. 
- return groups - - @rbac.allow('netadmin') - def create_security_group(self, context, group_name, **kwargs): - return True - - @rbac.allow('netadmin') - def delete_security_group(self, context, group_name, **kwargs): - return True - - @rbac.allow('projectmanager', 'sysadmin') - def get_console_output(self, context, instance_id, **kwargs): - # instance_id is passed in as a list of instances - instance = self._get_instance(context, instance_id[0]) - return rpc.call('%s.%s' % (FLAGS.compute_topic, instance['node_name']), - {"method": "get_console_output", - "args" : {"instance_id": instance_id[0]}}) - - def _get_user_id(self, context): - if context and context.user: - return context.user.id - else: - return None - - @rbac.allow('projectmanager', 'sysadmin') - def describe_volumes(self, context, **kwargs): - volumes = [] - for volume in self.volumes: - if context.user.is_admin() or volume['project_id'] == context.project.id: - v = self.format_volume(context, volume) - volumes.append(v) - return defer.succeed({'volumeSet': volumes}) - - def format_volume(self, context, volume): - v = {} - v['volumeId'] = volume['volume_id'] - v['status'] = volume['status'] - v['size'] = volume['size'] - v['availabilityZone'] = volume['availability_zone'] - v['createTime'] = volume['create_time'] - if context.user.is_admin(): - v['status'] = '%s (%s, %s, %s, %s)' % ( - volume.get('status', None), - volume.get('user_id', None), - volume.get('node_name', None), - volume.get('instance_id', ''), - volume.get('mountpoint', '')) - if volume['attach_status'] == 'attached': - v['attachmentSet'] = [{'attachTime': volume['attach_time'], - 'deleteOnTermination': volume['delete_on_termination'], - 'device' : volume['mountpoint'], - 'instanceId' : volume['instance_id'], - 'status' : 'attached', - 'volume_id' : volume['volume_id']}] - else: - v['attachmentSet'] = [{}] - return v - - @rbac.allow('projectmanager', 'sysadmin') - @defer.inlineCallbacks - def create_volume(self, context, size, **kwargs): 
- # TODO(vish): refactor this to create the volume object here and tell service to create it - result = yield rpc.call(FLAGS.volume_topic, {"method": "create_volume", - "args" : {"size": size, - "user_id": context.user.id, - "project_id": context.project.id}}) - # NOTE(vish): rpc returned value is in the result key in the dictionary - volume = self._get_volume(context, result['result']) - defer.returnValue({'volumeSet': [self.format_volume(context, volume)]}) - - def _get_address(self, context, public_ip): - # FIXME(vish) this should move into network.py - address = network_model.PublicAddress.lookup(public_ip) - if address and (context.user.is_admin() or address['project_id'] == context.project.id): - return address - raise exception.NotFound("Address at ip %s not found" % public_ip) - - def _get_image(self, context, image_id): - """passes in context because - objectstore does its own authorization""" - result = images.list(context, [image_id]) - if not result: - raise exception.NotFound('Image %s could not be found' % image_id) - image = result[0] - return image - - def _get_instance(self, context, instance_id): - for instance in self.instdir.all: - if instance['instance_id'] == instance_id: - if context.user.is_admin() or instance['project_id'] == context.project.id: - return instance - raise exception.NotFound('Instance %s could not be found' % instance_id) - - def _get_volume(self, context, volume_id): - volume = service.get_volume(volume_id) - if context.user.is_admin() or volume['project_id'] == context.project.id: - return volume - raise exception.NotFound('Volume %s could not be found' % volume_id) - - @rbac.allow('projectmanager', 'sysadmin') - def attach_volume(self, context, volume_id, instance_id, device, **kwargs): - volume = self._get_volume(context, volume_id) - if volume['status'] == "attached": - raise exception.ApiError("Volume is already attached") - # TODO(vish): looping through all volumes is slow. 
We should probably maintain an index - for vol in self.volumes: - if vol['instance_id'] == instance_id and vol['mountpoint'] == device: - raise exception.ApiError("Volume %s is already attached to %s" % (vol['volume_id'], vol['mountpoint'])) - volume.start_attach(instance_id, device) - instance = self._get_instance(context, instance_id) - compute_node = instance['node_name'] - rpc.cast('%s.%s' % (FLAGS.compute_topic, compute_node), - {"method": "attach_volume", - "args" : {"volume_id": volume_id, - "instance_id" : instance_id, - "mountpoint" : device}}) - return defer.succeed({'attachTime' : volume['attach_time'], - 'device' : volume['mountpoint'], - 'instanceId' : instance_id, - 'requestId' : context.request_id, - 'status' : volume['attach_status'], - 'volumeId' : volume_id}) - - - @rbac.allow('projectmanager', 'sysadmin') - def detach_volume(self, context, volume_id, **kwargs): - volume = self._get_volume(context, volume_id) - instance_id = volume.get('instance_id', None) - if not instance_id: - raise exception.Error("Volume isn't attached to anything!") - if volume['status'] == "available": - raise exception.Error("Volume is already detached") - try: - volume.start_detach() - instance = self._get_instance(context, instance_id) - rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), - {"method": "detach_volume", - "args" : {"instance_id": instance_id, - "volume_id": volume_id}}) - except exception.NotFound: - # If the instance doesn't exist anymore, - # then we need to call detach blind - volume.finish_detach() - return defer.succeed({'attachTime' : volume['attach_time'], - 'device' : volume['mountpoint'], - 'instanceId' : instance_id, - 'requestId' : context.request_id, - 'status' : volume['attach_status'], - 'volumeId' : volume_id}) - - def _convert_to_set(self, lst, label): - if lst == None or lst == []: - return None - if not isinstance(lst, list): - lst = [lst] - return [{label: x} for x in lst] - - @rbac.allow('all') - def 
describe_instances(self, context, **kwargs): - return defer.succeed(self._format_instances(context)) - - def _format_instances(self, context, reservation_id = None): - reservations = {} - if context.user.is_admin(): - instgenerator = self.instdir.all - else: - instgenerator = self.instdir.by_project(context.project.id) - for instance in instgenerator: - res_id = instance.get('reservation_id', 'Unknown') - if reservation_id != None and reservation_id != res_id: - continue - if not context.user.is_admin(): - if instance['image_id'] == FLAGS.vpn_image_id: - continue - i = {} - i['instance_id'] = instance.get('instance_id', None) - i['image_id'] = instance.get('image_id', None) - i['instance_state'] = { - 'code': instance.get('state', 0), - 'name': instance.get('state_description', 'pending') - } - i['public_dns_name'] = network_model.get_public_ip_for_instance( - i['instance_id']) - i['private_dns_name'] = instance.get('private_dns_name', None) - if not i['public_dns_name']: - i['public_dns_name'] = i['private_dns_name'] - i['dns_name'] = instance.get('dns_name', None) - i['key_name'] = instance.get('key_name', None) - if context.user.is_admin(): - i['key_name'] = '%s (%s, %s)' % (i['key_name'], - instance.get('project_id', None), instance.get('node_name','')) - i['product_codes_set'] = self._convert_to_set( - instance.get('product_codes', None), 'product_code') - i['instance_type'] = instance.get('instance_type', None) - i['launch_time'] = instance.get('launch_time', None) - i['ami_launch_index'] = instance.get('ami_launch_index', - None) - if not reservations.has_key(res_id): - r = {} - r['reservation_id'] = res_id - r['owner_id'] = instance.get('project_id', None) - r['group_set'] = self._convert_to_set( - instance.get('groups', None), 'group_id') - r['instances_set'] = [] - reservations[res_id] = r - reservations[res_id]['instances_set'].append(i) - - instance_response = {'reservationSet' : list(reservations.values()) } - return instance_response - - 
@rbac.allow('all') - def describe_addresses(self, context, **kwargs): - return self.format_addresses(context) - - def format_addresses(self, context): - addresses = [] - for address in network_model.PublicAddress.all(): - # TODO(vish): implement a by_project iterator for addresses - if (context.user.is_admin() or - address['project_id'] == context.project.id): - address_rv = { - 'public_ip': address['address'], - 'instance_id' : address.get('instance_id', 'free') - } - if context.user.is_admin(): - address_rv['instance_id'] = "%s (%s, %s)" % ( - address['instance_id'], - address['user_id'], - address['project_id'], - ) - addresses.append(address_rv) - return {'addressesSet': addresses} - - @rbac.allow('netadmin') - @defer.inlineCallbacks - def allocate_address(self, context, **kwargs): - network_topic = yield self._get_network_topic(context) - alloc_result = yield rpc.call(network_topic, - {"method": "allocate_elastic_ip", - "args": {"user_id": context.user.id, - "project_id": context.project.id}}) - public_ip = alloc_result['result'] - defer.returnValue({'addressSet': [{'publicIp' : public_ip}]}) - - @rbac.allow('netadmin') - @defer.inlineCallbacks - def release_address(self, context, public_ip, **kwargs): - # NOTE(vish): Should we make sure this works? 
- network_topic = yield self._get_network_topic(context) - rpc.cast(network_topic, - {"method": "deallocate_elastic_ip", - "args": {"elastic_ip": public_ip}}) - defer.returnValue({'releaseResponse': ["Address released."]}) - - @rbac.allow('netadmin') - @defer.inlineCallbacks - def associate_address(self, context, instance_id, public_ip, **kwargs): - instance = self._get_instance(context, instance_id) - address = self._get_address(context, public_ip) - network_topic = yield self._get_network_topic(context) - rpc.cast(network_topic, - {"method": "associate_elastic_ip", - "args": {"elastic_ip": address['address'], - "fixed_ip": instance['private_dns_name'], - "instance_id": instance['instance_id']}}) - defer.returnValue({'associateResponse': ["Address associated."]}) - - @rbac.allow('netadmin') - @defer.inlineCallbacks - def disassociate_address(self, context, public_ip, **kwargs): - address = self._get_address(context, public_ip) - network_topic = yield self._get_network_topic(context) - rpc.cast(network_topic, - {"method": "disassociate_elastic_ip", - "args": {"elastic_ip": address['address']}}) - defer.returnValue({'disassociateResponse': ["Address disassociated."]}) - - @defer.inlineCallbacks - def _get_network_topic(self, context): - """Retrieves the network host for a project""" - host = network_service.get_host_for_project(context.project.id) - if not host: - result = yield rpc.call(FLAGS.network_topic, - {"method": "set_network_host", - "args": {"user_id": context.user.id, - "project_id": context.project.id}}) - host = result['result'] - defer.returnValue('%s.%s' %(FLAGS.network_topic, host)) - - @rbac.allow('projectmanager', 'sysadmin') - @defer.inlineCallbacks - def run_instances(self, context, **kwargs): - # make sure user can access the image - # vpn image is private so it doesn't show up on lists - if kwargs['image_id'] != FLAGS.vpn_image_id: - image = self._get_image(context, kwargs['image_id']) - - # FIXME(ja): if image is cloudpipe, this breaks - - # 
get defaults from imagestore - image_id = image['imageId'] - kernel_id = image.get('kernelId', FLAGS.default_kernel) - ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk) - - # API parameters overrides of defaults - kernel_id = kwargs.get('kernel_id', kernel_id) - ramdisk_id = kwargs.get('ramdisk_id', ramdisk_id) - - # make sure we have access to kernel and ramdisk - self._get_image(context, kernel_id) - self._get_image(context, ramdisk_id) - - logging.debug("Going to run instances...") - reservation_id = utils.generate_uid('r') - launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) - key_data = None - if kwargs.has_key('key_name'): - key_pair = context.user.get_key_pair(kwargs['key_name']) - if not key_pair: - raise exception.ApiError('Key Pair %s not found' % - kwargs['key_name']) - key_data = key_pair.public_key - network_topic = yield self._get_network_topic(context) - # TODO: Get the real security group of launch in here - security_group = "default" - for num in range(int(kwargs['max_count'])): - vpn = False - if image_id == FLAGS.vpn_image_id: - vpn = True - allocate_result = yield rpc.call(network_topic, - {"method": "allocate_fixed_ip", - "args": {"user_id": context.user.id, - "project_id": context.project.id, - "security_group": security_group, - "vpn": vpn}}) - allocate_data = allocate_result['result'] - inst = self.instdir.new() - inst['image_id'] = image_id - inst['kernel_id'] = kernel_id - inst['ramdisk_id'] = ramdisk_id - inst['user_data'] = kwargs.get('user_data', '') - inst['instance_type'] = kwargs.get('instance_type', 'm1.small') - inst['reservation_id'] = reservation_id - inst['launch_time'] = launch_time - inst['key_data'] = key_data or '' - inst['key_name'] = kwargs.get('key_name', '') - inst['user_id'] = context.user.id - inst['project_id'] = context.project.id - inst['ami_launch_index'] = num - inst['security_group'] = security_group - for (key, value) in allocate_data.iteritems(): - inst[key] = value - - inst.save() - 
rpc.cast(FLAGS.compute_topic, - {"method": "run_instance", - "args": {"instance_id" : inst.instance_id}}) - logging.debug("Casting to node for %s's instance with IP of %s" % - (context.user.name, inst['private_dns_name'])) - # TODO: Make Network figure out the network name from ip. - defer.returnValue(self._format_instances(context, reservation_id)) - - @rbac.allow('projectmanager', 'sysadmin') - @defer.inlineCallbacks - def terminate_instances(self, context, instance_id, **kwargs): - logging.debug("Going to start terminating instances") - network_topic = yield self._get_network_topic(context) - for i in instance_id: - logging.debug("Going to try and terminate %s" % i) - try: - instance = self._get_instance(context, i) - except exception.NotFound: - logging.warning("Instance %s was not found during terminate" - % i) - continue - elastic_ip = network_model.get_public_ip_for_instance(i) - if elastic_ip: - logging.debug("Disassociating address %s" % elastic_ip) - # NOTE(vish): Right now we don't really care if the ip is - # disassociated. We may need to worry about - # checking this later. Perhaps in the scheduler? - rpc.cast(network_topic, - {"method": "disassociate_elastic_ip", - "args": {"elastic_ip": elastic_ip}}) - - fixed_ip = instance.get('private_dns_name', None) - if fixed_ip: - logging.debug("Deallocating address %s" % fixed_ip) - # NOTE(vish): Right now we don't really care if the ip is - # actually removed. We may need to worry about - # checking this later. Perhaps in the scheduler? 
- rpc.cast(network_topic, - {"method": "deallocate_fixed_ip", - "args": {"fixed_ip": fixed_ip}}) - - if instance.get('node_name', 'unassigned') != 'unassigned': - # NOTE(joshua?): It's also internal default - rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), - {"method": "terminate_instance", - "args": {"instance_id": i}}) - else: - instance.destroy() - defer.returnValue(True) - - @rbac.allow('projectmanager', 'sysadmin') - def reboot_instances(self, context, instance_id, **kwargs): - """instance_id is a list of instance ids""" - for i in instance_id: - instance = self._get_instance(context, i) - rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), - {"method": "reboot_instance", - "args" : {"instance_id": i}}) - return defer.succeed(True) - - @rbac.allow('projectmanager', 'sysadmin') - def delete_volume(self, context, volume_id, **kwargs): - # TODO: return error if not authorized - volume = self._get_volume(context, volume_id) - volume_node = volume['node_name'] - rpc.cast('%s.%s' % (FLAGS.volume_topic, volume_node), - {"method": "delete_volume", - "args" : {"volume_id": volume_id}}) - return defer.succeed(True) - - @rbac.allow('all') - def describe_images(self, context, image_id=None, **kwargs): - # The objectstore does its own authorization for describe - imageSet = images.list(context, image_id) - return defer.succeed({'imagesSet': imageSet}) - - @rbac.allow('projectmanager', 'sysadmin') - def deregister_image(self, context, image_id, **kwargs): - # FIXME: should the objectstore be doing these authorization checks? - images.deregister(context, image_id) - return defer.succeed({'imageId': image_id}) - - @rbac.allow('projectmanager', 'sysadmin') - def register_image(self, context, image_location=None, **kwargs): - # FIXME: should the objectstore be doing these authorization checks? 
- if image_location is None and kwargs.has_key('name'): - image_location = kwargs['name'] - image_id = images.register(context, image_location) - logging.debug("Registered %s as %s" % (image_location, image_id)) - - return defer.succeed({'imageId': image_id}) - - @rbac.allow('all') - def describe_image_attribute(self, context, image_id, attribute, **kwargs): - if attribute != 'launchPermission': - raise exception.ApiError('attribute not supported: %s' % attribute) - try: - image = images.list(context, image_id)[0] - except IndexError: - raise exception.ApiError('invalid id: %s' % image_id) - result = { 'image_id': image_id, 'launchPermission': [] } - if image['isPublic']: - result['launchPermission'].append({ 'group': 'all' }) - return defer.succeed(result) - - @rbac.allow('projectmanager', 'sysadmin') - def modify_image_attribute(self, context, image_id, attribute, operation_type, **kwargs): - # TODO(devcamcar): Support users and groups other than 'all'. - if attribute != 'launchPermission': - raise exception.ApiError('attribute not supported: %s' % attribute) - if not 'user_group' in kwargs: - raise exception.ApiError('user or group not specified') - if len(kwargs['user_group']) != 1 and kwargs['user_group'][0] != 'all': - raise exception.ApiError('only group "all" is supported') - if not operation_type in ['add', 'remove']: - raise exception.ApiError('operation_type must be add or remove') - result = images.modify(context, image_id, operation_type) - return defer.succeed(result) - - def update_state(self, topic, value): - """ accepts status reports from the queue and consolidates them """ - # TODO(jmc): if an instance has disappeared from - # the node, call instance_death - if topic == "instances": - return defer.succeed(True) - aggregate_state = getattr(self, topic) - node_name = value.keys()[0] - items = value[node_name] - - logging.debug("Updating %s state for %s" % (topic, node_name)) - - for item_id in items.keys(): - if (aggregate_state.has_key('pending') 
and - aggregate_state['pending'].has_key(item_id)): - del aggregate_state['pending'][item_id] - aggregate_state[node_name] = items - - return defer.succeed(True) diff --git a/nova/endpoint/images.py b/nova/endpoint/images.py deleted file mode 100644 index fe7cb5d11..000000000 --- a/nova/endpoint/images.py +++ /dev/null @@ -1,95 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Proxy AMI-related calls from the cloud controller, to the running -objectstore daemon. 
-""" - -import boto.s3.connection -import json -import urllib - -from nova import flags -from nova import utils -from nova.auth import manager - - -FLAGS = flags.FLAGS - -def modify(context, image_id, operation): - conn(context).make_request( - method='POST', - bucket='_images', - query_args=qs({'image_id': image_id, 'operation': operation})) - - return True - - -def register(context, image_location): - """ rpc call to register a new image based from a manifest """ - - image_id = utils.generate_uid('ami') - conn(context).make_request( - method='PUT', - bucket='_images', - query_args=qs({'image_location': image_location, - 'image_id': image_id})) - - return image_id - -def list(context, filter_list=[]): - """ return a list of all images that a user can see - - optionally filtered by a list of image_id """ - - # FIXME: send along the list of only_images to check for - response = conn(context).make_request( - method='GET', - bucket='_images') - - result = json.loads(response.read()) - if not filter_list is None: - return [i for i in result if i['imageId'] in filter_list] - return result - -def deregister(context, image_id): - """ unregister an image """ - conn(context).make_request( - method='DELETE', - bucket='_images', - query_args=qs({'image_id': image_id})) - -def conn(context): - access = manager.AuthManager().get_access_key(context.user, - context.project) - secret = str(context.user.secret) - calling = boto.s3.connection.OrdinaryCallingFormat() - return boto.s3.connection.S3Connection(aws_access_key_id=access, - aws_secret_access_key=secret, - is_secure=False, - calling_format=calling, - port=FLAGS.s3_port, - host=FLAGS.s3_host) - - -def qs(params): - pairs = [] - for key in params.keys(): - pairs.append(key + '=' + urllib.quote(params[key])) - return '&'.join(pairs) diff --git a/nova/endpoint/rackspace.py b/nova/endpoint/rackspace.py deleted file mode 100644 index b4e6cd823..000000000 --- a/nova/endpoint/rackspace.py +++ /dev/null @@ -1,186 +0,0 @@ -# vim: 
tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Rackspace API Endpoint -""" - -import json -import time - -import webob.dec -import webob.exc - -from nova import flags -from nova import rpc -from nova import utils -from nova import wsgi -from nova.auth import manager -from nova.compute import model as compute -from nova.network import model as network - - -FLAGS = flags.FLAGS -flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') - - -class API(wsgi.Middleware): - """Entry point for all requests.""" - - def __init__(self): - super(API, self).__init__(Router(webob.exc.HTTPNotFound())) - - @webob.dec.wsgify - def __call__(self, req): - return self.application - context = {} - if "HTTP_X_AUTH_TOKEN" in req.environ: - context['user'] = manager.AuthManager().get_user_from_access_key( - req.environ['HTTP_X_AUTH_TOKEN']) - if context['user']: - context['project'] = manager.AuthManager().get_project( - context['user'].name) - if "user" not in context: - return webob.exc.HTTPForbidden() - environ['nova.context'] = context - return self.application - - -class Router(wsgi.Router): - """Route requests to the next WSGI application.""" - - def _build_map(self): - """Build routing map for authentication and cloud.""" - self.map.resource("server", "servers", 
controller=CloudServerAPI()) - #self._connect("/v1.0", controller=AuthenticationAPI()) - #cloud = CloudServerAPI() - #self._connect("/servers", controller=cloud.launch_server, - # conditions={"method": ["POST"]}) - #self._connect("/servers/{server_id}", controller=cloud.delete_server, - # conditions={'method': ["DELETE"]}) - #self._connect("/servers", controller=cloud) - - -class AuthenticationAPI(wsgi.Application): - """Handle all authorization requests through WSGI applications.""" - - @webob.dec.wsgify - def __call__(self, req): # pylint: disable-msg=W0221 - # TODO(todd): make a actual session with a unique token - # just pass the auth key back through for now - res = webob.Response() - res.status = '204 No Content' - res.headers.add('X-Server-Management-Url', req.host_url) - res.headers.add('X-Storage-Url', req.host_url) - res.headers.add('X-CDN-Managment-Url', req.host_url) - res.headers.add('X-Auth-Token', req.headers['X-Auth-Key']) - return res - - -class CloudServerAPI(wsgi.Application): - """Handle all server requests through WSGI applications.""" - - def __init__(self): - super(CloudServerAPI, self).__init__() - self.instdir = compute.InstanceDirectory() - self.network = network.PublicNetworkController() - - @webob.dec.wsgify - def __call__(self, req): # pylint: disable-msg=W0221 - value = {"servers": []} - for inst in self.instdir.all: - value["servers"].append(self.instance_details(inst)) - return json.dumps(value) - - def instance_details(self, inst): # pylint: disable-msg=R0201 - """Build the data structure to represent details for an instance.""" - return { - "id": inst.get("instance_id", None), - "imageId": inst.get("image_id", None), - "flavorId": inst.get("instacne_type", None), - "hostId": inst.get("node_name", None), - "status": inst.get("state", "pending"), - "addresses": { - "public": [network.get_public_ip_for_instance( - inst.get("instance_id", None))], - "private": [inst.get("private_dns_name", None)]}, - - # implemented only by Rackspace, 
not AWS - "name": inst.get("name", "Not-Specified"), - - # not supported - "progress": "Not-Supported", - "metadata": { - "Server Label": "Not-Supported", - "Image Version": "Not-Supported"}} - - @webob.dec.wsgify - def launch_server(self, req): - """Launch a new instance.""" - data = json.loads(req.body) - inst = self.build_server_instance(data, req.environ['nova.context']) - rpc.cast( - FLAGS.compute_topic, { - "method": "run_instance", - "args": {"instance_id": inst.instance_id}}) - - return json.dumps({"server": self.instance_details(inst)}) - - def build_server_instance(self, env, context): - """Build instance data structure and save it to the data store.""" - reservation = utils.generate_uid('r') - ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) - inst = self.instdir.new() - inst['name'] = env['server']['name'] - inst['image_id'] = env['server']['imageId'] - inst['instance_type'] = env['server']['flavorId'] - inst['user_id'] = context['user'].id - inst['project_id'] = context['project'].id - inst['reservation_id'] = reservation - inst['launch_time'] = ltime - inst['mac_address'] = utils.generate_mac() - address = self.network.allocate_ip( - inst['user_id'], - inst['project_id'], - mac=inst['mac_address']) - inst['private_dns_name'] = str(address) - inst['bridge_name'] = network.BridgedNetwork.get_network_for_project( - inst['user_id'], - inst['project_id'], - 'default')['bridge_name'] - # key_data, key_name, ami_launch_index - # TODO(todd): key data or root password - inst.save() - return inst - - @webob.dec.wsgify - @wsgi.route_args - def delete_server(self, req, route_args): # pylint: disable-msg=R0201 - """Delete an instance.""" - owner_hostname = None - instance = compute.Instance.lookup(route_args['server_id']) - if instance: - owner_hostname = instance["node_name"] - if not owner_hostname: - return webob.exc.HTTPNotFound("Did not find image, or it was " - "not in a running state.") - rpc_transport = "%s:%s" % (FLAGS.compute_topic, 
owner_hostname) - rpc.cast(rpc_transport, - {"method": "reboot_instance", - "args": {"instance_id": route_args['server_id']}}) - req.status = "202 Accepted" diff --git a/nova/endpoint/rackspace/controllers/base.py b/nova/endpoint/rackspace/controllers/base.py new file mode 100644 index 000000000..a83925cc3 --- /dev/null +++ b/nova/endpoint/rackspace/controllers/base.py @@ -0,0 +1,9 @@ +class BaseController(object): + @classmethod + def render(cls, instance): + if isinstance(instance, list): + return [ cls.entity_name : { cls.render(instance) } + else + return + + diff --git a/nova/endpoint/rackspace/controllers/flavors.py b/nova/endpoint/rackspace/controllers/flavors.py new file mode 100644 index 000000000..e69de29bb diff --git a/nova/endpoint/rackspace/controllers/images.py b/nova/endpoint/rackspace/controllers/images.py new file mode 100644 index 000000000..e69de29bb diff --git a/nova/endpoint/rackspace/controllers/servers.py b/nova/endpoint/rackspace/controllers/servers.py new file mode 100644 index 000000000..af6c958bb --- /dev/null +++ b/nova/endpoint/rackspace/controllers/servers.py @@ -0,0 +1,72 @@ +from nova import rpc +from nova.compute import model as compute +from nova.endpoint.rackspace import BaseController + +class ServersController(BaseController): + entity_name = 'servers' + + def __init__(self): + raise NotImplemented("You may not create an instance of this class") + + @classmethod + def index(cls): + return [instance_details(inst) for inst in compute.InstanceDirectory().all] + + @classmethod + def show(cls, **kwargs): + instance_id = kwargs['id'] + return compute.InstanceDirectory().get(instance_id) + + @classmethod + def delete(cls, **kwargs): + instance_id = kwargs['id'] + instance = compute.InstanceDirectory().get(instance_id) + if not instance + raise ServerNotFound("The requested server was not found") + instance.destroy() + return True + + @classmethod + def create(cls, **kwargs): + inst = self.build_server_instance(kwargs['server']) + 
rpc.cast( + FLAGS.compute_topic, { + "method": "run_instance", + "args": {"instance_id": inst.instance_id}}) + + @classmethod + def update(cls, **kwargs): + instance_id = kwargs['id'] + instance = compute.InstanceDirectory().get(instance_id) + if not instance: + raise ServerNotFound("The requested server was not found") + instance.update(kwargs['server']) + instance.save() + + @classmethod + def build_server_instance(self, env): + """Build instance data structure and save it to the data store.""" + reservation = utils.generate_uid('r') + ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) + inst = self.instdir.new() + inst['name'] = env['server']['name'] + inst['image_id'] = env['server']['imageId'] + inst['instance_type'] = env['server']['flavorId'] + inst['user_id'] = env['user']['id'] + inst['project_id'] = env['project']['id'] + inst['reservation_id'] = reservation + inst['launch_time'] = ltime + inst['mac_address'] = utils.generate_mac() + address = self.network.allocate_ip( + inst['user_id'], + inst['project_id'], + mac=inst['mac_address']) + inst['private_dns_name'] = str(address) + inst['bridge_name'] = network.BridgedNetwork.get_network_for_project( + inst['user_id'], + inst['project_id'], + 'default')['bridge_name'] + # key_data, key_name, ami_launch_index + # TODO(todd): key data or root password + inst.save() + return inst diff --git a/nova/endpoint/rackspace/controllers/shared_ip_groups.py b/nova/endpoint/rackspace/controllers/shared_ip_groups.py new file mode 100644 index 000000000..e69de29bb diff --git a/nova/endpoint/rackspace/rackspace.py b/nova/endpoint/rackspace/rackspace.py new file mode 100644 index 000000000..75b828e91 --- /dev/null +++ b/nova/endpoint/rackspace/rackspace.py @@ -0,0 +1,183 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Rackspace API Endpoint +""" + +import json +import time + +import webob.dec +import webob.exc + +from nova import flags +from nova import rpc +from nova import utils +from nova import wsgi +from nova.auth import manager +from nova.compute import model as compute +from nova.network import model as network + + +FLAGS = flags.FLAGS +flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') + + +class API(wsgi.Middleware): + """Entry point for all requests.""" + + def __init__(self): + super(API, self).__init__(Router(webob.exc.HTTPNotFound())) + + def __call__(self, environ, start_response): + context = {} + if "HTTP_X_AUTH_TOKEN" in environ: + context['user'] = manager.AuthManager().get_user_from_access_key( + environ['HTTP_X_AUTH_TOKEN']) + if context['user']: + context['project'] = manager.AuthManager().get_project( + context['user'].name) + if "user" not in context: + return webob.exc.HTTPForbidden()(environ, start_response) + environ['nova.context'] = context + return self.application(environ, start_response) + + +class Router(wsgi.Router): + """Route requests to the next WSGI application.""" + + def _build_map(self): + """Build routing map for authentication and cloud.""" + self._connect("/v1.0", controller=AuthenticationAPI()) + cloud = CloudServerAPI() + self._connect("/servers", controller=cloud.launch_server, + conditions={"method": ["POST"]}) + self._connect("/servers/{server_id}", 
controller=cloud.delete_server, + conditions={'method': ["DELETE"]}) + self._connect("/servers", controller=cloud) + + +class AuthenticationAPI(wsgi.Application): + """Handle all authorization requests through WSGI applications.""" + + @webob.dec.wsgify + def __call__(self, req): # pylint: disable-msg=W0221 + # TODO(todd): make a actual session with a unique token + # just pass the auth key back through for now + res = webob.Response() + res.status = '204 No Content' + res.headers.add('X-Server-Management-Url', req.host_url) + res.headers.add('X-Storage-Url', req.host_url) + res.headers.add('X-CDN-Managment-Url', req.host_url) + res.headers.add('X-Auth-Token', req.headers['X-Auth-Key']) + return res + + +class CloudServerAPI(wsgi.Application): + """Handle all server requests through WSGI applications.""" + + def __init__(self): + super(CloudServerAPI, self).__init__() + self.instdir = compute.InstanceDirectory() + self.network = network.PublicNetworkController() + + @webob.dec.wsgify + def __call__(self, req): # pylint: disable-msg=W0221 + value = {"servers": []} + for inst in self.instdir.all: + value["servers"].append(self.instance_details(inst)) + return json.dumps(value) + + def instance_details(self, inst): # pylint: disable-msg=R0201 + """Build the data structure to represent details for an instance.""" + return { + "id": inst.get("instance_id", None), + "imageId": inst.get("image_id", None), + "flavorId": inst.get("instacne_type", None), + "hostId": inst.get("node_name", None), + "status": inst.get("state", "pending"), + "addresses": { + "public": [network.get_public_ip_for_instance( + inst.get("instance_id", None))], + "private": [inst.get("private_dns_name", None)]}, + + # implemented only by Rackspace, not AWS + "name": inst.get("name", "Not-Specified"), + + # not supported + "progress": "Not-Supported", + "metadata": { + "Server Label": "Not-Supported", + "Image Version": "Not-Supported"}} + + @webob.dec.wsgify + def launch_server(self, req): + """Launch 
a new instance.""" + data = json.loads(req.body) + inst = self.build_server_instance(data, req.environ['nova.context']) + rpc.cast( + FLAGS.compute_topic, { + "method": "run_instance", + "args": {"instance_id": inst.instance_id}}) + + return json.dumps({"server": self.instance_details(inst)}) + + def build_server_instance(self, env, context): + """Build instance data structure and save it to the data store.""" + reservation = utils.generate_uid('r') + ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) + inst = self.instdir.new() + inst['name'] = env['server']['name'] + inst['image_id'] = env['server']['imageId'] + inst['instance_type'] = env['server']['flavorId'] + inst['user_id'] = context['user'].id + inst['project_id'] = context['project'].id + inst['reservation_id'] = reservation + inst['launch_time'] = ltime + inst['mac_address'] = utils.generate_mac() + address = self.network.allocate_ip( + inst['user_id'], + inst['project_id'], + mac=inst['mac_address']) + inst['private_dns_name'] = str(address) + inst['bridge_name'] = network.BridgedNetwork.get_network_for_project( + inst['user_id'], + inst['project_id'], + 'default')['bridge_name'] + # key_data, key_name, ami_launch_index + # TODO(todd): key data or root password + inst.save() + return inst + + @webob.dec.wsgify + @wsgi.route_args + def delete_server(self, req, route_args): # pylint: disable-msg=R0201 + """Delete an instance.""" + owner_hostname = None + instance = compute.Instance.lookup(route_args['server_id']) + if instance: + owner_hostname = instance["node_name"] + if not owner_hostname: + return webob.exc.HTTPNotFound("Did not find image, or it was " + "not in a running state.") + rpc_transport = "%s:%s" % (FLAGS.compute_topic, owner_hostname) + rpc.cast(rpc_transport, + {"method": "reboot_instance", + "args": {"instance_id": route_args['server_id']}}) + req.status = "202 Accepted" -- cgit From fb382c8e705e1803abb5de77a1fd11e6f913af75 Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> 
Date: Wed, 11 Aug 2010 17:40:28 -0400 Subject: Adapts the run_tests.sh script to allow interactive or automated creation of virtualenv, or to run tests outside of a virtualenv --- run_tests.sh | 63 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 60 insertions(+), 3 deletions(-) diff --git a/run_tests.sh b/run_tests.sh index 85d7c8834..31bfce9fa 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -1,12 +1,69 @@ -#!/bin/bash +#!/bin/bash + +function usage { + echo "Usage: $0 [OPTION]..." + echo "Run Nova's test suite(s)" + echo "" + echo " -V, --virtual-env Always use virtualenv. Install automatically if not present" + echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment" + echo " -h, --help Print this usage message" + echo "" + echo "Note: with no options specified, the script will try to run the tests in a virtual environment," + echo " If no virtualenv is found, the script will ask if you would like to create one. If you " + echo " prefer to run tests NOT in a virtual environment, simply pass the -N option." 
+ exit +} + +function process_options { + array=$1 + elements=${#array[@]} + for (( x=0;x<$elements;x++)); do + process_option ${array[${x}]} + done +} + +function process_option { + option=$1 + case $option in + --help) usage;; + -h) usage;; + -V) let always_venv=1; let never_venv=0;; + --virtual-env) let always_venv=1; let never_venv=0;; + -N) let always_venv=0; let never_venv=1;; + --no-virtual-env) let always_venv=0; let never_venv=1;; + esac +} venv=.nova-venv with_venv=tools/with_venv.sh +always_venv=0 +never_venv=0 +options=("$@") + +process_options $options + +if [ $never_venv -eq 1 ]; then + # Just run the test suites in current environment + python run_tests.py + exit +fi if [ -e ${venv} ]; then ${with_venv} python run_tests.py $@ else - echo "No virtual environment found...creating one" - python tools/install_venv.py + if [ $always_venv -eq 1 ]; then + # Automatically install the virtualenv + python tools/install_venv.py + else + echo -e "No virtual environment found...create one? 
(Y/n) \c" + read use_ve + if [ "x$use_ve" = "xY" ]; then + # Install the virtualenv and run the test suite in it + python tools/install_venv.py + else + python run_tests.py + exit + fi + fi ${with_venv} python run_tests.py $@ fi -- cgit From 11c47dd12adcbf2a5011510f01081db858b057db Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Thu, 12 Aug 2010 18:36:46 -0400 Subject: Mergeprop cleanup --- nova/endpoint/rackspace/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 nova/endpoint/rackspace/__init__.py diff --git a/nova/endpoint/rackspace/__init__.py b/nova/endpoint/rackspace/__init__.py deleted file mode 100644 index e69de29bb..000000000 -- cgit From 39d12bf518e284183d1debd52fe7081ecf1c633d Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Thu, 12 Aug 2010 18:36:56 -0400 Subject: Mergeprop cleanup --- nova/endpoint/rackspace/__init__.py | 90 ++++++++++++++++++++++++++++++++++++ nova/endpoint/rackspace/rackspace.py | 90 ------------------------------------ 2 files changed, 90 insertions(+), 90 deletions(-) create mode 100644 nova/endpoint/rackspace/__init__.py delete mode 100644 nova/endpoint/rackspace/rackspace.py diff --git a/nova/endpoint/rackspace/__init__.py b/nova/endpoint/rackspace/__init__.py new file mode 100644 index 000000000..f14f6218c --- /dev/null +++ b/nova/endpoint/rackspace/__init__.py @@ -0,0 +1,90 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Rackspace API Endpoint +""" + +import json +import time + +import webob.dec +import webob.exc +import routes + +from nova import flags +from nova import wsgi +from nova.auth import manager +from nova.endpoint.rackspace import controllers + + +FLAGS = flags.FLAGS +flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') + + +class Api(wsgi.Middleware): + """WSGI entry point for all Rackspace API requests.""" + + def __init__(self): + app = AuthMiddleware(ApiRouter()) + super(Api, self).__init__(app) + + +class AuthMiddleware(wsgi.Middleware): + """Authorize the rackspace API request or return an HTTP Forbidden.""" + + #TODO(gundlach): isn't this the old Nova API's auth? Should it be replaced + #with correct RS API auth? + + @webob.dec.wsgify + def __call__(self, req): + context = {} + if "HTTP_X_AUTH_TOKEN" in req.environ: + context['user'] = manager.AuthManager().get_user_from_access_key( + req.environ['HTTP_X_AUTH_TOKEN']) + if context['user']: + context['project'] = manager.AuthManager().get_project( + context['user'].name) + if "user" not in context: + return webob.exc.HTTPForbidden() + req.environ['nova.context'] = context + return self.application + + +class ApiRouter(wsgi.Router): + """ + Routes requests on the Rackspace API to the appropriate controller + and method. 
+ """ + + def __init__(self): + mapper = routes.Mapper() + + mapper.resource("server", "servers") + mapper.resource("image", "images") + mapper.resource("flavor", "flavors") + mapper.resource("sharedipgroup", "sharedipgroups") + + targets = { + 'servers': controllers.ServersController(), + 'images': controllers.ImagesController(), + 'flavors': controllers.FlavorsController(), + 'sharedipgroups': controllers.SharedIpGroupsController() + } + + super(ApiRouter, self).__init__(mapper, targets) diff --git a/nova/endpoint/rackspace/rackspace.py b/nova/endpoint/rackspace/rackspace.py deleted file mode 100644 index f14f6218c..000000000 --- a/nova/endpoint/rackspace/rackspace.py +++ /dev/null @@ -1,90 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Rackspace API Endpoint -""" - -import json -import time - -import webob.dec -import webob.exc -import routes - -from nova import flags -from nova import wsgi -from nova.auth import manager -from nova.endpoint.rackspace import controllers - - -FLAGS = flags.FLAGS -flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') - - -class Api(wsgi.Middleware): - """WSGI entry point for all Rackspace API requests.""" - - def __init__(self): - app = AuthMiddleware(ApiRouter()) - super(Api, self).__init__(app) - - -class AuthMiddleware(wsgi.Middleware): - """Authorize the rackspace API request or return an HTTP Forbidden.""" - - #TODO(gundlach): isn't this the old Nova API's auth? Should it be replaced - #with correct RS API auth? - - @webob.dec.wsgify - def __call__(self, req): - context = {} - if "HTTP_X_AUTH_TOKEN" in req.environ: - context['user'] = manager.AuthManager().get_user_from_access_key( - req.environ['HTTP_X_AUTH_TOKEN']) - if context['user']: - context['project'] = manager.AuthManager().get_project( - context['user'].name) - if "user" not in context: - return webob.exc.HTTPForbidden() - req.environ['nova.context'] = context - return self.application - - -class ApiRouter(wsgi.Router): - """ - Routes requests on the Rackspace API to the appropriate controller - and method. 
- """ - - def __init__(self): - mapper = routes.Mapper() - - mapper.resource("server", "servers") - mapper.resource("image", "images") - mapper.resource("flavor", "flavors") - mapper.resource("sharedipgroup", "sharedipgroups") - - targets = { - 'servers': controllers.ServersController(), - 'images': controllers.ImagesController(), - 'flavors': controllers.FlavorsController(), - 'sharedipgroups': controllers.SharedIpGroupsController() - } - - super(ApiRouter, self).__init__(mapper, targets) -- cgit From 4391b7362eeab2cd976309696be1209ac771ce24 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Thu, 12 Aug 2010 18:41:31 -0400 Subject: Undo the changes to cloud.py that somehow diverged from trunk --- nova/endpoint/cloud.py | 105 ++++++++++++++++++++++++------------------------- 1 file changed, 52 insertions(+), 53 deletions(-) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 878d54a15..ad9188ff3 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -47,6 +47,7 @@ FLAGS = flags.FLAGS flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') + def _gen_key(user_id, key_name): """ Tuck this into AuthManager """ try: @@ -102,15 +103,16 @@ class CloudController(object): result = {} for instance in self.instdir.all: if instance['project_id'] == project_id: - line = '%s slots=%d' % (instance['private_dns_name'], INSTANCE_TYPES[instance['instance_type']]['vcpus']) + line = '%s slots=%d' % (instance['private_dns_name'], + INSTANCE_TYPES[instance['instance_type']]['vcpus']) if instance['key_name'] in result: result[instance['key_name']].append(line) else: result[instance['key_name']] = [line] return result - def get_metadata(self, ip): - i = self.get_instance_by_ip(ip) + def get_metadata(self, ipaddress): + i = self.get_instance_by_ip(ipaddress) if i is None: return None mpi = self._get_mpi_data(i['project_id']) @@ -147,7 +149,7 @@ class CloudController(object): }, 'public-hostname': i.get('dns_name', ''), 'public-ipv4': 
i.get('dns_name', ''), # TODO: switch to IP - 'public-keys' : keys, + 'public-keys': keys, 'ramdisk-id': i.get('ramdisk_id', ''), 'reservation-id': i['reservation_id'], 'security-groups': i.get('groups', ''), @@ -203,26 +205,22 @@ class CloudController(object): 'keyFingerprint': key_pair.fingerprint, }) - return { 'keypairsSet': result } + return {'keypairsSet': result} @rbac.allow('all') def create_key_pair(self, context, key_name, **kwargs): - try: - d = defer.Deferred() - p = context.handler.application.settings.get('pool') - def _complete(kwargs): - if 'exception' in kwargs: - d.errback(kwargs['exception']) - return - d.callback({'keyName': key_name, - 'keyFingerprint': kwargs['fingerprint'], - 'keyMaterial': kwargs['private_key']}) - p.apply_async(_gen_key, [context.user.id, key_name], - callback=_complete) - return d - - except manager.UserError as e: - raise + dcall = defer.Deferred() + pool = context.handler.application.settings.get('pool') + def _complete(kwargs): + if 'exception' in kwargs: + dcall.errback(kwargs['exception']) + return + dcall.callback({'keyName': key_name, + 'keyFingerprint': kwargs['fingerprint'], + 'keyMaterial': kwargs['private_key']}) + pool.apply_async(_gen_key, [context.user.id, key_name], + callback=_complete) + return dcall @rbac.allow('all') def delete_key_pair(self, context, key_name, **kwargs): @@ -232,7 +230,7 @@ class CloudController(object): @rbac.allow('all') def describe_security_groups(self, context, group_names, **kwargs): - groups = { 'securityGroupSet': [] } + groups = {'securityGroupSet': []} # Stubbed for now to unblock other things. 
return groups @@ -251,7 +249,7 @@ class CloudController(object): instance = self._get_instance(context, instance_id[0]) return rpc.call('%s.%s' % (FLAGS.compute_topic, instance['node_name']), {"method": "get_console_output", - "args" : {"instance_id": instance_id[0]}}) + "args": {"instance_id": instance_id[0]}}) def _get_user_id(self, context): if context and context.user: @@ -285,10 +283,10 @@ class CloudController(object): if volume['attach_status'] == 'attached': v['attachmentSet'] = [{'attachTime': volume['attach_time'], 'deleteOnTermination': volume['delete_on_termination'], - 'device' : volume['mountpoint'], - 'instanceId' : volume['instance_id'], - 'status' : 'attached', - 'volume_id' : volume['volume_id']}] + 'device': volume['mountpoint'], + 'instanceId': volume['instance_id'], + 'status': 'attached', + 'volume_id': volume['volume_id']}] else: v['attachmentSet'] = [{}] return v @@ -298,7 +296,7 @@ class CloudController(object): def create_volume(self, context, size, **kwargs): # TODO(vish): refactor this to create the volume object here and tell service to create it result = yield rpc.call(FLAGS.volume_topic, {"method": "create_volume", - "args" : {"size": size, + "args": {"size": size, "user_id": context.user.id, "project_id": context.project.id}}) # NOTE(vish): rpc returned value is in the result key in the dictionary @@ -348,15 +346,15 @@ class CloudController(object): compute_node = instance['node_name'] rpc.cast('%s.%s' % (FLAGS.compute_topic, compute_node), {"method": "attach_volume", - "args" : {"volume_id": volume_id, - "instance_id" : instance_id, - "mountpoint" : device}}) - return defer.succeed({'attachTime' : volume['attach_time'], - 'device' : volume['mountpoint'], - 'instanceId' : instance_id, - 'requestId' : context.request_id, - 'status' : volume['attach_status'], - 'volumeId' : volume_id}) + "args": {"volume_id": volume_id, + "instance_id": instance_id, + "mountpoint": device}}) + return defer.succeed({'attachTime': volume['attach_time'], 
+ 'device': volume['mountpoint'], + 'instanceId': instance_id, + 'requestId': context.request_id, + 'status': volume['attach_status'], + 'volumeId': volume_id}) @rbac.allow('projectmanager', 'sysadmin') @@ -372,18 +370,18 @@ class CloudController(object): instance = self._get_instance(context, instance_id) rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), {"method": "detach_volume", - "args" : {"instance_id": instance_id, + "args": {"instance_id": instance_id, "volume_id": volume_id}}) except exception.NotFound: # If the instance doesn't exist anymore, # then we need to call detach blind volume.finish_detach() - return defer.succeed({'attachTime' : volume['attach_time'], - 'device' : volume['mountpoint'], - 'instanceId' : instance_id, - 'requestId' : context.request_id, - 'status' : volume['attach_status'], - 'volumeId' : volume_id}) + return defer.succeed({'attachTime': volume['attach_time'], + 'device': volume['mountpoint'], + 'instanceId': instance_id, + 'requestId': context.request_id, + 'status': volume['attach_status'], + 'volumeId': volume_id}) def _convert_to_set(self, lst, label): if lst == None or lst == []: @@ -425,7 +423,8 @@ class CloudController(object): i['key_name'] = instance.get('key_name', None) if context.user.is_admin(): i['key_name'] = '%s (%s, %s)' % (i['key_name'], - instance.get('project_id', None), instance.get('node_name','')) + instance.get('project_id', None), + instance.get('node_name', '')) i['product_codes_set'] = self._convert_to_set( instance.get('product_codes', None), 'product_code') i['instance_type'] = instance.get('instance_type', None) @@ -442,7 +441,7 @@ class CloudController(object): reservations[res_id] = r reservations[res_id]['instances_set'].append(i) - instance_response = {'reservationSet' : list(reservations.values()) } + instance_response = {'reservationSet': list(reservations.values())} return instance_response @rbac.allow('all') @@ -457,7 +456,7 @@ class CloudController(object): address['project_id'] 
== context.project.id): address_rv = { 'public_ip': address['address'], - 'instance_id' : address.get('instance_id', 'free') + 'instance_id': address.get('instance_id', 'free') } if context.user.is_admin(): address_rv['instance_id'] = "%s (%s, %s)" % ( @@ -477,7 +476,7 @@ class CloudController(object): "args": {"user_id": context.user.id, "project_id": context.project.id}}) public_ip = alloc_result['result'] - defer.returnValue({'addressSet': [{'publicIp' : public_ip}]}) + defer.returnValue({'addressSet': [{'publicIp': public_ip}]}) @rbac.allow('netadmin') @defer.inlineCallbacks @@ -591,7 +590,7 @@ class CloudController(object): inst.save() rpc.cast(FLAGS.compute_topic, {"method": "run_instance", - "args": {"instance_id" : inst.instance_id}}) + "args": {"instance_id": inst.instance_id}}) logging.debug("Casting to node for %s's instance with IP of %s" % (context.user.name, inst['private_dns_name'])) # TODO: Make Network figure out the network name from ip. @@ -646,7 +645,7 @@ class CloudController(object): instance = self._get_instance(context, i) rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), {"method": "reboot_instance", - "args" : {"instance_id": i}}) + "args": {"instance_id": i}}) return defer.succeed(True) @rbac.allow('projectmanager', 'sysadmin') @@ -656,7 +655,7 @@ class CloudController(object): volume_node = volume['node_name'] rpc.cast('%s.%s' % (FLAGS.volume_topic, volume_node), {"method": "delete_volume", - "args" : {"volume_id": volume_id}}) + "args": {"volume_id": volume_id}}) return defer.succeed(True) @rbac.allow('all') @@ -689,9 +688,9 @@ class CloudController(object): image = images.list(context, image_id)[0] except IndexError: raise exception.ApiError('invalid id: %s' % image_id) - result = { 'image_id': image_id, 'launchPermission': [] } + result = {'image_id': image_id, 'launchPermission': []} if image['isPublic']: - result['launchPermission'].append({ 'group': 'all' }) + result['launchPermission'].append({'group': 'all'}) 
return defer.succeed(result) @rbac.allow('projectmanager', 'sysadmin') -- cgit From 3d15adb40c5fc569bd29d4779fca792263338e54 Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> Date: Fri, 13 Aug 2010 10:14:34 -0400 Subject: Merge case statement options --- run_tests.sh | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/run_tests.sh b/run_tests.sh index 31bfce9fa..6ea40d95e 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -25,12 +25,9 @@ function process_options { function process_option { option=$1 case $option in - --help) usage;; - -h) usage;; - -V) let always_venv=1; let never_venv=0;; - --virtual-env) let always_venv=1; let never_venv=0;; - -N) let always_venv=0; let never_venv=1;; - --no-virtual-env) let always_venv=0; let never_venv=1;; + -h|--help) usage;; + -V|--virtual-env) let always_venv=1; let never_venv=0;; + -N|--no-virtual-env) let always_venv=0; let never_venv=1;; esac } -- cgit From bfb906cb0235a6e0b037d387aadc4abc2280fea0 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Fri, 13 Aug 2010 11:09:27 -0400 Subject: Support JSON and XML in Serializer --- nova/wsgi.py | 56 ++++++++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 46 insertions(+), 10 deletions(-) diff --git a/nova/wsgi.py b/nova/wsgi.py index 304f7149a..0570e1829 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -218,23 +218,59 @@ class Serializer(object): Serializes a dictionary to a Content Type specified by a WSGI environment. """ - def __init__(self, environ): - """Create a serializer based on the given WSGI environment.""" + def __init__(self, environ, metadata=None): + """ + Create a serializer based on the given WSGI environment. + 'metadata' is an optional dict mapping MIME types to information + needed to serialize a dictionary to that type. + """ self.environ = environ + self.metadata = metadata or {} - def serialize(self, data): + def to_content_type(self, data): """ Serialize a dictionary into a string. 
The format of the string will be decided based on the Content Type requested in self.environ: by Accept: header, or by URL suffix. """ - req = webob.Request(self.environ) - # TODO(gundlach): do XML correctly and be more robust - if req.accept and 'application/json' in req.accept: + mimetype = 'application/xml' + # TODO(gundlach): determine mimetype from request + + if mimetype == 'application/json': import json return json.dumps(data) + elif mimetype == 'application/xml': + metadata = self.metadata.get('application/xml', {}) + # We expect data to contain a single key which is the XML root. + root_key = data.keys()[0] + from xml.dom import minidom + doc = minidom.Document() + node = self._to_xml_node(doc, metadata, root_key, data[root_key]) + return node.toprettyxml(indent=' ') else: - return '' + repr(data) + \ - '' - - + return repr(data) + + def _to_xml_node(self, doc, metadata, nodename, data): + result = doc.createElement(nodename) + if type(data) is list: + singular = metadata.get('plurals', {}).get(nodename, None) + if singular is None: + if nodename.endswith('s'): + singular = nodename[:-1] + else: + singular = 'item' + for item in data: + node = self._to_xml_node(doc, metadata, singular, item) + result.appendChild(node) + elif type(data) is dict: + attrs = metadata.get('attributes', {}).get(nodename, {}) + for k,v in data.items(): + if k in attrs: + result.setAttribute(k, str(v)) + else: + node = self._to_xml_node(doc, metadata, k, v) + result.appendChild(node) + else: # atom + node = doc.createTextNode(str(data)) + result.appendChild(node) + return result -- cgit From a860a07068d4d643c42973625c454c6b09e883cb Mon Sep 17 00:00:00 2001 From: Sleepsonthefloor Date: Sat, 14 Aug 2010 02:13:12 -0700 Subject: initial commit for orm based models --- nova/auth.py | 741 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ nova/models.py | 198 +++++++++++++++ 2 files changed, 939 insertions(+) create mode 100644 nova/auth.py create mode 100644 nova/models.py diff 
--git a/nova/auth.py b/nova/auth.py new file mode 100644 index 000000000..199a887e1 --- /dev/null +++ b/nova/auth.py @@ -0,0 +1,741 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Nova authentication management +""" + +import logging +import os +import shutil +import string +import tempfile +import uuid +import zipfile + +from nova import crypto +from nova import exception +from nova import flags +from nova import utils +from nova.auth import signer +from nova.network import vpn +from nova.models import User + +#unused imports +#from nova import datastore +#from nova.auth import ldapdriver # for flags +#from nova import objectstore # for flags + +FLAGS = flags.FLAGS + +# NOTE(vish): a user with one of these roles will be a superuser and +# have access to all api commands +flags.DEFINE_list('superuser_roles', ['cloudadmin'], + 'Roles that ignore rbac checking completely') + +# NOTE(vish): a user with one of these roles will have it for every +# project, even if he or she is not a member of the project +flags.DEFINE_list('global_roles', ['cloudadmin', 'itsec'], + 'Roles that apply to all projects') + + +flags.DEFINE_string('credentials_template', + utils.abspath('auth/novarc.template'), + 'Template for creating users rc file') 
+flags.DEFINE_string('vpn_client_template', + utils.abspath('cloudpipe/client.ovpn.template'), + 'Template for creating users vpn file') +flags.DEFINE_string('credential_vpn_file', 'nova-vpn.conf', + 'Filename of certificate in credentials zip') +flags.DEFINE_string('credential_key_file', 'pk.pem', + 'Filename of private key in credentials zip') +flags.DEFINE_string('credential_cert_file', 'cert.pem', + 'Filename of certificate in credentials zip') +flags.DEFINE_string('credential_rc_file', 'novarc', + 'Filename of rc in credentials zip') + +flags.DEFINE_string('credential_cert_subject', + '/C=US/ST=California/L=MountainView/O=AnsoLabs/' + 'OU=NovaDev/CN=%s-%s', + 'Subject for certificate for users') + +flags.DEFINE_string('auth_driver', 'nova.auth.ldapdriver.FakeLdapDriver', + 'Driver that auth manager uses') + +class AuthBase(object): + """Base class for objects relating to auth + + Objects derived from this class should be stupid data objects with + an id member. They may optionally contain methods that delegate to + AuthManager, but should not implement logic themselves. + """ + @classmethod + def safe_id(cls, obj): + """Safe get object id + + This method will return the id of the object if the object + is of this class, otherwise it will return the original object. + This allows methods to accept objects or ids as paramaters. 
+ + """ + if isinstance(obj, cls): + return obj.id + else: + return obj + + +# anthony - the User class has moved to nova.models +#class User(AuthBase): +# """Object representing a user""" +# def __init__(self, id, name, access, secret, admin): +# AuthBase.__init__(self) +# self.id = id +# self.name = name +# self.access = access +# self.secret = secret +# self.admin = admin +# +# def is_superuser(self): +# return AuthManager().is_superuser(self) +# +# def is_admin(self): +# return AuthManager().is_admin(self) +# +# def has_role(self, role): +# return AuthManager().has_role(self, role) +# +# def add_role(self, role): +# return AuthManager().add_role(self, role) +# +# def remove_role(self, role): +# return AuthManager().remove_role(self, role) +# +# def is_project_member(self, project): +# return AuthManager().is_project_member(self, project) +# +# def is_project_manager(self, project): +# return AuthManager().is_project_manager(self, project) +# +# def generate_key_pair(self, name): +# return AuthManager().generate_key_pair(self.id, name) +# +# def create_key_pair(self, name, public_key, fingerprint): +# return AuthManager().create_key_pair(self.id, +# name, +# public_key, +# fingerprint) +# +# def get_key_pair(self, name): +# return AuthManager().get_key_pair(self.id, name) +# +# def delete_key_pair(self, name): +# return AuthManager().delete_key_pair(self.id, name) +# +# def get_key_pairs(self): +# return AuthManager().get_key_pairs(self.id) +# +# def __repr__(self): +# return "User('%s', '%s', '%s', '%s', %s)" % (self.id, +# self.name, +# self.access, +# self.secret, +# self.admin) + + +class KeyPair(AuthBase): + """Represents an ssh key returned from the datastore + + Even though this object is named KeyPair, only the public key and + fingerprint is stored. The user's private key is not saved. 
+ """ + def __init__(self, id, name, owner_id, public_key, fingerprint): + AuthBase.__init__(self) + self.id = id + self.name = name + self.owner_id = owner_id + self.public_key = public_key + self.fingerprint = fingerprint + + def __repr__(self): + return "KeyPair('%s', '%s', '%s', '%s', '%s')" % (self.id, + self.name, + self.owner_id, + self.public_key, + self.fingerprint) + + +class Project(AuthBase): + """Represents a Project returned from the datastore""" + def __init__(self, id, name, project_manager_id, description, member_ids): + AuthBase.__init__(self) + self.id = id + self.name = name + self.project_manager_id = project_manager_id + self.description = description + self.member_ids = member_ids + + @property + def project_manager(self): + return AuthManager().get_user(self.project_manager_id) + + @property + def vpn_ip(self): + ip, port = AuthManager().get_project_vpn_data(self) + return ip + + @property + def vpn_port(self): + ip, port = AuthManager().get_project_vpn_data(self) + return port + + def has_manager(self, user): + return AuthManager().is_project_manager(user, self) + + def has_member(self, user): + return AuthManager().is_project_member(user, self) + + def add_role(self, user, role): + return AuthManager().add_role(user, role, self) + + def remove_role(self, user, role): + return AuthManager().remove_role(user, role, self) + + def has_role(self, user, role): + return AuthManager().has_role(user, role, self) + + def get_credentials(self, user): + return AuthManager().get_credentials(user, self) + + def __repr__(self): + return "Project('%s', '%s', '%s', '%s', %s)" % (self.id, + self.name, + self.project_manager_id, + self.description, + self.member_ids) + + + +class AuthManager(object): + """Manager Singleton for dealing with Users, Projects, and Keypairs + + Methods accept objects or ids. + + AuthManager uses a driver object to make requests to the data backend. + See ldapdriver for reference. 
+ + AuthManager also manages associated data related to Auth objects that + need to be more accessible, such as vpn ips and ports. + """ + _instance = None + def __new__(cls, *args, **kwargs): + """Returns the AuthManager singleton""" + if not cls._instance: + cls._instance = super(AuthManager, cls).__new__(cls) + return cls._instance + + def __init__(self, driver=None, *args, **kwargs): + """Inits the driver from parameter or flag + + __init__ is run every time AuthManager() is called, so we only + reset the driver if it is not set or a new driver is specified. + """ + if driver or not getattr(self, 'driver', None): + self.driver = utils.import_class(driver or FLAGS.auth_driver) + + def authenticate(self, access, signature, params, verb='GET', + server_string='127.0.0.1:8773', path='/', + check_type='ec2', headers=None): + """Authenticates AWS request using access key and signature + + If the project is not specified, attempts to authenticate to + a project with the same name as the user. This way, older tools + that have no project knowledge will still work. + + @type access: str + @param access: Access key for user in the form "access:project". + + @type signature: str + @param signature: Signature of the request. + + @type params: list of str + @param params: Web paramaters used for the signature. + + @type verb: str + @param verb: Web request verb ('GET' or 'POST'). + + @type server_string: str + @param server_string: Web request server string. + + @type path: str + @param path: Web request path. + + @type check_type: str + @param check_type: Type of signature to check. 'ec2' for EC2, 's3' for + S3. Any other value will cause signature not to be + checked. + + @type headers: list + @param headers: HTTP headers passed with the request (only needed for + s3 signature checks) + + @rtype: tuple (User, Project) + @return: User and project that the request represents. 
+ """ + # TODO(vish): check for valid timestamp + (access_key, sep, project_id) = access.partition(':') + + logging.info('Looking up user: %r', access_key) + user = self.get_user_from_access_key(access_key) + logging.info('user: %r', user) + if user == None: + raise exception.NotFound('No user found for access key %s' % + access_key) + + # NOTE(vish): if we stop using project name as id we need better + # logic to find a default project for user + if project_id is '': + project_id = user.name + + project = self.get_project(project_id) + if project == None: + raise exception.NotFound('No project called %s could be found' % + project_id) + if not self.is_admin(user) and not self.is_project_member(user, + project): + raise exception.NotFound('User %s is not a member of project %s' % + (user.id, project.id)) + if check_type == 's3': + expected_signature = signer.Signer(user.secret.encode()).s3_authorization(headers, verb, path) + logging.debug('user.secret: %s', user.secret) + logging.debug('expected_signature: %s', expected_signature) + logging.debug('signature: %s', signature) + if signature != expected_signature: + raise exception.NotAuthorized('Signature does not match') + elif check_type == 'ec2': + # NOTE(vish): hmac can't handle unicode, so encode ensures that + # secret isn't unicode + expected_signature = signer.Signer(user.secret.encode()).generate( + params, verb, server_string, path) + logging.debug('user.secret: %s', user.secret) + logging.debug('expected_signature: %s', expected_signature) + logging.debug('signature: %s', signature) + if signature != expected_signature: + raise exception.NotAuthorized('Signature does not match') + return (user, project) + + def get_access_key(self, user, project): + """Get an access key that includes user and project""" + if not isinstance(user, User): + user = self.get_user(user) + return "%s:%s" % (user.access, Project.safe_id(project)) + + def is_superuser(self, user): + """Checks for superuser status, allowing user to 
bypass rbac + + @type user: User or uid + @param user: User to check. + + @rtype: bool + @return: True for superuser. + """ + if not isinstance(user, User): + user = self.get_user(user) + # NOTE(vish): admin flag on user represents superuser + if user.admin: + return True + for role in FLAGS.superuser_roles: + if self.has_role(user, role): + return True + + def is_admin(self, user): + """Checks for admin status, allowing user to access all projects + + @type user: User or uid + @param user: User to check. + + @rtype: bool + @return: True for admin. + """ + if not isinstance(user, User): + user = self.get_user(user) + if self.is_superuser(user): + return True + for role in FLAGS.global_roles: + if self.has_role(user, role): + return True + + def has_role(self, user, role, project=None): + """Checks existence of role for user + + If project is not specified, checks for a global role. If project + is specified, checks for the union of the global role and the + project role. + + Role 'projectmanager' only works for projects and simply checks to + see if the user is the project_manager of the specified project. It + is the same as calling is_project_manager(user, project). + + @type user: User or uid + @param user: User to check. + + @type role: str + @param role: Role to check. + + @type project: Project or project_id + @param project: Project in which to look for local role. + + @rtype: bool + @return: True if the user has the role. 
+ """ + with self.driver() as drv: + if role == 'projectmanager': + if not project: + raise exception.Error("Must specify project") + return self.is_project_manager(user, project) + + global_role = drv.has_role(User.safe_id(user), + role, + None) + if not global_role: + return global_role + + if not project or role in FLAGS.global_roles: + return global_role + + return drv.has_role(User.safe_id(user), + role, + Project.safe_id(project)) + + def add_role(self, user, role, project=None): + """Adds role for user + + If project is not specified, adds a global role. If project + is specified, adds a local role. + + The 'projectmanager' role is special and can't be added or removed. + + @type user: User or uid + @param user: User to which to add role. + + @type role: str + @param role: Role to add. + + @type project: Project or project_id + @param project: Project in which to add local role. + """ + with self.driver() as drv: + drv.add_role(User.safe_id(user), role, Project.safe_id(project)) + + def remove_role(self, user, role, project=None): + """Removes role for user + + If project is not specified, removes a global role. If project + is specified, removes a local role. + + The 'projectmanager' role is special and can't be added or removed. + + @type user: User or uid + @param user: User from which to remove role. + + @type role: str + @param role: Role to remove. + + @type project: Project or project_id + @param project: Project in which to remove local role. 
+ """ + with self.driver() as drv: + drv.remove_role(User.safe_id(user), role, Project.safe_id(project)) + + def get_project(self, pid): + """Get project object by id""" + with self.driver() as drv: + project_dict = drv.get_project(pid) + if project_dict: + return Project(**project_dict) + + def get_projects(self, user=None): + """Retrieves list of projects, optionally filtered by user""" + with self.driver() as drv: + project_list = drv.get_projects(User.safe_id(user)) + if not project_list: + return [] + return [Project(**project_dict) for project_dict in project_list] + + def create_project(self, name, manager_user, + description=None, member_users=None): + """Create a project + + @type name: str + @param name: Name of the project to create. The name will also be + used as the project id. + + @type manager_user: User or uid + @param manager_user: This user will be the project manager. + + @type description: str + @param project: Description of the project. If no description is + specified, the name of the project will be used. + + @type member_users: list of User or uid + @param: Initial project members. The project manager will always be + added as a member, even if he isn't specified in this list. + + @rtype: Project + @return: The new project. 
+ """ + if member_users: + member_users = [User.safe_id(u) for u in member_users] + with self.driver() as drv: + project_dict = drv.create_project(name, + User.safe_id(manager_user), + description, + member_users) + if project_dict: + return Project(**project_dict) + + def add_to_project(self, user, project): + """Add user to project""" + with self.driver() as drv: + return drv.add_to_project(User.safe_id(user), + Project.safe_id(project)) + + def is_project_manager(self, user, project): + """Checks if user is project manager""" + if not isinstance(project, Project): + project = self.get_project(project) + return User.safe_id(user) == project.project_manager_id + + def is_project_member(self, user, project): + """Checks to see if user is a member of project""" + if not isinstance(project, Project): + project = self.get_project(project) + return User.safe_id(user) in project.member_ids + + def remove_from_project(self, user, project): + """Removes a user from a project""" + with self.driver() as drv: + return drv.remove_from_project(User.safe_id(user), + Project.safe_id(project)) + + def get_project_vpn_data(self, project): + """Gets vpn ip and port for project + + @type project: Project or project_id + @param project: Project from which to get associated vpn data + + @rvalue: tuple of (str, str) + @return: A tuple containing (ip, port) or None, None if vpn has + not been allocated for user. 
+ """ + network_data = vpn.NetworkData.lookup(Project.safe_id(project)) + if not network_data: + raise exception.NotFound('project network data has not been set') + return (network_data.ip, network_data.port) + + def delete_project(self, project): + """Deletes a project""" + with self.driver() as drv: + return drv.delete_project(Project.safe_id(project)) + + def get_user(self, uid): + """Retrieves a user by id""" + with self.driver() as drv: + user_dict = drv.get_user(uid) + if user_dict: + return User(**user_dict) + + def get_user_from_access_key(self, access_key): + """Retrieves a user by access key""" + with self.driver() as drv: + user_dict = drv.get_user_from_access_key(access_key) + if user_dict: + return User(**user_dict) + + def get_users(self): + """Retrieves a list of all users""" + with self.driver() as drv: + user_list = drv.get_users() + if not user_list: + return [] + return [User(**user_dict) for user_dict in user_list] + + def create_user(self, name, access=None, secret=None, admin=False): + """Creates a user + + @type name: str + @param name: Name of the user to create. + + @type access: str + @param access: Access Key (defaults to a random uuid) + + @type secret: str + @param secret: Secret Key (defaults to a random uuid) + + @type admin: bool + @param admin: Whether to set the admin flag. The admin flag gives + superuser status regardless of roles specifed for the user. + + @type create_project: bool + @param: Whether to create a project for the user with the same name. + + @rtype: User + @return: The new user. 
+ """ + if access == None: access = str(uuid.uuid4()) + if secret == None: secret = str(uuid.uuid4()) + with self.driver() as drv: + user_dict = drv.create_user(name, access, secret, admin) + if user_dict: + return User(**user_dict) + + def delete_user(self, user): + """Deletes a user""" + with self.driver() as drv: + drv.delete_user(User.safe_id(user)) + + def generate_key_pair(self, user, key_name): + """Generates a key pair for a user + + Generates a public and private key, stores the public key using the + key_name, and returns the private key and fingerprint. + + @type user: User or uid + @param user: User for which to create key pair. + + @type key_name: str + @param key_name: Name to use for the generated KeyPair. + + @rtype: tuple (private_key, fingerprint) + @return: A tuple containing the private_key and fingerprint. + """ + # NOTE(vish): generating key pair is slow so check for legal + # creation before creating keypair + uid = User.safe_id(user) + with self.driver() as drv: + if not drv.get_user(uid): + raise exception.NotFound("User %s doesn't exist" % user) + if drv.get_key_pair(uid, key_name): + raise exception.Duplicate("The keypair %s already exists" + % key_name) + private_key, public_key, fingerprint = crypto.generate_key_pair() + self.create_key_pair(uid, key_name, public_key, fingerprint) + return private_key, fingerprint + + def create_key_pair(self, user, key_name, public_key, fingerprint): + """Creates a key pair for user""" + with self.driver() as drv: + kp_dict = drv.create_key_pair(User.safe_id(user), + key_name, + public_key, + fingerprint) + if kp_dict: + return KeyPair(**kp_dict) + + def get_key_pair(self, user, key_name): + """Retrieves a key pair for user""" + with self.driver() as drv: + kp_dict = drv.get_key_pair(User.safe_id(user), key_name) + if kp_dict: + return KeyPair(**kp_dict) + + def get_key_pairs(self, user): + """Retrieves all key pairs for user""" + with self.driver() as drv: + kp_list = 
drv.get_key_pairs(User.safe_id(user)) + if not kp_list: + return [] + return [KeyPair(**kp_dict) for kp_dict in kp_list] + + def delete_key_pair(self, user, key_name): + """Deletes a key pair for user""" + with self.driver() as drv: + drv.delete_key_pair(User.safe_id(user), key_name) + + def get_credentials(self, user, project=None): + """Get credential zip for user in project""" + if not isinstance(user, User): + user = self.get_user(user) + if project is None: + project = user.id + pid = Project.safe_id(project) + rc = self.__generate_rc(user.access, user.secret, pid) + private_key, signed_cert = self._generate_x509_cert(user.id, pid) + + tmpdir = tempfile.mkdtemp() + zf = os.path.join(tmpdir, "temp.zip") + zippy = zipfile.ZipFile(zf, 'w') + zippy.writestr(FLAGS.credential_rc_file, rc) + zippy.writestr(FLAGS.credential_key_file, private_key) + zippy.writestr(FLAGS.credential_cert_file, signed_cert) + + network_data = vpn.NetworkData.lookup(pid) + if network_data: + configfile = open(FLAGS.vpn_client_template,"r") + s = string.Template(configfile.read()) + configfile.close() + config = s.substitute(keyfile=FLAGS.credential_key_file, + certfile=FLAGS.credential_cert_file, + ip=network_data.ip, + port=network_data.port) + zippy.writestr(FLAGS.credential_vpn_file, config) + else: + logging.warn("No vpn data for project %s" % + pid) + + zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(user.id)) + zippy.close() + with open(zf, 'rb') as f: + buffer = f.read() + + shutil.rmtree(tmpdir) + return buffer + + def get_environment_rc(self, user, project=None): + """Get credential zip for user in project""" + if not isinstance(user, User): + user = self.get_user(user) + if project is None: + project = user.id + pid = Project.safe_id(project) + return self.__generate_rc(user.access, user.secret, pid) + + def __generate_rc(self, access, secret, pid): + """Generate rc file for user""" + rc = open(FLAGS.credentials_template).read() + rc = rc % { 'access': access, + 'project': pid, + 
'secret': secret, + 'ec2': FLAGS.ec2_url, + 's3': 'http://%s:%s' % (FLAGS.s3_host, FLAGS.s3_port), + 'nova': FLAGS.ca_file, + 'cert': FLAGS.credential_cert_file, + 'key': FLAGS.credential_key_file, + } + return rc + + def _generate_x509_cert(self, uid, pid): + """Generate x509 cert for user""" + (private_key, csr) = crypto.generate_x509_cert( + self.__cert_subject(uid)) + # TODO(joshua): This should be async call back to the cloud controller + signed_cert = crypto.sign_csr(csr, pid) + return (private_key, signed_cert) + + def __cert_subject(self, uid): + """Helper to generate cert subject""" + return FLAGS.credential_cert_subject % (uid, utils.isotime()) diff --git a/nova/models.py b/nova/models.py new file mode 100644 index 000000000..4c739488a --- /dev/null +++ b/nova/models.py @@ -0,0 +1,198 @@ +from sqlalchemy.orm import relationship, backref, validates +from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey, DateTime, Boolean, Text +from sqlalchemy.ext.declarative import declarative_base +from auth import * + +Base = declarative_base() + +class User(Base): + # sqlalchemy + __tablename__ = 'users' + sid = Column(String, primary_key=True) + + # backwards compatibility + @classmethod + def safe_id(cls, obj): + """Safe get object id + + This method will return the id of the object if the object + is of this class, otherwise it will return the original object. + This allows methods to accept objects or ids as paramaters. 
+ + """ + if isinstance(obj, cls): + return obj.id + else: + return obj + +# def __init__(self, id, name, access, secret, admin): +# self.id = id +# self.name = name +# self.access = access +# self.secret = secret +# self.admin = admin + + def __getattr__(self, name): + if name == 'id': + return self.uid + else: raise AttributeError, name + + def is_superuser(self): + return AuthManager().is_superuser(self) + + def is_admin(self): + return AuthManager().is_admin(self) + + def has_role(self, role): + return AuthManager().has_role(self, role) + + def add_role(self, role): + return AuthManager().add_role(self, role) + + def remove_role(self, role): + return AuthManager().remove_role(self, role) + + def is_project_member(self, project): + return AuthManager().is_project_member(self, project) + + def is_project_manager(self, project): + return AuthManager().is_project_manager(self, project) + + def generate_key_pair(self, name): + return AuthManager().generate_key_pair(self.id, name) + + def create_key_pair(self, name, public_key, fingerprint): + return AuthManager().create_key_pair(self.id, + name, + public_key, + fingerprint) + + def get_key_pair(self, name): + return AuthManager().get_key_pair(self.id, name) + + def delete_key_pair(self, name): + return AuthManager().delete_key_pair(self.id, name) + + def get_key_pairs(self): + return AuthManager().get_key_pairs(self.id) + + def __repr__(self): + return "User('%s', '%s', '%s', '%s', %s)" % (self.id, + self.name, + self.access, + self.secret, + self.admin) + + + +class Project(Base): + __tablename__ = 'projects' + sid = Column(String, primary_key=True) + +class Image(Base): + __tablename__ = 'images' + user_sid = Column(String, ForeignKey('users.sid'), nullable=False) + project_sid = Column(String, ForeignKey('projects.sid'), nullable=False) + + sid = Column(String, primary_key=True) + image_type = Column(String) + public = Column(Boolean, default=False) + state = Column(String) + location = Column(String) + arch = 
Column(String) + default_kernel_sid = Column(String) + default_ramdisk_sid = Column(String) + + created_at = Column(DateTime) + updated_at = Column(DateTime) # auto update on change FIXME + + + @validates('image_type') + def validate_image_type(self, key, image_type): + assert(image_type in ['machine', 'kernel', 'ramdisk', 'raw']) + + @validates('state') + def validate_state(self, key, state): + assert(state in ['available', 'pending', 'disabled']) + + @validates('default_kernel_sid') + def validate_kernel_sid(self, key, val): + if val != 'machine': + assert(val is None) + + @validates('default_ramdisk_sid') + def validate_ramdisk_sid(self, key, val): + if val != 'machine': + assert(val is None) + +class Network(Base): + __tablename__ = 'networks' + id = Column(Integer, primary_key=True) + bridge = Column(String) + vlan = Column(String) + #vpn_port = Column(Integer) + project_sid = Column(String, ForeignKey('projects.sid'), nullable=False) + +class PhysicalNode(Base): + __tablename__ = 'physical_nodes' + id = Column(Integer, primary_key=True) + +class Instance(Base): + __tablename__ = 'instances' + id = Column(Integer, primary_key=True) + + user_sid = Column(String, ForeignKey('users.sid'), nullable=False) + project_sid = Column(String, ForeignKey('projects.sid')) + + image_sid = Column(Integer, ForeignKey('images.sid'), nullable=False) + kernel_sid = Column(String, ForeignKey('images.sid'), nullable=True) + ramdisk_sid = Column(String, ForeignKey('images.sid'), nullable=True) + + launch_index = Column(Integer) + key_name = Column(String) + key_data = Column(Text) + + state = Column(String) + + hostname = Column(String) + physical_node_id = Column(Integer) + + instance_type = Column(Integer) + + user_data = Column(Text) + +# user = relationship(User, backref=backref('instances', order_by=id)) +# ramdisk = relationship(Ramdisk, backref=backref('instances', order_by=id)) +# kernel = relationship(Kernel, backref=backref('instances', order_by=id)) +# project = 
relationship(Project, backref=backref('instances', order_by=id)) + +#TODO - see Ewan's email about state improvements + # vmstate_state = running, halted, suspended, paused + # power_state = what we have + # task_state = transitory and may trigger power state transition + + @validates('state') + def validate_state(self, key, state): + assert(state in ['nostate', 'running', 'blocked', 'paused', 'shutdown', 'shutoff', 'crashed']) + +class Volume(Base): + __tablename__ = 'volumes' + id = Column(Integer, primary_key=True) + shelf_id = Column(Integer) + blade_id = Column(Integer) + + +if __name__ == '__main__': + from sqlalchemy import create_engine + engine = create_engine('sqlite:///:memory:', echo=True) + Base.metadata.create_all(engine) + + from sqlalchemy.orm import sessionmaker + Session = sessionmaker(bind=engine) + session = Session() + + instance = Instance(image_sid='as', ramdisk_sid='AS', user_sid='anthony') + user = User(sid='anthony') + session.add(instance) + session.commit() + -- cgit From 1395690e99c41aa14e776e4b94054fde29856c60 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sat, 14 Aug 2010 18:04:43 -0700 Subject: got run_tests.py to run (with many failed tests) --- nova/compute/model.py | 12 +-- nova/datastore.old.py | 261 ++++++++++++++++++++++++++++++++++++++++++++++++ nova/datastore.py | 262 ------------------------------------------------- nova/network/model.py | 12 +-- nova/network/vpn.py | 2 +- nova/test.py | 6 ++ nova/volume/service.py | 2 +- run_tests.py | 10 +- 8 files changed, 281 insertions(+), 286 deletions(-) create mode 100644 nova/datastore.old.py diff --git a/nova/compute/model.py b/nova/compute/model.py index 266a93b9a..54d816a9c 100644 --- a/nova/compute/model.py +++ b/nova/compute/model.py @@ -63,13 +63,11 @@ class InstanceDirectory(object): def __getitem__(self, item): return self.get(item) - @datastore.absorb_connection_error def by_project(self, project): """returns a list of instance objects for a project""" for instance_id 
in datastore.Redis.instance().smembers('project:%s:instances' % project): yield Instance(instance_id) - @datastore.absorb_connection_error def by_node(self, node): """returns a list of instances for a node""" for instance_id in datastore.Redis.instance().smembers('node:%s:instances' % node): @@ -90,12 +88,10 @@ class InstanceDirectory(object): """returns the instance a volume is attached to""" pass - @datastore.absorb_connection_error def exists(self, instance_id): return datastore.Redis.instance().sismember('instances', instance_id) @property - @datastore.absorb_connection_error def all(self): """returns a list of all instances""" for instance_id in datastore.Redis.instance().smembers('instances'): @@ -107,7 +103,7 @@ class InstanceDirectory(object): return self.get(instance_id) -class Instance(datastore.BasicModel): +class Instance(): """Wrapper around stored properties of an instance""" def __init__(self, instance_id): @@ -168,7 +164,7 @@ class Instance(datastore.BasicModel): self.unassociate_with("ip", self.state['private_dns_name']) return super(Instance, self).destroy() -class Host(datastore.BasicModel): +class Host(): """A Host is the machine where a Daemon is running.""" def __init__(self, hostname): @@ -185,7 +181,7 @@ class Host(datastore.BasicModel): return self.hostname -class Daemon(datastore.BasicModel): +class Daemon(): """A Daemon is a job (compute, api, network, ...) 
that runs on a host.""" def __init__(self, host_or_combined, binpath=None): @@ -235,7 +231,7 @@ class Daemon(datastore.BasicModel): for x in cls.associated_to("host", hostname): yield x -class SessionToken(datastore.BasicModel): +class SessionToken(): """This is a short-lived auth token that is passed through web requests""" def __init__(self, session_token): diff --git a/nova/datastore.old.py b/nova/datastore.old.py new file mode 100644 index 000000000..751c5eeeb --- /dev/null +++ b/nova/datastore.old.py @@ -0,0 +1,261 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Datastore: + +MAKE Sure that ReDIS is running, and your flags are set properly, +before trying to run this. 
+""" + +import logging + +from nova import exception +from nova import flags +from nova import utils + + +FLAGS = flags.FLAGS +flags.DEFINE_string('redis_host', '127.0.0.1', + 'Host that redis is running on.') +flags.DEFINE_integer('redis_port', 6379, + 'Port that redis is running on.') +flags.DEFINE_integer('redis_db', 0, 'Multiple DB keeps tests away') + + +class Redis(object): + def __init__(self): + if hasattr(self.__class__, '_instance'): + raise Exception('Attempted to instantiate singleton') + + @classmethod + def instance(cls): + if not hasattr(cls, '_instance'): + inst = redis.Redis(host=FLAGS.redis_host, + port=FLAGS.redis_port, + db=FLAGS.redis_db) + cls._instance = inst + return cls._instance + + +class ConnectionError(exception.Error): + pass + + +def absorb_connection_error(fn): + def _wrapper(*args, **kwargs): + try: + return fn(*args, **kwargs) + except redis.exceptions.ConnectionError, ce: + raise ConnectionError(str(ce)) + return _wrapper + + +class BasicModel(object): + """ + All Redis-backed data derives from this class. + + You MUST specify an identifier() property that returns a unique string + per instance. + + You MUST have an initializer that takes a single argument that is a value + returned by identifier() to load a new class with. + + You may want to specify a dictionary for default_state(). + + You may also specify override_type at the class left to use a key other + than __class__.__name__. + + You override save and destroy calls to automatically build and destroy + associations. 
+ """ + + override_type = None + + @absorb_connection_error + def __init__(self): + state = Redis.instance().hgetall(self.__redis_key) + if state: + self.initial_state = state + self.state = dict(self.initial_state) + else: + self.initial_state = {} + self.state = self.default_state() + + + def default_state(self): + """You probably want to define this in your subclass""" + return {} + + @classmethod + def _redis_name(cls): + return cls.override_type or cls.__name__.lower() + + @classmethod + def lookup(cls, identifier): + rv = cls(identifier) + if rv.is_new_record(): + return None + else: + return rv + + @classmethod + @absorb_connection_error + def all(cls): + """yields all objects in the store""" + redis_set = cls._redis_set_name(cls.__name__) + for identifier in Redis.instance().smembers(redis_set): + yield cls(identifier) + + @classmethod + def associated_to(cls, foreign_type, foreign_id): + for identifier in cls.associated_keys(foreign_type, foreign_id): + yield cls(identifier) + + @classmethod + @absorb_connection_error + def associated_keys(cls, foreign_type, foreign_id): + redis_set = cls._redis_association_name(foreign_type, foreign_id) + return Redis.instance().smembers(redis_set) or [] + + @classmethod + def _redis_set_name(cls, kls_name): + # stupidly pluralize (for compatiblity with previous codebase) + return kls_name.lower() + "s" + + @classmethod + def _redis_association_name(cls, foreign_type, foreign_id): + return cls._redis_set_name("%s:%s:%s" % + (foreign_type, foreign_id, cls._redis_name())) + + @property + def identifier(self): + """You DEFINITELY want to define this in your subclass""" + raise NotImplementedError("Your subclass should define identifier") + + @property + def __redis_key(self): + return '%s:%s' % (self._redis_name(), self.identifier) + + def __repr__(self): + return "<%s:%s>" % (self.__class__.__name__, self.identifier) + + def keys(self): + return self.state.keys() + + def copy(self): + copyDict = {} + for item in 
self.keys(): + copyDict[item] = self[item] + return copyDict + + def get(self, item, default): + return self.state.get(item, default) + + def update(self, update_dict): + return self.state.update(update_dict) + + def setdefault(self, item, default): + return self.state.setdefault(item, default) + + def __contains__(self, item): + return item in self.state + + def __getitem__(self, item): + return self.state[item] + + def __setitem__(self, item, val): + self.state[item] = val + return self.state[item] + + def __delitem__(self, item): + """We don't support this""" + raise Exception("Silly monkey, models NEED all their properties.") + + def is_new_record(self): + return self.initial_state == {} + + @absorb_connection_error + def add_to_index(self): + """Each insance of Foo has its id tracked int the set named Foos""" + set_name = self.__class__._redis_set_name(self.__class__.__name__) + Redis.instance().sadd(set_name, self.identifier) + + @absorb_connection_error + def remove_from_index(self): + """Remove id of this instance from the set tracking ids of this type""" + set_name = self.__class__._redis_set_name(self.__class__.__name__) + Redis.instance().srem(set_name, self.identifier) + + @absorb_connection_error + def associate_with(self, foreign_type, foreign_id): + """Add this class id into the set foreign_type:foreign_id:this_types""" + # note the extra 's' on the end is for plurality + # to match the old data without requiring a migration of any sort + self.add_associated_model_to_its_set(foreign_type, foreign_id) + redis_set = self.__class__._redis_association_name(foreign_type, + foreign_id) + Redis.instance().sadd(redis_set, self.identifier) + + @absorb_connection_error + def unassociate_with(self, foreign_type, foreign_id): + """Delete from foreign_type:foreign_id:this_types set""" + redis_set = self.__class__._redis_association_name(foreign_type, + foreign_id) + Redis.instance().srem(redis_set, self.identifier) + + def add_associated_model_to_its_set(self, 
model_type, model_id): + """ + When associating an X to a Y, save Y for newer timestamp, etc, and to + make sure to save it if Y is a new record. + If the model_type isn't found as a usable class, ignore it, this can + happen when associating to things stored in LDAP (user, project, ...). + """ + table = globals() + klsname = model_type.capitalize() + if table.has_key(klsname): + model_class = table[klsname] + model_inst = model_class(model_id) + model_inst.save() + + @absorb_connection_error + def save(self): + """ + update the directory with the state from this model + also add it to the index of items of the same type + then set the initial_state = state so new changes are tracked + """ + # TODO(ja): implement hmset in redis-py and use it + # instead of multiple calls to hset + if self.is_new_record(): + self["create_time"] = utils.isotime() + for key, val in self.state.iteritems(): + Redis.instance().hset(self.__redis_key, key, val) + self.add_to_index() + self.initial_state = dict(self.state) + return True + + @absorb_connection_error + def destroy(self): + """deletes all related records from datastore.""" + logging.info("Destroying datamodel for %s %s", + self.__class__.__name__, self.identifier) + Redis.instance().delete(self.__redis_key) + self.remove_from_index() + return True + diff --git a/nova/datastore.py b/nova/datastore.py index 5dc6ed107..e69de29bb 100644 --- a/nova/datastore.py +++ b/nova/datastore.py @@ -1,262 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Datastore: - -MAKE Sure that ReDIS is running, and your flags are set properly, -before trying to run this. -""" - -import logging -import redis - -from nova import exception -from nova import flags -from nova import utils - - -FLAGS = flags.FLAGS -flags.DEFINE_string('redis_host', '127.0.0.1', - 'Host that redis is running on.') -flags.DEFINE_integer('redis_port', 6379, - 'Port that redis is running on.') -flags.DEFINE_integer('redis_db', 0, 'Multiple DB keeps tests away') - - -class Redis(object): - def __init__(self): - if hasattr(self.__class__, '_instance'): - raise Exception('Attempted to instantiate singleton') - - @classmethod - def instance(cls): - if not hasattr(cls, '_instance'): - inst = redis.Redis(host=FLAGS.redis_host, - port=FLAGS.redis_port, - db=FLAGS.redis_db) - cls._instance = inst - return cls._instance - - -class ConnectionError(exception.Error): - pass - - -def absorb_connection_error(fn): - def _wrapper(*args, **kwargs): - try: - return fn(*args, **kwargs) - except redis.exceptions.ConnectionError, ce: - raise ConnectionError(str(ce)) - return _wrapper - - -class BasicModel(object): - """ - All Redis-backed data derives from this class. - - You MUST specify an identifier() property that returns a unique string - per instance. - - You MUST have an initializer that takes a single argument that is a value - returned by identifier() to load a new class with. - - You may want to specify a dictionary for default_state(). - - You may also specify override_type at the class left to use a key other - than __class__.__name__. 
- - You override save and destroy calls to automatically build and destroy - associations. - """ - - override_type = None - - @absorb_connection_error - def __init__(self): - state = Redis.instance().hgetall(self.__redis_key) - if state: - self.initial_state = state - self.state = dict(self.initial_state) - else: - self.initial_state = {} - self.state = self.default_state() - - - def default_state(self): - """You probably want to define this in your subclass""" - return {} - - @classmethod - def _redis_name(cls): - return cls.override_type or cls.__name__.lower() - - @classmethod - def lookup(cls, identifier): - rv = cls(identifier) - if rv.is_new_record(): - return None - else: - return rv - - @classmethod - @absorb_connection_error - def all(cls): - """yields all objects in the store""" - redis_set = cls._redis_set_name(cls.__name__) - for identifier in Redis.instance().smembers(redis_set): - yield cls(identifier) - - @classmethod - def associated_to(cls, foreign_type, foreign_id): - for identifier in cls.associated_keys(foreign_type, foreign_id): - yield cls(identifier) - - @classmethod - @absorb_connection_error - def associated_keys(cls, foreign_type, foreign_id): - redis_set = cls._redis_association_name(foreign_type, foreign_id) - return Redis.instance().smembers(redis_set) or [] - - @classmethod - def _redis_set_name(cls, kls_name): - # stupidly pluralize (for compatiblity with previous codebase) - return kls_name.lower() + "s" - - @classmethod - def _redis_association_name(cls, foreign_type, foreign_id): - return cls._redis_set_name("%s:%s:%s" % - (foreign_type, foreign_id, cls._redis_name())) - - @property - def identifier(self): - """You DEFINITELY want to define this in your subclass""" - raise NotImplementedError("Your subclass should define identifier") - - @property - def __redis_key(self): - return '%s:%s' % (self._redis_name(), self.identifier) - - def __repr__(self): - return "<%s:%s>" % (self.__class__.__name__, self.identifier) - - def 
keys(self): - return self.state.keys() - - def copy(self): - copyDict = {} - for item in self.keys(): - copyDict[item] = self[item] - return copyDict - - def get(self, item, default): - return self.state.get(item, default) - - def update(self, update_dict): - return self.state.update(update_dict) - - def setdefault(self, item, default): - return self.state.setdefault(item, default) - - def __contains__(self, item): - return item in self.state - - def __getitem__(self, item): - return self.state[item] - - def __setitem__(self, item, val): - self.state[item] = val - return self.state[item] - - def __delitem__(self, item): - """We don't support this""" - raise Exception("Silly monkey, models NEED all their properties.") - - def is_new_record(self): - return self.initial_state == {} - - @absorb_connection_error - def add_to_index(self): - """Each insance of Foo has its id tracked int the set named Foos""" - set_name = self.__class__._redis_set_name(self.__class__.__name__) - Redis.instance().sadd(set_name, self.identifier) - - @absorb_connection_error - def remove_from_index(self): - """Remove id of this instance from the set tracking ids of this type""" - set_name = self.__class__._redis_set_name(self.__class__.__name__) - Redis.instance().srem(set_name, self.identifier) - - @absorb_connection_error - def associate_with(self, foreign_type, foreign_id): - """Add this class id into the set foreign_type:foreign_id:this_types""" - # note the extra 's' on the end is for plurality - # to match the old data without requiring a migration of any sort - self.add_associated_model_to_its_set(foreign_type, foreign_id) - redis_set = self.__class__._redis_association_name(foreign_type, - foreign_id) - Redis.instance().sadd(redis_set, self.identifier) - - @absorb_connection_error - def unassociate_with(self, foreign_type, foreign_id): - """Delete from foreign_type:foreign_id:this_types set""" - redis_set = self.__class__._redis_association_name(foreign_type, - foreign_id) - 
Redis.instance().srem(redis_set, self.identifier) - - def add_associated_model_to_its_set(self, model_type, model_id): - """ - When associating an X to a Y, save Y for newer timestamp, etc, and to - make sure to save it if Y is a new record. - If the model_type isn't found as a usable class, ignore it, this can - happen when associating to things stored in LDAP (user, project, ...). - """ - table = globals() - klsname = model_type.capitalize() - if table.has_key(klsname): - model_class = table[klsname] - model_inst = model_class(model_id) - model_inst.save() - - @absorb_connection_error - def save(self): - """ - update the directory with the state from this model - also add it to the index of items of the same type - then set the initial_state = state so new changes are tracked - """ - # TODO(ja): implement hmset in redis-py and use it - # instead of multiple calls to hset - if self.is_new_record(): - self["create_time"] = utils.isotime() - for key, val in self.state.iteritems(): - Redis.instance().hset(self.__redis_key, key, val) - self.add_to_index() - self.initial_state = dict(self.state) - return True - - @absorb_connection_error - def destroy(self): - """deletes all related records from datastore.""" - logging.info("Destroying datamodel for %s %s", - self.__class__.__name__, self.identifier) - Redis.instance().delete(self.__redis_key) - self.remove_from_index() - return True - diff --git a/nova/network/model.py b/nova/network/model.py index ce9345067..c5c8ce443 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -56,7 +56,7 @@ flags.DEFINE_integer('cloudpipe_start_port', 12000, logging.getLogger().setLevel(logging.DEBUG) -class Vlan(datastore.BasicModel): +class Vlan(): """Tracks vlans assigned to project it the datastore""" def __init__(self, project, vlan): # pylint: disable=W0231 """ @@ -79,7 +79,6 @@ class Vlan(datastore.BasicModel): return instance @classmethod - @datastore.absorb_connection_error def lookup(cls, project): """Returns object 
by project if it exists in datastore or None""" set_name = cls._redis_set_name(cls.__name__) @@ -90,14 +89,12 @@ class Vlan(datastore.BasicModel): return None @classmethod - @datastore.absorb_connection_error def dict_by_project(cls): """A hash of project:vlan""" set_name = cls._redis_set_name(cls.__name__) return datastore.Redis.instance().hgetall(set_name) or {} @classmethod - @datastore.absorb_connection_error def dict_by_vlan(cls): """A hash of vlan:project""" set_name = cls._redis_set_name(cls.__name__) @@ -108,14 +105,12 @@ class Vlan(datastore.BasicModel): return retvals @classmethod - @datastore.absorb_connection_error def all(cls): set_name = cls._redis_set_name(cls.__name__) elements = datastore.Redis.instance().hgetall(set_name) for project in elements: yield cls(project, elements[project]) - @datastore.absorb_connection_error def save(self): """ Vlan saves state into a giant hash named "vlans", with keys of @@ -127,7 +122,6 @@ class Vlan(datastore.BasicModel): self.project_id, self.vlan_id) - @datastore.absorb_connection_error def destroy(self): """Removes the object from the datastore""" set_name = self._redis_set_name(self.__class__.__name__) @@ -143,7 +137,7 @@ class Vlan(datastore.BasicModel): network[start + FLAGS.network_size - 1]) -class Address(datastore.BasicModel): +class Address(): """Represents a fixed ip in the datastore""" override_type = "address" @@ -197,7 +191,7 @@ class PublicAddress(Address): # CLEANUP: # TODO(ja): does vlanpool "keeper" need to know the min/max - # shouldn't FLAGS always win? 
-class BaseNetwork(datastore.BasicModel): +class BaseNetwork(): """Implements basic logic for allocating ips in a network""" override_type = 'network' address_class = Address diff --git a/nova/network/vpn.py b/nova/network/vpn.py index a0e2a7fa1..5eb1c2b20 100644 --- a/nova/network/vpn.py +++ b/nova/network/vpn.py @@ -39,7 +39,7 @@ class NoMorePorts(exception.Error): pass -class NetworkData(datastore.BasicModel): +class NetworkData(): """Manages network host, and vpn ip and port for projects""" def __init__(self, project_id): self.project_id = project_id diff --git a/nova/test.py b/nova/test.py index c7e08734f..9cb826253 100644 --- a/nova/test.py +++ b/nova/test.py @@ -39,6 +39,12 @@ FLAGS = flags.FLAGS flags.DEFINE_bool('fake_tests', True, 'should we use everything for testing') +from sqlalchemy import create_engine +from sqlalchemy.ext.declarative import declarative_base + +engine = create_engine('sqlite:///:memory:', echo=True) +Base = declarative_base() +Base.metadata.create_all(engine) def skip_if_fake(func): """Decorator that skips a test if running in fake mode""" diff --git a/nova/volume/service.py b/nova/volume/service.py index 66163a812..1086b4cd0 100644 --- a/nova/volume/service.py +++ b/nova/volume/service.py @@ -142,7 +142,7 @@ class VolumeService(service.Service): "sudo vgcreate %s %s" % (FLAGS.volume_group, FLAGS.storage_dev)) -class Volume(datastore.BasicModel): +class Volume(): def __init__(self, volume_id=None): self.volume_id = volume_id diff --git a/run_tests.py b/run_tests.py index d90ac8175..f0a5efb7e 100644 --- a/run_tests.py +++ b/run_tests.py @@ -84,11 +84,11 @@ if __name__ == '__main__': if FLAGS.fake_tests: from nova.tests.fake_flags import * # use db 8 for fake tests - FLAGS.redis_db = 8 - if FLAGS.flush_db: - logging.info("Flushing redis datastore") - r = datastore.Redis.instance() - r.flushdb() + #FLAGS.redis_db = 8 + #if FLAGS.flush_db: + # logging.info("Flushing redis datastore") + # r = datastore.Redis.instance() + # r.flushdb() 
else: from nova.tests.real_flags import * -- cgit From d64d0fccca94b073760bcfc19b763b2ab64abf08 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sat, 14 Aug 2010 18:31:23 -0700 Subject: make the fake-ldap system work again --- nova/datastore.py | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) diff --git a/nova/datastore.py b/nova/datastore.py index e69de29bb..8e2519429 100644 --- a/nova/datastore.py +++ b/nova/datastore.py @@ -0,0 +1,53 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Datastore: + +MAKE Sure that ReDIS is running, and your flags are set properly, +before trying to run this. 
+""" + +import logging +import redis + +from nova import flags + +FLAGS = flags.FLAGS +flags.DEFINE_string('redis_host', '127.0.0.1', + 'Host that redis is running on.') +flags.DEFINE_integer('redis_port', 6379, + 'Port that redis is running on.') +flags.DEFINE_integer('redis_db', 0, 'Multiple DB keeps tests away') + + +class Redis(object): + def __init__(self): + if hasattr(self.__class__, '_instance'): + raise Exception('Attempted to instantiate singleton') + + @classmethod + def instance(cls): + if not hasattr(cls, '_instance'): + inst = redis.Redis(host=FLAGS.redis_host, + port=FLAGS.redis_port, + db=FLAGS.redis_db) + cls._instance = inst + return cls._instance + + -- cgit From 8b344451f06f96e846cb4fe87131fd23b5c386cc Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sat, 14 Aug 2010 18:39:00 -0700 Subject: re-add redis clearing --- run_tests.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/run_tests.py b/run_tests.py index f0a5efb7e..d90ac8175 100644 --- a/run_tests.py +++ b/run_tests.py @@ -84,11 +84,11 @@ if __name__ == '__main__': if FLAGS.fake_tests: from nova.tests.fake_flags import * # use db 8 for fake tests - #FLAGS.redis_db = 8 - #if FLAGS.flush_db: - # logging.info("Flushing redis datastore") - # r = datastore.Redis.instance() - # r.flushdb() + FLAGS.redis_db = 8 + if FLAGS.flush_db: + logging.info("Flushing redis datastore") + r = datastore.Redis.instance() + r.flushdb() else: from nova.tests.real_flags import * -- cgit From 5cc8d5839cdb20d588c808c2eac52889365e4454 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sat, 14 Aug 2010 21:24:26 -0700 Subject: more work on trying to get compute tests passing --- nova/auth/manager.py | 10 ++- nova/compute/service.py | 16 ++--- nova/models.py | 148 ++++++++++++----------------------------- nova/network/service.py | 9 +-- nova/tests/compute_unittest.py | 37 +++++++---- 5 files changed, 83 insertions(+), 137 deletions(-) diff --git a/nova/auth/manager.py b/nova/auth/manager.py 
index 064fd78bc..f7f454898 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -31,6 +31,7 @@ import zipfile from nova import crypto from nova import exception from nova import flags +from nova import models from nova import utils from nova.auth import signer from nova.network import vpn @@ -201,6 +202,11 @@ class Project(AuthBase): ip, port = AuthManager().get_project_vpn_data(self) return port + @property + def network(self): + session = models.create_session() + return session.query(models.Network).filter_by(project_id=self.id).first() + def has_manager(self, user): return AuthManager().is_project_manager(user, self) @@ -521,7 +527,9 @@ class AuthManager(object): description, member_users) if project_dict: - return Project(**project_dict) + project = Project(**project_dict) + # FIXME(ja): create network? + return project def add_to_project(self, user, project): """Add user to project""" diff --git a/nova/compute/service.py b/nova/compute/service.py index 820116453..ff27a9b88 100644 --- a/nova/compute/service.py +++ b/nova/compute/service.py @@ -38,7 +38,7 @@ from nova import process from nova import service from nova import utils from nova.compute import disk -from nova.compute import model +from nova import models from nova.compute import power_state from nova.compute.instance_types import INSTANCE_TYPES from nova.network import service as network_service @@ -61,7 +61,6 @@ class ComputeService(service.Service): super(ComputeService, self).__init__() self._instances = {} self._conn = virt_connection.get_connection() - self.instdir = model.InstanceDirectory() # TODO(joshua): This needs to ensure system state, specifically: modprobe aoe def noop(self): @@ -116,19 +115,14 @@ class ComputeService(service.Service): def run_instance(self, instance_id, **_kwargs): """ launch a new instance with specified options """ logging.debug("Starting instance %s..." 
% (instance_id)) - inst = self.instdir.get(instance_id) - # TODO: Get the real security group of launch in here - security_group = "default" + session = models.create_session() + inst = session.query(models.Instance).filter_by(id=instance_id).first() # NOTE(vish): passing network type allows us to express the # network without making a call to network to find # out which type of network to setup - network_service.setup_compute_network( - inst.get('network_type', 'vlan'), - inst['user_id'], - inst['project_id'], - security_group) + network_service.setup_compute_network(inst) - inst['node_name'] = FLAGS.node_name + inst.node_name = FLAGS.node_name inst.save() # TODO(vish) check to make sure the availability zone matches new_inst = Instance(self._conn, name=instance_id, data=inst) diff --git a/nova/models.py b/nova/models.py index 4c739488a..067616029 100644 --- a/nova/models.py +++ b/nova/models.py @@ -1,107 +1,23 @@ from sqlalchemy.orm import relationship, backref, validates from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey, DateTime, Boolean, Text from sqlalchemy.ext.declarative import declarative_base -from auth import * +from nova import auth Base = declarative_base() -class User(Base): - # sqlalchemy - __tablename__ = 'users' - sid = Column(String, primary_key=True) - - # backwards compatibility - @classmethod - def safe_id(cls, obj): - """Safe get object id - - This method will return the id of the object if the object - is of this class, otherwise it will return the original object. - This allows methods to accept objects or ids as paramaters. 
- - """ - if isinstance(obj, cls): - return obj.id - else: - return obj - -# def __init__(self, id, name, access, secret, admin): -# self.id = id -# self.name = name -# self.access = access -# self.secret = secret -# self.admin = admin - - def __getattr__(self, name): - if name == 'id': - return self.uid - else: raise AttributeError, name - - def is_superuser(self): - return AuthManager().is_superuser(self) - - def is_admin(self): - return AuthManager().is_admin(self) - - def has_role(self, role): - return AuthManager().has_role(self, role) - - def add_role(self, role): - return AuthManager().add_role(self, role) - - def remove_role(self, role): - return AuthManager().remove_role(self, role) - - def is_project_member(self, project): - return AuthManager().is_project_member(self, project) - - def is_project_manager(self, project): - return AuthManager().is_project_manager(self, project) - - def generate_key_pair(self, name): - return AuthManager().generate_key_pair(self.id, name) - - def create_key_pair(self, name, public_key, fingerprint): - return AuthManager().create_key_pair(self.id, - name, - public_key, - fingerprint) - - def get_key_pair(self, name): - return AuthManager().get_key_pair(self.id, name) - - def delete_key_pair(self, name): - return AuthManager().delete_key_pair(self.id, name) - - def get_key_pairs(self): - return AuthManager().get_key_pairs(self.id) - - def __repr__(self): - return "User('%s', '%s', '%s', '%s', %s)" % (self.id, - self.name, - self.access, - self.secret, - self.admin) - - - -class Project(Base): - __tablename__ = 'projects' - sid = Column(String, primary_key=True) - class Image(Base): __tablename__ = 'images' - user_sid = Column(String, ForeignKey('users.sid'), nullable=False) - project_sid = Column(String, ForeignKey('projects.sid'), nullable=False) + user_id = Column(String)#, ForeignKey('users.id'), nullable=False) + project_id = Column(String)#, ForeignKey('projects.id'), nullable=False) - sid = Column(String, 
primary_key=True) + id = Column(String, primary_key=True) image_type = Column(String) public = Column(Boolean, default=False) state = Column(String) location = Column(String) arch = Column(String) - default_kernel_sid = Column(String) - default_ramdisk_sid = Column(String) + default_kernel_id = Column(String) + default_ramdisk_id = Column(String) created_at = Column(DateTime) updated_at = Column(DateTime) # auto update on change FIXME @@ -115,13 +31,13 @@ class Image(Base): def validate_state(self, key, state): assert(state in ['available', 'pending', 'disabled']) - @validates('default_kernel_sid') - def validate_kernel_sid(self, key, val): + @validates('default_kernel_id') + def validate_kernel_id(self, key, val): if val != 'machine': assert(val is None) - @validates('default_ramdisk_sid') - def validate_ramdisk_sid(self, key, val): + @validates('default_ramdisk_id') + def validate_ramdisk_id(self, key, val): if val != 'machine': assert(val is None) @@ -131,7 +47,7 @@ class Network(Base): bridge = Column(String) vlan = Column(String) #vpn_port = Column(Integer) - project_sid = Column(String, ForeignKey('projects.sid'), nullable=False) + project_id = Column(String) #, ForeignKey('projects.id'), nullable=False) class PhysicalNode(Base): __tablename__ = 'physical_nodes' @@ -141,16 +57,25 @@ class Instance(Base): __tablename__ = 'instances' id = Column(Integer, primary_key=True) - user_sid = Column(String, ForeignKey('users.sid'), nullable=False) - project_sid = Column(String, ForeignKey('projects.sid')) + user_id = Column(String) #, ForeignKey('users.id'), nullable=False) + project_id = Column(String) #, ForeignKey('projects.id')) + + @property + def user(self): + return auth.manager.AuthManager().get_user(self.user_id) + + @property + def project(self): + return auth.manager.AuthManager().get_project(self.project_id) - image_sid = Column(Integer, ForeignKey('images.sid'), nullable=False) - kernel_sid = Column(String, ForeignKey('images.sid'), nullable=True) - 
ramdisk_sid = Column(String, ForeignKey('images.sid'), nullable=True) + image_id = Column(Integer, ForeignKey('images.id'), nullable=False) + kernel_id = Column(String, ForeignKey('images.id'), nullable=True) + ramdisk_id = Column(String, ForeignKey('images.id'), nullable=True) launch_index = Column(Integer) key_name = Column(String) key_data = Column(Text) + security_group = Column(String) state = Column(String) @@ -161,7 +86,6 @@ class Instance(Base): user_data = Column(Text) -# user = relationship(User, backref=backref('instances', order_by=id)) # ramdisk = relationship(Ramdisk, backref=backref('instances', order_by=id)) # kernel = relationship(Kernel, backref=backref('instances', order_by=id)) # project = relationship(Project, backref=backref('instances', order_by=id)) @@ -182,17 +106,29 @@ class Volume(Base): blade_id = Column(Integer) -if __name__ == '__main__': +engine = None +def create_engine(): + global engine + if engine is not None: + return engine from sqlalchemy import create_engine engine = create_engine('sqlite:///:memory:', echo=True) - Base.metadata.create_all(engine) + Base.metadata.create_all(engine) + return engine +def create_session(engine=None): + if engine is None: + engine = create_engine() from sqlalchemy.orm import sessionmaker Session = sessionmaker(bind=engine) - session = Session() + return Session() + +if __name__ == '__main__': + engine = create_engine() + session = create_session(engine) - instance = Instance(image_sid='as', ramdisk_sid='AS', user_sid='anthony') - user = User(sid='anthony') + instance = Instance(image_id='as', ramdisk_id='AS', user_id='anthony') + user = User(id='anthony') session.add(instance) session.commit() diff --git a/nova/network/service.py b/nova/network/service.py index 9c0f5520b..4be855960 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -61,13 +61,10 @@ def type_to_class(network_type): raise NotFound("Couldn't find %s network type" % network_type) -def 
setup_compute_network(network_type, user_id, project_id, security_group): +def setup_compute_network(instance): """Sets up the network on a compute host""" - srv = type_to_class(network_type) - srv.setup_compute_network(network_type, - user_id, - project_id, - security_group) + srv = type_to_class(instance.project.network.kind) + srv.setup_compute_network(inst) def get_host_for_project(project_id): diff --git a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py index da0f82e3a..c079f9a4d 100644 --- a/nova/tests/compute_unittest.py +++ b/nova/tests/compute_unittest.py @@ -25,7 +25,8 @@ from nova import exception from nova import flags from nova import test from nova import utils -from nova.compute import model +from nova import models +from nova.auth import manager from nova.compute import service @@ -60,21 +61,31 @@ class ComputeConnectionTestCase(test.TrialTestCase): self.flags(connection_type='fake', fake_storage=True) self.compute = service.ComputeService() + self.manager = manager.AuthManager() + user = self.manager.create_user('fake', 'fake', 'fake') + project = self.manager.create_project('fake', 'fake', 'fake') + + def tearDown(self): + self.manager.delete_user('fake') + self.manager.delete_project('fake') def create_instance(self): - instdir = model.InstanceDirectory() - inst = instdir.new() + session = models.create_session() + + inst = models.Instance(user_id='fake', project_id='fake', image_id='ami-test') + session.add(inst) + session.commit() # TODO(ja): add ami, ari, aki, user_data - inst['reservation_id'] = 'r-fakeres' - inst['launch_time'] = '10' - inst['user_id'] = 'fake' - inst['project_id'] = 'fake' - inst['instance_type'] = 'm1.tiny' - inst['node_name'] = FLAGS.node_name - inst['mac_address'] = utils.generate_mac() - inst['ami_launch_index'] = 0 - inst.save() - return inst['instance_id'] + # inst['reservation_id'] = 'r-fakeres' + # inst['launch_time'] = '10' + #inst['user_id'] = 'fake' + #inst['project_id'] = 'fake' + 
#inst['instance_type'] = 'm1.tiny' + #inst['node_name'] = FLAGS.node_name + #inst['mac_address'] = utils.generate_mac() + #inst['ami_launch_index'] = 0 + #inst.save() + return inst.id @defer.inlineCallbacks def test_run_describe_terminate(self): -- cgit From 3ee748bb6f55ad341606919901c4c17a82d069fd Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sat, 14 Aug 2010 22:55:04 -0700 Subject: ComputeConnectionTestCase is almost working again --- nova/auth/manager.py | 6 +- nova/compute/service.py | 187 ++++++++++++++--------------------------- nova/models.py | 18 ++-- nova/network/service.py | 21 ++--- nova/tests/compute_unittest.py | 38 +++------ nova/virt/fake.py | 4 +- 6 files changed, 106 insertions(+), 168 deletions(-) diff --git a/nova/auth/manager.py b/nova/auth/manager.py index f7f454898..4a813c861 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -528,7 +528,11 @@ class AuthManager(object): member_users) if project_dict: project = Project(**project_dict) - # FIXME(ja): create network? 
+ # FIXME(ja): EVIL HACK - this should poll from a pool + session = models.create_session() + net = models.Network(project_id=project.id, kind='vlan') + session.add(net) + session.commit() return project def add_to_project(self, user, project): diff --git a/nova/compute/service.py b/nova/compute/service.py index ff27a9b88..dc6a93bdb 100644 --- a/nova/compute/service.py +++ b/nova/compute/service.py @@ -68,11 +68,15 @@ class ComputeService(service.Service): return defer.succeed('PONG') def get_instance(self, instance_id): - # inst = self.instdir.get(instance_id) - # return inst - if self.instdir.exists(instance_id): - return Instance.fromName(self._conn, instance_id) - return None + session = models.create_session() + return session.query(models.Instance).filter_by(id=instance_id).one() + + def update_state(self, instance_id): + session = models.create_session() + inst = session.query(models.Instance).filter_by(id=instance_id).one() + # FIXME(ja): include other fields from state? + inst.state = self._conn.get_info(instance_id)['state'] + session.flush() @exception.wrap_exception def adopt_instances(self): @@ -87,14 +91,6 @@ class ComputeService(service.Service): pass return defer.succeed(len(self._instances)) - @exception.wrap_exception - def describe_instances(self): - retval = {} - for inst in self.instdir.by_node(FLAGS.node_name): - retval[inst['instance_id']] = ( - Instance.fromName(self._conn, inst['instance_id'])) - return retval - @defer.inlineCallbacks def report_state(self, nodename, daemon): # TODO(termie): make this pattern be more elegant. 
-todd @@ -111,6 +107,7 @@ class ComputeService(service.Service): logging.exception("model server went away") yield + @defer.inlineCallbacks @exception.wrap_exception def run_instance(self, instance_id, **_kwargs): """ launch a new instance with specified options """ @@ -121,56 +118,82 @@ class ComputeService(service.Service): # network without making a call to network to find # out which type of network to setup network_service.setup_compute_network(inst) - inst.node_name = FLAGS.node_name - inst.save() + session.commit() + # TODO(vish) check to make sure the availability zone matches - new_inst = Instance(self._conn, name=instance_id, data=inst) - logging.info("Instances current state is %s", new_inst.state) - if new_inst.is_running(): - raise exception.Error("Instance is already running") - new_inst.spawn() + inst.set_state(power_state.NOSTATE, 'spawning') + session.commit() + try: + yield self._conn.spawn(inst) + except Exception, ex: + logging.debug(ex) + inst.set_state(power_state.SHUTDOWN) + + self.update_state(instance_id) + + @defer.inlineCallbacks @exception.wrap_exception def terminate_instance(self, instance_id): """ terminate an instance on this machine """ logging.debug("Got told to terminate instance %s" % instance_id) - instance = self.get_instance(instance_id) - # inst = self.instdir.get(instance_id) - if not instance: - raise exception.Error( - 'trying to terminate unknown instance: %s' % instance_id) - d = instance.destroy() - # d.addCallback(lambda x: inst.destroy()) - return d + session = models.create_session() + instance = session.query(models.Instance).filter_by(id=instance_id).one() + if instance.state == power_state.SHUTOFF: + # self.datamodel.destroy() FIXME: RE-ADD ????? + raise exception.Error('trying to destroy already destroyed' + ' instance: %s' % instance_id) + + instance.set_state(power_state.NOSTATE, 'shutting_down') + yield self._conn.destroy(instance) + # FIXME(ja): should we keep it in a terminated state for a bit? 
+ session.delete(instance) + session.flush() + + @defer.inlineCallbacks @exception.wrap_exception def reboot_instance(self, instance_id): """ reboot an instance on this server KVM doesn't support reboot, so we terminate and restart """ + self.update_state(instance_id) instance = self.get_instance(instance_id) - if not instance: + + # FIXME(ja): this is only checking the model state - not state on disk? + if instance.state != power_state.RUNNING: raise exception.Error( - 'trying to reboot unknown instance: %s' % instance_id) - return instance.reboot() + 'trying to reboot a non-running' + 'instance: %s (state: %s excepted: %s)' % (instance.id, instance.state, power_state.RUNNING)) + + logging.debug('rebooting instance %s' % instance.id) + instance.set_state(power_state.NOSTATE, 'rebooting') + yield self._conn.reboot(instance) + self.update_state(instance_id) - @defer.inlineCallbacks @exception.wrap_exception def get_console_output(self, instance_id): """ send the console output for an instance """ + # FIXME: Abstract this for Xen + logging.debug("Getting console output for %s" % (instance_id)) - inst = self.instdir.get(instance_id) - instance = self.get_instance(instance_id) - if not instance: - raise exception.Error( - 'trying to get console log for unknown: %s' % instance_id) - rv = yield instance.console_output() + session = models.create_session() + inst = self.get_instance(instance_id) + + if FLAGS.connection_type == 'libvirt': + fname = os.path.abspath( + os.path.join(FLAGS.instances_path, inst.id, 'console.log')) + with open(fname, 'r') as f: + output = f.read() + else: + output = 'FAKE CONSOLE OUTPUT' + # TODO(termie): this stuff belongs in the API layer, no need to # munge the data we send to ourselves output = {"InstanceId" : instance_id, "Timestamp" : "2", - "output" : base64.b64encode(rv)} - defer.returnValue(output) + "output" : base64.b64encode(output)} + return output @defer.inlineCallbacks @exception.wrap_exception @@ -264,29 +287,6 @@ class 
Instance(object): self.datamodel.save() logging.debug("Finished init of Instance with id of %s" % name) - @classmethod - def fromName(cls, conn, name): - """ use the saved data for reloading the instance """ - instdir = model.InstanceDirectory() - instance = instdir.get(name) - return cls(conn=conn, name=name, data=instance) - - def set_state(self, state_code, state_description=None): - self.datamodel['state'] = state_code - if not state_description: - state_description = power_state.name(state_code) - self.datamodel['state_description'] = state_description - self.datamodel.save() - - @property - def state(self): - # it is a string in datamodel - return int(self.datamodel['state']) - - @property - def name(self): - return self.datamodel['name'] - def is_pending(self): return (self.state == power_state.NOSTATE or self.state == 'pending') @@ -297,64 +297,3 @@ class Instance(object): logging.debug("Instance state is: %s" % self.state) return (self.state == power_state.RUNNING or self.state == 'running') - def describe(self): - return self.datamodel - - def info(self): - result = self._conn.get_info(self.name) - result['node_name'] = FLAGS.node_name - return result - - def update_state(self): - self.datamodel.update(self.info()) - self.set_state(self.state) - self.datamodel.save() # Extra, but harmless - - @defer.inlineCallbacks - @exception.wrap_exception - def destroy(self): - if self.is_destroyed(): - self.datamodel.destroy() - raise exception.Error('trying to destroy already destroyed' - ' instance: %s' % self.name) - - self.set_state(power_state.NOSTATE, 'shutting_down') - yield self._conn.destroy(self) - self.datamodel.destroy() - - @defer.inlineCallbacks - @exception.wrap_exception - def reboot(self): - if not self.is_running(): - raise exception.Error( - 'trying to reboot a non-running' - 'instance: %s (state: %s)' % (self.name, self.state)) - - logging.debug('rebooting instance %s' % self.name) - self.set_state(power_state.NOSTATE, 'rebooting') - yield 
self._conn.reboot(self) - self.update_state() - - @defer.inlineCallbacks - @exception.wrap_exception - def spawn(self): - self.set_state(power_state.NOSTATE, 'spawning') - logging.debug("Starting spawn in Instance") - try: - yield self._conn.spawn(self) - except Exception, ex: - logging.debug(ex) - self.set_state(power_state.SHUTDOWN) - self.update_state() - - @exception.wrap_exception - def console_output(self): - # FIXME: Abstract this for Xen - if FLAGS.connection_type == 'libvirt': - fname = os.path.abspath( - os.path.join(self.datamodel['basepath'], 'console.log')) - with open(fname, 'r') as f: - console = f.read() - else: - console = 'FAKE CONSOLE OUTPUT' - return defer.succeed(console) diff --git a/nova/models.py b/nova/models.py index 067616029..51600bd24 100644 --- a/nova/models.py +++ b/nova/models.py @@ -22,7 +22,6 @@ class Image(Base): created_at = Column(DateTime) updated_at = Column(DateTime) # auto update on change FIXME - @validates('image_type') def validate_image_type(self, key, image_type): assert(image_type in ['machine', 'kernel', 'ramdisk', 'raw']) @@ -46,6 +45,7 @@ class Network(Base): id = Column(Integer, primary_key=True) bridge = Column(String) vlan = Column(String) + kind = Column(String) #vpn_port = Column(Integer) project_id = Column(String) #, ForeignKey('projects.id'), nullable=False) @@ -77,7 +77,8 @@ class Instance(Base): key_data = Column(Text) security_group = Column(String) - state = Column(String) + state = Column(Integer) + state_description = Column(String) hostname = Column(String) physical_node_id = Column(Integer) @@ -86,6 +87,13 @@ class Instance(Base): user_data = Column(Text) + def set_state(self, state_code, state_description=None): + from nova.compute import power_state + self.state = state_code + if not state_description: + state_description = power_state.name(state_code) + self.state_description = state_description + # ramdisk = relationship(Ramdisk, backref=backref('instances', order_by=id)) # kernel = 
relationship(Kernel, backref=backref('instances', order_by=id)) # project = relationship(Project, backref=backref('instances', order_by=id)) @@ -95,9 +103,9 @@ class Instance(Base): # power_state = what we have # task_state = transitory and may trigger power state transition - @validates('state') - def validate_state(self, key, state): - assert(state in ['nostate', 'running', 'blocked', 'paused', 'shutdown', 'shutoff', 'crashed']) + #@validates('state') + #def validate_state(self, key, state): + # assert(state in ['nostate', 'running', 'blocked', 'paused', 'shutdown', 'shutoff', 'crashed']) class Volume(Base): __tablename__ = 'volumes' diff --git a/nova/network/service.py b/nova/network/service.py index 4be855960..b6777efc7 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -29,6 +29,7 @@ from nova.exception import NotFound from nova.network import exception from nova.network import model from nova.network import vpn +from nova.network import linux_net FLAGS = flags.FLAGS @@ -64,7 +65,7 @@ def type_to_class(network_type): def setup_compute_network(instance): """Sets up the network on a compute host""" srv = type_to_class(instance.project.network.kind) - srv.setup_compute_network(inst) + srv.setup_compute_network(instance) def get_host_for_project(project_id): @@ -115,8 +116,7 @@ class BaseNetworkService(service.Service): pass @classmethod - def setup_compute_network(cls, user_id, project_id, security_group, - *args, **kwargs): + def setup_compute_network(cls, instance, *args, **kwargs): """Sets up matching network for compute hosts""" raise NotImplementedError() @@ -144,8 +144,7 @@ class FlatNetworkService(BaseNetworkService): """Basic network where no vlans are used""" @classmethod - def setup_compute_network(cls, user_id, project_id, security_group, - *args, **kwargs): + def setup_compute_network(cls, instance, *args, **kwargs): """Network is created manually""" pass @@ -242,13 +241,11 @@ class VlanNetworkService(BaseNetworkService): 
vpn.NetworkData.create(project_id) @classmethod - def setup_compute_network(cls, user_id, project_id, security_group, - *args, **kwargs): + def setup_compute_network(cls, instance, *args, **kwargs): """Sets up matching network for compute hosts""" # NOTE(vish): Use BridgedNetwork instead of DHCPNetwork because # we don't want to run dnsmasq on the client machines - net = model.BridgedNetwork.get_network_for_project( - user_id, - project_id, - security_group) - net.express() + net = instance.project.network + # FIXME(ja): hack - uncomment this: + #linux_net.vlan_create(net) + #linux_net.bridge_create(net) diff --git a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py index c079f9a4d..b2a89a345 100644 --- a/nova/tests/compute_unittest.py +++ b/nova/tests/compute_unittest.py @@ -91,31 +91,25 @@ class ComputeConnectionTestCase(test.TrialTestCase): def test_run_describe_terminate(self): instance_id = self.create_instance() - rv = yield self.compute.run_instance(instance_id) + yield self.compute.run_instance(instance_id) - rv = yield self.compute.describe_instances() - logging.info("Running instances: %s", rv) - self.assertEqual(rv[instance_id].name, instance_id) + session = models.create_session() + instances = session.query(models.Instance).all() + logging.info("Running instances: %s", instances) + self.assertEqual(len(instances), 1) - rv = yield self.compute.terminate_instance(instance_id) + yield self.compute.terminate_instance(instance_id) - rv = yield self.compute.describe_instances() - logging.info("After terminating instances: %s", rv) - self.assertEqual(rv, {}) + instances = session.query(models.Instance).all() + logging.info("After terminating instances: %s", instances) + self.assertEqual(len(instances), 0) @defer.inlineCallbacks def test_reboot(self): instance_id = self.create_instance() - rv = yield self.compute.run_instance(instance_id) - - rv = yield self.compute.describe_instances() - self.assertEqual(rv[instance_id].name, instance_id) - + 
yield self.compute.run_instance(instance_id) yield self.compute.reboot_instance(instance_id) - - rv = yield self.compute.describe_instances() - self.assertEqual(rv[instance_id].name, instance_id) - rv = yield self.compute.terminate_instance(instance_id) + yield self.compute.terminate_instance(instance_id) @defer.inlineCallbacks def test_console_output(self): @@ -129,10 +123,6 @@ class ComputeConnectionTestCase(test.TrialTestCase): @defer.inlineCallbacks def test_run_instance_existing(self): instance_id = self.create_instance() - rv = yield self.compute.run_instance(instance_id) - - rv = yield self.compute.describe_instances() - self.assertEqual(rv[instance_id].name, instance_id) - - self.assertRaises(exception.Error, self.compute.run_instance, instance_id) - rv = yield self.compute.terminate_instance(instance_id) + yield self.compute.run_instance(instance_id) + self.assertFailure(self.compute.run_instance(instance_id), exception.Error) + yield self.compute.terminate_instance(instance_id) diff --git a/nova/virt/fake.py b/nova/virt/fake.py index d9ae5ac96..90ea9d053 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -46,14 +46,14 @@ class FakeConnection(object): def spawn(self, instance): fake_instance = FakeInstance() - self.instances[instance.name] = fake_instance + self.instances[instance.id] = fake_instance fake_instance._state = power_state.RUNNING def reboot(self, instance): pass def destroy(self, instance): - del self.instances[instance.name] + del self.instances[instance.id] def get_info(self, instance_id): i = self.instances[instance_id] -- cgit From 295a56c665be7b7461ff41141a93cffb79ab4909 Mon Sep 17 00:00:00 2001 From: Sleepsonthefloor Date: Sat, 14 Aug 2010 07:08:34 -0700 Subject: remove more direct session interactions --- nova/compute/service.py | 16 ++++++---------- nova/models.py | 12 ++++++++---- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/nova/compute/service.py b/nova/compute/service.py index dc6a93bdb..4e6a2c944 100644 
--- a/nova/compute/service.py +++ b/nova/compute/service.py @@ -68,15 +68,13 @@ class ComputeService(service.Service): return defer.succeed('PONG') def get_instance(self, instance_id): - session = models.create_session() - return session.query(models.Instance).filter_by(id=instance_id).one() + return models.Instance.find(instance_id) def update_state(self, instance_id): - session = models.create_session() - inst = session.query(models.Instance).filter_by(id=instance_id).one() + inst = models.Instance.find(instance_id) # FIXME(ja): include other fields from state? inst.state = self._conn.get_info(instance_id)['state'] - session.flush() + inst.save() @exception.wrap_exception def adopt_instances(self): @@ -112,18 +110,17 @@ class ComputeService(service.Service): def run_instance(self, instance_id, **_kwargs): """ launch a new instance with specified options """ logging.debug("Starting instance %s..." % (instance_id)) - session = models.create_session() - inst = session.query(models.Instance).filter_by(id=instance_id).first() + inst = models.Instance.find(instance_id) # NOTE(vish): passing network type allows us to express the # network without making a call to network to find # out which type of network to setup network_service.setup_compute_network(inst) inst.node_name = FLAGS.node_name - session.commit() + inst.save() # TODO(vish) check to make sure the availability zone matches inst.set_state(power_state.NOSTATE, 'spawning') - session.commit() + inst.save() try: yield self._conn.spawn(inst) @@ -177,7 +174,6 @@ class ComputeService(service.Service): # FIXME: Abstract this for Xen logging.debug("Getting console output for %s" % (instance_id)) - session = models.create_session() inst = self.get_instance(instance_id) if FLAGS.connection_type == 'libvirt': diff --git a/nova/models.py b/nova/models.py index 79273965b..62341a24c 100644 --- a/nova/models.py +++ b/nova/models.py @@ -33,6 +33,12 @@ class NovaBase(object): session = NovaBase.get_session() return 
session.query(cls).all() + @classmethod + def find(cls, obj_id): + session = NovaBase.get_session() + #print cls + return session.query(cls).filter_by(id=obj_id).one() + def save(self): session = NovaBase.get_session() session.add(self) @@ -144,15 +150,13 @@ class Volume(Base): blade_id = Column(Integer) -def create_engine(): - return NovaBase.get_engine(); def create_session(engine=None): return NovaBase.get_session() if __name__ == '__main__': - engine = create_engine() - session = create_session(engine) + engine = NovasBase.create_engine() + session = NovasBase.create_session(engine) instance = Instance(image_id='as', ramdisk_id='AS', user_id='anthony') user = User(id='anthony') -- cgit From 665ef27e95d89c518154bfc6b2d9a53929dfeaef Mon Sep 17 00:00:00 2001 From: Sleepsonthefloor Date: Sun, 15 Aug 2010 13:36:01 -0700 Subject: add refresh on model --- nova/models.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/nova/models.py b/nova/models.py index 9cbebca73..561a722fc 100644 --- a/nova/models.py +++ b/nova/models.py @@ -53,6 +53,10 @@ class NovaBase(object): session.delete(self) session.flush() + def refresh(self): + session = NovaBase.get_session() + session.refresh(self) + class Image(Base, NovaBase): __tablename__ = 'images' user_id = Column(String)#, ForeignKey('users.id'), nullable=False) -- cgit From fb6bf337bc2fe702307842b57e33b9f5f9011147 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Sun, 15 Aug 2010 22:48:54 +0100 Subject: Rework virt.xenapi's concurrency model. There were many places where we were inadvertently blocking the reactor thread. The reworking puts all calls to XenAPI on background threads, so that they won't block the reactor thread. Long-lived operations (VM start, reboot, etc) are invoked asynchronously at the XenAPI level (Async.VM.start, etc). These return a XenAPI task. We relinquish the background thread at this point, so as not to hold threads in the pool for too long, and use reactor.callLater to poll the task. 
This combination of techniques means that we don't block the reactor thread at all, and at the same time we don't hold lots of threads waiting for long-running operations. There is a FIXME in here: get_info does not conform to these new rules. Changes are required in compute.service before we can make get_info non-blocking. --- nova/virt/xenapi.py | 178 +++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 147 insertions(+), 31 deletions(-) diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index 9fe15644f..6b41061c1 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -16,15 +16,33 @@ """ A connection to XenServer or Xen Cloud Platform. + +The concurrency model for this class is as follows: + +All XenAPI calls are on a thread (using t.i.t.deferToThread, or the decorator +deferredToThread). They are remote calls, and so may hang for the usual +reasons. They should not be allowed to block the reactor thread. + +All long-running XenAPI calls (VM.start, VM.reboot, etc) are called async +(using XenAPI.VM.async_start etc). These return a task, which can then be +polled for completion. Polling is handled using reactor.callLater. + +This combination of techniques means that we don't block the reactor thread at +all, and at the same time we don't hold lots of threads waiting for +long-running operations. + +FIXME: get_info currently doesn't conform to these rules, and will block the +reactor thread if the VM.get_by_name_label or VM.get_record calls block. """ import logging import xmlrpclib from twisted.internet import defer +from twisted.internet import reactor from twisted.internet import task +from twisted.internet.threads import deferToThread -from nova import exception from nova import flags from nova import process from nova.auth.manager import AuthManager @@ -43,6 +61,9 @@ flags.DEFINE_string('xenapi_connection_username', flags.DEFINE_string('xenapi_connection_password', None, 'Password for connection to XenServer/Xen Cloud Platform. 
Used only if connection_type=xenapi.') +flags.DEFINE_float('xenapi_task_poll_interval', + 0.5, + 'The interval used for polling of remote tasks (Async.VM.start, etc). Used only if connection_type=xenapi.') def get_connection(_): @@ -61,6 +82,12 @@ def get_connection(_): return XenAPIConnection(url, username, password) +def deferredToThread(f): + def g(*args, **kwargs): + return deferToThread(f, *args, **kwargs) + return g + + class XenAPIConnection(object): def __init__(self, url, user, pw): @@ -72,9 +99,8 @@ class XenAPIConnection(object): for vm in self._conn.xenapi.VM.get_all()] @defer.inlineCallbacks - @exception.wrap_exception def spawn(self, instance): - vm = yield self.lookup(instance.name) + vm = yield self._lookup(instance.name) if vm is not None: raise Exception('Attempted to create non-unique name %s' % instance.name) @@ -93,22 +119,28 @@ class XenAPIConnection(object): user = AuthManager().get_user(instance.datamodel['user_id']) project = AuthManager().get_project(instance.datamodel['project_id']) - vdi_uuid = yield self.fetch_image( + vdi_uuid = yield self._fetch_image( instance.datamodel['image_id'], user, project, True) - kernel = yield self.fetch_image( + kernel = yield self._fetch_image( instance.datamodel['kernel_id'], user, project, False) - ramdisk = yield self.fetch_image( + ramdisk = yield self._fetch_image( instance.datamodel['ramdisk_id'], user, project, False) - vdi_ref = yield self._conn.xenapi.VDI.get_by_uuid(vdi_uuid) + vdi_ref = yield self._call_xenapi('VDI.get_by_uuid', vdi_uuid) - vm_ref = yield self.create_vm(instance, kernel, ramdisk) - yield self.create_vbd(vm_ref, vdi_ref, 0, True) + vm_ref = yield self._create_vm(instance, kernel, ramdisk) + yield self._create_vbd(vm_ref, vdi_ref, 0, True) if network_ref: yield self._create_vif(vm_ref, network_ref, mac_address) - yield self._conn.xenapi.VM.start(vm_ref, False, False) + logging.debug('Starting VM %s...', vm_ref) + yield self._call_xenapi('VM.start', vm_ref, False, False) + 
logging.info('Spawning VM %s created %s.', instance.name, vm_ref) - def create_vm(self, instance, kernel, ramdisk): + @defer.inlineCallbacks + def _create_vm(self, instance, kernel, ramdisk): + """Create a VM record. Returns a Deferred that gives the new + VM reference.""" + mem = str(long(instance.datamodel['memory_kb']) * 1024) vcpus = str(instance.datamodel['vcpus']) rec = { @@ -141,12 +173,16 @@ class XenAPIConnection(object): 'other_config': {}, } logging.debug('Created VM %s...', instance.name) - vm_ref = self._conn.xenapi.VM.create(rec) + vm_ref = yield self._call_xenapi('VM.create', rec) logging.debug('Created VM %s as %s.', instance.name, vm_ref) - return vm_ref + defer.returnValue(vm_ref) - def create_vbd(self, vm_ref, vdi_ref, userdevice, bootable): + @defer.inlineCallbacks + def _create_vbd(self, vm_ref, vdi_ref, userdevice, bootable): + """Create a VBD record. Returns a Deferred that gives the new + VBD reference.""" + vbd_rec = {} vbd_rec['VM'] = vm_ref vbd_rec['VDI'] = vdi_ref @@ -161,13 +197,17 @@ class XenAPIConnection(object): vbd_rec['qos_algorithm_params'] = {} vbd_rec['qos_supported_algorithms'] = [] logging.debug('Creating VBD for VM %s, VDI %s ... ', vm_ref, vdi_ref) - vbd_ref = self._conn.xenapi.VBD.create(vbd_rec) + vbd_ref = yield self._call_xenapi('VBD.create', vbd_rec) logging.debug('Created VBD %s for VM %s, VDI %s.', vbd_ref, vm_ref, vdi_ref) - return vbd_ref + defer.returnValue(vbd_ref) + @defer.inlineCallbacks def _create_vif(self, vm_ref, network_ref, mac_address): + """Create a VIF record. Returns a Deferred that gives the new + VIF reference.""" + vif_rec = {} vif_rec['device'] = '0' vif_rec['network']= network_ref @@ -179,27 +219,31 @@ class XenAPIConnection(object): vif_rec['qos_algorithm_params'] = {} logging.debug('Creating VIF for VM %s, network %s ... 
', vm_ref, network_ref) - vif_ref = self._conn.xenapi.VIF.create(vif_rec) + vif_ref = yield self._call_xenapi('VIF.create', vif_rec) logging.debug('Created VIF %s for VM %s, network %s.', vif_ref, vm_ref, network_ref) - return vif_ref + defer.returnValue(vif_ref) + @defer.inlineCallbacks def _find_network_with_bridge(self, bridge): expr = 'field "bridge" = "%s"' % bridge - networks = self._conn.xenapi.network.get_all_records_where(expr) + networks = yield self._call_xenapi('network.get_all_records_where', + expr) if len(networks) == 1: - return networks.keys()[0] + defer.returnValue(networks.keys()[0]) elif len(networks) > 1: raise Exception('Found non-unique network for bridge %s' % bridge) else: raise Exception('Found no network for bridge %s' % bridge) - def fetch_image(self, image, user, project, use_sr): + @defer.inlineCallbacks + def _fetch_image(self, image, user, project, use_sr): """use_sr: True to put the image as a VDI in an SR, False to place it on dom0's filesystem. The former is for VM disks, the latter for - its kernel and ramdisk (if external kernels are being used).""" + its kernel and ramdisk (if external kernels are being used). 
+ Returns a Deferred that gives the new VDI UUID.""" url = images.image_url(image) access = AuthManager().get_access_key(user, project) @@ -211,23 +255,31 @@ class XenAPIConnection(object): args['password'] = user.secret if use_sr: args['add_partition'] = 'true' - return self._call_plugin('objectstore', fn, args) + task = yield self._async_call_plugin('objectstore', fn, args) + uuid = yield self._wait_for_task(task) + defer.returnValue(uuid) + @defer.inlineCallbacks def reboot(self, instance): - vm = self.lookup(instance.name) + vm = yield self._lookup(instance.name) if vm is None: raise Exception('instance not present %s' % instance.name) - yield self._conn.xenapi.VM.clean_reboot(vm) + task = yield self._call_xenapi('Async.VM.clean_reboot', vm) + yield self._wait_for_task(task) + + @defer.inlineCallbacks def destroy(self, instance): - vm = self.lookup(instance.name) + vm = yield self._lookup(instance.name) if vm is None: raise Exception('instance not present %s' % instance.name) - yield self._conn.xenapi.VM.destroy(vm) + task = yield self._call_xenapi('Async.VM.destroy', vm) + yield self._wait_for_task(task) + def get_info(self, instance_id): - vm = self.lookup(instance_id) + vm = self._lookup_blocking(instance_id) if vm is None: raise Exception('instance not present %s' % instance_id) rec = self._conn.xenapi.VM.get_record(vm) @@ -237,7 +289,13 @@ class XenAPIConnection(object): 'num_cpu': rec['VCPUs_max'], 'cpu_time': 0} - def lookup(self, i): + + @deferredToThread + def _lookup(self, i): + return self._lookup_blocking(i) + + + def _lookup_blocking(self, i): vms = self._conn.xenapi.VM.get_by_name_label(i) n = len(vms) if n == 0: @@ -248,9 +306,55 @@ class XenAPIConnection(object): return vms[0] - def _call_plugin(self, plugin, fn, args): + def _wait_for_task(self, task): + """Return a Deferred that will give the result of the given task. 
+ The task is polled until it completes.""" + d = defer.Deferred() + reactor.callLater(0, self._poll_task, task, d) + return d + + + @deferredToThread + def _poll_task(self, task, deferred): + """Poll the given XenAPI task, and fire the given Deferred if we + get a result.""" + try: + #logging.debug('Polling task %s...', task) + status = self._conn.xenapi.task.get_status(task) + if status == 'pending': + reactor.callLater(FLAGS.xenapi_task_poll_interval, + self._poll_task, task, deferred) + elif status == 'success': + result = self._conn.xenapi.task.get_result(task) + logging.info('Task %s status: success. %s', task, result) + deferred.callback(_parse_xmlrpc_value(result)) + else: + error_info = self._conn.xenapi.task.get_error_info(task) + logging.warn('Task %s status: %s. %s', task, status, + error_info) + deferred.errback(XenAPI.Failure(error_info)) + #logging.debug('Polling task %s done.', task) + except Exception, exn: + logging.warn(exn) + deferred.errback(exn) + + + @deferredToThread + def _call_xenapi(self, method, *args): + """Call the specified XenAPI method on a background thread. Returns + a Deferred for the result.""" + f = self._conn.xenapi + for m in method.split('.'): + f = f.__getattr__(m) + return f(*args) + + + @deferredToThread + def _async_call_plugin(self, plugin, fn, args): + """Call Async.host.call_plugin on a background thread. Returns a + Deferred with the task reference.""" return _unwrap_plugin_exceptions( - self._conn.xenapi.host.call_plugin, + self._conn.xenapi.Async.host.call_plugin, self._get_xenapi_host(), plugin, fn, args) @@ -286,3 +390,15 @@ def _unwrap_plugin_exceptions(func, *args, **kwargs): except xmlrpclib.ProtocolError, exn: logging.debug("Got exception: %s", exn) raise + + +def _parse_xmlrpc_value(val): + """Parse the given value as if it were an XML-RPC value. 
This is + sometimes used as the format for the task.result field.""" + if not val: + return val + x = xmlrpclib.loads( + '' + + val + + '') + return x[0][0] -- cgit From d1185adcf6f060c125274d31cf11a4f750521d24 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Sun, 15 Aug 2010 23:11:52 +0100 Subject: Add documentation to spawn, reboot, and destroy stating that those functions should return Deferreds. Update the fake implementations to do so (the libvirt ones already do, and making the xenapi ones do so is the subject of a current merge request). --- nova/virt/fake.py | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 105837181..155833f3f 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -24,6 +24,8 @@ This module also documents the semantics of real hypervisor connections. import logging +from twisted.internet import defer + from nova.compute import power_state @@ -89,10 +91,13 @@ class FakeConnection(object): This function should use the data there to guide the creation of the new instance. - Once this function successfully completes, the instance should be + The work will be done asynchronously. This function returns a + Deferred that allows the caller to detect when it is complete. + + Once this successfully completes, the instance should be running (power_state.RUNNING). - If this function fails, any partial instance should be completely + If this fails, any partial instance should be completely cleaned up, and the virtualization platform should be in the state that it was before this call began. 
""" @@ -100,6 +105,7 @@ class FakeConnection(object): fake_instance = FakeInstance() self.instances[instance.name] = fake_instance fake_instance._state = power_state.RUNNING + return defer.succeed(None) def reboot(self, instance): """ @@ -107,8 +113,11 @@ class FakeConnection(object): The given parameter is an instance of nova.compute.service.Instance, and so the instance is being specified as instance.name. + + The work will be done asynchronously. This function returns a + Deferred that allows the caller to detect when it is complete. """ - pass + return defer.succeed(None) def destroy(self, instance): """ @@ -116,8 +125,12 @@ class FakeConnection(object): The given parameter is an instance of nova.compute.service.Instance, and so the instance is being specified as instance.name. + + The work will be done asynchronously. This function returns a + Deferred that allows the caller to detect when it is complete. """ del self.instances[instance.name] + return defer.succeed(None) def get_info(self, instance_id): """ -- cgit From 33de18633fc6bb5fae64869dfe9963bf81f7f167 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sun, 15 Aug 2010 15:55:53 -0700 Subject: refactoring volume and some cleanup in model and compute --- nova/compute/service.py | 24 ++-- nova/models.py | 38 ++++-- nova/tests/volume_unittest.py | 47 +++---- nova/volume/service.py | 280 ++++++++++++++---------------------------- 4 files changed, 155 insertions(+), 234 deletions(-) diff --git a/nova/compute/service.py b/nova/compute/service.py index 4e6a2c944..7f6f3ad6e 100644 --- a/nova/compute/service.py +++ b/nova/compute/service.py @@ -67,13 +67,10 @@ class ComputeService(service.Service): """ simple test of an AMQP message call """ return defer.succeed('PONG') - def get_instance(self, instance_id): - return models.Instance.find(instance_id) - def update_state(self, instance_id): inst = models.Instance.find(instance_id) # FIXME(ja): include other fields from state? 
- inst.state = self._conn.get_info(instance_id)['state'] + inst.state = self._conn.get_info(instance_id)['state'] inst.save() @exception.wrap_exception @@ -109,6 +106,8 @@ class ComputeService(service.Service): @exception.wrap_exception def run_instance(self, instance_id, **_kwargs): """ launch a new instance with specified options """ + if instance_id in self._conn.list_instances(): + raise exception.Error("Instance has already been created") logging.debug("Starting instance %s..." % (instance_id)) inst = models.Instance.find(instance_id) # NOTE(vish): passing network type allows us to express the @@ -135,19 +134,18 @@ class ComputeService(service.Service): def terminate_instance(self, instance_id): """ terminate an instance on this machine """ logging.debug("Got told to terminate instance %s" % instance_id) - session = models.create_session() - instance = session.query(models.Instance).filter_by(id=instance_id).one() + inst = models.Instance.find(instance_id) - if instance.state == power_state.SHUTOFF: + if inst.state == power_state.SHUTOFF: # self.datamodel.destroy() FIXME: RE-ADD ????? raise exception.Error('trying to destroy already destroyed' ' instance: %s' % instance_id) - instance.set_state(power_state.NOSTATE, 'shutting_down') - yield self._conn.destroy(instance) + inst.set_state(power_state.NOSTATE, 'shutting_down') + inst.save() + yield self._conn.destroy(inst) # FIXME(ja): should we keep it in a terminated state for a bit? - session.delete(instance) - session.flush() + inst.delete() @defer.inlineCallbacks @exception.wrap_exception @@ -155,7 +153,7 @@ class ComputeService(service.Service): """ reboot an instance on this server KVM doesn't support reboot, so we terminate and restart """ self.update_state(instance_id) - instance = self.get_instance(instance_id) + instance = models.Instance.find(instance_id) # FIXME(ja): this is only checking the model state - not state on disk? 
if instance.state != power_state.RUNNING: @@ -174,7 +172,7 @@ class ComputeService(service.Service): # FIXME: Abstract this for Xen logging.debug("Getting console output for %s" % (instance_id)) - inst = self.get_instance(instance_id) + inst = models.Instance.find(instance_id) if FLAGS.connection_type == 'libvirt': fname = os.path.abspath( diff --git a/nova/models.py b/nova/models.py index 62341a24c..c397270db 100644 --- a/nova/models.py +++ b/nova/models.py @@ -1,7 +1,8 @@ -from sqlalchemy.orm import relationship, backref, validates +from sqlalchemy.orm import relationship, backref, validates, exc from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey, DateTime, Boolean, Text from sqlalchemy.ext.declarative import declarative_base from nova import auth +from nova import exception Base = declarative_base() @@ -14,9 +15,9 @@ class NovaBase(object): @classmethod def create_engine(cls): if NovaBase._engine is not None: - return _engine + return NovaBase._engine from sqlalchemy import create_engine - NovaBase._engine = create_engine('sqlite:///:memory:', echo=True) + NovaBase._engine = create_engine('sqlite:///:memory:', echo=False) Base.metadata.create_all(NovaBase._engine) return NovaBase._engine @@ -24,7 +25,7 @@ class NovaBase(object): def get_session(cls): from sqlalchemy.orm import sessionmaker if NovaBase._session == None: - NovaBase.create_engine(); + NovaBase.create_engine() NovaBase._session = sessionmaker(bind=NovaBase._engine)() return NovaBase._session @@ -37,13 +38,21 @@ class NovaBase(object): def find(cls, obj_id): session = NovaBase.get_session() #print cls - return session.query(cls).filter_by(id=obj_id).one() + try: + return session.query(cls).filter_by(id=obj_id).one() + except exc.NoResultFound: + raise exception.NotFound("No model for id %s" % obj_id) def save(self): session = NovaBase.get_session() session.add(self) session.commit() + def delete(self): + session = NovaBase.get_session() + session.delete(self) + 
session.flush() + class Image(Base, NovaBase): __tablename__ = 'images' user_id = Column(String)#, ForeignKey('users.id'), nullable=False) @@ -143,20 +152,33 @@ class Instance(Base, NovaBase): #def validate_state(self, key, state): # assert(state in ['nostate', 'running', 'blocked', 'paused', 'shutdown', 'shutoff', 'crashed']) -class Volume(Base): +class Volume(Base, NovaBase): __tablename__ = 'volumes' id = Column(Integer, primary_key=True) + volume_id = Column(String) shelf_id = Column(Integer) blade_id = Column(Integer) + user_id = Column(String) #, ForeignKey('users.id'), nullable=False) + project_id = Column(String) #, ForeignKey('projects.id')) + # FIXME: should be physical_node_id = Column(Integer) + node_name = Column(String) + size = Column(Integer) + alvailability_zone = Column(String) # FIXME foreign key? + instance_id = Column(Integer, ForeignKey('volumes.id'), nullable=True) + mountpoint = Column(String) + attach_time = Column(String) # FIXME datetime + status = Column(String) # FIXME enum? + attach_status = Column(String) # FIXME enum + delete_on_termination = Column(Boolean) def create_session(engine=None): return NovaBase.get_session() if __name__ == '__main__': - engine = NovasBase.create_engine() - session = NovasBase.create_session(engine) + engine = NovaBase.create_engine() + session = NovaBase.create_session(engine) instance = Instance(image_id='as', ramdisk_id='AS', user_id='anthony') user = User(id='anthony') diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py index 2a07afe69..e979995fd 100644 --- a/nova/tests/volume_unittest.py +++ b/nova/tests/volume_unittest.py @@ -17,15 +17,14 @@ # under the License. 
import logging -import shutil -import tempfile from twisted.internet import defer -from nova import compute from nova import exception from nova import flags +from nova import models from nova import test +from nova.compute import service as compute_service from nova.volume import service as volume_service @@ -36,29 +35,22 @@ class VolumeTestCase(test.TrialTestCase): def setUp(self): logging.getLogger().setLevel(logging.DEBUG) super(VolumeTestCase, self).setUp() - self.compute = compute.service.ComputeService() - self.volume = None - self.tempdir = tempfile.mkdtemp() + self.compute = compute_service.ComputeService() self.flags(connection_type='fake', - fake_storage=True, - aoe_export_dir=self.tempdir) + fake_storage=True) self.volume = volume_service.VolumeService() - def tearDown(self): - shutil.rmtree(self.tempdir) - @defer.inlineCallbacks def test_run_create_volume(self): vol_size = '0' user_id = 'fake' project_id = 'fake' volume_id = yield self.volume.create_volume(vol_size, user_id, project_id) - # TODO(termie): get_volume returns differently than create_volume self.assertEqual(volume_id, - volume_service.get_volume(volume_id)['volume_id']) + models.Volume.find(volume_id).id) - rv = self.volume.delete_volume(volume_id) - self.assertRaises(exception.Error, volume_service.get_volume, volume_id) + yield self.volume.delete_volume(volume_id) + self.assertRaises(exception.NotFound, models.Volume.find, volume_id) @defer.inlineCallbacks def test_too_big_volume(self): @@ -100,32 +92,31 @@ class VolumeTestCase(test.TrialTestCase): project_id = 'fake' mountpoint = "/dev/sdf" volume_id = yield self.volume.create_volume(vol_size, user_id, project_id) - volume_obj = volume_service.get_volume(volume_id) - volume_obj.start_attach(instance_id, mountpoint) + vol = models.Volume.find(volume_id) + self.volume.start_attach(volume_id, instance_id, mountpoint) if FLAGS.fake_tests: - volume_obj.finish_attach() + self.volume.finish_attach(volume_id) else: rv = yield 
self.compute.attach_volume(instance_id, volume_id, mountpoint) - self.assertEqual(volume_obj['status'], "in-use") - self.assertEqual(volume_obj['attach_status'], "attached") - self.assertEqual(volume_obj['instance_id'], instance_id) - self.assertEqual(volume_obj['mountpoint'], mountpoint) + self.assertEqual(vol.status, "in-use") + self.assertEqual(vol.attach_status, "attached") + self.assertEqual(vol.instance_id, instance_id) + self.assertEqual(vol.mountpoint, mountpoint) self.assertFailure(self.volume.delete_volume(volume_id), exception.Error) - volume_obj.start_detach() + self.volume.start_detach(volume_id) if FLAGS.fake_tests: - volume_obj.finish_detach() + self.volume.finish_detach(volume_id) else: rv = yield self.volume.detach_volume(instance_id, volume_id) - volume_obj = volume_service.get_volume(volume_id) - self.assertEqual(volume_obj['status'], "available") + self.assertEqual(vol.status, "available") rv = self.volume.delete_volume(volume_id) self.assertRaises(exception.Error, - volume_service.get_volume, + models.Volume.find, volume_id) @defer.inlineCallbacks @@ -135,7 +126,7 @@ class VolumeTestCase(test.TrialTestCase): project_id = 'fake' shelf_blades = [] def _check(volume_id): - vol = volume_service.get_volume(volume_id) + vol = models.Volume.find(volume_id) shelf_blade = '%s.%s' % (vol['shelf_id'], vol['blade_id']) self.assert_(shelf_blade not in shelf_blades) shelf_blades.append(shelf_blade) diff --git a/nova/volume/service.py b/nova/volume/service.py index 1086b4cd0..76f7e9695 100644 --- a/nova/volume/service.py +++ b/nova/volume/service.py @@ -27,9 +27,9 @@ import os from twisted.internet import defer -from nova import datastore from nova import exception from nova import flags +from nova import models from nova import process from nova import service from nova import utils @@ -65,15 +65,6 @@ flags.DEFINE_boolean('fake_storage', False, class NoMoreBlades(exception.Error): pass -def get_volume(volume_id): - """ Returns a redis-backed volume object 
""" - volume_class = Volume - if FLAGS.fake_storage: - volume_class = FakeVolume - vol = volume_class.lookup(volume_id) - if vol: - return vol - raise exception.Error("Volume does not exist") class VolumeService(service.Service): """ @@ -83,10 +74,7 @@ class VolumeService(service.Service): """ def __init__(self): super(VolumeService, self).__init__() - self.volume_class = Volume - if FLAGS.fake_storage: - self.volume_class = FakeVolume - self._init_volume_group() + self._exec_init_volumes() @defer.inlineCallbacks @validate.rangetest(size=(0, 1000)) @@ -97,218 +85,140 @@ class VolumeService(service.Service): Volume at this point has size, owner, and zone. """ logging.debug("Creating volume of size: %s" % (size)) - vol = yield self.volume_class.create(size, user_id, project_id) - logging.debug("restarting exports") - yield self._restart_exports() - defer.returnValue(vol['volume_id']) - def by_node(self, node_id): - """ returns a list of volumes for a node """ - for volume_id in datastore.Redis.instance().smembers('volumes:%s' % (node_id)): - yield self.volume_class(volume_id=volume_id) - - @property - def all(self): - """ returns a list of all volumes """ - for volume_id in datastore.Redis.instance().smembers('volumes'): - yield self.volume_class(volume_id=volume_id) + vol = models.Volume() + vol.volume_id = utils.generate_uid('vol') + vol.node_name = FLAGS.node_name + vol.size = size + vol.user_id = user_id + vol.project_id = project_id + vol.availability_zone = FLAGS.storage_availability_zone + vol.status = "creating" # creating | available | in-use + vol.attach_status = "detached" # attaching | attached | detaching | detached + vol.save() + yield self._exec_create_volume(vol) + yield self._setup_export(vol) + # TODO(joshua): We need to trigger a fanout message + # for aoe-discover on all the nodes + vol.status = "available" + vol.save() + logging.debug("restarting exports") + yield self._exec_ensure_exports() + defer.returnValue(vol.id) @defer.inlineCallbacks def 
delete_volume(self, volume_id): logging.debug("Deleting volume with id of: %s" % (volume_id)) - vol = get_volume(volume_id) - if vol['attach_status'] == "attached": + vol = models.Volume.find(volume_id) + if vol.attach_status == "attached": raise exception.Error("Volume is still attached") - if vol['node_name'] != FLAGS.node_name: + if vol.node_name != FLAGS.node_name: raise exception.Error("Volume is not local to this node") - yield vol.destroy() + yield self._exec_delete_volume(vol) + yield vol.delete() defer.returnValue(True) @defer.inlineCallbacks - def _restart_exports(self): - if FLAGS.fake_storage: - return - # NOTE(vish): these commands sometimes sends output to stderr for warnings - yield process.simple_execute("sudo vblade-persist auto all", error_ok=1) - yield process.simple_execute("sudo vblade-persist start all", error_ok=1) - - @defer.inlineCallbacks - def _init_volume_group(self): - if FLAGS.fake_storage: - return - yield process.simple_execute( - "sudo pvcreate %s" % (FLAGS.storage_dev)) - yield process.simple_execute( - "sudo vgcreate %s %s" % (FLAGS.volume_group, - FLAGS.storage_dev)) - -class Volume(): - - def __init__(self, volume_id=None): - self.volume_id = volume_id - super(Volume, self).__init__() - - @property - def identifier(self): - return self.volume_id - - def default_state(self): - return {"volume_id": self.volume_id, - "node_name": "unassigned"} - - @classmethod - @defer.inlineCallbacks - def create(cls, size, user_id, project_id): - volume_id = utils.generate_uid('vol') - vol = cls(volume_id) - vol['node_name'] = FLAGS.node_name - vol['size'] = size - vol['user_id'] = user_id - vol['project_id'] = project_id - vol['availability_zone'] = FLAGS.storage_availability_zone - vol["instance_id"] = 'none' - vol["mountpoint"] = 'none' - vol['attach_time'] = 'none' - vol['status'] = "creating" # creating | available | in-use - vol['attach_status'] = "detached" # attaching | attached | detaching | detached - vol['delete_on_termination'] = 
'False' - vol.save() - yield vol._create_lv() - yield vol._setup_export() - # TODO(joshua) - We need to trigger a fanout message for aoe-discover on all the nodes - vol['status'] = "available" - vol.save() - defer.returnValue(vol) - - def start_attach(self, instance_id, mountpoint): - """ """ - self['instance_id'] = instance_id - self['mountpoint'] = mountpoint - self['status'] = "in-use" - self['attach_status'] = "attaching" - self['attach_time'] = utils.isotime() - self['delete_on_termination'] = 'False' - self.save() - - def finish_attach(self): - """ """ - self['attach_status'] = "attached" - self.save() - - def start_detach(self): - """ """ - self['attach_status'] = "detaching" - self.save() - - def finish_detach(self): - self['instance_id'] = None - self['mountpoint'] = None - self['status'] = "available" - self['attach_status'] = "detached" - self.save() - - def save(self): - is_new = self.is_new_record() - super(Volume, self).save() - if is_new: - redis = datastore.Redis.instance() - key = self.__devices_key - # TODO(vish): these should be added by admin commands - more = redis.scard(self._redis_association_name("node", - self['node_name'])) - if (not redis.exists(key) and not more): - for shelf_id in range(FLAGS.first_shelf_id, - FLAGS.last_shelf_id + 1): - for blade_id in range(FLAGS.blades_per_shelf): - redis.sadd(key, "%s.%s" % (shelf_id, blade_id)) - self.associate_with("node", self['node_name']) - - @defer.inlineCallbacks - def destroy(self): - yield self._remove_export() - yield self._delete_lv() - self.unassociate_with("node", self['node_name']) - if self.get('shelf_id', None) and self.get('blade_id', None): - redis = datastore.Redis.instance() - key = self.__devices_key - redis.sadd(key, "%s.%s" % (self['shelf_id'], self['blade_id'])) - super(Volume, self).destroy() - - @defer.inlineCallbacks - def _create_lv(self): - if str(self['size']) == '0': + def _exec_create_volume(self, vol): + if str(vol.size) == '0': sizestr = '100M' else: - sizestr = 
'%sG' % self['size'] + sizestr = '%sG' % vol.size yield process.simple_execute( "sudo lvcreate -L %s -n %s %s" % (sizestr, - self['volume_id'], + vol.volume_id, FLAGS.volume_group), error_ok=1) @defer.inlineCallbacks - def _delete_lv(self): + def _exec_delete_volume(self, vol): yield process.simple_execute( "sudo lvremove -f %s/%s" % (FLAGS.volume_group, - self['volume_id']), error_ok=1) - - @property - def __devices_key(self): - return 'volume_devices:%s' % FLAGS.node_name + vol.volume_id), error_ok=1) @defer.inlineCallbacks - def _setup_export(self): - redis = datastore.Redis.instance() - key = self.__devices_key - device = redis.spop(key) + def _setup_export(self, vol): + # FIXME: device needs to be a pool + device = "1.1" if not device: raise NoMoreBlades() (shelf_id, blade_id) = device.split('.') - self['aoe_device'] = "e%s.%s" % (shelf_id, blade_id) - self['shelf_id'] = shelf_id - self['blade_id'] = blade_id - self.save() - yield self._exec_setup_export() + vol.aoe_device = "e%s.%s" % (shelf_id, blade_id) + vol.shelf_id = shelf_id + vol.blade_id = blade_id + vol.save() + yield self._exec_setup_export(vol) @defer.inlineCallbacks - def _exec_setup_export(self): + def _exec_setup_export(self, vol): + if FLAGS.fake_storage: + return yield process.simple_execute( "sudo vblade-persist setup %s %s %s /dev/%s/%s" % - (self['shelf_id'], - self['blade_id'], + (self, vol['shelf_id'], + vol.blade_id, FLAGS.aoe_eth_dev, FLAGS.volume_group, - self['volume_id']), error_ok=1) + vol.volume_id), error_ok=1) @defer.inlineCallbacks - def _remove_export(self): - if not self.get('shelf_id', None) or not self.get('blade_id', None): + def _remove_export(self, vol): + if not vol.shelf_id or not vol.blade_id: defer.returnValue(False) - yield self._exec_remove_export() + yield self._exec_remove_export(vol) defer.returnValue(True) @defer.inlineCallbacks - def _exec_remove_export(self): + def _exec_remove_export(self, vol): + if FLAGS.fake_storage: + return yield process.simple_execute( 
- "sudo vblade-persist stop %s %s" % (self['shelf_id'], - self['blade_id']), error_ok=1) + "sudo vblade-persist stop %s %s" % (self, vol.shelf_id, + vol.blade_id), error_ok=1) yield process.simple_execute( - "sudo vblade-persist destroy %s %s" % (self['shelf_id'], - self['blade_id']), error_ok=1) - + "sudo vblade-persist destroy %s %s" % (self, vol.shelf_id, + vol.blade_id), error_ok=1) + @defer.inlineCallbacks + def _exec_ensure_exports(self): + if FLAGS.fake_storage: + return + # NOTE(vish): these commands sometimes sends output to stderr for warnings + yield process.simple_execute("sudo vblade-persist auto all", error_ok=1) + yield process.simple_execute("sudo vblade-persist start all", error_ok=1) + @defer.inlineCallbacks + def _exec_init_volumes(self): + if FLAGS.fake_storage: + return + yield process.simple_execute( + "sudo pvcreate %s" % (FLAGS.storage_dev)) + yield process.simple_execute( + "sudo vgcreate %s %s" % (FLAGS.volume_group, + FLAGS.storage_dev)) -class FakeVolume(Volume): - def _create_lv(self): - pass + def start_attach(self, volume_id, instance_id, mountpoint): + vol = models.Volume.find(volume_id) + vol.instance_id = instance_id + vol.mountpoint = mountpoint + vol.status = "in-use" + vol.attach_status = "attaching" + vol.attach_time = utils.isotime() + vol.save() - def _exec_setup_export(self): - fname = os.path.join(FLAGS.aoe_export_dir, self['aoe_device']) - f = file(fname, "w") - f.close() + def finish_attach(self, volume_id): + vol = models.Volume.find(volume_id) + vol.attach_status = "attached" + vol.save() - def _exec_remove_export(self): - os.unlink(os.path.join(FLAGS.aoe_export_dir, self['aoe_device'])) + def start_detach(self, volume_id): + vol = models.Volume.find(volume_id) + vol.attach_status = "detaching" + vol.save() - def _delete_lv(self): - pass + def finish_detach(self, volume_id): + vol = models.Volume.find(volume_id) + vol.instance_id = None + vol.mountpoint = None + vol.status = "available" + vol.attach_status = "detached" 
+ vol.save() -- cgit From 11aa7a7c959783d48e624707d59d30ccdd8b2733 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sun, 15 Aug 2010 16:20:50 -0700 Subject: don't try to create and destroy lvs in fake mode --- nova/volume/service.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/nova/volume/service.py b/nova/volume/service.py index 76f7e9695..4ca3ba2a5 100644 --- a/nova/volume/service.py +++ b/nova/volume/service.py @@ -120,6 +120,8 @@ class VolumeService(service.Service): @defer.inlineCallbacks def _exec_create_volume(self, vol): + if FLAGS.fake_storage: + return if str(vol.size) == '0': sizestr = '100M' else: @@ -132,6 +134,8 @@ class VolumeService(service.Service): @defer.inlineCallbacks def _exec_delete_volume(self, vol): + if FLAGS.fake_storage: + return yield process.simple_execute( "sudo lvremove -f %s/%s" % (FLAGS.volume_group, vol.volume_id), error_ok=1) -- cgit From 0c5b2dc5e2f215ab6b8023e571c5b537e7fa730e Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sun, 15 Aug 2010 16:37:06 -0700 Subject: typos --- nova/tests/volume_unittest.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py index e979995fd..91706580f 100644 --- a/nova/tests/volume_unittest.py +++ b/nova/tests/volume_unittest.py @@ -127,11 +127,11 @@ class VolumeTestCase(test.TrialTestCase): shelf_blades = [] def _check(volume_id): vol = models.Volume.find(volume_id) - shelf_blade = '%s.%s' % (vol['shelf_id'], vol['blade_id']) + shelf_blade = '%s.%s' % (vol.shelf_id, vol.blade_id) self.assert_(shelf_blade not in shelf_blades) shelf_blades.append(shelf_blade) logging.debug("got %s" % shelf_blade) - vol.destroy() + vol.delete() deferreds = [] for i in range(5): d = self.volume.create_volume(vol_size, user_id, project_id) -- cgit From fa70aefb00e487102564b92f6d32047dd8998054 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 16 Aug 2010 01:51:28 -0700 Subject: fix launching and describing 
instances to work with sqlalchemy --- nova/compute/libvirt.xml.template | 3 +- nova/compute/service.py | 77 +++------------------------ nova/endpoint/cloud.py | 106 ++++++++++++++++++------------------ nova/models.py | 18 ++++++- nova/virt/libvirt_conn.py | 109 +++++++++++++++++++------------------- nova/volume/service.py | 1 - 6 files changed, 131 insertions(+), 183 deletions(-) diff --git a/nova/compute/libvirt.xml.template b/nova/compute/libvirt.xml.template index 307f9d03a..17bd79b7c 100644 --- a/nova/compute/libvirt.xml.template +++ b/nova/compute/libvirt.xml.template @@ -1,7 +1,7 @@ %(name)s - hvm + hvm %(basepath)s/kernel %(basepath)s/ramdisk root=/dev/vda1 console=ttyS0 @@ -26,5 +26,4 @@ - %(nova)s diff --git a/nova/compute/service.py b/nova/compute/service.py index 7f6f3ad6e..b80ef3740 100644 --- a/nova/compute/service.py +++ b/nova/compute/service.py @@ -70,7 +70,7 @@ class ComputeService(service.Service): def update_state(self, instance_id): inst = models.Instance.find(instance_id) # FIXME(ja): include other fields from state? - inst.state = self._conn.get_info(instance_id)['state'] + inst.state = self._conn.get_info(inst.name)['state'] inst.save() @exception.wrap_exception @@ -106,7 +106,7 @@ class ComputeService(service.Service): @exception.wrap_exception def run_instance(self, instance_id, **_kwargs): """ launch a new instance with specified options """ - if instance_id in self._conn.list_instances(): + if str(instance_id) in self._conn.list_instances(): raise exception.Error("Instance has already been created") logging.debug("Starting instance %s..." 
% (instance_id)) inst = models.Instance.find(instance_id) @@ -119,12 +119,11 @@ class ComputeService(service.Service): # TODO(vish) check to make sure the availability zone matches inst.set_state(power_state.NOSTATE, 'spawning') - inst.save() try: yield self._conn.spawn(inst) - except Exception, ex: - logging.debug(ex) + except: + logging.exception("Failed to spawn instance %s" % inst.name) inst.set_state(power_state.SHUTDOWN) self.update_state(instance_id) @@ -142,7 +141,6 @@ class ComputeService(service.Service): ' instance: %s' % instance_id) inst.set_state(power_state.NOSTATE, 'shutting_down') - inst.save() yield self._conn.destroy(inst) # FIXME(ja): should we keep it in a terminated state for a bit? inst.delete() @@ -159,9 +157,9 @@ class ComputeService(service.Service): if instance.state != power_state.RUNNING: raise exception.Error( 'trying to reboot a non-running' - 'instance: %s (state: %s excepted: %s)' % (instance.id, instance.state, power_state.RUNNING)) + 'instance: %s (state: %s excepted: %s)' % (instance.name, instance.state, power_state.RUNNING)) - logging.debug('rebooting instance %s' % instance.id) + logging.debug('rebooting instance %s' % instance.name) instance.set_state(power_state.NOSTATE, 'rebooting') yield self._conn.reboot(instance) self.update_state(instance_id) @@ -176,7 +174,7 @@ class ComputeService(service.Service): if FLAGS.connection_type == 'libvirt': fname = os.path.abspath( - os.path.join(FLAGS.instances_path, inst.id, 'console.log')) + os.path.join(FLAGS.instances_path, inst.name, 'console.log')) with open(fname, 'r') as f: output = f.read() else: @@ -230,64 +228,3 @@ class Group(object): class ProductCode(object): def __init__(self, product_code): self.product_code = product_code - - -class Instance(object): - - NOSTATE = 0x00 - RUNNING = 0x01 - BLOCKED = 0x02 - PAUSED = 0x03 - SHUTDOWN = 0x04 - SHUTOFF = 0x05 - CRASHED = 0x06 - - def __init__(self, conn, name, data): - """ spawn an instance with a given name """ - self._conn = 
conn - # TODO(vish): this can be removed after data has been updated - # data doesn't seem to have a working iterator so in doesn't work - if data.get('owner_id', None) is not None: - data['user_id'] = data['owner_id'] - data['project_id'] = data['owner_id'] - self.datamodel = data - - size = data.get('instance_type', FLAGS.default_instance_type) - if size not in INSTANCE_TYPES: - raise exception.Error('invalid instance type: %s' % size) - - self.datamodel.update(INSTANCE_TYPES[size]) - - self.datamodel['name'] = name - self.datamodel['instance_id'] = name - self.datamodel['basepath'] = data.get( - 'basepath', os.path.abspath( - os.path.join(FLAGS.instances_path, self.name))) - self.datamodel['memory_kb'] = int(self.datamodel['memory_mb']) * 1024 - self.datamodel.setdefault('image_id', FLAGS.default_image) - self.datamodel.setdefault('kernel_id', FLAGS.default_kernel) - self.datamodel.setdefault('ramdisk_id', FLAGS.default_ramdisk) - self.datamodel.setdefault('project_id', self.datamodel['user_id']) - self.datamodel.setdefault('bridge_name', None) - #self.datamodel.setdefault('key_data', None) - #self.datamodel.setdefault('key_name', None) - #self.datamodel.setdefault('addressing_type', None) - - # TODO(joshua) - The ugly non-flat ones - self.datamodel['groups'] = data.get('security_group', 'default') - # TODO(joshua): Support product codes somehow - self.datamodel.setdefault('product_codes', None) - - self.datamodel.save() - logging.debug("Finished init of Instance with id of %s" % name) - - def is_pending(self): - return (self.state == power_state.NOSTATE or self.state == 'pending') - - def is_destroyed(self): - return self.state == power_state.SHUTOFF - - def is_running(self): - logging.debug("Instance state is: %s" % self.state) - return (self.state == power_state.RUNNING or self.state == 'running') - diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 5366acec7..b68c13456 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -31,6 
+31,7 @@ from twisted.internet import defer from nova import datastore from nova import exception from nova import flags +from nova import models from nova import rpc from nova import utils from nova.auth import rbac @@ -403,46 +404,43 @@ class CloudController(object): def _format_instances(self, context, reservation_id = None): reservations = {} if context.user.is_admin(): - instgenerator = self.instdir.all + instgenerator = models.Instance.all() else: - instgenerator = self.instdir.by_project(context.project.id) + instgenerator = models.Instance.all() # FIXME for instance in instgenerator: - res_id = instance.get('reservation_id', 'Unknown') + res_id = instance.reservation_id if reservation_id != None and reservation_id != res_id: continue if not context.user.is_admin(): if instance['image_id'] == FLAGS.vpn_image_id: continue i = {} - i['instance_id'] = instance.get('instance_id', None) - i['image_id'] = instance.get('image_id', None) - i['instance_state'] = { - 'code': instance.get('state', 0), - 'name': instance.get('state_description', 'pending') + i['instanceId'] = instance.name + i['imageId'] = instance.image_id + i['instanceState'] = { + 'code': instance.state, + 'name': instance.state_description } - i['public_dns_name'] = network_model.get_public_ip_for_instance( - i['instance_id']) - i['private_dns_name'] = instance.get('private_dns_name', None) + i['public_dns_name'] = None #network_model.get_public_ip_for_instance( + # i['instance_id']) + i['private_dns_name'] = instance.fixed_ip if not i['public_dns_name']: i['public_dns_name'] = i['private_dns_name'] - i['dns_name'] = instance.get('dns_name', None) - i['key_name'] = instance.get('key_name', None) + i['dns_name'] = None + i['key_name'] = instance.key_name if context.user.is_admin(): i['key_name'] = '%s (%s, %s)' % (i['key_name'], - instance.get('project_id', None), - instance.get('node_name', '')) - i['product_codes_set'] = self._convert_to_set( - instance.get('product_codes', None), 'product_code') - 
i['instance_type'] = instance.get('instance_type', None) - i['launch_time'] = instance.get('launch_time', None) - i['ami_launch_index'] = instance.get('ami_launch_index', - None) + instance.project_id, + 'node_name') # FIXME + i['product_codes_set'] = self._convert_to_set([], 'product_codes') + i['instance_type'] = instance.instance_type + i['launch_time'] = instance.created_at + i['ami_launch_index'] = instance.launch_index if not reservations.has_key(res_id): r = {} r['reservation_id'] = res_id - r['owner_id'] = instance.get('project_id', None) - r['group_set'] = self._convert_to_set( - instance.get('groups', None), 'group_id') + r['owner_id'] = instance.project_id + r['group_set'] = self._convert_to_set([], 'groups') r['instances_set'] = [] reservations[res_id] = r reservations[res_id]['instances_set'].append(i) @@ -528,7 +526,7 @@ class CloudController(object): defer.returnValue('%s.%s' %(FLAGS.network_topic, host)) @rbac.allow('projectmanager', 'sysadmin') - @defer.inlineCallbacks + #@defer.inlineCallbacks def run_instances(self, context, **kwargs): # make sure user can access the image # vpn image is private so it doesn't show up on lists @@ -560,46 +558,46 @@ class CloudController(object): raise exception.ApiError('Key Pair %s not found' % kwargs['key_name']) key_data = key_pair.public_key - network_topic = yield self._get_network_topic(context) + # network_topic = yield self._get_network_topic(context) # TODO: Get the real security group of launch in here security_group = "default" for num in range(int(kwargs['max_count'])): is_vpn = False if image_id == FLAGS.vpn_image_id: is_vpn = True - inst = self.instdir.new() - allocate_data = yield rpc.call(network_topic, - {"method": "allocate_fixed_ip", - "args": {"user_id": context.user.id, - "project_id": context.project.id, - "security_group": security_group, - "is_vpn": is_vpn, - "hostname": inst.instance_id}}) - inst['image_id'] = image_id - inst['kernel_id'] = kernel_id - inst['ramdisk_id'] = ramdisk_id - 
inst['user_data'] = kwargs.get('user_data', '') - inst['instance_type'] = kwargs.get('instance_type', 'm1.small') - inst['reservation_id'] = reservation_id - inst['launch_time'] = launch_time - inst['key_data'] = key_data or '' - inst['key_name'] = kwargs.get('key_name', '') - inst['user_id'] = context.user.id - inst['project_id'] = context.project.id - inst['ami_launch_index'] = num - inst['security_group'] = security_group - inst['hostname'] = inst.instance_id + inst = models.Instance() + #allocate_data = yield rpc.call(network_topic, + # {"method": "allocate_fixed_ip", + # "args": {"user_id": context.user.id, + # "project_id": context.project.id, + # "security_group": security_group, + # "is_vpn": is_vpn, + # "hostname": inst.instance_id}}) + allocate_data = {'mac_address': utils.generate_mac(), + 'fixed_ip': '192.168.0.100'} + inst.image_id = image_id + inst.kernel_id = kernel_id + inst.ramdisk_id = ramdisk_id + inst.user_data = kwargs.get('user_data', '') + inst.instance_type = kwargs.get('instance_type', 'm1.small') + inst.reservation_id = reservation_id + inst.key_data = key_data + inst.key_name = kwargs.get('key_name', None) + inst.user_id = context.user.id + inst.project_id = context.project.id + inst.launch_index = num + inst.security_group = security_group + inst.hostname = inst.id for (key, value) in allocate_data.iteritems(): - inst[key] = value - + setattr(inst, key, value) inst.save() rpc.cast(FLAGS.compute_topic, {"method": "run_instance", - "args": {"instance_id": inst.instance_id}}) + "args": {"instance_id": inst.id}}) logging.debug("Casting to node for %s's instance with IP of %s" % - (context.user.name, inst['private_dns_name'])) - # TODO: Make Network figure out the network name from ip. 
- defer.returnValue(self._format_instances(context, reservation_id)) + (context.user.name, inst.fixed_ip)) + # defer.returnValue(self._format_instances(context, reservation_id)) + return self._format_instances(context, reservation_id) @rbac.allow('projectmanager', 'sysadmin') @defer.inlineCallbacks diff --git a/nova/models.py b/nova/models.py index c397270db..9cbebca73 100644 --- a/nova/models.py +++ b/nova/models.py @@ -17,7 +17,7 @@ class NovaBase(object): if NovaBase._engine is not None: return NovaBase._engine from sqlalchemy import create_engine - NovaBase._engine = create_engine('sqlite:///:memory:', echo=False) + NovaBase._engine = create_engine('sqlite:////root/nova.sqlite', echo=False) Base.metadata.create_all(NovaBase._engine) return NovaBase._engine @@ -91,6 +91,11 @@ class Network(Base): bridge = Column(String) vlan = Column(String) kind = Column(String) + + @property + def bridge_name(self): + # HACK: this should be set on creation + return 'br100' #vpn_port = Column(Integer) project_id = Column(String) #, ForeignKey('projects.id'), nullable=False) @@ -113,6 +118,12 @@ class Instance(Base, NovaBase): def project(self): return auth.manager.AuthManager().get_project(self.project_id) + # FIXME: make this opaque somehow + @property + def name(self): + return "i-%s" % self.id + + image_id = Column(Integer, ForeignKey('images.id'), nullable=False) kernel_id = Column(String, ForeignKey('images.id'), nullable=True) ramdisk_id = Column(String, ForeignKey('images.id'), nullable=True) @@ -132,12 +143,17 @@ class Instance(Base, NovaBase): user_data = Column(Text) + reservation_id = Column(String) + mac_address = Column(String) + fixed_ip = Column(String) + def set_state(self, state_code, state_description=None): from nova.compute import power_state self.state = state_code if not state_description: state_description = power_state.name(state_code) self.state_description = state_description + self.save() # ramdisk = relationship(Ramdisk, backref=backref('instances', 
order_by=id)) # kernel = relationship(Kernel, backref=backref('instances', order_by=id)) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 13305be0f..ef285b86e 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -86,7 +86,7 @@ class LibvirtConnection(object): try: virt_dom = self._conn.lookupByName(instance.name) virt_dom.destroy() - except Exception, _err: + except Exception as _err: pass # If the instance is already terminated, we're still happy d = defer.Deferred() @@ -98,7 +98,7 @@ class LibvirtConnection(object): timer = task.LoopingCall(f=None) def _wait_for_shutdown(): try: - instance.update_state() + instance.set_state(self.get_info(instance.name)['state']) if instance.state == power_state.SHUTDOWN: timer.stop() d.callback(None) @@ -112,7 +112,7 @@ class LibvirtConnection(object): def _cleanup(self, instance): - target = os.path.abspath(instance.datamodel['basepath']) + target = os.path.join(FLAGS.instances_path, instance.name) logging.info("Deleting instance files at %s", target) if os.path.exists(target): shutil.rmtree(target) @@ -121,7 +121,7 @@ class LibvirtConnection(object): @defer.inlineCallbacks @exception.wrap_exception def reboot(self, instance): - xml = self.toXml(instance) + xml = self.to_xml(instance) yield self._conn.lookupByName(instance.name).destroy() yield self._conn.createXML(xml, 0) @@ -129,8 +129,8 @@ class LibvirtConnection(object): timer = task.LoopingCall(f=None) def _wait_for_reboot(): try: - instance.update_state() - if instance.is_running(): + instance.set_state(self.get_info(instance.name)['state']) + if instance.state == power_state.RUNNING: logging.debug('rebooted instance %s' % instance.name) timer.stop() d.callback(None) @@ -147,7 +147,7 @@ class LibvirtConnection(object): @defer.inlineCallbacks @exception.wrap_exception def spawn(self, instance): - xml = self.toXml(instance) + xml = self.to_xml(instance) instance.set_state(power_state.NOSTATE, 'launching') yield 
self._create_image(instance, xml) yield self._conn.createXML(xml, 0) @@ -159,15 +159,14 @@ class LibvirtConnection(object): timer = task.LoopingCall(f=None) def _wait_for_boot(): try: - instance.update_state() - if instance.is_running(): + instance.set_state(self.get_info(instance.name)['state']) + if instance.state == power_state.RUNNING: logging.debug('booted instance %s' % instance.name) timer.stop() local_d.callback(None) - except Exception, exn: - logging.error("_wait_for_boot exception %s" % exn) - self.set_state(power_state.SHUTDOWN) - logging.error('Failed to boot instance %s' % instance.name) + except: + logging.exception('Failed to boot instance %s' % instance.name) + instance.set_state(power_state.SHUTDOWN) timer.stop() local_d.callback(None) timer.f = _wait_for_boot @@ -176,10 +175,9 @@ class LibvirtConnection(object): @defer.inlineCallbacks - def _create_image(self, instance, libvirt_xml): + def _create_image(self, inst, libvirt_xml): # syntactic nicety - data = instance.datamodel - basepath = lambda x='': self.basepath(instance, x) + basepath = lambda x='': os.path.join(FLAGS.instances_path, inst.name, x) # ensure directories exist and are writable yield process.simple_execute('mkdir -p %s' % basepath()) @@ -188,70 +186,71 @@ class LibvirtConnection(object): # TODO(termie): these are blocking calls, it would be great # if they weren't. 
- logging.info('Creating image for: %s', data['instance_id']) + logging.info('Creating image for: %s', inst.name) f = open(basepath('libvirt.xml'), 'w') f.write(libvirt_xml) f.close() - user = manager.AuthManager().get_user(data['user_id']) - project = manager.AuthManager().get_project(data['project_id']) + user = manager.AuthManager().get_user(inst.user_id) + project = manager.AuthManager().get_project(inst.project_id) if not os.path.exists(basepath('disk')): - yield images.fetch(data['image_id'], basepath('disk-raw'), user, project) + yield images.fetch(inst.image_id, basepath('disk-raw'), user, project) if not os.path.exists(basepath('kernel')): - yield images.fetch(data['kernel_id'], basepath('kernel'), user, project) + yield images.fetch(inst.kernel_id, basepath('kernel'), user, project) if not os.path.exists(basepath('ramdisk')): - yield images.fetch(data['ramdisk_id'], basepath('ramdisk'), user, project) + yield images.fetch(inst.ramdisk_id, basepath('ramdisk'), user, project) execute = lambda cmd, input=None: \ process.simple_execute(cmd=cmd, input=input, error_ok=1) - key = data['key_data'] + key = inst.key_data net = None - if data.get('inject_network', False): + network = inst.project.network + if False: # should be network.is_injected: with open(FLAGS.injected_network_template) as f: - net = f.read() % {'address': data['private_dns_name'], - 'network': data['network_network'], - 'netmask': data['network_netmask'], - 'gateway': data['network_gateway'], - 'broadcast': data['network_broadcast'], - 'dns': data['network_dns']} + net = f.read() % {'address': inst.fixed_ip, + 'network': network.network, + 'netmask': network.netmask, + 'gateway': network.gateway, + 'broadcast': network.broadcast, + 'dns': network.network.dns} if key or net: - logging.info('Injecting data into image %s', data['image_id']) + logging.info('Injecting data into image %s', inst.image_id) yield disk.inject_data(basepath('disk-raw'), key, net, execute=execute) if 
os.path.exists(basepath('disk')): yield process.simple_execute('rm -f %s' % basepath('disk')) - bytes = (instance_types.INSTANCE_TYPES[data['instance_type']]['local_gb'] + bytes = (instance_types.INSTANCE_TYPES[inst.instance_type]['local_gb'] * 1024 * 1024 * 1024) yield disk.partition( basepath('disk-raw'), basepath('disk'), bytes, execute=execute) - - def basepath(self, instance, path=''): - return os.path.abspath(os.path.join(instance.datamodel['basepath'], path)) - - - def toXml(self, instance): + def to_xml(self, instance): # TODO(termie): cache? logging.debug("Starting the toXML method") - libvirt_xml = open(FLAGS.libvirt_xml_template).read() - xml_info = instance.datamodel.copy() + with open(FLAGS.libvirt_xml_template) as f: + libvirt_xml = f.read() + network = instance.project.network + # FIXME(vish): stick this in db + instance_type = instance_types.INSTANCE_TYPES[instance.instance_type] + xml_info = {'type': FLAGS.libvirt_type, + 'name': instance.name, + 'basepath': os.path.join(FLAGS.instances_path, instance.name), + 'memory_kb': instance_type['memory_mb'] * 1024, + 'vcpus': instance_type['vcpus'], + 'bridge_name': network.bridge_name, + 'mac_address': instance.mac_address} # TODO(joshua): Make this xml express the attached disks as well - - # TODO(termie): lazy lazy hack because xml is annoying - xml_info['nova'] = json.dumps(instance.datamodel.copy()) - xml_info['type'] = FLAGS.libvirt_type libvirt_xml = libvirt_xml % xml_info logging.debug("Finished the toXML method") return libvirt_xml - - def get_info(self, instance_id): - virt_dom = self._conn.lookupByName(instance_id) + def get_info(self, instance_name): + virt_dom = self._conn.lookupByName(instance_name) (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info() return {'state': state, 'max_mem': max_mem, @@ -260,14 +259,14 @@ class LibvirtConnection(object): 'cpu_time': cpu_time} - def get_disks(self, instance_id): + def get_disks(self, instance_name): """ Note that this function takes an 
instance ID, not an Instance, so that it can be called by monitor. Returns a list of all block devices for this domain. """ - domain = self._conn.lookupByName(instance_id) + domain = self._conn.lookupByName(instance_name) # TODO(devcamcar): Replace libxml2 with etree. xml = domain.XMLDesc(0) doc = None @@ -303,14 +302,14 @@ class LibvirtConnection(object): return disks - def get_interfaces(self, instance_id): + def get_interfaces(self, instance_name): """ Note that this function takes an instance ID, not an Instance, so that it can be called by monitor. Returns a list of all network interfaces for this instance. """ - domain = self._conn.lookupByName(instance_id) + domain = self._conn.lookupByName(instance_name) # TODO(devcamcar): Replace libxml2 with etree. xml = domain.XMLDesc(0) doc = None @@ -346,19 +345,19 @@ class LibvirtConnection(object): return interfaces - def block_stats(self, instance_id, disk): + def block_stats(self, instance_name, disk): """ Note that this function takes an instance ID, not an Instance, so that it can be called by monitor. """ - domain = self._conn.lookupByName(instance_id) + domain = self._conn.lookupByName(instance_name) return domain.blockStats(disk) - def interface_stats(self, instance_id, interface): + def interface_stats(self, instance_name, interface): """ Note that this function takes an instance ID, not an Instance, so that it can be called by monitor. """ - domain = self._conn.lookupByName(instance_id) + domain = self._conn.lookupByName(instance_name) return domain.interfaceStats(interface) diff --git a/nova/volume/service.py b/nova/volume/service.py index 4ca3ba2a5..4d959aadb 100644 --- a/nova/volume/service.py +++ b/nova/volume/service.py @@ -23,7 +23,6 @@ Currently uses Ata-over-Ethernet. 
""" import logging -import os from twisted.internet import defer -- cgit From d508418214016d5c00aa8d304f9498f5b99a960b Mon Sep 17 00:00:00 2001 From: andy Date: Mon, 16 Aug 2010 14:16:21 +0200 Subject: rather comprehensive style fixes --- nova/adminclient.py | 7 +++++++ nova/auth/fakeldap.py | 1 - nova/auth/ldapdriver.py | 1 + nova/auth/manager.py | 12 +++++++----- nova/auth/rbac.py | 2 ++ nova/auth/signer.py | 10 +++++++--- nova/cloudpipe/api.py | 3 ++- nova/cloudpipe/pipelib.py | 2 +- nova/compute/disk.py | 4 ++++ nova/compute/model.py | 2 ++ nova/compute/monitor.py | 35 +++++++++++++++++++++-------------- nova/compute/service.py | 1 + nova/crypto.py | 8 +++++++- nova/endpoint/admin.py | 4 ++++ nova/endpoint/api.py | 7 +++++-- nova/endpoint/cloud.py | 3 +-- nova/endpoint/images.py | 7 ++++++- nova/exception.py | 8 ++++++++ nova/fakerabbit.py | 5 +++-- nova/flags.py | 40 +++++++++++++++++----------------------- nova/network/exception.py | 12 ++++++------ nova/network/linux_net.py | 6 +++--- nova/network/model.py | 3 ++- nova/network/service.py | 16 ++++++++-------- nova/network/vpn.py | 3 +-- nova/objectstore/bucket.py | 1 + nova/objectstore/handler.py | 38 ++++++++++++++++++++++++++------------ nova/objectstore/image.py | 1 + nova/objectstore/stored.py | 4 ++-- nova/process.py | 3 +++ nova/rpc.py | 5 +++-- nova/test.py | 5 ++--- nova/utils.py | 12 +++++++++--- nova/validate.py | 1 + nova/virt/images.py | 8 ++++++-- nova/virt/libvirt_conn.py | 23 +++++------------------ nova/virt/xenapi.py | 39 +++++++++++++++++---------------------- nova/volume/service.py | 4 +++- run_tests.py | 9 ++++----- 39 files changed, 209 insertions(+), 146 deletions(-) diff --git a/nova/adminclient.py b/nova/adminclient.py index 242298a75..0ca32b1e5 100644 --- a/nova/adminclient.py +++ b/nova/adminclient.py @@ -20,6 +20,7 @@ Nova User API client library. 
""" import base64 + import boto from boto.ec2.regioninfo import RegionInfo @@ -57,6 +58,7 @@ class UserInfo(object): elif name == 'secretkey': self.secretkey = str(value) + class UserRole(object): """ Information about a Nova user's role, as parsed through SAX. @@ -79,6 +81,7 @@ class UserRole(object): else: setattr(self, name, str(value)) + class ProjectInfo(object): """ Information about a Nova project, as parsed through SAX @@ -114,12 +117,14 @@ class ProjectInfo(object): else: setattr(self, name, str(value)) + class ProjectMember(object): """ Information about a Nova project member, as parsed through SAX. Fields include: memberId """ + def __init__(self, connection=None): self.connection = connection self.memberId = None @@ -135,6 +140,7 @@ class ProjectMember(object): self.memberId = value else: setattr(self, name, str(value)) + class HostInfo(object): """ @@ -163,6 +169,7 @@ class HostInfo(object): def endElement(self, name, value, connection): setattr(self, name, value) + class NovaAdminClient(object): def __init__(self, clc_ip='127.0.0.1', region='nova', access_key='admin', secret_key='admin', **kwargs): diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py index b420924af..bc744fa01 100644 --- a/nova/auth/fakeldap.py +++ b/nova/auth/fakeldap.py @@ -219,7 +219,6 @@ class FakeLDAP(object): raise NO_SUCH_OBJECT() return objects - @property def __redis_prefix(self): return 'ldap:' diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 453fa196c..6bf7fcd1e 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -30,6 +30,7 @@ import sys from nova import exception from nova import flags + FLAGS = flags.FLAGS flags.DEFINE_string('ldap_url', 'ldap://localhost', 'Point this at your ldap server') diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 064fd78bc..80ee78896 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -37,7 +37,6 @@ from nova.network import vpn FLAGS = flags.FLAGS - 
flags.DEFINE_list('allowed_roles', ['cloudadmin', 'itsec', 'sysadmin', 'netadmin', 'developer'], 'Allowed roles for project') @@ -52,7 +51,6 @@ flags.DEFINE_list('superuser_roles', ['cloudadmin'], flags.DEFINE_list('global_roles', ['cloudadmin', 'itsec'], 'Roles that apply to all projects') - flags.DEFINE_string('credentials_template', utils.abspath('auth/novarc.template'), 'Template for creating users rc file') @@ -67,15 +65,14 @@ flags.DEFINE_string('credential_cert_file', 'cert.pem', 'Filename of certificate in credentials zip') flags.DEFINE_string('credential_rc_file', 'novarc', 'Filename of rc in credentials zip') - flags.DEFINE_string('credential_cert_subject', '/C=US/ST=California/L=MountainView/O=AnsoLabs/' 'OU=NovaDev/CN=%s-%s', 'Subject for certificate for users') - flags.DEFINE_string('auth_driver', 'nova.auth.ldapdriver.FakeLdapDriver', 'Driver that auth manager uses') + class AuthBase(object): """Base class for objects relating to auth @@ -83,6 +80,7 @@ class AuthBase(object): an id member. They may optionally contain methods that delegate to AuthManager, but should not implement logic themselves. """ + @classmethod def safe_id(cls, obj): """Safe get object id @@ -100,6 +98,7 @@ class AuthBase(object): class User(AuthBase): """Object representing a user""" + def __init__(self, id, name, access, secret, admin): AuthBase.__init__(self) self.id = id @@ -161,6 +160,7 @@ class KeyPair(AuthBase): Even though this object is named KeyPair, only the public key and fingerprint is stored. The user's private key is not saved. 
""" + def __init__(self, id, name, owner_id, public_key, fingerprint): AuthBase.__init__(self) self.id = id @@ -179,6 +179,7 @@ class KeyPair(AuthBase): class Project(AuthBase): """Represents a Project returned from the datastore""" + def __init__(self, id, name, project_manager_id, description, member_ids): AuthBase.__init__(self) self.id = id @@ -227,7 +228,6 @@ class Project(AuthBase): self.member_ids) - class AuthManager(object): """Manager Singleton for dealing with Users, Projects, and Keypairs @@ -239,7 +239,9 @@ class AuthManager(object): AuthManager also manages associated data related to Auth objects that need to be more accessible, such as vpn ips and ports. """ + _instance = None + def __new__(cls, *args, **kwargs): """Returns the AuthManager singleton""" if not cls._instance: diff --git a/nova/auth/rbac.py b/nova/auth/rbac.py index 7fab9419f..1446e4e27 100644 --- a/nova/auth/rbac.py +++ b/nova/auth/rbac.py @@ -32,6 +32,7 @@ def allow(*roles): return wrapped_f return wrap + def deny(*roles): def wrap(f): def wrapped_f(self, context, *args, **kwargs): @@ -44,6 +45,7 @@ def deny(*roles): return wrapped_f return wrap + def __matches_role(context, role): if role == 'all': return True diff --git a/nova/auth/signer.py b/nova/auth/signer.py index 634f22f0d..8334806d2 100644 --- a/nova/auth/signer.py +++ b/nova/auth/signer.py @@ -48,11 +48,15 @@ import hashlib import hmac import logging import urllib -import boto # NOTE(vish): for new boto -import boto.utils # NOTE(vish): for old boto + +# NOTE(vish): for new boto +import boto +# NOTE(vish): for old boto +import boto.utils from nova.exception import Error + class Signer(object): """ hacked up code from boto/connection.py """ @@ -77,7 +81,6 @@ class Signer(object): return self._calc_signature_2(params, verb, server_string, path) raise Error('Unknown Signature Version: %s' % self.SignatureVersion) - def _get_utf8_value(self, value): if not isinstance(value, str) and not isinstance(value, unicode): value = 
str(value) @@ -133,5 +136,6 @@ class Signer(object): logging.debug('base64 encoded digest: %s' % b64) return b64 + if __name__ == '__main__': print Signer('foo').generate({"SignatureMethod": 'HmacSHA256', 'SignatureVersion': '2'}, "get", "server", "/foo") diff --git a/nova/cloudpipe/api.py b/nova/cloudpipe/api.py index 0bffe9aa3..56aa89834 100644 --- a/nova/cloudpipe/api.py +++ b/nova/cloudpipe/api.py @@ -21,9 +21,10 @@ Tornado REST API Request Handlers for CloudPipe """ import logging -import tornado.web import urllib +import tornado.web + from nova import crypto from nova.auth import manager diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py index 5b0ed3471..2867bcb21 100644 --- a/nova/cloudpipe/pipelib.py +++ b/nova/cloudpipe/pipelib.py @@ -36,11 +36,11 @@ from nova.endpoint import api FLAGS = flags.FLAGS - flags.DEFINE_string('boot_script_template', utils.abspath('cloudpipe/bootscript.sh'), 'Template for script to run on cloudpipe instance boot') + class CloudPipe(object): def __init__(self, cloud_controller): self.controller = cloud_controller diff --git a/nova/compute/disk.py b/nova/compute/disk.py index 1ffcca685..c340c5a79 100644 --- a/nova/compute/disk.py +++ b/nova/compute/disk.py @@ -24,6 +24,7 @@ Includes injection of SSH PGP keys into authorized_keys file. import logging import os import tempfile + from twisted.internet import defer from nova import exception @@ -84,6 +85,7 @@ def partition(infile, outfile, local_bytes=0, local_type='ext2', execute=None): yield execute('dd if=%s of=%s bs=%d seek=%d conv=notrunc,fsync' % (infile, outfile, sector_size, primary_first)) + @defer.inlineCallbacks def inject_data(image, key=None, net=None, partition=None, execute=None): """Injects a ssh key and optionally net data into a disk image. 
@@ -137,6 +139,7 @@ def inject_data(image, key=None, net=None, partition=None, execute=None): # remove loopback yield execute('sudo losetup -d %s' % device) + @defer.inlineCallbacks def _inject_key_into_fs(key, fs, execute=None): sshdir = os.path.join(os.path.join(fs, 'root'), '.ssh') @@ -146,6 +149,7 @@ def _inject_key_into_fs(key, fs, execute=None): keyfile = os.path.join(sshdir, 'authorized_keys') yield execute('sudo tee -a %s' % keyfile, '\n' + key.strip() + '\n') + @defer.inlineCallbacks def _inject_net_into_fs(net, fs, execute=None): netfile = os.path.join(os.path.join(os.path.join( diff --git a/nova/compute/model.py b/nova/compute/model.py index 266a93b9a..84432b55f 100644 --- a/nova/compute/model.py +++ b/nova/compute/model.py @@ -168,6 +168,7 @@ class Instance(datastore.BasicModel): self.unassociate_with("ip", self.state['private_dns_name']) return super(Instance, self).destroy() + class Host(datastore.BasicModel): """A Host is the machine where a Daemon is running.""" @@ -235,6 +236,7 @@ class Daemon(datastore.BasicModel): for x in cls.associated_to("host", hostname): yield x + class SessionToken(datastore.BasicModel): """This is a short-lived auth token that is passed through web requests""" diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py index 19e1a483d..268864900 100644 --- a/nova/compute/monitor.py +++ b/nova/compute/monitor.py @@ -24,14 +24,15 @@ Instance Monitoring: in the object store. 
""" -import boto -import boto.s3 import datetime import logging import os -import rrdtool import sys import time + +import boto +import boto.s3 +import rrdtool from twisted.internet import defer from twisted.internet import task from twisted.application import service @@ -41,13 +42,12 @@ from nova.virt import connection as virt_connection FLAGS = flags.FLAGS -flags.DEFINE_integer( - 'monitoring_instances_delay', 5, 'Sleep time between updates') -flags.DEFINE_integer( - 'monitoring_instances_step', 300, 'Interval of RRD updates') -flags.DEFINE_string( - 'monitoring_rrd_path', '/var/nova/monitor/instances', - 'Location of RRD files') +flags.DEFINE_integer('monitoring_instances_delay', 5, + 'Sleep time between updates') +flags.DEFINE_integer('monitoring_instances_step', 300, + 'Interval of RRD updates') +flags.DEFINE_string('monitoring_rrd_path', '/var/nova/monitor/instances', + 'Location of RRD files') RRD_VALUES = { @@ -61,7 +61,7 @@ RRD_VALUES = { 'RRA:MAX:0.5:6:800', 'RRA:MAX:0.5:24:800', 'RRA:MAX:0.5:288:800', - ], + ], 'net': [ 'DS:rx:COUNTER:600:0:1250000', 'DS:tx:COUNTER:600:0:1250000', @@ -73,7 +73,7 @@ RRD_VALUES = { 'RRA:MAX:0.5:6:800', 'RRA:MAX:0.5:24:800', 'RRA:MAX:0.5:288:800', - ], + ], 'disk': [ 'DS:rd:COUNTER:600:U:U', 'DS:wr:COUNTER:600:U:U', @@ -85,12 +85,13 @@ RRD_VALUES = { 'RRA:MAX:0.5:6:800', 'RRA:MAX:0.5:24:800', 'RRA:MAX:0.5:444:800', - ] -} + ] + } utcnow = datetime.datetime.utcnow + def update_rrd(instance, name, data): """ Updates the specified RRD file. @@ -106,6 +107,7 @@ def update_rrd(instance, name, data): '%d:%s' % (timestamp, data) ) + def init_rrd(instance, name): """ Initializes the specified RRD file. 
@@ -124,6 +126,7 @@ def init_rrd(instance, name): '--start', '0', *RRD_VALUES[name] ) + def graph_cpu(instance, duration): """ @@ -148,6 +151,7 @@ def graph_cpu(instance, duration): store_graph(instance.instance_id, filename) + def graph_net(instance, duration): """ Creates a graph of network usage for the specified instance and duration. @@ -174,6 +178,7 @@ def graph_net(instance, duration): ) store_graph(instance.instance_id, filename) + def graph_disk(instance, duration): """ @@ -202,6 +207,7 @@ def graph_disk(instance, duration): store_graph(instance.instance_id, filename) + def store_graph(instance_id, filename): """ Transmits the specified graph file to internal object store on cloud @@ -387,6 +393,7 @@ class InstanceMonitor(object, service.Service): """ Monitors the running instances of the current machine. """ + def __init__(self): """ Initialize the monitoring loop. diff --git a/nova/compute/service.py b/nova/compute/service.py index 820116453..e59f3fb34 100644 --- a/nova/compute/service.py +++ b/nova/compute/service.py @@ -29,6 +29,7 @@ import json import logging import os import sys + from twisted.internet import defer from twisted.internet import task diff --git a/nova/crypto.py b/nova/crypto.py index cc84f5e45..b05548ea1 100644 --- a/nova/crypto.py +++ b/nova/crypto.py @@ -24,7 +24,6 @@ SSH keypairs and x509 certificates. 
import base64 import hashlib import logging -import M2Crypto import os import shutil import struct @@ -32,6 +31,8 @@ import tempfile import time import utils +import M2Crypto + from nova import exception from nova import flags @@ -42,11 +43,13 @@ flags.DEFINE_string('keys_path', utils.abspath('../keys'), 'Where we keep our ke flags.DEFINE_string('ca_path', utils.abspath('../CA'), 'Where we keep our root CA') flags.DEFINE_boolean('use_intermediate_ca', False, 'Should we use intermediate CAs for each project?') + def ca_path(project_id): if project_id: return "%s/INTER/%s/cacert.pem" % (FLAGS.ca_path, project_id) return "%s/cacert.pem" % (FLAGS.ca_path) + def fetch_ca(project_id=None, chain=True): if not FLAGS.use_intermediate_ca: project_id = None @@ -60,6 +63,7 @@ def fetch_ca(project_id=None, chain=True): buffer += cafile.read() return buffer + def generate_key_pair(bits=1024): # what is the magic 65537? @@ -109,6 +113,7 @@ def generate_x509_cert(subject, bits=1024): shutil.rmtree(tmpdir) return (private_key, csr) + def sign_csr(csr_text, intermediate=None): if not FLAGS.use_intermediate_ca: intermediate = None @@ -122,6 +127,7 @@ def sign_csr(csr_text, intermediate=None): os.chdir(start) return _sign_csr(csr_text, user_ca) + def _sign_csr(csr_text, ca_folder): tmpfolder = tempfile.mkdtemp() csrfile = open("%s/inbound.csr" % (tmpfolder), "w") diff --git a/nova/endpoint/admin.py b/nova/endpoint/admin.py index 4f4824fca..d6f622755 100644 --- a/nova/endpoint/admin.py +++ b/nova/endpoint/admin.py @@ -37,6 +37,7 @@ def user_dict(user, base64_file=None): else: return {} + def project_dict(project): """Convert the project object to a result dict""" if project: @@ -47,6 +48,7 @@ def project_dict(project): else: return {} + def host_dict(host): """Convert a host model object to a result dict""" if host: @@ -54,6 +56,7 @@ def host_dict(host): else: return {} + def admin_only(target): """Decorator for admin-only API calls""" def wrapper(*args, **kwargs): @@ -66,6 +69,7 @@ 
def admin_only(target): return wrapper + class AdminController(object): """ API Controller for users, hosts, nodes, and workers. diff --git a/nova/endpoint/api.py b/nova/endpoint/api.py index 78a18b9ea..40be00bb7 100755 --- a/nova/endpoint/api.py +++ b/nova/endpoint/api.py @@ -25,12 +25,13 @@ import logging import multiprocessing import random import re -import tornado.web -from twisted.internet import defer import urllib # TODO(termie): replace minidom with etree from xml.dom import minidom +import tornado.web +from twisted.internet import defer + from nova import crypto from nova import exception from nova import flags @@ -43,6 +44,7 @@ from nova.endpoint import cloud FLAGS = flags.FLAGS flags.DEFINE_integer('cc_port', 8773, 'cloud controller port') + _log = logging.getLogger("api") _log.setLevel(logging.DEBUG) @@ -227,6 +229,7 @@ class MetadataRequestHandler(tornado.web.RequestHandler): self.print_data(data) self.finish() + class APIRequestHandler(tornado.web.RequestHandler): def get(self, controller_name): self.execute(controller_name) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 5366acec7..a3d6d1aab 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -26,6 +26,7 @@ import base64 import logging import os import time + from twisted.internet import defer from nova import datastore @@ -44,7 +45,6 @@ from nova.volume import service FLAGS = flags.FLAGS - flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') @@ -362,7 +362,6 @@ class CloudController(object): 'status': volume['attach_status'], 'volumeId': volume_id}) - @rbac.allow('projectmanager', 'sysadmin') def detach_volume(self, context, volume_id, **kwargs): volume = self._get_volume(context, volume_id) diff --git a/nova/endpoint/images.py b/nova/endpoint/images.py index fe7cb5d11..2a88d66af 100644 --- a/nova/endpoint/images.py +++ b/nova/endpoint/images.py @@ -21,10 +21,11 @@ Proxy AMI-related calls from the cloud controller, to the running objectstore 
daemon. """ -import boto.s3.connection import json import urllib +import boto.s3.connection + from nova import flags from nova import utils from nova.auth import manager @@ -32,6 +33,7 @@ from nova.auth import manager FLAGS = flags.FLAGS + def modify(context, image_id, operation): conn(context).make_request( method='POST', @@ -53,6 +55,7 @@ def register(context, image_location): return image_id + def list(context, filter_list=[]): """ return a list of all images that a user can see @@ -68,6 +71,7 @@ def list(context, filter_list=[]): return [i for i in result if i['imageId'] in filter_list] return result + def deregister(context, image_id): """ unregister an image """ conn(context).make_request( @@ -75,6 +79,7 @@ def deregister(context, image_id): bucket='_images', query_args=qs({'image_id': image_id})) + def conn(context): access = manager.AuthManager().get_access_key(context.user, context.project) diff --git a/nova/exception.py b/nova/exception.py index 52497a19e..29bcb17f8 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -25,31 +25,39 @@ import logging import sys import traceback + class Error(Exception): def __init__(self, message=None): super(Error, self).__init__(message) + class ApiError(Error): def __init__(self, message='Unknown', code='Unknown'): self.message = message self.code = code super(ApiError, self).__init__('%s: %s'% (code, message)) + class NotFound(Error): pass + class Duplicate(Error): pass + class NotAuthorized(Error): pass + class NotEmpty(Error): pass + class Invalid(Error): pass + def wrap_exception(f): def _wrap(*args, **kw): try: diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index 689194513..068025249 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -16,12 +16,13 @@ # License for the specific language governing permissions and limitations # under the License. -""" Based a bit on the carrot.backeds.queue backend... but a lot better """ +"""Based a bit on the carrot.backeds.queue backend... 
but a lot better.""" -from carrot.backends import base import logging import Queue as queue +from carrot.backends import base + class Message(base.BaseMessage): pass diff --git a/nova/flags.py b/nova/flags.py index b3bdd088f..e3feb252d 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -175,29 +175,25 @@ DEFINE_string('network_topic', 'network', 'the topic network nodes listen on') DEFINE_bool('verbose', False, 'show debug output') DEFINE_boolean('fake_rabbit', False, 'use a fake rabbit') -DEFINE_bool('fake_network', False, 'should we use fake network devices and addresses') +DEFINE_bool('fake_network', False, + 'should we use fake network devices and addresses') DEFINE_string('rabbit_host', 'localhost', 'rabbit host') DEFINE_integer('rabbit_port', 5672, 'rabbit port') DEFINE_string('rabbit_userid', 'guest', 'rabbit userid') DEFINE_string('rabbit_password', 'guest', 'rabbit password') DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host') DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to') -DEFINE_string('ec2_url', - 'http://127.0.0.1:8773/services/Cloud', - 'Url to ec2 api server') - -DEFINE_string('default_image', - 'ami-11111', - 'default image to use, testing only') -DEFINE_string('default_kernel', - 'aki-11111', - 'default kernel to use, testing only') -DEFINE_string('default_ramdisk', - 'ari-11111', - 'default ramdisk to use, testing only') -DEFINE_string('default_instance_type', - 'm1.small', - 'default instance type to use, testing only') +DEFINE_string('ec2_url', 'http://127.0.0.1:8773/services/Cloud', + 'Url to ec2 api server') + +DEFINE_string('default_image', 'ami-11111', + 'default image to use, testing only') +DEFINE_string('default_kernel', 'aki-11111', + 'default kernel to use, testing only') +DEFINE_string('default_ramdisk', 'ari-11111', + 'default ramdisk to use, testing only') +DEFINE_string('default_instance_type', 'm1.small', + 'default instance type to use, testing only') DEFINE_string('vpn_image_id', 
'ami-CLOUDPIPE', 'AMI for cloudpipe vpn server') DEFINE_string('vpn_key_suffix', @@ -207,10 +203,8 @@ DEFINE_string('vpn_key_suffix', DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger') # UNUSED -DEFINE_string('node_availability_zone', - 'nova', - 'availability zone of this node') -DEFINE_string('node_name', - socket.gethostname(), - 'name of this node') +DEFINE_string('node_availability_zone', 'nova', + 'availability zone of this node') +DEFINE_string('node_name', socket.gethostname(), + 'name of this node') diff --git a/nova/network/exception.py b/nova/network/exception.py index 8d7aa1498..2a3f5ec14 100644 --- a/nova/network/exception.py +++ b/nova/network/exception.py @@ -20,29 +20,29 @@ Exceptions for network errors. """ -from nova.exception import Error +from nova import exception -class NoMoreAddresses(Error): +class NoMoreAddresses(exception.Error): """No More Addresses are available in the network""" pass -class AddressNotAllocated(Error): +class AddressNotAllocated(exception.Error): """The specified address has not been allocated""" pass -class AddressAlreadyAssociated(Error): +class AddressAlreadyAssociated(exception.Error): """The specified address has already been associated""" pass -class AddressNotAssociated(Error): +class AddressNotAssociated(exception.Error): """The specified address is not associated""" pass -class NotValidNetworkSize(Error): +class NotValidNetworkSize(exception.Error): """The network size is not valid""" pass diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 4ebc2097b..b5385fcab 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -18,16 +18,16 @@ Implements vlans, bridges, and iptables rules using linux utilities. """ import logging -import signal import os +import signal -# todo(ja): does the definition of network_path belong here? +# TODO(ja): does the definition of network_path belong here? 
from nova import flags from nova import utils -FLAGS = flags.FLAGS +FLAGS = flags.FLAGS flags.DEFINE_string('dhcpbridge_flagfile', '/etc/nova/nova-dhcpbridge.conf', 'location of flagfile for dhcpbridge') diff --git a/nova/network/model.py b/nova/network/model.py index ce9345067..0900e1513 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -20,11 +20,11 @@ Model Classes for network control, including VLANs, DHCP, and IP allocation. """ -import IPy import logging import os import time +import IPy from nova import datastore from nova import exception as nova_exception from nova import flags @@ -53,6 +53,7 @@ flags.DEFINE_integer('cnt_vpn_clients', 5, flags.DEFINE_integer('cloudpipe_start_port', 12000, 'Starting port for mapped CloudPipe external ports') + logging.getLogger().setLevel(logging.DEBUG) diff --git a/nova/network/service.py b/nova/network/service.py index 9c0f5520b..22e84477f 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -21,17 +21,17 @@ Network Hosts are responsible for allocating ips and setting up network """ from nova import datastore +from nova import exception from nova import flags from nova import service from nova import utils from nova.auth import manager -from nova.exception import NotFound from nova.network import exception from nova.network import model from nova.network import vpn -FLAGS = flags.FLAGS +FLAGS = flags.FLAGS flags.DEFINE_string('network_type', 'flat', 'Service Class for Networking') @@ -41,15 +41,15 @@ flags.DEFINE_list('flat_network_ips', ['192.168.0.2', '192.168.0.3', '192.168.0.4'], 'Available ips for simple network') flags.DEFINE_string('flat_network_network', '192.168.0.0', - 'Network for simple network') + 'Network for simple network') flags.DEFINE_string('flat_network_netmask', '255.255.255.0', - 'Netmask for simple network') + 'Netmask for simple network') flags.DEFINE_string('flat_network_gateway', '192.168.0.1', - 'Broadcast for simple network') + 'Broadcast for simple network') 
flags.DEFINE_string('flat_network_broadcast', '192.168.0.255', - 'Broadcast for simple network') + 'Broadcast for simple network') flags.DEFINE_string('flat_network_dns', '8.8.4.4', - 'Dns for simple network') + 'Dns for simple network') def type_to_class(network_type): @@ -58,7 +58,7 @@ def type_to_class(network_type): return FlatNetworkService elif network_type == 'vlan': return VlanNetworkService - raise NotFound("Couldn't find %s network type" % network_type) + raise exception.NotFound("Couldn't find %s network type" % network_type) def setup_compute_network(network_type, user_id, project_id, security_group): diff --git a/nova/network/vpn.py b/nova/network/vpn.py index a0e2a7fa1..cf2579e61 100644 --- a/nova/network/vpn.py +++ b/nova/network/vpn.py @@ -23,9 +23,8 @@ from nova import exception from nova import flags from nova import utils -FLAGS = flags.FLAGS - +FLAGS = flags.FLAGS flags.DEFINE_string('vpn_ip', utils.get_my_ip(), 'Public IP for the cloudpipe VPN servers') flags.DEFINE_integer('vpn_start_port', 1000, diff --git a/nova/objectstore/bucket.py b/nova/objectstore/bucket.py index b42a96233..c2b412dd7 100644 --- a/nova/objectstore/bucket.py +++ b/nova/objectstore/bucket.py @@ -36,6 +36,7 @@ FLAGS = flags.FLAGS flags.DEFINE_string('buckets_path', utils.abspath('../buckets'), 'path to s3 buckets') + class Bucket(object): def __init__(self, name): self.name = name diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py index dfe1918e3..035e342ca 100644 --- a/nova/objectstore/handler.py +++ b/nova/objectstore/handler.py @@ -38,17 +38,19 @@ S3 client with this module:: """ import datetime -import logging import json +import logging import multiprocessing import os -from tornado import escape import urllib -from twisted.application import internet, service -from twisted.web.resource import Resource -from twisted.web import server, static, error - +from tornado import escape +from twisted.application import internet +from twisted.application 
import service +from twisted.web import error +from twisted.web import resource +from twisted.web import server +from twisted.web import static from nova import exception from nova import flags @@ -60,6 +62,7 @@ from nova.objectstore import image FLAGS = flags.FLAGS + def render_xml(request, value): assert isinstance(value, dict) and len(value) == 1 request.setHeader("Content-Type", "application/xml; charset=UTF-8") @@ -72,11 +75,13 @@ def render_xml(request, value): request.write('') request.finish() + def finish(request, content=None): if content: request.write(content) request.finish() + def _render_parts(value, write_cb): if isinstance(value, basestring): write_cb(escape.xhtml_escape(value)) @@ -95,11 +100,13 @@ def _render_parts(value, write_cb): else: raise Exception("Unknown S3 value type %r", value) + def get_argument(request, key, default_value): if key in request.args: return request.args[key][0] return default_value + def get_context(request): try: # Authorization Header format: 'AWS :' @@ -120,13 +127,14 @@ def get_context(request): logging.debug("Authentication Failure: %s" % ex) raise exception.NotAuthorized -class ErrorHandlingResource(Resource): + +class ErrorHandlingResource(resource.Resource): """Maps exceptions to 404 / 401 codes. Won't work for exceptions thrown after NOT_DONE_YET is returned.""" # TODO(unassigned) (calling-all-twisted-experts): This needs to be plugged in to the right place in twisted... 
# This doesn't look like it's the right place (consider exceptions in getChild; or after NOT_DONE_YET is returned def render(self, request): try: - return Resource.render(self, request) + return resource.Resource.render(self, request) except exception.NotFound: request.setResponseCode(404) return '' @@ -134,6 +142,7 @@ class ErrorHandlingResource(Resource): request.setResponseCode(403) return '' + class S3(ErrorHandlingResource): """Implementation of an S3-like storage server based on local files.""" def getChild(self, name, request): @@ -154,9 +163,10 @@ class S3(ErrorHandlingResource): }}) return server.NOT_DONE_YET + class BucketResource(ErrorHandlingResource): def __init__(self, name): - Resource.__init__(self) + resource.Resource.__init__(self) self.name = name def getChild(self, name, request): @@ -206,7 +216,7 @@ class BucketResource(ErrorHandlingResource): class ObjectResource(ErrorHandlingResource): def __init__(self, bucket, name): - Resource.__init__(self) + resource.Resource.__init__(self) self.bucket = bucket self.name = name @@ -245,17 +255,19 @@ class ObjectResource(ErrorHandlingResource): request.setResponseCode(204) return '' + class ImageResource(ErrorHandlingResource): isLeaf = True def __init__(self, name): - Resource.__init__(self) + resource.Resource.__init__(self) self.img = image.Image(name) def render_GET(self, request): return static.File(self.img.image_path, defaultType='application/octet-stream').render_GET(request) -class ImagesResource(Resource): + +class ImagesResource(resource.Resource): def getChild(self, name, request): if name == '': return self @@ -339,11 +351,13 @@ class ImagesResource(Resource): request.setResponseCode(204) return '' + def get_site(): root = S3() site = server.Site(root) return site + def get_application(): factory = get_site() application = service.Application("objectstore") diff --git a/nova/objectstore/image.py b/nova/objectstore/image.py index 861eb364f..fb780a0ec 100644 --- a/nova/objectstore/image.py +++ 
b/nova/objectstore/image.py @@ -42,6 +42,7 @@ FLAGS = flags.FLAGS flags.DEFINE_string('images_path', utils.abspath('../images'), 'path to decrypted images') + class Image(object): def __init__(self, image_id): self.image_id = image_id diff --git a/nova/objectstore/stored.py b/nova/objectstore/stored.py index 81c047b22..9829194cb 100644 --- a/nova/objectstore/stored.py +++ b/nova/objectstore/stored.py @@ -23,7 +23,7 @@ Properties of an object stored within a bucket. import os import nova.crypto -from nova.exception import NotFound, NotAuthorized +from nova import exception class Object(object): @@ -33,7 +33,7 @@ class Object(object): self.key = key self.path = bucket._object_path(key) if not os.path.isfile(self.path): - raise NotFound + raise exception.NotFound def __repr__(self): return "" % (self.bucket, self.key) diff --git a/nova/process.py b/nova/process.py index 2dc56372f..86f29e2c4 100644 --- a/nova/process.py +++ b/nova/process.py @@ -23,6 +23,7 @@ Process pool, still buggy right now. import logging import multiprocessing import StringIO + from twisted.internet import defer from twisted.internet import error from twisted.internet import process @@ -205,6 +206,7 @@ class ProcessPool(object): self._pool.release() return rv + class SharedPool(object): _instance = None def __init__(self): @@ -213,5 +215,6 @@ class SharedPool(object): def __getattr__(self, key): return getattr(self._instance, key) + def simple_execute(cmd, **kwargs): return SharedPool().simple_execute(cmd, **kwargs) diff --git a/nova/rpc.py b/nova/rpc.py index 4ac546c2a..824a66b5b 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -21,12 +21,13 @@ AMQP-based RPC. Queues have consumers and publishers. No fan-out support yet. 
""" -from carrot import connection as carrot_connection -from carrot import messaging import json import logging import sys import uuid + +from carrot import connection as carrot_connection +from carrot import messaging from twisted.internet import defer from twisted.internet import task diff --git a/nova/test.py b/nova/test.py index c7e08734f..c392c8a84 100644 --- a/nova/test.py +++ b/nova/test.py @@ -22,11 +22,11 @@ Allows overriding of flags for use of fakes, and some black magic for inline callbacks. """ -import mox -import stubout import sys import time +import mox +import stubout from tornado import ioloop from twisted.internet import defer from twisted.trial import unittest @@ -91,7 +91,6 @@ class TrialTestCase(unittest.TestCase): setattr(FLAGS, k, v) - class BaseTestCase(TrialTestCase): # TODO(jaypipes): Can this be moved into the TrialTestCase class? """Base test case class for all unit tests.""" diff --git a/nova/utils.py b/nova/utils.py index 63db080f1..e826f9b71 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -20,7 +20,7 @@ System-level utilities and helper functions. 
""" -from datetime import datetime, timedelta +import datetime import inspect import logging import os @@ -32,9 +32,11 @@ import sys from nova import exception from nova import flags + FLAGS = flags.FLAGS TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ" + def import_class(import_str): """Returns a class from a string including module and class""" mod_str, _sep, class_str = import_str.rpartition('.') @@ -44,6 +46,7 @@ def import_class(import_str): except (ImportError, ValueError, AttributeError): raise exception.NotFound('Class %s cannot be found' % class_str) + def fetchfile(url, target): logging.debug("Fetching %s" % url) # c = pycurl.Curl() @@ -55,6 +58,7 @@ def fetchfile(url, target): # fp.close() execute("curl %s -o %s" % (url, target)) + def execute(cmd, input=None, addl_env=None): env = os.environ.copy() if addl_env: @@ -129,10 +133,12 @@ def get_my_ip(): logging.warn("Couldn't get IP, using 127.0.0.1 %s", ex) return "127.0.0.1" + def isotime(at=None): if not at: - at = datetime.utcnow() + at = datetime.datetime.utcnow() return at.strftime(TIME_FORMAT) + def parse_isotime(timestr): - return datetime.strptime(timestr, TIME_FORMAT) + return datetime.datetime.strptime(timestr, TIME_FORMAT) diff --git a/nova/validate.py b/nova/validate.py index a69306fad..21f4ed286 100644 --- a/nova/validate.py +++ b/nova/validate.py @@ -57,6 +57,7 @@ def rangetest(**argchecks): # validate ranges for both+defaults return onCall return onDecorator + def typetest(**argchecks): def onDecorator(func): import sys diff --git a/nova/virt/images.py b/nova/virt/images.py index 1e23c48b9..a3ca72bdd 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -27,11 +27,11 @@ import urlparse from nova import flags from nova import process -from nova.auth import signer from nova.auth import manager +from nova.auth import signer -FLAGS = flags.FLAGS +FLAGS = flags.FLAGS flags.DEFINE_bool('use_s3', True, 'whether to get images from s3 or use local copy') @@ -43,6 +43,7 @@ def fetch(image, path, user, 
project): f = _fetch_local_image return f(image, path, user, project) + def _fetch_s3_image(image, path, user, project): url = image_url(image) @@ -66,13 +67,16 @@ def _fetch_s3_image(image, path, user, project): cmd += ['-o', path] return process.SharedPool().execute(executable=cmd[0], args=cmd[1:]) + def _fetch_local_image(image, path, user, project): source = _image_path('%s/image' % image) return process.simple_execute('cp %s %s' % (source, path)) + def _image_path(path): return os.path.join(FLAGS.images_path, path) + def image_url(image): return "http://%s:%s/_images/%s/image" % (FLAGS.s3_host, FLAGS.s3_port, image) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 97e1b0ab2..d1a4a6b67 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -42,6 +42,7 @@ from nova.virt import images libvirt = None libxml2 = None + FLAGS = flags.FLAGS flags.DEFINE_string('libvirt_xml_template', utils.abspath('virt/libvirt.qemu.xml.template'), @@ -57,7 +58,9 @@ flags.DEFINE_string('libvirt_type', 'Libvirt domain type (valid options are: kvm, qemu, uml)') flags.DEFINE_string('libvirt_uri', '', - 'Override the default libvirt URI (which is dependent on libvirt_type)') + 'Override the default libvirt URI (which is dependent' + ' on libvirt_type)') + def get_connection(read_only): # These are loaded late so that there's no need to install these @@ -70,6 +73,7 @@ def get_connection(read_only): libxml2 = __import__('libxml2') return LibvirtConnection(read_only) + class LibvirtConnection(object): def __init__(self, read_only): self.libvirt_uri, template_file = self.get_uri_and_template() @@ -78,14 +82,12 @@ class LibvirtConnection(object): self._wrapped_conn = None self.read_only = read_only - @property def _conn(self): if not self._wrapped_conn: self._wrapped_conn = self._connect(self.libvirt_uri, self.read_only) return self._wrapped_conn - def get_uri_and_template(self): if FLAGS.libvirt_type == 'uml': uri = FLAGS.libvirt_uri or 
'uml:///system' @@ -95,7 +97,6 @@ class LibvirtConnection(object): template_file = FLAGS.libvirt_xml_template return uri, template_file - def _connect(self, uri, read_only): auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], 'root', @@ -106,13 +107,10 @@ class LibvirtConnection(object): else: return libvirt.openAuth(uri, auth, 0) - - def list_instances(self): return [self._conn.lookupByID(x).name() for x in self._conn.listDomainsID()] - def destroy(self, instance): try: virt_dom = self._conn.lookupByName(instance.name) @@ -141,14 +139,12 @@ class LibvirtConnection(object): timer.start(interval=0.5, now=True) return d - def _cleanup(self, instance): target = os.path.abspath(instance.datamodel['basepath']) logging.info("Deleting instance files at %s", target) if os.path.exists(target): shutil.rmtree(target) - @defer.inlineCallbacks @exception.wrap_exception def reboot(self, instance): @@ -174,7 +170,6 @@ class LibvirtConnection(object): timer.start(interval=0.5, now=True) yield d - @defer.inlineCallbacks @exception.wrap_exception def spawn(self, instance): @@ -205,7 +200,6 @@ class LibvirtConnection(object): timer.start(interval=0.5, now=True) yield local_d - @defer.inlineCallbacks def _create_image(self, instance, libvirt_xml): # syntactic nicety @@ -260,11 +254,9 @@ class LibvirtConnection(object): yield disk.partition( basepath('disk-raw'), basepath('disk'), bytes, execute=execute) - def basepath(self, instance, path=''): return os.path.abspath(os.path.join(instance.datamodel['basepath'], path)) - def toXml(self, instance): # TODO(termie): cache? 
logging.debug("Starting the toXML method") @@ -279,7 +271,6 @@ class LibvirtConnection(object): return libvirt_xml - def get_info(self, instance_id): virt_dom = self._conn.lookupByName(instance_id) (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info() @@ -289,7 +280,6 @@ class LibvirtConnection(object): 'num_cpu': num_cpu, 'cpu_time': cpu_time} - def get_disks(self, instance_id): """ Note that this function takes an instance ID, not an Instance, so @@ -332,7 +322,6 @@ class LibvirtConnection(object): return disks - def get_interfaces(self, instance_id): """ Note that this function takes an instance ID, not an Instance, so @@ -375,7 +364,6 @@ class LibvirtConnection(object): return interfaces - def block_stats(self, instance_id, disk): """ Note that this function takes an instance ID, not an Instance, so @@ -384,7 +372,6 @@ class LibvirtConnection(object): domain = self._conn.lookupByName(instance_id) return domain.blockStats(disk) - def interface_stats(self, instance_id, interface): """ Note that this function takes an instance ID, not an Instance, so diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index 9fe15644f..2f5994983 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -33,16 +33,29 @@ from nova.virt import images XenAPI = None + FLAGS = flags.FLAGS flags.DEFINE_string('xenapi_connection_url', None, - 'URL for connection to XenServer/Xen Cloud Platform. Required if connection_type=xenapi.') + 'URL for connection to XenServer/Xen Cloud Platform.' + ' Required if connection_type=xenapi.') flags.DEFINE_string('xenapi_connection_username', 'root', - 'Username for connection to XenServer/Xen Cloud Platform. Used only if connection_type=xenapi.') + 'Username for connection to XenServer/Xen Cloud Platform.' + ' Used only if connection_type=xenapi.') flags.DEFINE_string('xenapi_connection_password', None, - 'Password for connection to XenServer/Xen Cloud Platform. 
Used only if connection_type=xenapi.') + 'Password for connection to XenServer/Xen Cloud Platform.' + ' Used only if connection_type=xenapi.') + + +XENAPI_POWER_STATE = { + 'Halted' : power_state.SHUTDOWN, + 'Running' : power_state.RUNNING, + 'Paused' : power_state.PAUSED, + 'Suspended': power_state.SHUTDOWN, # FIXME + 'Crashed' : power_state.CRASHED +} def get_connection(_): @@ -62,7 +75,6 @@ def get_connection(_): class XenAPIConnection(object): - def __init__(self, url, user, pw): self._conn = XenAPI.Session(url) self._conn.login_with_password(user, pw) @@ -107,7 +119,6 @@ class XenAPIConnection(object): yield self._create_vif(vm_ref, network_ref, mac_address) yield self._conn.xenapi.VM.start(vm_ref, False, False) - def create_vm(self, instance, kernel, ramdisk): mem = str(long(instance.datamodel['memory_kb']) * 1024) vcpus = str(instance.datamodel['vcpus']) @@ -145,7 +156,6 @@ class XenAPIConnection(object): logging.debug('Created VM %s as %s.', instance.name, vm_ref) return vm_ref - def create_vbd(self, vm_ref, vdi_ref, userdevice, bootable): vbd_rec = {} vbd_rec['VM'] = vm_ref @@ -166,7 +176,6 @@ class XenAPIConnection(object): vdi_ref) return vbd_ref - def _create_vif(self, vm_ref, network_ref, mac_address): vif_rec = {} vif_rec['device'] = '0' @@ -184,7 +193,6 @@ class XenAPIConnection(object): vm_ref, network_ref) return vif_ref - def _find_network_with_bridge(self, bridge): expr = 'field "bridge" = "%s"' % bridge networks = self._conn.xenapi.network.get_all_records_where(expr) @@ -195,7 +203,6 @@ class XenAPIConnection(object): else: raise Exception('Found no network for bridge %s' % bridge) - def fetch_image(self, image, user, project, use_sr): """use_sr: True to put the image as a VDI in an SR, False to place it on dom0's filesystem. 
The former is for VM disks, the latter for @@ -213,7 +220,6 @@ class XenAPIConnection(object): args['add_partition'] = 'true' return self._call_plugin('objectstore', fn, args) - def reboot(self, instance): vm = self.lookup(instance.name) if vm is None: @@ -231,7 +237,7 @@ class XenAPIConnection(object): if vm is None: raise Exception('instance not present %s' % instance_id) rec = self._conn.xenapi.VM.get_record(vm) - return {'state': power_state_from_xenapi[rec['power_state']], + return {'state': XENAPI_POWER_STATE[rec['power_state']], 'max_mem': long(rec['memory_static_max']) >> 10, 'mem': long(rec['memory_dynamic_max']) >> 10, 'num_cpu': rec['VCPUs_max'], @@ -247,26 +253,15 @@ class XenAPIConnection(object): else: return vms[0] - def _call_plugin(self, plugin, fn, args): return _unwrap_plugin_exceptions( self._conn.xenapi.host.call_plugin, self._get_xenapi_host(), plugin, fn, args) - def _get_xenapi_host(self): return self._conn.xenapi.session.get_this_host(self._conn.handle) -power_state_from_xenapi = { - 'Halted' : power_state.SHUTDOWN, - 'Running' : power_state.RUNNING, - 'Paused' : power_state.PAUSED, - 'Suspended': power_state.SHUTDOWN, # FIXME - 'Crashed' : power_state.CRASHED -} - - def _unwrap_plugin_exceptions(func, *args, **kwargs): try: return func(*args, **kwargs) diff --git a/nova/volume/service.py b/nova/volume/service.py index 66163a812..104bafe90 100644 --- a/nova/volume/service.py +++ b/nova/volume/service.py @@ -65,6 +65,7 @@ flags.DEFINE_boolean('fake_storage', False, class NoMoreBlades(exception.Error): pass + def get_volume(volume_id): """ Returns a redis-backed volume object """ volume_class = Volume @@ -75,6 +76,7 @@ def get_volume(volume_id): return vol raise exception.Error("Volume does not exist") + class VolumeService(service.Service): """ There is one VolumeNode running on each host. 
@@ -142,6 +144,7 @@ class VolumeService(service.Service): "sudo vgcreate %s %s" % (FLAGS.volume_group, FLAGS.storage_dev)) + class Volume(datastore.BasicModel): def __init__(self, volume_id=None): @@ -297,7 +300,6 @@ class Volume(datastore.BasicModel): self['blade_id']), error_ok=1) - class FakeVolume(Volume): def _create_lv(self): pass diff --git a/run_tests.py b/run_tests.py index d90ac8175..77aa9088a 100644 --- a/run_tests.py +++ b/run_tests.py @@ -38,11 +38,11 @@ Due to our use of multiprocessing it we frequently get some ignorable 'Interrupted system call' exceptions after test completion. """ + import __main__ import os import sys - from twisted.scripts import trial as trial_script from nova import datastore @@ -65,13 +65,12 @@ from nova.tests.volume_unittest import * FLAGS = flags.FLAGS - flags.DEFINE_bool('flush_db', True, 'Flush the database before running fake tests') - flags.DEFINE_string('tests_stderr', 'run_tests.err.log', - 'Path to where to pipe STDERR during test runs. ' - 'Default = "run_tests.err.log"') + 'Path to where to pipe STDERR during test runs.' 
+ ' Default = "run_tests.err.log"') + if __name__ == '__main__': OptionsClass = twistd.WrapTwistedOptions(trial_script.Options) -- cgit From 5c4a806c852a1c7180bc1c7e2ea8f065198e36d2 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Mon, 16 Aug 2010 10:57:42 -0400 Subject: PEP8 and name corrections --- bin/nova-rsapi | 2 +- nova/endpoint/aws/__init__.py | 4 ++-- nova/endpoint/rackspace/__init__.py | 10 +++++----- nova/endpoint/rackspace/controllers/base.py | 4 ++-- nova/wsgi.py | 4 ++-- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/bin/nova-rsapi b/bin/nova-rsapi index 3fc61860e..a35936eff 100755 --- a/bin/nova-rsapi +++ b/bin/nova-rsapi @@ -32,4 +32,4 @@ flags.DEFINE_integer('cc_port', 8773, 'cloud controller port') if __name__ == '__main__': utils.default_flagfile() - wsgi.run_server(nova.endpoint.ApiVersionRouter(), FLAGS.cc_port) + wsgi.run_server(nova.endpoint.APIVersionRouter(), FLAGS.cc_port) diff --git a/nova/endpoint/aws/__init__.py b/nova/endpoint/aws/__init__.py index f49270a30..4507cae62 100644 --- a/nova/endpoint/aws/__init__.py +++ b/nova/endpoint/aws/__init__.py @@ -4,7 +4,7 @@ import webob.dec from nova import wsgi # TODO(gundlach): temp -class Api(wsgi.Router): +class API(wsgi.Router): """WSGI entry point for all AWS API requests.""" def __init__(self): @@ -14,7 +14,7 @@ class Api(wsgi.Router): targets = {"dummy": self.dummy } - super(Api, self).__init__(mapper, targets) + super(API, self).__init__(mapper, targets) @webob.dec.wsgify def dummy(self, req): diff --git a/nova/endpoint/rackspace/__init__.py b/nova/endpoint/rackspace/__init__.py index f14f6218c..162b35caa 100644 --- a/nova/endpoint/rackspace/__init__.py +++ b/nova/endpoint/rackspace/__init__.py @@ -37,12 +37,12 @@ FLAGS = flags.FLAGS flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') -class Api(wsgi.Middleware): +class API(wsgi.Middleware): """WSGI entry point for all Rackspace API requests.""" def __init__(self): - app = 
AuthMiddleware(ApiRouter()) - super(Api, self).__init__(app) + app = AuthMiddleware(APIRouter()) + super(API, self).__init__(app) class AuthMiddleware(wsgi.Middleware): @@ -66,7 +66,7 @@ class AuthMiddleware(wsgi.Middleware): return self.application -class ApiRouter(wsgi.Router): +class APIRouter(wsgi.Router): """ Routes requests on the Rackspace API to the appropriate controller and method. @@ -87,4 +87,4 @@ class ApiRouter(wsgi.Router): 'sharedipgroups': controllers.SharedIpGroupsController() } - super(ApiRouter, self).__init__(mapper, targets) + super(APIRouter, self).__init__(mapper, targets) diff --git a/nova/endpoint/rackspace/controllers/base.py b/nova/endpoint/rackspace/controllers/base.py index 3ada53fd4..8cd44f62e 100644 --- a/nova/endpoint/rackspace/controllers/base.py +++ b/nova/endpoint/rackspace/controllers/base.py @@ -1,6 +1,6 @@ -from nova.wsgi import WSGIController +from nova import wsgi -class BaseController(WSGIController): +class BaseController(wsgi.Controller): @classmethod def render(cls, instance): if isinstance(instance, list): diff --git a/nova/wsgi.py b/nova/wsgi.py index 0570e1829..52e155101 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -146,7 +146,7 @@ class Router(object): Each route in `mapper` must specify a 'controller' string, which is a key into the 'targets' dictionary whose value is a WSGI app to - run. If routing to a WSGIController, you'll want to specify + run. If routing to a wsgi.Controller, you'll want to specify 'action' as well so the controller knows what method to call on itself. @@ -195,7 +195,7 @@ class Router(object): return app -class WSGIController(object): +class Controller(object): """ WSGI app that reads routing information supplied by RoutesMiddleware and calls the requested action method on itself. 
-- cgit From f78a8936b1a401f07fc0a09d4bd150d2793e436e Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Mon, 16 Aug 2010 13:22:41 -0400 Subject: All controller actions receive a 'req' parameter containing the webob Request. --- nova/endpoint/__init__.py | 10 +++--- nova/endpoint/aws/__init__.py | 6 ++-- nova/endpoint/rackspace/__init__.py | 23 ++++++------ nova/endpoint/rackspace/controllers/servers.py | 2 +- nova/wsgi.py | 48 ++++++++++++-------------- 5 files changed, 41 insertions(+), 48 deletions(-) diff --git a/nova/endpoint/__init__.py b/nova/endpoint/__init__.py index 065f45848..9aae933af 100644 --- a/nova/endpoint/__init__.py +++ b/nova/endpoint/__init__.py @@ -36,16 +36,16 @@ import routes from nova.endpoint import rackspace from nova.endpoint import aws -class ApiVersionRouter(wsgi.Router): +class APIVersionRouter(wsgi.Router): """Routes top-level requests to the appropriate API.""" def __init__(self): mapper = routes.Mapper() - mapper.connect(None, "/v1.0/{path_info:.*}", controller="rs") - mapper.connect(None, "/ec2/{path_info:.*}", controller="ec2") + rsapi = rackspace.API() + mapper.connect(None, "/v1.0/{path_info:.*}", controller=rsapi) - targets = {"rs": rackspace.Api(), "ec2": aws.Api()} + mapper.connect(None, "/ec2/{path_info:.*}", controller=aws.API()) - super(ApiVersionRouter, self).__init__(mapper, targets) + super(APIVersionRouter, self).__init__(mapper) diff --git a/nova/endpoint/aws/__init__.py b/nova/endpoint/aws/__init__.py index 4507cae62..55cbb8fd3 100644 --- a/nova/endpoint/aws/__init__.py +++ b/nova/endpoint/aws/__init__.py @@ -10,11 +10,9 @@ class API(wsgi.Router): def __init__(self): mapper = routes.Mapper() - mapper.connect(None, "{all:.*}", controller="dummy") + mapper.connect(None, "{all:.*}", controller=self.dummy) - targets = {"dummy": self.dummy } - - super(API, self).__init__(mapper, targets) + super(API, self).__init__(mapper) @webob.dec.wsgify def dummy(self, req): diff --git a/nova/endpoint/rackspace/__init__.py 
b/nova/endpoint/rackspace/__init__.py index 162b35caa..78b9c9429 100644 --- a/nova/endpoint/rackspace/__init__.py +++ b/nova/endpoint/rackspace/__init__.py @@ -75,16 +75,13 @@ class APIRouter(wsgi.Router): def __init__(self): mapper = routes.Mapper() - mapper.resource("server", "servers") - mapper.resource("image", "images") - mapper.resource("flavor", "flavors") - mapper.resource("sharedipgroup", "sharedipgroups") - - targets = { - 'servers': controllers.ServersController(), - 'images': controllers.ImagesController(), - 'flavors': controllers.FlavorsController(), - 'sharedipgroups': controllers.SharedIpGroupsController() - } - - super(APIRouter, self).__init__(mapper, targets) + mapper.resource("server", "servers", + controller=controllers.ServersController()) + mapper.resource("image", "images", + controller=controllers.ImagesController()) + mapper.resource("flavor", "flavors", + controller=controllers.FlavorsController()) + mapper.resource("sharedipgroup", "sharedipgroups", + controller=controllers.SharedIpGroupsController()) + + super(APIRouter, self).__init__(mapper) diff --git a/nova/endpoint/rackspace/controllers/servers.py b/nova/endpoint/rackspace/controllers/servers.py index db02e058d..2f8e662d6 100644 --- a/nova/endpoint/rackspace/controllers/servers.py +++ b/nova/endpoint/rackspace/controllers/servers.py @@ -5,7 +5,7 @@ from nova.endpoint.rackspace.controllers.base import BaseController class ServersController(BaseController): entity_name = 'servers' - def index(cls): + def index(self, **kwargs): return [instance_details(inst) for inst in compute.InstanceDirectory().all] def show(self, **kwargs): diff --git a/nova/wsgi.py b/nova/wsgi.py index 52e155101..a0a175dc7 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -140,34 +140,31 @@ class Router(object): WSGI middleware that maps incoming requests to WSGI apps. """ - def __init__(self, mapper, targets): + def __init__(self, mapper): """ Create a router for the given routes.Mapper. 
- Each route in `mapper` must specify a 'controller' string, which is - a key into the 'targets' dictionary whose value is a WSGI app to - run. If routing to a wsgi.Controller, you'll want to specify - 'action' as well so the controller knows what method to call on - itself. + Each route in `mapper` must specify a 'controller', which is a + WSGI app to call. You'll probably want to specify an 'action' as + well and have your controller be a wsgi.Controller, who will route + the request to the action method. Examples: mapper = routes.Mapper() - targets = { "servers": ServerController(), "blog": BlogWsgiApp() } + sc = ServerController() # Explicit mapping of one route to a controller+action - mapper.connect(None, "/svrlist", controller="servers", action="list") + mapper.connect(None, "/svrlist", controller=sc, action="list") - # Controller string is implicitly equal to 2nd param here, and - # actions are all implicitly defined - mapper.resource("server", "servers") + # Actions are all implicitly defined + mapper.resource("server", "servers", controller=sc) # Pointing to an arbitrary WSGI app. You can specify the # {path_info:.*} parameter so the target app can be handed just that # section of the URL. - mapper.connect(None, "/v1.0/{path_info:.*}", controller="blog") + mapper.connect(None, "/v1.0/{path_info:.*}", controller=BlogApp()) """ self.map = mapper - self.targets = targets self._router = routes.middleware.RoutesMiddleware(self._dispatch, self.map) @@ -186,31 +183,32 @@ class Router(object): and putting the information into req.environ. Either returns 404 or the routed WSGI app's response. 
""" - if req.environ['routes.route'] is None: - return webob.exc.HTTPNotFound() match = req.environ['wsgiorg.routing_args'][1] - app_name = match['controller'] - - app = self.targets[app_name] + if not match: + return webob.exc.HTTPNotFound() + app = match['controller'] return app class Controller(object): """ WSGI app that reads routing information supplied by RoutesMiddleware - and calls the requested action method on itself. + and calls the requested action method upon itself. All action methods + must, in addition to their normal parameters, accept a 'req' argument + which is the incoming webob.Request. """ @webob.dec.wsgify def __call__(self, req): """ - Call the method on self specified in req.environ by RoutesMiddleware. + Call the method specified in req.environ by RoutesMiddleware. """ - routes_dict = req.environ['wsgiorg.routing_args'][1] - action = routes_dict['action'] + arg_dict = req.environ['wsgiorg.routing_args'][1] + action = arg_dict['action'] method = getattr(self, action) - del routes_dict['controller'] - del routes_dict['action'] - return method(**routes_dict) + del arg_dict['controller'] + del arg_dict['action'] + arg_dict['req'] = req + return method(**arg_dict) class Serializer(object): -- cgit From bf2549282067a7a824ea97e66a5b2f0ca06416bd Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> Date: Mon, 16 Aug 2010 13:56:29 -0400 Subject: Make individual disables for R0201 instead of file-level. --- nova/objectstore/handler.py | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py index cb38c89f2..a5eab9828 100644 --- a/nova/objectstore/handler.py +++ b/nova/objectstore/handler.py @@ -19,10 +19,6 @@ # License for the specific language governing permissions and limitations # under the License. -# Disabling pylint's R0201 (Method could be a function) because -# the API of render_GET, render_PUT, etc is Twisted's, not ours. 
-# pylint: disable-msg=R0201 - """ Implementation of an S3-like storage server based on local files. @@ -168,7 +164,7 @@ class S3(ErrorHandlingResource): else: return BucketResource(name) - def render_GET(self, request): + def render_GET(self, request): # pylint: disable-msg=R0201 """Renders the GET request for a list of buckets as XML""" logging.debug('List of buckets requested') buckets = [b for b in bucket.Bucket.all() \ @@ -325,7 +321,7 @@ class ImagesResource(Resource): else: return ImageResource(name) - def render_GET(self, request): + def render_GET(self, request): # pylint: disable-msg=R0201 """ returns a json listing of all images that a user has permissions to see """ @@ -336,7 +332,7 @@ class ImagesResource(Resource): request.finish() return server.NOT_DONE_YET - def render_PUT(self, request): + def render_PUT(self, request): # pylint: disable-msg=R0201 """ create a new registered image """ image_id = get_argument(request, 'image_id', u'') @@ -357,8 +353,8 @@ class ImagesResource(Resource): p.start() return '' - def render_POST(self, request): - """ update image attributes: public/private """ + def render_POST(self, request): # pylint: disable-msg=R0201 + """Update image attributes: public/private""" image_id = get_argument(request, 'image_id', u'') operation = get_argument(request, 'operation', u'') @@ -372,8 +368,8 @@ class ImagesResource(Resource): return '' - def render_DELETE(self, request): - """ delete a registered image """ + def render_DELETE(self, request): # pylint: disable-msg=R0201 + """Delete a registered image""" image_id = get_argument(request, "image_id", u"") image_object = image.Image(image_id) -- cgit From 31c08591793311606551bf0e6bfc14b155b491a6 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Tue, 17 Aug 2010 16:46:19 +0200 Subject: Use the argument handler specified by twistd, if any. 
--- nova/flags.py | 3 +++ nova/server.py | 6 +++++- nova/twistd.py | 12 +++++++++++- 3 files changed, 19 insertions(+), 2 deletions(-) diff --git a/nova/flags.py b/nova/flags.py index e3feb252d..e0181102e 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -141,6 +141,7 @@ def _wrapper(func): return _wrapped +DEFINE = _wrapper(gflags.DEFINE) DEFINE_string = _wrapper(gflags.DEFINE_string) DEFINE_integer = _wrapper(gflags.DEFINE_integer) DEFINE_bool = _wrapper(gflags.DEFINE_bool) @@ -152,6 +153,8 @@ DEFINE_spaceseplist = _wrapper(gflags.DEFINE_spaceseplist) DEFINE_multistring = _wrapper(gflags.DEFINE_multistring) DEFINE_multi_int = _wrapper(gflags.DEFINE_multi_int) +ArgumentSerializer = gflags.ArgumentSerializer + def DECLARE(name, module_string, flag_values=FLAGS): if module_string not in sys.modules: diff --git a/nova/server.py b/nova/server.py index 96550f078..c6b60e090 100644 --- a/nova/server.py +++ b/nova/server.py @@ -44,6 +44,8 @@ flags.DEFINE_bool('use_syslog', True, 'output to syslog when daemonizing') flags.DEFINE_string('logfile', None, 'log file to output to') flags.DEFINE_string('pidfile', None, 'pid file to output to') flags.DEFINE_string('working_directory', './', 'working directory...') +flags.DEFINE_integer('uid', os.getuid(), 'uid under which to run') +flags.DEFINE_integer('gid', os.getgid(), 'gid under which to run') def stop(pidfile): @@ -135,6 +137,8 @@ def daemonize(args, name, main): threaded=False), stdin=stdin, stdout=stdout, - stderr=stderr + stderr=stderr, + uid=FLAGS.uid, + gid=FLAGS.gid ): main(args) diff --git a/nova/twistd.py b/nova/twistd.py index 8de322aa5..a72cc85e6 100644 --- a/nova/twistd.py +++ b/nova/twistd.py @@ -48,6 +48,13 @@ class TwistdServerOptions(ServerOptions): def parseArgs(self, *args): return +class FlagParser(object): + def __init__(self, parser): + self.parser = parser + + def Parse(self, s): + return self.parser(s) + def WrapTwistedOptions(wrapped): class TwistedOptionsToFlags(wrapped): @@ -79,7 +86,10 @@ def 
WrapTwistedOptions(wrapped): reflect.accumulateClassList(self.__class__, 'optParameters', twistd_params) for param in twistd_params: key = param[0].replace('-', '_') - flags.DEFINE_string(key, param[2], str(param[-1])) + if len(param) > 4: + flags.DEFINE(FlagParser(param[4]), key, param[2], str(param[3]), serializer=flags.ArgumentSerializer()) + else: + flags.DEFINE_string(key, param[2], str(param[3])) def _absorbHandlers(self): twistd_handlers = {} -- cgit From 9878a6b8b4691e206dc5d35c39313880db34f229 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Tue, 17 Aug 2010 11:03:15 -0400 Subject: Simpler installation, and, can run install_venv from anywhere instead of just from checkout root --- tools/install_venv.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/tools/install_venv.py b/tools/install_venv.py index e1a270638..4e775eb33 100644 --- a/tools/install_venv.py +++ b/tools/install_venv.py @@ -38,15 +38,16 @@ def die(message, *args): def run_command(cmd, redirect_output=True, error_ok=False): - """Runs a command in an out-of-process shell, returning the - output of that command + """ + Runs a command in an out-of-process shell, returning the + output of that command. Working directory is ROOT. """ if redirect_output: stdout = subprocess.PIPE else: stdout = None - proc = subprocess.Popen(cmd, stdout=stdout) + proc = subprocess.Popen(cmd, cwd=ROOT, stdout=stdout) output = proc.communicate()[0] if not error_ok and proc.returncode != 0: die('Command "%s" failed.\n%s', ' '.join(cmd), output) @@ -94,6 +95,12 @@ def install_dependencies(venv=VENV): redirect_output=False) + # Tell the virtual env how to "import nova" + pathfile=os.path.join(venv, "lib", "python2.6", "site-packages", "nova.pth") + f=open(pathfile, 'w') + f.write("%s\n" % ROOT) + + def print_help(): help = """ Nova development environment setup is complete. 
-- cgit From f92851ba8ffcb530f6f3c4ea354dd89d29146f6c Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Tue, 17 Aug 2010 13:03:38 -0400 Subject: Remove duplicate definition of flag --- nova/endpoint/rackspace/__init__.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/nova/endpoint/rackspace/__init__.py b/nova/endpoint/rackspace/__init__.py index 78b9c9429..ac53ee10b 100644 --- a/nova/endpoint/rackspace/__init__.py +++ b/nova/endpoint/rackspace/__init__.py @@ -33,10 +33,6 @@ from nova.auth import manager from nova.endpoint.rackspace import controllers -FLAGS = flags.FLAGS -flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') - - class API(wsgi.Middleware): """WSGI entry point for all Rackspace API requests.""" -- cgit From e8be36d7a7be2ebbf5493766ce909d7913bf61e0 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Tue, 17 Aug 2010 13:23:20 -0400 Subject: Move eventlet-using class out of endpoint/__init__.py into its own submodule, so that twisted-related code using endpoint.[other stuff] wouldn't run eventlet and make unit tests throw crazy errors about eventlet 0.9.10 not playing nicely with twisted. --- bin/nova-rsapi | 5 ++--- nova/endpoint/__init__.py | 51 ----------------------------------------------- nova/endpoint/newapi.py | 51 +++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 53 insertions(+), 54 deletions(-) create mode 100644 nova/endpoint/newapi.py diff --git a/bin/nova-rsapi b/bin/nova-rsapi index a35936eff..e2722422e 100755 --- a/bin/nova-rsapi +++ b/bin/nova-rsapi @@ -21,15 +21,14 @@ Daemon for the Rackspace API endpoint. 
""" -import nova.endpoint - from nova import flags from nova import utils from nova import wsgi +from nova.endpoint import newapi FLAGS = flags.FLAGS flags.DEFINE_integer('cc_port', 8773, 'cloud controller port') if __name__ == '__main__': utils.default_flagfile() - wsgi.run_server(nova.endpoint.APIVersionRouter(), FLAGS.cc_port) + wsgi.run_server(newapi.APIVersionRouter(), FLAGS.cc_port) diff --git a/nova/endpoint/__init__.py b/nova/endpoint/__init__.py index 9aae933af..e69de29bb 100644 --- a/nova/endpoint/__init__.py +++ b/nova/endpoint/__init__.py @@ -1,51 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -:mod:`nova.endpoint` -- Main NOVA Api endpoints -===================================================== - -.. automodule:: nova.endpoint - :platform: Unix - :synopsis: REST APIs for all nova functions -.. moduleauthor:: Jesse Andrews -.. moduleauthor:: Devin Carlen -.. moduleauthor:: Vishvananda Ishaya -.. moduleauthor:: Joshua McKenty -.. moduleauthor:: Manish Singh -.. 
moduleauthor:: Andy Smith -""" - -from nova import wsgi -import routes -from nova.endpoint import rackspace -from nova.endpoint import aws - -class APIVersionRouter(wsgi.Router): - """Routes top-level requests to the appropriate API.""" - - def __init__(self): - mapper = routes.Mapper() - - rsapi = rackspace.API() - mapper.connect(None, "/v1.0/{path_info:.*}", controller=rsapi) - - mapper.connect(None, "/ec2/{path_info:.*}", controller=aws.API()) - - super(APIVersionRouter, self).__init__(mapper) - diff --git a/nova/endpoint/newapi.py b/nova/endpoint/newapi.py new file mode 100644 index 000000000..9aae933af --- /dev/null +++ b/nova/endpoint/newapi.py @@ -0,0 +1,51 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +:mod:`nova.endpoint` -- Main NOVA Api endpoints +===================================================== + +.. automodule:: nova.endpoint + :platform: Unix + :synopsis: REST APIs for all nova functions +.. moduleauthor:: Jesse Andrews +.. moduleauthor:: Devin Carlen +.. moduleauthor:: Vishvananda Ishaya +.. moduleauthor:: Joshua McKenty +.. moduleauthor:: Manish Singh +.. 
moduleauthor:: Andy Smith +""" + +from nova import wsgi +import routes +from nova.endpoint import rackspace +from nova.endpoint import aws + +class APIVersionRouter(wsgi.Router): + """Routes top-level requests to the appropriate API.""" + + def __init__(self): + mapper = routes.Mapper() + + rsapi = rackspace.API() + mapper.connect(None, "/v1.0/{path_info:.*}", controller=rsapi) + + mapper.connect(None, "/ec2/{path_info:.*}", controller=aws.API()) + + super(APIVersionRouter, self).__init__(mapper) + -- cgit From 383764fb36858f5f7f2b36ca283563d2581dabdb Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 17 Aug 2010 13:00:46 -0700 Subject: clean up linux_net --- nova/network/linux_net.py | 243 +++++++++++++++++++++++++++------------------- nova/network/service.py | 22 +++-- 2 files changed, 155 insertions(+), 110 deletions(-) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 15050adaf..e6bb80bb8 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -24,6 +24,7 @@ import os # todo(ja): does the definition of network_path belong here? 
from nova import flags +from nova import models from nova import utils FLAGS = flags.FLAGS @@ -32,102 +33,96 @@ flags.DEFINE_string('dhcpbridge_flagfile', '/etc/nova/nova-dhcpbridge.conf', 'location of flagfile for dhcpbridge') +flags.DEFINE_string('networks_path', utils.abspath('../networks'), + 'Location to keep network config files') +flags.DEFINE_string('public_interface', 'vlan1', + 'Interface for public IP addresses') +flags.DEFINE_string('bridge_dev', 'eth0', + 'network device for bridges') -def execute(cmd, addl_env=None): - """Wrapper around utils.execute for fake_network""" - if FLAGS.fake_network: - logging.debug("FAKE NET: %s", cmd) - return "fake", 0 - else: - return utils.execute(cmd, addl_env=addl_env) - - -def runthis(desc, cmd): - """Wrapper around utils.runthis for fake_network""" - if FLAGS.fake_network: - return execute(cmd) - else: - return utils.runthis(desc, cmd) - - -def device_exists(device): - """Check if ethernet device exists""" - (_out, err) = execute("ifconfig %s" % device) - return not err - - -def confirm_rule(cmd): - """Delete and re-add iptables rule""" - execute("sudo iptables --delete %s" % (cmd)) - execute("sudo iptables -I %s" % (cmd)) - - -def remove_rule(cmd): - """Remove iptables rule""" - execute("sudo iptables --delete %s" % (cmd)) - - -def bind_public_ip(public_ip, interface): - """Bind ip to an interface""" - runthis("Binding IP to interface: %s", - "sudo ip addr add %s dev %s" % (public_ip, interface)) +def bind_elastic_ip(elastic_ip): + """Bind ip to public interface""" + _execute("sudo ip addr add %s dev %s" % (elastic_ip, + FLAGS.public_interface)) -def unbind_public_ip(public_ip, interface): +def unbind_elastic_ip(elastic_ip): """Unbind a public ip from an interface""" - runthis("Binding IP to interface: %s", - "sudo ip addr del %s dev %s" % (public_ip, interface)) - - -def vlan_create(net): + _execute("sudo ip addr del %s dev %s" % (elastic_ip, + FLAGS.public_interface)) + + +def ensure_vlan_forward(public_ip, 
port, private_ip): + """Sets up forwarding rules for vlan""" + _confirm_rule("FORWARD -d %s -p udp --dport 1194 -j ACCEPT" % private_ip) + _confirm_rule( + "PREROUTING -t nat -d %s -p udp --dport %s -j DNAT --to %s:1194" + % (public_ip, port, private_ip)) + +DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)] + +def ensure_elastic_forward(elastic_ip, fixed_ip): + """Ensure elastic ip forwarding rule""" + _confirm_rule("PREROUTING -t nat -d %s -j DNAT --to %s" + % (elastic_ip, fixed_ip)) + _confirm_rule("POSTROUTING -t nat -s %s -j SNAT --to %s" + % (fixed_ip, elastic_ip)) + # TODO(joshua): Get these from the secgroup datastore entries + _confirm_rule("FORWARD -d %s -p icmp -j ACCEPT" + % (fixed_ip)) + for (protocol, port) in DEFAULT_PORTS: + _confirm_rule( + "FORWARD -d %s -p %s --dport %s -j ACCEPT" + % (fixed_ip, protocol, port)) + +def remove_elastic_forward(elastic_ip, fixed_ip): + """Remove forwarding for elastic ip""" + _remove_rule("PREROUTING -t nat -d %s -j DNAT --to %s" + % (elastic_ip, fixed_ip)) + _remove_rule("POSTROUTING -t nat -s %s -j SNAT --to %s" + % (fixed_ip, elastic_ip)) + _remove_rule("FORWARD -d %s -p icmp -j ACCEPT" + % (fixed_ip)) + for (protocol, port) in DEFAULT_PORTS: + _remove_rule("FORWARD -d %s -p %s --dport %s -j ACCEPT" + % (fixed_ip, protocol, port)) + +def vlan_create(vlan_num): """Create a vlan on on a bridge device unless vlan already exists""" - if not device_exists("vlan%s" % net['vlan']): - logging.debug("Starting VLAN inteface for %s network", (net['vlan'])) - execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD") - execute("sudo vconfig add %s %s" % (FLAGS.bridge_dev, net['vlan'])) - execute("sudo ifconfig vlan%s up" % (net['vlan'])) - - -def bridge_create(net): - """Create a bridge on a vlan unless it already exists""" - if not device_exists(net['bridge_name']): - logging.debug("Starting Bridge inteface for %s network", (net['vlan'])) - execute("sudo brctl addbr %s" % (net['bridge_name'])) - 
execute("sudo brctl setfd %s 0" % (net.bridge_name)) - # execute("sudo brctl setageing %s 10" % (net.bridge_name)) - execute("sudo brctl stp %s off" % (net['bridge_name'])) - execute("sudo brctl addif %s vlan%s" % (net['bridge_name'], - net['vlan'])) - if net.bridge_gets_ip: - execute("sudo ifconfig %s %s broadcast %s netmask %s up" % \ - (net['bridge_name'], net.gateway, net.broadcast, net.netmask)) - confirm_rule("FORWARD --in-interface %s -j ACCEPT" % - (net['bridge_name'])) + interface = "vlan%s" % vlan_num + if not _device_exists(interface): + logging.debug("Starting VLAN inteface %s", interface) + _execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD") + _execute("sudo vconfig add %s %s" % (FLAGS.bridge.dev, vlan_num)) + _execute("sudo ifconfig %s up" % interface) + return interface + + +def bridge_create(interface, bridge, network=None): + """Create a bridge on an bridge unless it already exists""" + if not _device_exists(bridge): + logging.debug("Starting Bridge inteface for %s", interface) + _execute("sudo brctl addbr %s" % bridge) + _execute("sudo brctl setfd %s 0" % bridge) + # _execute("sudo brctl setageing %s 10" % bridge) + _execute("sudo brctl stp %s off" % bridge) + _execute("sudo brctl addif %s %s" % (bridge, interface)) + if network: + _execute("sudo ifconfig %s %s broadcast %s netmask %s up" % \ + (bridge, + network.gateway, + network.broadcast, + network.netmask)) + _confirm_rule("FORWARD --in-bridge %s -j ACCEPT" % bridge) else: - execute("sudo ifconfig %s up" % net['bridge_name']) - - -def _dnsmasq_cmd(net): - """Builds dnsmasq command""" - cmd = ['sudo -E dnsmasq', - ' --strict-order', - ' --bind-interfaces', - ' --conf-file=', - ' --pid-file=%s' % dhcp_file(net['vlan'], 'pid'), - ' --listen-address=%s' % net.dhcp_listen_address, - ' --except-interface=lo', - ' --dhcp-range=%s,static,120s' % net.dhcp_range_start, - ' --dhcp-hostsfile=%s' % dhcp_file(net['vlan'], 'conf'), - ' --dhcp-script=%s' % bin_file('nova-dhcpbridge'), - ' 
--leasefile-ro'] - return ''.join(cmd) + _execute("sudo ifconfig %s up" % bridge) -def host_dhcp(address): - """Return a host string for an address object""" - return "%s,%s.novalocal,%s" % (address['mac'], - address['hostname'], - address.address) +def host_dhcp(fixed_ip): + """Return a host string for a fixed ip""" + return "%s,%s.novalocal,%s" % (fixed_ip.instance.mac_address, + fixed_ip.instance.host_name, + fixed_ip.ip_str) # TODO(ja): if the system has restarted or pid numbers have wrapped @@ -135,17 +130,21 @@ def host_dhcp(address): # dnsmasq. As well, sending a HUP only reloads the hostfile, # so any configuration options (like dchp-range, vlan, ...) # aren't reloaded -def start_dnsmasq(network): +def update_dhcp(network): """(Re)starts a dnsmasq server for a given network if a dnsmasq instance is already running then send a HUP signal causing it to reload, otherwise spawn a new instance """ - with open(dhcp_file(network['vlan'], 'conf'), 'w') as f: - for address in network.assigned_objs: - f.write("%s\n" % host_dhcp(address)) + # FIXME abstract this + session = models.NovaBase.get_session() + query = session.query(models.FixedIp).filter_by(allocated=True) + fixed_ips = query.filter_by(network_id=network.id) + with open(_dhcp_file(network['vlan'], 'conf'), 'w') as f: + for fixed_ip in fixed_ips: + f.write("%s\n" % host_dhcp(fixed_ip)) - pid = dnsmasq_pid_for(network) + pid = _dnsmasq_pid_for(network) # if dnsmasq is already running, then tell it to reload if pid: @@ -159,13 +158,55 @@ def start_dnsmasq(network): # FLAGFILE and DNSMASQ_INTERFACE in env env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile, - 'DNSMASQ_INTERFACE': network['bridge_name']} - execute(_dnsmasq_cmd(network), addl_env=env) + 'DNSMASQ_INTERFACE': network.bridge_name} + _execute(_dnsmasq_cmd(network), addl_env=env) + + +def _execute(cmd, addl_env=None): + """Wrapper around utils._execute for fake_network""" + if FLAGS.fake_network: + logging.debug("FAKE NET: %s", cmd) + return "fake", 0 + 
else: + return utils._execute(cmd, addl_env=addl_env) + + +def _device_exists(device): + """Check if ethernet device exists""" + (_out, err) = _execute("ifconfig %s" % device) + return not err + + +def _confirm_rule(cmd): + """Delete and re-add iptables rule""" + _execute("sudo iptables --delete %s" % (cmd)) + _execute("sudo iptables -I %s" % (cmd)) + + +def _remove_rule(cmd): + """Remove iptables rule""" + _execute("sudo iptables --delete %s" % (cmd)) + + +def _dnsmasq_cmd(net): + """Builds dnsmasq command""" + cmd = ['sudo -E dnsmasq', + ' --strict-order', + ' --bind-interfaces', + ' --conf-file=', + ' --pid-file=%s' % _dhcp_file(net['vlan'], 'pid'), + ' --listen-address=%s' % net.dhcp_listen_address, + ' --except-interface=lo', + ' --dhcp-range=%s,static,120s' % net.dhcp_range_start, + ' --dhcp-hostsfile=%s' % _dhcp_file(net['vlan'], 'conf'), + ' --dhcp-script=%s' % _bin_file('nova-dhcpbridge'), + ' --leasefile-ro'] + return ''.join(cmd) -def stop_dnsmasq(network): +def _stop_dnsmasq(network): """Stops the dnsmasq instance for a given network""" - pid = dnsmasq_pid_for(network) + pid = _dnsmasq_pid_for(network) if pid: try: @@ -174,18 +215,18 @@ def stop_dnsmasq(network): logging.debug("Killing dnsmasq threw %s", exc) -def dhcp_file(vlan, kind): +def _dhcp_file(vlan, kind): """Return path to a pid, leases or conf file for a vlan""" return os.path.abspath("%s/nova-%s.%s" % (FLAGS.networks_path, vlan, kind)) -def bin_file(script): +def _bin_file(script): """Return the absolute path to scipt in the bin directory""" return os.path.abspath(os.path.join(__file__, "../../../bin", script)) -def dnsmasq_pid_for(network): +def _dnsmasq_pid_for(network): """Returns he pid for prior dnsmasq instance for a vlan Returns None if no pid file exists @@ -193,7 +234,7 @@ def dnsmasq_pid_for(network): If machine has rebooted pid might be incorrect (caller should check) """ - pid_file = dhcp_file(network['vlan'], 'pid') + pid_file = _dhcp_file(network.vlan, 'pid') if 
os.path.exists(pid_file): with open(pid_file, 'r') as f: diff --git a/nova/network/service.py b/nova/network/service.py index 6ff338353..309ce874d 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -52,6 +52,14 @@ flags.DEFINE_string('flat_network_broadcast', '192.168.0.255', flags.DEFINE_string('flat_network_dns', '8.8.4.4', 'Dns for simple network') +flags.DEFINE_integer('vlan_start', 100, 'First VLAN for private networks') +flags.DEFINE_integer('vlan_end', 4093, 'Last VLAN for private networks') +flags.DEFINE_integer('network_size', 256, + 'Number of addresses in each private subnet') +flags.DEFINE_string('public_range', '4.4.4.0/24', 'Public IP address block') +flags.DEFINE_string('private_range', '10.0.0.0/8', 'Private IP address block') +flags.DEFINE_integer('cnt_vpn_clients', 5, + 'Number of addresses reserved for vpn clients') def type_to_class(network_type): """Convert a network_type string into an actual Python class""" @@ -74,11 +82,6 @@ def get_host_for_project(project_id): return redis.get(_host_key(project_id)) -def _host_key(project_id): - """Returns redis host key for network""" - return "networkhost:%s" % project_id - - class BaseNetworkService(service.Service): """Implements common network service functionality @@ -187,10 +190,11 @@ class FlatNetworkService(BaseNetworkService): class VlanNetworkService(BaseNetworkService): """Vlan network with dhcp""" - # NOTE(vish): A lot of the interactions with network/model.py can be - # simplified and improved. Also there it may be useful - # to support vlans separately from dhcp, instead of having - # both of them together in this class. + def __init__(self, *args, **kwargs): + super(VlanNetworkService, self).__init__(*args, **kwargs) + # TODO(vish): some better type of dependency injection? 
+ self.driver = linux_net + # pylint: disable=W0221 def allocate_fixed_ip(self, user_id, -- cgit From 8a8a1400426ca5355fa778ee34edc7b72ae74566 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 17 Aug 2010 13:02:11 -0700 Subject: start with model code --- nova/models.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/nova/models.py b/nova/models.py index 561a722fc..e4075faeb 100644 --- a/nova/models.py +++ b/nova/models.py @@ -193,6 +193,18 @@ class Volume(Base, NovaBase): attach_status = Column(String) # FIXME enum delete_on_termination = Column(Boolean) +class Network(Base, NovaBase): + __tablename__ = 'networks' + +class FixedIp(Base, NovaBase): + __tablename__ = 'fixed_ips' + +class ElasticIp(Base, NovaBase): + __tablename__ = 'elastic_ips' + +class Vpn(Base, NovaBase): + __tablename__ = 'vpns' + def create_session(engine=None): return NovaBase.get_session() -- cgit From 200daa3e5d5571add6c2937cf847641d065e87b8 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Wed, 18 Aug 2010 00:05:06 +0200 Subject: Stylistic improvements. --- nova/flags.py | 2 -- nova/twistd.py | 6 +++++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/nova/flags.py b/nova/flags.py index e0181102e..6f9f906dd 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -153,8 +153,6 @@ DEFINE_spaceseplist = _wrapper(gflags.DEFINE_spaceseplist) DEFINE_multistring = _wrapper(gflags.DEFINE_multistring) DEFINE_multi_int = _wrapper(gflags.DEFINE_multi_int) -ArgumentSerializer = gflags.ArgumentSerializer - def DECLARE(name, module_string, flag_values=FLAGS): if module_string not in sys.modules: diff --git a/nova/twistd.py b/nova/twistd.py index a72cc85e6..9511c231c 100644 --- a/nova/twistd.py +++ b/nova/twistd.py @@ -21,6 +21,7 @@ Twisted daemon helpers, specifically to parse out gFlags from twisted flags, manage pid files and support syslogging. 
""" +import gflags import logging import os import signal @@ -48,6 +49,7 @@ class TwistdServerOptions(ServerOptions): def parseArgs(self, *args): return + class FlagParser(object): def __init__(self, parser): self.parser = parser @@ -87,7 +89,9 @@ def WrapTwistedOptions(wrapped): for param in twistd_params: key = param[0].replace('-', '_') if len(param) > 4: - flags.DEFINE(FlagParser(param[4]), key, param[2], str(param[3]), serializer=flags.ArgumentSerializer()) + flags.DEFINE(FlagParser(param[4]), + key, param[2], str(param[3]), + serializer=gflags.ArgumentSerializer()) else: flags.DEFINE_string(key, param[2], str(param[3])) -- cgit From 1e403e56dc1147ce3feea1b8931948bc35f23a44 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Tue, 17 Aug 2010 16:43:37 -0700 Subject: In an effort to keep new and old API code separate, I've created a nova.api to put all new API code under. This means nova.endpoint only contains the old Tornado implementation. I also cleaned up a few pep8 and other style nits in the new API code. 
--- bin/nova-api-new | 34 +++++++++ bin/nova-rsapi | 34 --------- nova/api/__init__.py | 38 ++++++++++ nova/api/ec2/__init__.py | 42 +++++++++++ nova/api/rackspace/__init__.py | 81 +++++++++++++++++++++ nova/api/rackspace/controllers/__init__.py | 0 nova/api/rackspace/controllers/base.py | 30 ++++++++ nova/api/rackspace/controllers/flavors.py | 18 +++++ nova/api/rackspace/controllers/images.py | 18 +++++ nova/api/rackspace/controllers/servers.py | 83 ++++++++++++++++++++++ nova/api/rackspace/controllers/sharedipgroups.py | 18 +++++ nova/endpoint/aws/__init__.py | 22 ------ nova/endpoint/newapi.py | 51 ------------- nova/endpoint/rackspace/__init__.py | 83 ---------------------- nova/endpoint/rackspace/controllers/__init__.py | 5 -- nova/endpoint/rackspace/controllers/base.py | 9 --- nova/endpoint/rackspace/controllers/flavors.py | 1 - nova/endpoint/rackspace/controllers/images.py | 1 - nova/endpoint/rackspace/controllers/servers.py | 63 ---------------- .../rackspace/controllers/sharedipgroups.py | 1 - 20 files changed, 362 insertions(+), 270 deletions(-) create mode 100755 bin/nova-api-new delete mode 100755 bin/nova-rsapi create mode 100644 nova/api/__init__.py create mode 100644 nova/api/ec2/__init__.py create mode 100644 nova/api/rackspace/__init__.py create mode 100644 nova/api/rackspace/controllers/__init__.py create mode 100644 nova/api/rackspace/controllers/base.py create mode 100644 nova/api/rackspace/controllers/flavors.py create mode 100644 nova/api/rackspace/controllers/images.py create mode 100644 nova/api/rackspace/controllers/servers.py create mode 100644 nova/api/rackspace/controllers/sharedipgroups.py delete mode 100644 nova/endpoint/aws/__init__.py delete mode 100644 nova/endpoint/newapi.py delete mode 100644 nova/endpoint/rackspace/__init__.py delete mode 100644 nova/endpoint/rackspace/controllers/__init__.py delete mode 100644 nova/endpoint/rackspace/controllers/base.py delete mode 100644 nova/endpoint/rackspace/controllers/flavors.py delete 
mode 100644 nova/endpoint/rackspace/controllers/images.py delete mode 100644 nova/endpoint/rackspace/controllers/servers.py delete mode 100644 nova/endpoint/rackspace/controllers/sharedipgroups.py diff --git a/bin/nova-api-new b/bin/nova-api-new new file mode 100755 index 000000000..fda42339c --- /dev/null +++ b/bin/nova-api-new @@ -0,0 +1,34 @@ +#!/usr/bin/env python +# pylint: disable-msg=C0103 +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Nova API daemon. +""" + +from nova import api +from nova import flags +from nova import utils +from nova import wsgi + +FLAGS = flags.FLAGS +flags.DEFINE_integer('api_port', 8773, 'API port') + +if __name__ == '__main__': + utils.default_flagfile() + wsgi.run_server(api.API(), FLAGS.api_port) diff --git a/bin/nova-rsapi b/bin/nova-rsapi deleted file mode 100755 index e2722422e..000000000 --- a/bin/nova-rsapi +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env python -# pylint: disable-msg=C0103 -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" - Daemon for the Rackspace API endpoint. -""" - -from nova import flags -from nova import utils -from nova import wsgi -from nova.endpoint import newapi - -FLAGS = flags.FLAGS -flags.DEFINE_integer('cc_port', 8773, 'cloud controller port') - -if __name__ == '__main__': - utils.default_flagfile() - wsgi.run_server(newapi.APIVersionRouter(), FLAGS.cc_port) diff --git a/nova/api/__init__.py b/nova/api/__init__.py new file mode 100644 index 000000000..a6bb93348 --- /dev/null +++ b/nova/api/__init__.py @@ -0,0 +1,38 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Root WSGI middleware for all API controllers. 
+""" + +import routes + +from nova import wsgi +from nova.api import ec2 +from nova.api import rackspace + + +class API(wsgi.Router): + """Routes top-level requests to the appropriate controller.""" + + def __init__(self): + mapper = routes.Mapper() + mapper.connect(None, "/v1.0/{path_info:.*}", + controller=rackspace.API()) + mapper.connect(None, "/ec2/{path_info:.*}", controller=ec2.API()) + super(API, self).__init__(mapper) diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py new file mode 100644 index 000000000..6eec0abf7 --- /dev/null +++ b/nova/api/ec2/__init__.py @@ -0,0 +1,42 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +WSGI middleware for EC2 API controllers. 
+""" + +import routes +import webob.dec + +from nova import wsgi + + +class API(wsgi.Router): + """Routes EC2 requests to the appropriate controller.""" + + def __init__(self): + mapper = routes.Mapper() + mapper.connect(None, "{all:.*}", controller=self.dummy) + super(API, self).__init__(mapper) + + @staticmethod + @webob.dec.wsgify + def dummy(req): + """Temporary dummy controller.""" + msg = "dummy response -- please hook up __init__() to cloud.py instead" + return repr({'dummy': msg, + 'kwargs': repr(req.environ['wsgiorg.routing_args'][1])}) diff --git a/nova/api/rackspace/__init__.py b/nova/api/rackspace/__init__.py new file mode 100644 index 000000000..662cbe495 --- /dev/null +++ b/nova/api/rackspace/__init__.py @@ -0,0 +1,81 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +WSGI middleware for Rackspace API controllers. 
+""" + +import json +import time + +import routes +import webob.dec +import webob.exc + +from nova import flags +from nova import wsgi +from nova.api.rackspace.controllers import flavors +from nova.api.rackspace.controllers import images +from nova.api.rackspace.controllers import servers +from nova.api.rackspace.controllers import sharedipgroups +from nova.auth import manager + + +class API(wsgi.Middleware): + """WSGI entry point for all Rackspace API requests.""" + + def __init__(self): + app = AuthMiddleware(APIRouter()) + super(API, self).__init__(app) + + +class AuthMiddleware(wsgi.Middleware): + """Authorize the rackspace API request or return an HTTP Forbidden.""" + + #TODO(gundlach): isn't this the old Nova API's auth? Should it be replaced + #with correct RS API auth? + + @webob.dec.wsgify + def __call__(self, req): + context = {} + if "HTTP_X_AUTH_TOKEN" in req.environ: + context['user'] = manager.AuthManager().get_user_from_access_key( + req.environ['HTTP_X_AUTH_TOKEN']) + if context['user']: + context['project'] = manager.AuthManager().get_project( + context['user'].name) + if "user" not in context: + return webob.exc.HTTPForbidden() + req.environ['nova.context'] = context + return self.application + + +class APIRouter(wsgi.Router): + """ + Routes requests on the Rackspace API to the appropriate controller + and method. 
+ """ + + def __init__(self): + mapper = routes.Mapper() + mapper.resource("server", "servers", controller=servers.Controller()) + mapper.resource("image", "images", controller=images.Controller()) + mapper.resource("flavor", "flavors", controller=flavors.Controller()) + mapper.resource("sharedipgroup", "sharedipgroups", + controller=sharedipgroups.Controller()) + super(APIRouter, self).__init__(mapper) diff --git a/nova/api/rackspace/controllers/__init__.py b/nova/api/rackspace/controllers/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/nova/api/rackspace/controllers/base.py b/nova/api/rackspace/controllers/base.py new file mode 100644 index 000000000..dd2c6543c --- /dev/null +++ b/nova/api/rackspace/controllers/base.py @@ -0,0 +1,30 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova import wsgi + + +class Controller(wsgi.Controller): + """TODO(eday): Base controller for all rackspace controllers. What is this + for? Is this just Rackspace specific? 
""" + + @classmethod + def render(cls, instance): + if isinstance(instance, list): + return {cls.entity_name: cls.render(instance)} + else: + return {"TODO": "TODO"} diff --git a/nova/api/rackspace/controllers/flavors.py b/nova/api/rackspace/controllers/flavors.py new file mode 100644 index 000000000..986f11434 --- /dev/null +++ b/nova/api/rackspace/controllers/flavors.py @@ -0,0 +1,18 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +class Controller(object): pass diff --git a/nova/api/rackspace/controllers/images.py b/nova/api/rackspace/controllers/images.py new file mode 100644 index 000000000..986f11434 --- /dev/null +++ b/nova/api/rackspace/controllers/images.py @@ -0,0 +1,18 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +class Controller(object): pass diff --git a/nova/api/rackspace/controllers/servers.py b/nova/api/rackspace/controllers/servers.py new file mode 100644 index 000000000..1911d5abf --- /dev/null +++ b/nova/api/rackspace/controllers/servers.py @@ -0,0 +1,83 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova import rpc +from nova.compute import model as compute +from nova.api.rackspace.controllers import base + + +class Controller(base.Controller): + entity_name = 'servers' + + def index(self, **kwargs): + instanmces = [] + for inst in compute.InstanceDirectory().all: + instances.append(instance_details(inst)) + + def show(self, **kwargs): + instance_id = kwargs['id'] + return compute.InstanceDirectory().get(instance_id) + + def delete(self, **kwargs): + instance_id = kwargs['id'] + instance = compute.InstanceDirectory().get(instance_id) + if not instance: + raise ServerNotFound("The requested server was not found") + instance.destroy() + return True + + def create(self, **kwargs): + inst = self.build_server_instance(kwargs['server']) + rpc.cast( + FLAGS.compute_topic, { + "method": "run_instance", + "args": {"instance_id": inst.instance_id}}) + + def update(self, **kwargs): + instance_id = kwargs['id'] + instance = compute.InstanceDirectory().get(instance_id) + if not instance: + raise ServerNotFound("The requested server was not found") + 
instance.update(kwargs['server']) + instance.save() + + def build_server_instance(self, env): + """Build instance data structure and save it to the data store.""" + reservation = utils.generate_uid('r') + ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) + inst = self.instdir.new() + inst['name'] = env['server']['name'] + inst['image_id'] = env['server']['imageId'] + inst['instance_type'] = env['server']['flavorId'] + inst['user_id'] = env['user']['id'] + inst['project_id'] = env['project']['id'] + inst['reservation_id'] = reservation + inst['launch_time'] = ltime + inst['mac_address'] = utils.generate_mac() + address = self.network.allocate_ip( + inst['user_id'], + inst['project_id'], + mac=inst['mac_address']) + inst['private_dns_name'] = str(address) + inst['bridge_name'] = network.BridgedNetwork.get_network_for_project( + inst['user_id'], + inst['project_id'], + 'default')['bridge_name'] + # key_data, key_name, ami_launch_index + # TODO(todd): key data or root password + inst.save() + return inst diff --git a/nova/api/rackspace/controllers/sharedipgroups.py b/nova/api/rackspace/controllers/sharedipgroups.py new file mode 100644 index 000000000..986f11434 --- /dev/null +++ b/nova/api/rackspace/controllers/sharedipgroups.py @@ -0,0 +1,18 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +class Controller(object): pass diff --git a/nova/endpoint/aws/__init__.py b/nova/endpoint/aws/__init__.py deleted file mode 100644 index 55cbb8fd3..000000000 --- a/nova/endpoint/aws/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -import routes -import webob.dec - -from nova import wsgi - -# TODO(gundlach): temp -class API(wsgi.Router): - """WSGI entry point for all AWS API requests.""" - - def __init__(self): - mapper = routes.Mapper() - - mapper.connect(None, "{all:.*}", controller=self.dummy) - - super(API, self).__init__(mapper) - - @webob.dec.wsgify - def dummy(self, req): - #TODO(gundlach) - msg = "dummy response -- please hook up __init__() to cloud.py instead" - return repr({ 'dummy': msg, - 'kwargs': repr(req.environ['wsgiorg.routing_args'][1]) }) diff --git a/nova/endpoint/newapi.py b/nova/endpoint/newapi.py deleted file mode 100644 index 9aae933af..000000000 --- a/nova/endpoint/newapi.py +++ /dev/null @@ -1,51 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -:mod:`nova.endpoint` -- Main NOVA Api endpoints -===================================================== - -.. automodule:: nova.endpoint - :platform: Unix - :synopsis: REST APIs for all nova functions -.. moduleauthor:: Jesse Andrews -.. moduleauthor:: Devin Carlen -.. 
moduleauthor:: Vishvananda Ishaya -.. moduleauthor:: Joshua McKenty -.. moduleauthor:: Manish Singh -.. moduleauthor:: Andy Smith -""" - -from nova import wsgi -import routes -from nova.endpoint import rackspace -from nova.endpoint import aws - -class APIVersionRouter(wsgi.Router): - """Routes top-level requests to the appropriate API.""" - - def __init__(self): - mapper = routes.Mapper() - - rsapi = rackspace.API() - mapper.connect(None, "/v1.0/{path_info:.*}", controller=rsapi) - - mapper.connect(None, "/ec2/{path_info:.*}", controller=aws.API()) - - super(APIVersionRouter, self).__init__(mapper) - diff --git a/nova/endpoint/rackspace/__init__.py b/nova/endpoint/rackspace/__init__.py deleted file mode 100644 index ac53ee10b..000000000 --- a/nova/endpoint/rackspace/__init__.py +++ /dev/null @@ -1,83 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Rackspace API Endpoint -""" - -import json -import time - -import webob.dec -import webob.exc -import routes - -from nova import flags -from nova import wsgi -from nova.auth import manager -from nova.endpoint.rackspace import controllers - - -class API(wsgi.Middleware): - """WSGI entry point for all Rackspace API requests.""" - - def __init__(self): - app = AuthMiddleware(APIRouter()) - super(API, self).__init__(app) - - -class AuthMiddleware(wsgi.Middleware): - """Authorize the rackspace API request or return an HTTP Forbidden.""" - - #TODO(gundlach): isn't this the old Nova API's auth? Should it be replaced - #with correct RS API auth? - - @webob.dec.wsgify - def __call__(self, req): - context = {} - if "HTTP_X_AUTH_TOKEN" in req.environ: - context['user'] = manager.AuthManager().get_user_from_access_key( - req.environ['HTTP_X_AUTH_TOKEN']) - if context['user']: - context['project'] = manager.AuthManager().get_project( - context['user'].name) - if "user" not in context: - return webob.exc.HTTPForbidden() - req.environ['nova.context'] = context - return self.application - - -class APIRouter(wsgi.Router): - """ - Routes requests on the Rackspace API to the appropriate controller - and method. 
- """ - - def __init__(self): - mapper = routes.Mapper() - - mapper.resource("server", "servers", - controller=controllers.ServersController()) - mapper.resource("image", "images", - controller=controllers.ImagesController()) - mapper.resource("flavor", "flavors", - controller=controllers.FlavorsController()) - mapper.resource("sharedipgroup", "sharedipgroups", - controller=controllers.SharedIpGroupsController()) - - super(APIRouter, self).__init__(mapper) diff --git a/nova/endpoint/rackspace/controllers/__init__.py b/nova/endpoint/rackspace/controllers/__init__.py deleted file mode 100644 index 052b6f365..000000000 --- a/nova/endpoint/rackspace/controllers/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from nova.endpoint.rackspace.controllers.images import ImagesController -from nova.endpoint.rackspace.controllers.flavors import FlavorsController -from nova.endpoint.rackspace.controllers.servers import ServersController -from nova.endpoint.rackspace.controllers.sharedipgroups import \ - SharedIpGroupsController diff --git a/nova/endpoint/rackspace/controllers/base.py b/nova/endpoint/rackspace/controllers/base.py deleted file mode 100644 index 8cd44f62e..000000000 --- a/nova/endpoint/rackspace/controllers/base.py +++ /dev/null @@ -1,9 +0,0 @@ -from nova import wsgi - -class BaseController(wsgi.Controller): - @classmethod - def render(cls, instance): - if isinstance(instance, list): - return { cls.entity_name : cls.render(instance) } - else: - return { "TODO": "TODO" } diff --git a/nova/endpoint/rackspace/controllers/flavors.py b/nova/endpoint/rackspace/controllers/flavors.py deleted file mode 100644 index f256cc852..000000000 --- a/nova/endpoint/rackspace/controllers/flavors.py +++ /dev/null @@ -1 +0,0 @@ -class FlavorsController(object): pass diff --git a/nova/endpoint/rackspace/controllers/images.py b/nova/endpoint/rackspace/controllers/images.py deleted file mode 100644 index ae2a08849..000000000 --- a/nova/endpoint/rackspace/controllers/images.py +++ /dev/null @@ 
-1 +0,0 @@ -class ImagesController(object): pass diff --git a/nova/endpoint/rackspace/controllers/servers.py b/nova/endpoint/rackspace/controllers/servers.py deleted file mode 100644 index 2f8e662d6..000000000 --- a/nova/endpoint/rackspace/controllers/servers.py +++ /dev/null @@ -1,63 +0,0 @@ -from nova import rpc -from nova.compute import model as compute -from nova.endpoint.rackspace.controllers.base import BaseController - -class ServersController(BaseController): - entity_name = 'servers' - - def index(self, **kwargs): - return [instance_details(inst) for inst in compute.InstanceDirectory().all] - - def show(self, **kwargs): - instance_id = kwargs['id'] - return compute.InstanceDirectory().get(instance_id) - - def delete(self, **kwargs): - instance_id = kwargs['id'] - instance = compute.InstanceDirectory().get(instance_id) - if not instance: - raise ServerNotFound("The requested server was not found") - instance.destroy() - return True - - def create(self, **kwargs): - inst = self.build_server_instance(kwargs['server']) - rpc.cast( - FLAGS.compute_topic, { - "method": "run_instance", - "args": {"instance_id": inst.instance_id}}) - - def update(self, **kwargs): - instance_id = kwargs['id'] - instance = compute.InstanceDirectory().get(instance_id) - if not instance: - raise ServerNotFound("The requested server was not found") - instance.update(kwargs['server']) - instance.save() - - def build_server_instance(self, env): - """Build instance data structure and save it to the data store.""" - reservation = utils.generate_uid('r') - ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) - inst = self.instdir.new() - inst['name'] = env['server']['name'] - inst['image_id'] = env['server']['imageId'] - inst['instance_type'] = env['server']['flavorId'] - inst['user_id'] = env['user']['id'] - inst['project_id'] = env['project']['id'] - inst['reservation_id'] = reservation - inst['launch_time'] = ltime - inst['mac_address'] = utils.generate_mac() - address = 
self.network.allocate_ip( - inst['user_id'], - inst['project_id'], - mac=inst['mac_address']) - inst['private_dns_name'] = str(address) - inst['bridge_name'] = network.BridgedNetwork.get_network_for_project( - inst['user_id'], - inst['project_id'], - 'default')['bridge_name'] - # key_data, key_name, ami_launch_index - # TODO(todd): key data or root password - inst.save() - return inst diff --git a/nova/endpoint/rackspace/controllers/sharedipgroups.py b/nova/endpoint/rackspace/controllers/sharedipgroups.py deleted file mode 100644 index 9d346d623..000000000 --- a/nova/endpoint/rackspace/controllers/sharedipgroups.py +++ /dev/null @@ -1 +0,0 @@ -class SharedIpGroupsController(object): pass -- cgit From f8f8bc61e0a87b5b72b4539ea3c7b219235a0693 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 17 Aug 2010 16:55:45 -0700 Subject: network datamodel code --- nova/models.py | 84 ++++++++--- nova/network/linux_net.py | 14 +- nova/network/model.py | 18 --- nova/network/service.py | 354 ++++++++++++++++++++++++++++------------------ 4 files changed, 297 insertions(+), 173 deletions(-) diff --git a/nova/models.py b/nova/models.py index e4075faeb..88627ae06 100644 --- a/nova/models.py +++ b/nova/models.py @@ -89,19 +89,6 @@ class Image(Base, NovaBase): if val != 'machine': assert(val is None) -class Network(Base): - __tablename__ = 'networks' - id = Column(Integer, primary_key=True) - bridge = Column(String) - vlan = Column(String) - kind = Column(String) - - @property - def bridge_name(self): - # HACK: this should be set on creation - return 'br100' - #vpn_port = Column(Integer) - project_id = Column(String) #, ForeignKey('projects.id'), nullable=False) class PhysicalNode(Base): __tablename__ = 'physical_nodes' @@ -186,24 +173,89 @@ class Volume(Base, NovaBase): node_name = Column(String) size = Column(Integer) alvailability_zone = Column(String) # FIXME foreign key? 
- instance_id = Column(Integer, ForeignKey('volumes.id'), nullable=True) + instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) mountpoint = Column(String) attach_time = Column(String) # FIXME datetime status = Column(String) # FIXME enum? attach_status = Column(String) # FIXME enum delete_on_termination = Column(Boolean) + class Network(Base, NovaBase): __tablename__ = 'networks' + id = Column(Integer, primary_key=True) + kind = Column(String) + injected = Column(Boolean) + network_str = Column(String) + netmask = Column(String) + bridge = Column(String) + gateway = Column(String) + broadcast = Column(String) + dns = Column(String) + + vlan = Column(Integer) + vpn_public_ip_str = Column(String) + vpn_public_port = Column(Integer) + vpn_private_ip_str = Column(String) + + project_id = Column(String) #, ForeignKey('projects.id'), nullable=False) + # FIXME: should be physical_node_id = Column(Integer) + node_name = Column(String) + + +class NetworkIndex(Base, NovaBase): + __tablename__ = 'network_indexes' + id = Column(Integer, primary_key=True) + index = Column(Integer) + network_id = Column(Integer, ForeignKey('networks.id'), nullable=True) + network = relationship(Network, backref=backref('vpn', + uselist=False)) + + +#FIXME can these both come from the same baseclass? 
class FixedIp(Base, NovaBase): __tablename__ = 'fixed_ips' + id = Column(Integer, primary_key=True) + ip_str = Column(String, unique=True) + network_id = Column(Integer, ForeignKey('networks.id'), nullable=False) + network = relationship(Network, backref=backref('fixed_ips')) + instance = relationship(Instance, backref=backref('fixed_ip', + uselist=False)) + instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) + instance = relationship(Instance, backref=backref('fixed_ip', + uselist=False)) + allocated = Column(Boolean) + leased = Column(Boolean) + reserved = Column(Boolean) + + @classmethod + def find_by_ip_str(cls, ip_str): + session = NovaBase.get_session() + try: + return session.query(cls).filter_by(ip_str=ip_str).one() + except exc.NoResultFound: + raise exception.NotFound("No model for ip str %s" % ip_str) class ElasticIp(Base, NovaBase): __tablename__ = 'elastic_ips' + id = Column(Integer, primary_key=True) + ip_str = Column(String, unique=True) + fixed_ip_id = Column(Integer, ForeignKey('fixed_ip.id'), nullable=True) + fixed_ip = relationship(Network, backref=backref('elastic_ips')) + + project_id = Column(String) #, ForeignKey('projects.id'), nullable=False) + # FIXME: should be physical_node_id = Column(Integer) + node_name = Column(String) + + @classmethod + def find_by_ip_str(cls, ip_str): + session = NovaBase.get_session() + try: + return session.query(cls).filter_by(ip_str=ip_str).one() + except exc.NoResultFound: + raise exception.NotFound("No model for ip str %s" % ip_str) -class Vpn(Base, NovaBase): - __tablename__ = 'vpns' def create_session(engine=None): return NovaBase.get_session() diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index e6bb80bb8..73b9500d2 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -47,7 +47,7 @@ def bind_elastic_ip(elastic_ip): def unbind_elastic_ip(elastic_ip): - """Unbind a public ip from an interface""" + """Unbind a public ip from public interface""" 
_execute("sudo ip addr del %s dev %s" % (elastic_ip, FLAGS.public_interface)) @@ -87,8 +87,13 @@ def remove_elastic_forward(elastic_ip, fixed_ip): _remove_rule("FORWARD -d %s -p %s --dport %s -j ACCEPT" % (fixed_ip, protocol, port)) -def vlan_create(vlan_num): - """Create a vlan on on a bridge device unless vlan already exists""" + +def ensure_vlan_bridge(vlan_num, bridge, network=None): + """Create a vlan and bridge unless they already exist""" + interface = ensure_vlan(vlan_num) + ensure_bridge(bridge, interface) + +def ensure_vlan(vlan_num): interface = "vlan%s" % vlan_num if not _device_exists(interface): logging.debug("Starting VLAN inteface %s", interface) @@ -98,8 +103,7 @@ def vlan_create(vlan_num): return interface -def bridge_create(interface, bridge, network=None): - """Create a bridge on an bridge unless it already exists""" +def ensure_bridge(bridge, interface, network=None): if not _device_exists(bridge): logging.debug("Starting Bridge inteface for %s", interface) _execute("sudo brctl addbr %s" % bridge) diff --git a/nova/network/model.py b/nova/network/model.py index 434fda9ed..24e5d6afb 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -28,30 +28,12 @@ import time from nova import datastore from nova import exception as nova_exception from nova import flags -from nova import utils from nova.auth import manager from nova.network import exception from nova.network import linux_net FLAGS = flags.FLAGS -flags.DEFINE_string('networks_path', utils.abspath('../networks'), - 'Location to keep network config files') -flags.DEFINE_integer('public_vlan', 1, 'VLAN for public IP addresses') -flags.DEFINE_string('public_interface', 'vlan1', - 'Interface for public IP addresses') -flags.DEFINE_string('bridge_dev', 'eth1', - 'network device for bridges') -flags.DEFINE_integer('vlan_start', 100, 'First VLAN for private networks') -flags.DEFINE_integer('vlan_end', 4093, 'Last VLAN for private networks') -flags.DEFINE_integer('network_size', 256, - 
'Number of addresses in each private subnet') -flags.DEFINE_string('public_range', '4.4.4.0/24', 'Public IP address block') -flags.DEFINE_string('private_range', '10.0.0.0/8', 'Private IP address block') -flags.DEFINE_integer('cnt_vpn_clients', 5, - 'Number of addresses reserved for vpn clients') -flags.DEFINE_integer('cloudpipe_start_port', 12000, - 'Starting port for mapped CloudPipe external ports') logging.getLogger().setLevel(logging.DEBUG) diff --git a/nova/network/service.py b/nova/network/service.py index 309ce874d..2b931f342 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -20,15 +20,18 @@ Network Hosts are responsible for allocating ips and setting up network """ -from nova import datastore +import logging + +import IPy +from sqlalchemy.orm import exc + from nova import flags +from nova import models from nova import service from nova import utils from nova.auth import manager from nova.exception import NotFound from nova.network import exception -from nova.network import model -from nova.network import vpn from nova.network import linux_net FLAGS = flags.FLAGS @@ -54,6 +57,9 @@ flags.DEFINE_string('flat_network_dns', '8.8.4.4', flags.DEFINE_integer('vlan_start', 100, 'First VLAN for private networks') flags.DEFINE_integer('vlan_end', 4093, 'Last VLAN for private networks') +flags.DEFINE_string('vpn_ip', utils.get_my_ip(), + 'Public IP for the cloudpipe VPN servers') +flags.DEFINE_integer('vpn_start', 1000, 'First Vpn port for private networks') flags.DEFINE_integer('network_size', 256, 'Number of addresses in each private subnet') flags.DEFINE_string('public_range', '4.4.4.0/24', 'Public IP address block') @@ -61,6 +67,9 @@ flags.DEFINE_string('private_range', '10.0.0.0/8', 'Private IP address block') flags.DEFINE_integer('cnt_vpn_clients', 5, 'Number of addresses reserved for vpn clients') +# TODO(vish): some better type of dependency injection? 
+_driver = linux_net + def type_to_class(network_type): """Convert a network_type string into an actual Python class""" if network_type == 'flat': @@ -70,16 +79,24 @@ def type_to_class(network_type): raise NotFound("Couldn't find %s network type" % network_type) -def setup_compute_network(instance): +def setup_compute_network(project_id): """Sets up the network on a compute host""" - srv = type_to_class(instance.project.network.kind) - srv.setup_compute_network(instance) + network = get_network_for_project(project_id) + srv = type_to_class(network.kind) + srv.setup_compute_network(network) + + +def get_network_for_project(project_id): + """Get network allocated to project from datastore""" + project = manager.AuthManager().get_project(project_id) + if not project: + raise exception.NotFound() + return project.network def get_host_for_project(project_id): """Get host allocated to project from datastore""" - redis = datastore.Redis.instance() - return redis.get(_host_key(project_id)) + return get_network_for_project(project_id).node_name class BaseNetworkService(service.Service): @@ -87,169 +104,238 @@ class BaseNetworkService(service.Service): This class must be subclassed. 
""" - def __init__(self, *args, **kwargs): - self.network = model.PublicNetworkController() - super(BaseNetworkService, self).__init__(*args, **kwargs) - def set_network_host(self, user_id, project_id, *args, **kwargs): + def set_network_host(self, project_id): """Safely sets the host of the projects network""" - redis = datastore.Redis.instance() - key = _host_key(project_id) - if redis.setnx(key, FLAGS.node_name): - self._on_set_network_host(user_id, project_id, - security_group='default', - *args, **kwargs) - return FLAGS.node_name - else: - return redis.get(key) - - def allocate_fixed_ip(self, user_id, project_id, - security_group='default', - *args, **kwargs): - """Subclass implements getting fixed ip from the pool""" - raise NotImplementedError() - - def deallocate_fixed_ip(self, fixed_ip, *args, **kwargs): - """Subclass implements return of ip to the pool""" - raise NotImplementedError() - - def _on_set_network_host(self, user_id, project_id, - *args, **kwargs): + network = get_network_for_project(project_id) + if network.node_name: + return network.node_name + network.node_name = FLAGS.node_name + network.kind = FLAGS.network_type + try: + network.save() + self._on_set_network_host(network) + except exc.ConcurrentModificationError: + network.refresh() # FIXME is this implemented? + return network.node_name + + def allocate_fixed_ip(self, project_id, instance_id, *args, **kwargs): + """Gets fixed ip from the pool""" + session = models.NovaBase.get_session() + query = session.query(models.FixedIp).filter_by(project_id=project_id) + query = query.filter_by(allocated=False).filter_by(reserved=False) + query = query.filter_by(leased=False) + while(True): + try: + fixed_ip = query.first() + except exc.NoResultFound: + raise exception.NoMoreAddresses() + # FIXME will this set backreference? 
+ fixed_ip.instance_id = instance_id + fixed_ip.allocated = True + session.add(fixed_ip) + try: + fixed_ip.save() + return fixed_ip.ip_str + except exc.ConcurrentModificationError: + pass + + def deallocate_fixed_ip(self, fixed_ip_str, *args, **kwargs): + """Returns a fixed ip to the pool""" + fixed_ip = models.FixedIp.find_by_ip_str(fixed_ip_str) + fixed_ip.instance = None + fixed_ip.allocated = False + fixed_ip.save() + + + def _on_set_network_host(self, network, *args, **kwargs): """Called when this host becomes the host for a project""" pass @classmethod - def setup_compute_network(cls, instance, *args, **kwargs): + def setup_compute_network(cls, network): """Sets up matching network for compute hosts""" raise NotImplementedError() - def allocate_elastic_ip(self, user_id, project_id): - """Gets a elastic ip from the pool""" - # NOTE(vish): Replicating earlier decision to use 'public' as - # mac address name, although this should probably - # be done inside of the PublicNetworkController - return self.network.allocate_ip(user_id, project_id, 'public') - - def associate_elastic_ip(self, elastic_ip, fixed_ip, instance_id): + def allocate_elastic_ip(self, project_id): + """Gets an elastic ip from the pool""" + # FIXME: add elastic ips through manage command + session = models.NovaBase.get_session() + node_name = FLAGS.node_name + query = session.query(models.ElasticIp).filter_by(node_name=node_name) + query = query.filter_by(fixed_ip_id=None) + while(True): + try: + elastic_ip = query.first() + except exc.NoResultFound: + raise exception.NoMoreAddresses() + elastic_ip.project_id = project_id + session.add(elastic_ip) + try: + elastic_ip.save() + return elastic_ip.ip_str + except exc.ConcurrentModificationError: + pass + + def associate_elastic_ip(self, elastic_ip_str, fixed_ip_str): """Associates an elastic ip to a fixed ip""" - self.network.associate_address(elastic_ip, fixed_ip, instance_id) - - def disassociate_elastic_ip(self, elastic_ip): + elastic_ip = 
models.ElasticIp.find_by_ip_str(elastic_ip_str) + fixed_ip = models.FixedIp.find_by_ip_str(elastic_ip_str) + elastic_ip.fixed_ip = fixed_ip + _driver.bind_elastic_ip(elastic_ip_str) + _driver.ensure_elastic_forward(elastic_ip_str, fixed_ip_str) + elastic_ip.save() + + def disassociate_elastic_ip(self, elastic_ip_str): """Disassociates a elastic ip""" - self.network.disassociate_address(elastic_ip) + elastic_ip = models.ElasticIp.find_by_ip_str(elastic_ip_str) + fixed_ip_str = elastic_ip.fixed_ip.ip_str + elastic_ip.fixed_ip = None + _driver.unbind_elastic_ip(elastic_ip_str) + _driver.remove_elastic_forward(elastic_ip_str, fixed_ip_str) + elastic_ip.save() - def deallocate_elastic_ip(self, elastic_ip): - """Returns a elastic ip to the pool""" - self.network.deallocate_ip(elastic_ip) + def deallocate_elastic_ip(self, elastic_ip_str): + """Returns an elastic ip to the pool""" + elastic_ip = models.ElasticIp.find_by_ip_str(elastic_ip_str) + elastic_ip.project_id = None + elastic_ip.save() class FlatNetworkService(BaseNetworkService): """Basic network where no vlans are used""" @classmethod - def setup_compute_network(cls, instance, *args, **kwargs): + def setup_compute_network(cls, network): """Network is created manually""" pass - def allocate_fixed_ip(self, - user_id, - project_id, - security_group='default', - *args, **kwargs): - """Gets a fixed ip from the pool - - Flat network just grabs the next available ip from the pool - """ - # NOTE(vish): Some automation could be done here. For example, - # creating the flat_network_bridge and setting up - # a gateway. This is all done manually atm. - redis = datastore.Redis.instance() - if not redis.exists('ips') and not len(redis.keys('instances:*')): - for fixed_ip in FLAGS.flat_network_ips: - redis.sadd('ips', fixed_ip) - fixed_ip = redis.spop('ips') - if not fixed_ip: - raise exception.NoMoreAddresses() - # TODO(vish): some sort of dns handling for hostname should - # probably be done here. 
- return {'inject_network': True, - 'network_type': FLAGS.network_type, - 'mac_address': utils.generate_mac(), - 'private_dns_name': str(fixed_ip), - 'bridge_name': FLAGS.flat_network_bridge, - 'network_network': FLAGS.flat_network_network, - 'network_netmask': FLAGS.flat_network_netmask, - 'network_gateway': FLAGS.flat_network_gateway, - 'network_broadcast': FLAGS.flat_network_broadcast, - 'network_dns': FLAGS.flat_network_dns} - - def deallocate_fixed_ip(self, fixed_ip, *args, **kwargs): - """Returns an ip to the pool""" - datastore.Redis.instance().sadd('ips', fixed_ip) - + def _on_set_network_host(self, network, *args, **kwargs): + """Called when this host becomes the host for a project""" + # FIXME should there be two types of network objects in the database? + network.injected = True + network.network_str=FLAGS.flat_network_network + network.netmask=FLAGS.flat_network_netmask + network.bridge=FLAGS.flat_network_bridge + network.gateway=FLAGS.flat_network_gateway + network.broadcast=FLAGS.flat_network_broadcast + network.dns=FLAGS.flat_network_dns + network.save() + # FIXME add public ips from flags to the datastore class VlanNetworkService(BaseNetworkService): """Vlan network with dhcp""" - def __init__(self, *args, **kwargs): - super(VlanNetworkService, self).__init__(*args, **kwargs) - # TODO(vish): some better type of dependency injection? 
- self.driver = linux_net - - # pylint: disable=W0221 - def allocate_fixed_ip(self, - user_id, - project_id, - security_group='default', - is_vpn=False, - hostname=None, + + def allocate_fixed_ip(self, project_id, instance_id, is_vpn=False, *args, **kwargs): """Gets a fixed ip from the pool""" - mac = utils.generate_mac() - net = model.get_project_network(project_id) + network = get_network_for_project(project_id) if is_vpn: - fixed_ip = net.allocate_vpn_ip(user_id, - project_id, - mac, - hostname) + fixed_ip = models.FixedIp.find_by_ip_str(network.vpn_private_ip_str) + if fixed_ip.allocated: + raise exception.AddressAlreadyAllocated() + # FIXME will this set backreference? + fixed_ip.instance_id = instance_id + fixed_ip.allocated = True + fixed_ip.save() + _driver.ensure_vlan_forward(network.vpn_public_ip_str, + network.vpn_public_port, + network.vpn_private_ip_str) + ip_str = fixed_ip.ip_str else: - fixed_ip = net.allocate_ip(user_id, - project_id, - mac, - hostname) - return {'network_type': FLAGS.network_type, - 'bridge_name': net['bridge_name'], - 'mac_address': mac, - 'private_dns_name': fixed_ip} - - def deallocate_fixed_ip(self, fixed_ip, - *args, **kwargs): + parent = super(VlanNetworkService, self) + ip_str = parent.allocate_fixed_ip(project_id, instance_id) + _driver.ensure_vlan_bridge(network.vlan, network.bridge) + return ip_str + + def deallocate_fixed_ip(self, fixed_ip_str): """Returns an ip to the pool""" - return model.get_network_by_address(fixed_ip).deallocate_ip(fixed_ip) + fixed_ip = models.FixedIp.find_by_ip_str(fixed_ip_str) + if fixed_ip.leased: + logging.debug("Deallocating IP %s", fixed_ip_str) + fixed_ip.allocated = False + # keep instance id until release occurs + fixed_ip.save() + else: + self.release_ip(fixed_ip_str) - def lease_ip(self, fixed_ip): + def lease_ip(self, fixed_ip_str): """Called by bridge when ip is leased""" - return model.get_network_by_address(fixed_ip).lease_ip(fixed_ip) - - def release_ip(self, fixed_ip): + fixed_ip 
= models.FixedIp.find_by_ip_str(fixed_ip_str) + if not fixed_ip.allocated: + raise exception.AddressNotAllocated(fixed_ip_str) + logging.debug("Leasing IP %s", fixed_ip_str) + fixed_ip.leased = True + fixed_ip.save() + + def release_ip(self, fixed_ip_str): """Called by bridge when ip is released""" - return model.get_network_by_address(fixed_ip).release_ip(fixed_ip) + fixed_ip = models.FixedIp.find_by_ip_str(fixed_ip_str) + logging.debug("Releasing IP %s", fixed_ip_str) + fixed_ip.leased = False + fixed_ip.allocated = False + fixed_ip.instance = None + fixed_ip.save() + def restart_nets(self): """Ensure the network for each user is enabled""" - for project in manager.AuthManager().get_projects(): - model.get_project_network(project.id).express() + # FIXME + pass - def _on_set_network_host(self, user_id, project_id, - *args, **kwargs): + def _on_set_network_host(self, network): """Called when this host becomes the host for a project""" - vpn.NetworkData.create(project_id) + # FIXME add indexes to datastore + # index = self._get_network_index(network) + index = 0 + private_net = IPy.IP(FLAGS.private_range) + start = index * FLAGS.network_size + # minus one for the gateway. 
+ network_str = "%s-%s" % (private_net[start], + private_net[start + FLAGS.network_size - 1]) + vlan = FLAGS.vlan_start + index + project_net = IPy.IP(network_str) + network.network_str = network_str + network.netmask = project_net.netmask() + network.vlan = vlan + network.bridge = 'br%s' % vlan + network.gateway = project_net.gateway() + network.broadcast = project_net.broadast() + network.vpn_private_ip_str = project_net[2] + network.vpn_public_ip_str = FLAGS.vpn_ip + network.vpn_public_port = FLAGS.vpn_start + index + # create network fixed ips + BOTTOM_RESERVED = 3 + TOP_RESERVED = 1 + FLAGS.vpn_client_cnt + for i in range(len(project_net)): + fixed_ip = models.FixedIp() + fixed_ip.ip_str = project_net[i] + if i < BOTTOM_RESERVED or i > TOP_RESERVED: + fixed_ip.reserved = True + fixed_ip.network = network + fixed_ip.save() + + + def _get_network_index(self, network): + """Get non-conflicting index for network""" + session = models.NovaBase.get_session() + node_name = FLAGS.node_name + query = session.query(models.NetworkIndex).filter_by(network_id=None) + while(True): + try: + network_index = query.first() + except exc.NoResultFound: + raise exception.NoMoreNetworks() + network_index.network = network + session.add(network_index) + try: + network_index.save() + return network_index.index + except exc.ConcurrentModificationError: + pass + @classmethod - def setup_compute_network(cls, instance, *args, **kwargs): + def setup_compute_network(cls, network): """Sets up matching network for compute hosts""" - # NOTE(vish): Use BridgedNetwork instead of DHCPNetwork because - # we don't want to run dnsmasq on the client machines - net = instance.project.network - # FIXME(ja): hack - uncomment this: - #linux_net.vlan_create(net) - #linux_net.bridge_create(net) + _driver.ensure_vlan_bridge(network.vlan, network.bridge) -- cgit From 1cd448f907e132c451d6b27c64d16c17b7530952 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 17 Aug 2010 16:56:16 -0700 Subject: 
removed extra files --- nova/network/model.py | 609 -------------------------------------------------- nova/network/vpn.py | 127 ----------- 2 files changed, 736 deletions(-) delete mode 100644 nova/network/model.py delete mode 100644 nova/network/vpn.py diff --git a/nova/network/model.py b/nova/network/model.py deleted file mode 100644 index 24e5d6afb..000000000 --- a/nova/network/model.py +++ /dev/null @@ -1,609 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Model Classes for network control, including VLANs, DHCP, and IP allocation. -""" - -import IPy -import logging -import os -import time - -from nova import datastore -from nova import exception as nova_exception -from nova import flags -from nova.auth import manager -from nova.network import exception -from nova.network import linux_net - - -FLAGS = flags.FLAGS - -logging.getLogger().setLevel(logging.DEBUG) - - -class Vlan(): - """Tracks vlans assigned to project it the datastore""" - def __init__(self, project, vlan): # pylint: disable=W0231 - """ - Since we don't want to try and find a vlan by its identifier, - but by a project id, we don't call super-init. 
- """ - self.project_id = project - self.vlan_id = vlan - - @property - def identifier(self): - """Datastore identifier""" - return "%s:%s" % (self.project_id, self.vlan_id) - - @classmethod - def create(cls, project, vlan): - """Create a Vlan object""" - instance = cls(project, vlan) - instance.save() - return instance - - @classmethod - def lookup(cls, project): - """Returns object by project if it exists in datastore or None""" - set_name = cls._redis_set_name(cls.__name__) - vlan = datastore.Redis.instance().hget(set_name, project) - if vlan: - return cls(project, vlan) - else: - return None - - @classmethod - def dict_by_project(cls): - """A hash of project:vlan""" - set_name = cls._redis_set_name(cls.__name__) - return datastore.Redis.instance().hgetall(set_name) or {} - - @classmethod - def dict_by_vlan(cls): - """A hash of vlan:project""" - set_name = cls._redis_set_name(cls.__name__) - retvals = {} - hashset = datastore.Redis.instance().hgetall(set_name) or {} - for (key, val) in hashset.iteritems(): - retvals[val] = key - return retvals - - @classmethod - def all(cls): - set_name = cls._redis_set_name(cls.__name__) - elements = datastore.Redis.instance().hgetall(set_name) - for project in elements: - yield cls(project, elements[project]) - - def save(self): - """ - Vlan saves state into a giant hash named "vlans", with keys of - project_id and value of vlan number. Therefore, we skip the - default way of saving into "vlan:ID" and adding to a set of "vlans". 
- """ - set_name = self._redis_set_name(self.__class__.__name__) - datastore.Redis.instance().hset(set_name, - self.project_id, - self.vlan_id) - - def destroy(self): - """Removes the object from the datastore""" - set_name = self._redis_set_name(self.__class__.__name__) - datastore.Redis.instance().hdel(set_name, self.project_id) - - def subnet(self): - """Returns a string containing the subnet""" - vlan = int(self.vlan_id) - network = IPy.IP(FLAGS.private_range) - start = (vlan - FLAGS.vlan_start) * FLAGS.network_size - # minus one for the gateway. - return "%s-%s" % (network[start], - network[start + FLAGS.network_size - 1]) - - -class FixedIp(): - """Represents a fixed ip in the datastore""" - - def __init__(self, address): - self.address = address - super(FixedIp, self).__init__() - - @property - def identifier(self): - return self.address - - # NOTE(vish): address states allocated, leased, deallocated - def default_state(self): - return {'address': self.address, - 'state': 'none'} - - @classmethod - # pylint: disable=R0913 - def create(cls, user_id, project_id, address, mac, hostname, network_id): - """Creates an FixedIp object""" - addr = cls(address) - addr['user_id'] = user_id - addr['project_id'] = project_id - addr['mac'] = mac - if hostname is None: - hostname = "ip-%s" % address.replace('.', '-') - addr['hostname'] = hostname - addr['network_id'] = network_id - addr['state'] = 'allocated' - addr.save() - return addr - - def save(self): - is_new = self.is_new_record() - success = super(FixedIp, self).save() - if success and is_new: - self.associate_with("network", self['network_id']) - - def destroy(self): - self.unassociate_with("network", self['network_id']) - super(FixedIp, self).destroy() - - -class ElasticIp(FixedIp): - """Represents an elastic ip in the datastore""" - override_type = "address" - - def default_state(self): - return {'address': self.address, - 'instance_id': 'available', - 'private_ip': 'available'} - - -# CLEANUP: -# TODO(ja): does 
vlanpool "keeper" need to know the min/max - -# shouldn't FLAGS always win? -class BaseNetwork(): - """Implements basic logic for allocating ips in a network""" - override_type = 'network' - address_class = FixedIp - - @property - def identifier(self): - """Datastore identifier""" - return self.network_id - - def default_state(self): - """Default values for new objects""" - return {'network_id': self.network_id, 'network_str': self.network_str} - - @classmethod - # pylint: disable=R0913 - def create(cls, user_id, project_id, security_group, vlan, network_str): - """Create a BaseNetwork object""" - network_id = "%s:%s" % (project_id, security_group) - net = cls(network_id, network_str) - net['user_id'] = user_id - net['project_id'] = project_id - net["vlan"] = vlan - net["bridge_name"] = "br%s" % vlan - net.save() - return net - - def __init__(self, network_id, network_str=None): - self.network_id = network_id - self.network_str = network_str - super(BaseNetwork, self).__init__() - self.save() - - @property - def network(self): - """Returns a string representing the network""" - return IPy.IP(self['network_str']) - - @property - def netmask(self): - """Returns the netmask of this network""" - return self.network.netmask() - - @property - def gateway(self): - """Returns the network gateway address""" - return self.network[1] - - @property - def broadcast(self): - """Returns the network broadcast address""" - return self.network.broadcast() - - @property - def bridge_name(self): - """Returns the bridge associated with this network""" - return "br%s" % (self["vlan"]) - - @property - def user(self): - """Returns the user associated with this network""" - return manager.AuthManager().get_user(self['user_id']) - - @property - def project(self): - """Returns the project associated with this network""" - return manager.AuthManager().get_project(self['project_id']) - - # pylint: disable=R0913 - def _add_host(self, user_id, project_id, ip_address, mac, hostname): - """Add a 
host to the datastore""" - self.address_class.create(user_id, project_id, ip_address, - mac, hostname, self.identifier) - - def _rem_host(self, ip_address): - """Remove a host from the datastore""" - self.address_class(ip_address).destroy() - - @property - def assigned(self): - """Returns a list of all assigned addresses""" - return self.address_class.associated_keys('network', self.identifier) - - @property - def assigned_objs(self): - """Returns a list of all assigned addresses as objects""" - return self.address_class.associated_to('network', self.identifier) - - def get_address(self, ip_address): - """Returns a specific ip as an object""" - if ip_address in self.assigned: - return self.address_class(ip_address) - return None - - @property - def available(self): - """Returns a list of all available addresses in the network""" - for idx in range(self.num_bottom_reserved_ips, - len(self.network) - self.num_top_reserved_ips): - address = str(self.network[idx]) - if not address in self.assigned: - yield address - - @property - def num_bottom_reserved_ips(self): - """Returns number of ips reserved at the bottom of the range""" - return 2 # Network, Gateway - - @property - def num_top_reserved_ips(self): - """Returns number of ips reserved at the top of the range""" - return 1 # Broadcast - - def allocate_ip(self, user_id, project_id, mac, hostname=None): - """Allocates an ip to a mac address""" - for address in self.available: - logging.debug("Allocating IP %s to %s", address, project_id) - self._add_host(user_id, project_id, address, mac, hostname) - self.express(address=address) - return address - raise exception.NoMoreAddresses("Project %s with network %s" % - (project_id, str(self.network))) - - def lease_ip(self, ip_str): - """Called when DHCP lease is activated""" - if not ip_str in self.assigned: - raise exception.AddressNotAllocated() - address = self.get_address(ip_str) - if address: - logging.debug("Leasing allocated IP %s", ip_str) - address['state'] = 
'leased' - address.save() - - def release_ip(self, ip_str): - """Called when DHCP lease expires - - Removes the ip from the assigned list""" - if not ip_str in self.assigned: - raise exception.AddressNotAllocated() - logging.debug("Releasing IP %s", ip_str) - self._rem_host(ip_str) - self.deexpress(address=ip_str) - - def deallocate_ip(self, ip_str): - """Deallocates an allocated ip""" - if not ip_str in self.assigned: - raise exception.AddressNotAllocated() - address = self.get_address(ip_str) - if address: - if address['state'] != 'leased': - # NOTE(vish): address hasn't been leased, so release it - self.release_ip(ip_str) - else: - logging.debug("Deallocating allocated IP %s", ip_str) - address['state'] == 'deallocated' - address.save() - - def express(self, address=None): - """Set up network. Implemented in subclasses""" - pass - - def deexpress(self, address=None): - """Tear down network. Implemented in subclasses""" - pass - - -class BridgedNetwork(BaseNetwork): - """ - Virtual Network that can express itself to create a vlan and - a bridge (with or without an IP address/netmask/gateway) - - properties: - bridge_name - string (example value: br42) - vlan - integer (example value: 42) - bridge_dev - string (example: eth0) - bridge_gets_ip - boolean used during bridge creation - - if bridge_gets_ip then network address for bridge uses the properties: - gateway - broadcast - netmask - """ - - bridge_gets_ip = False - override_type = 'network' - - @classmethod - def get_network_for_project(cls, - user_id, - project_id, - security_group='default'): - """Returns network for a given project""" - vlan = get_vlan_for_project(project_id) - network_str = vlan.subnet() - return cls.create(user_id, project_id, security_group, vlan.vlan_id, - network_str) - - def __init__(self, *args, **kwargs): - super(BridgedNetwork, self).__init__(*args, **kwargs) - self['bridge_dev'] = FLAGS.bridge_dev - self.save() - - def express(self, address=None): - super(BridgedNetwork, 
self).express(address=address) - linux_net.vlan_create(self) - linux_net.bridge_create(self) - - -class DHCPNetwork(BridgedNetwork): - """Network supporting DHCP""" - bridge_gets_ip = True - override_type = 'network' - - def __init__(self, *args, **kwargs): - super(DHCPNetwork, self).__init__(*args, **kwargs) - if not(os.path.exists(FLAGS.networks_path)): - os.makedirs(FLAGS.networks_path) - - @property - def num_bottom_reserved_ips(self): - # For cloudpipe - return super(DHCPNetwork, self).num_bottom_reserved_ips + 1 - - @property - def num_top_reserved_ips(self): - return super(DHCPNetwork, self).num_top_reserved_ips + \ - FLAGS.cnt_vpn_clients - - @property - def dhcp_listen_address(self): - """Address where dhcp server should listen""" - return self.gateway - - @property - def dhcp_range_start(self): - """Starting address dhcp server should use""" - return self.network[self.num_bottom_reserved_ips] - - def express(self, address=None): - super(DHCPNetwork, self).express(address=address) - if len(self.assigned) > 0: - logging.debug("Starting dnsmasq server for network with vlan %s", - self['vlan']) - linux_net.start_dnsmasq(self) - else: - logging.debug("Not launching dnsmasq: no hosts.") - self.express_vpn() - - def allocate_vpn_ip(self, user_id, project_id, mac, hostname=None): - """Allocates the reserved ip to a vpn instance""" - address = str(self.network[2]) - self._add_host(user_id, project_id, address, mac, hostname) - self.express(address=address) - return address - - def express_vpn(self): - """Sets up routing rules for vpn""" - private_ip = str(self.network[2]) - linux_net.confirm_rule("FORWARD -d %s -p udp --dport 1194 -j ACCEPT" - % (private_ip, )) - linux_net.confirm_rule( - "PREROUTING -t nat -d %s -p udp --dport %s -j DNAT --to %s:1194" - % (self.project.vpn_ip, self.project.vpn_port, private_ip)) - - def deexpress(self, address=None): - # if this is the last address, stop dns - super(DHCPNetwork, self).deexpress(address=address) - if 
len(self.assigned) == 0: - linux_net.stop_dnsmasq(self) - else: - linux_net.start_dnsmasq(self) - -DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)] - - -class PublicNetworkController(BaseNetwork): - """Handles elastic ips""" - override_type = 'network' - address_class = ElasticIp - - def __init__(self, *args, **kwargs): - network_id = "public:default" - super(PublicNetworkController, self).__init__(network_id, - FLAGS.public_range, *args, **kwargs) - self['user_id'] = "public" - self['project_id'] = "public" - self["create_time"] = time.strftime('%Y-%m-%dT%H:%M:%SZ', - time.gmtime()) - self["vlan"] = FLAGS.public_vlan - self.save() - self.express() - - def deallocate_ip(self, ip_str): - # NOTE(vish): cleanup is now done on release by the parent class - self.release_ip(ip_str) - - def associate_address(self, public_ip, private_ip, instance_id): - """Associates a public ip to a private ip and instance id""" - if not public_ip in self.assigned: - raise exception.AddressNotAllocated() - # TODO(josh): Keep an index going both ways - for addr in self.assigned_objs: - if addr.get('private_ip', None) == private_ip: - raise exception.AddressAlreadyAssociated() - addr = self.get_address(public_ip) - if addr.get('private_ip', 'available') != 'available': - raise exception.AddressAlreadyAssociated() - addr['private_ip'] = private_ip - addr['instance_id'] = instance_id - addr.save() - self.express(address=public_ip) - - def disassociate_address(self, public_ip): - """Disassociates a public ip with its private ip""" - if not public_ip in self.assigned: - raise exception.AddressNotAllocated() - addr = self.get_address(public_ip) - if addr.get('private_ip', 'available') == 'available': - raise exception.AddressNotAssociated() - self.deexpress(address=public_ip) - addr['private_ip'] = 'available' - addr['instance_id'] = 'available' - addr.save() - - def express(self, address=None): - if address: - if not address in self.assigned: - raise 
exception.AddressNotAllocated() - addresses = [self.get_address(address)] - else: - addresses = self.assigned_objs - for addr in addresses: - if addr.get('private_ip', 'available') == 'available': - continue - public_ip = addr['address'] - private_ip = addr['private_ip'] - linux_net.bind_public_ip(public_ip, FLAGS.public_interface) - linux_net.confirm_rule("PREROUTING -t nat -d %s -j DNAT --to %s" - % (public_ip, private_ip)) - linux_net.confirm_rule("POSTROUTING -t nat -s %s -j SNAT --to %s" - % (private_ip, public_ip)) - # TODO(joshua): Get these from the secgroup datastore entries - linux_net.confirm_rule("FORWARD -d %s -p icmp -j ACCEPT" - % (private_ip)) - for (protocol, port) in DEFAULT_PORTS: - linux_net.confirm_rule( - "FORWARD -d %s -p %s --dport %s -j ACCEPT" - % (private_ip, protocol, port)) - - def deexpress(self, address=None): - addr = self.get_address(address) - private_ip = addr['private_ip'] - linux_net.unbind_public_ip(address, FLAGS.public_interface) - linux_net.remove_rule("PREROUTING -t nat -d %s -j DNAT --to %s" - % (address, private_ip)) - linux_net.remove_rule("POSTROUTING -t nat -s %s -j SNAT --to %s" - % (private_ip, address)) - linux_net.remove_rule("FORWARD -d %s -p icmp -j ACCEPT" - % (private_ip)) - for (protocol, port) in DEFAULT_PORTS: - linux_net.remove_rule("FORWARD -d %s -p %s --dport %s -j ACCEPT" - % (private_ip, protocol, port)) - - -# FIXME(todd): does this present a race condition, or is there some -# piece of architecture that mitigates it (only one queue -# listener per net)? 
-def get_vlan_for_project(project_id): - """Allocate vlan IDs to individual users""" - vlan = Vlan.lookup(project_id) - if vlan: - return vlan - known_vlans = Vlan.dict_by_vlan() - for vnum in range(FLAGS.vlan_start, FLAGS.vlan_end): - vstr = str(vnum) - if not vstr in known_vlans: - return Vlan.create(project_id, vnum) - old_project_id = known_vlans[vstr] - if not manager.AuthManager().get_project(old_project_id): - vlan = Vlan.lookup(old_project_id) - if vlan: - # NOTE(todd): This doesn't check for vlan id match, because - # it seems to be assumed that vlan<=>project is - # always a 1:1 mapping. It could be made way - # sexier if it didn't fight against the way - # BasicModel worked and used associate_with - # to build connections to projects. - # NOTE(josh): This is here because we want to make sure we - # don't orphan any VLANs. It is basically - # garbage collection for after projects abandoned - # their reference. - vlan.destroy() - vlan.project_id = project_id - vlan.save() - return vlan - else: - return Vlan.create(project_id, vnum) - raise exception.AddressNotAllocated("Out of VLANs") - - -def get_project_network(project_id, security_group='default'): - """Gets a project's private network, allocating one if needed""" - project = manager.AuthManager().get_project(project_id) - if not project: - raise nova_exception.NotFound("Project %s doesn't exist." 
% project_id) - manager_id = project.project_manager_id - return DHCPNetwork.get_network_for_project(manager_id, - project.id, - security_group) - - -def get_network_by_address(address): - """Gets the network for a given private ip""" - address_record = FixedIp.lookup(address) - if not address_record: - raise exception.AddressNotAllocated() - return get_project_network(address_record['project_id']) - - -def get_network_by_interface(iface, security_group='default'): - """Gets the network for a given interface""" - vlan = iface.rpartition("br")[2] - project_id = Vlan.dict_by_vlan().get(vlan) - return get_project_network(project_id, security_group) - - -def get_public_ip_for_instance(instance_id): - """Gets the public ip for a given instance""" - # FIXME(josh): this should be a lookup - iteration won't scale - for address_record in ElasticIp.all(): - if address_record.get('instance_id', 'available') == instance_id: - return address_record['address'] diff --git a/nova/network/vpn.py b/nova/network/vpn.py deleted file mode 100644 index 5eb1c2b20..000000000 --- a/nova/network/vpn.py +++ /dev/null @@ -1,127 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Network Data for projects""" - -from nova import datastore -from nova import exception -from nova import flags -from nova import utils - -FLAGS = flags.FLAGS - - -flags.DEFINE_string('vpn_ip', utils.get_my_ip(), - 'Public IP for the cloudpipe VPN servers') -flags.DEFINE_integer('vpn_start_port', 1000, - 'Start port for the cloudpipe VPN servers') -flags.DEFINE_integer('vpn_end_port', 2000, - 'End port for the cloudpipe VPN servers') - - -class NoMorePorts(exception.Error): - """No ports available to allocate for the given ip""" - pass - - -class NetworkData(): - """Manages network host, and vpn ip and port for projects""" - def __init__(self, project_id): - self.project_id = project_id - super(NetworkData, self).__init__() - - @property - def identifier(self): - """Identifier used for key in redis""" - return self.project_id - - @classmethod - def create(cls, project_id): - """Creates a vpn for project - - This method finds a free ip and port and stores the associated - values in the datastore. - """ - # TODO(vish): will we ever need multiiple ips per host? - port = cls.find_free_port_for_ip(FLAGS.vpn_ip) - network_data = cls(project_id) - # save ip for project - network_data['host'] = FLAGS.node_name - network_data['project'] = project_id - network_data['ip'] = FLAGS.vpn_ip - network_data['port'] = port - network_data.save() - return network_data - - @classmethod - def find_free_port_for_ip(cls, vpn_ip): - """Finds a free port for a given ip from the redis set""" - # TODO(vish): these redis commands should be generalized and - # placed into a base class. Conceptually, it is - # similar to an association, but we are just - # storing a set of values instead of keys that - # should be turned into objects. 
- cls._ensure_set_exists(vpn_ip) - - port = datastore.Redis.instance().spop(cls._redis_ports_key(vpn_ip)) - if not port: - raise NoMorePorts() - return port - - @classmethod - def _redis_ports_key(cls, vpn_ip): - """Key that ports are stored under in redis""" - return 'ip:%s:ports' % vpn_ip - - @classmethod - def _ensure_set_exists(cls, vpn_ip): - """Creates the set of ports for the ip if it doesn't already exist""" - # TODO(vish): these ports should be allocated through an admin - # command instead of a flag - redis = datastore.Redis.instance() - if (not redis.exists(cls._redis_ports_key(vpn_ip)) and - not redis.exists(cls._redis_association_name('ip', vpn_ip))): - for i in range(FLAGS.vpn_start_port, FLAGS.vpn_end_port + 1): - redis.sadd(cls._redis_ports_key(vpn_ip), i) - - @classmethod - def num_ports_for_ip(cls, vpn_ip): - """Calculates the number of free ports for a given ip""" - cls._ensure_set_exists(vpn_ip) - return datastore.Redis.instance().scard('ip:%s:ports' % vpn_ip) - - @property - def ip(self): # pylint: disable=C0103 - """The ip assigned to the project""" - return self['ip'] - - @property - def port(self): - """The port assigned to the project""" - return int(self['port']) - - def save(self): - """Saves the association to the given ip""" - self.associate_with('ip', self.ip) - super(NetworkData, self).save() - - def destroy(self): - """Cleans up datastore and adds port back to pool""" - self.unassociate_with('ip', self.ip) - datastore.Redis.instance().sadd('ip:%s:ports' % self.ip, self.port) - super(NetworkData, self).destroy() -- cgit From 49f391642639fd0f5bdcc5e791b839eb3a702850 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 17 Aug 2010 18:08:39 -0700 Subject: fix vpn access for auth --- nova/auth/manager.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/nova/auth/manager.py b/nova/auth/manager.py index c16eb0c3c..d2d4d641b 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -34,7 +34,6 @@ from 
nova import flags from nova import models from nova import utils from nova.auth import signer -from nova.network import vpn FLAGS = flags.FLAGS @@ -571,10 +570,12 @@ class AuthManager(object): @return: A tuple containing (ip, port) or None, None if vpn has not been allocated for user. """ - network_data = vpn.NetworkData.lookup(Project.safe_id(project)) - if not network_data: + # FIXME(vish): this shouldn't be messing with the datamodel directly + if not isinstance(project, Project): + project = self.get_project(project) + if not project.network: raise exception.NotFound('project network data has not been set') - return (network_data.ip, network_data.port) + return (project.network.vpn_ip_str, project.network.vpn_port) def delete_project(self, project): """Deletes a project""" -- cgit From f9214212f1aed4e574f6be6c32a6002a3621625e Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 17 Aug 2010 18:10:11 -0700 Subject: remove references to deleted files so tests run --- nova/endpoint/cloud.py | 1 - nova/tests/network_unittest.py | 2 -- 2 files changed, 3 deletions(-) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 3bc03e0b1..e5d4661df 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -41,7 +41,6 @@ from nova.compute import model from nova.compute.instance_types import INSTANCE_TYPES from nova.endpoint import images from nova.network import service as network_service -from nova.network import model as network_model from nova.volume import service diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index 039509809..72dc88f27 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -26,9 +26,7 @@ from nova import flags from nova import test from nova import utils from nova.auth import manager -from nova.network import model from nova.network import service -from nova.network import vpn from nova.network.exception import NoMoreAddresses FLAGS = flags.FLAGS -- cgit From 
c41d9601555c78e3c91fb481fdfb3d50ffdf440b Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 17 Aug 2010 19:41:17 -0700 Subject: progress on tests passing --- nova/compute/service.py | 11 ++----- nova/models.py | 50 ++++++++++++++++++++++++++----- nova/network/service.py | 46 ++++++++++++++++++----------- nova/tests/fake_flags.py | 1 + nova/tests/network_unittest.py | 67 ++++++++++++++++++++++++------------------ nova/virt/fake.py | 16 +++++----- 6 files changed, 121 insertions(+), 70 deletions(-) diff --git a/nova/compute/service.py b/nova/compute/service.py index 13507a1bb..708134072 100644 --- a/nova/compute/service.py +++ b/nova/compute/service.py @@ -25,25 +25,19 @@ Compute Service: """ import base64 -import json import logging import os -import sys from twisted.internet import defer -from twisted.internet import task from nova import exception from nova import flags from nova import process from nova import service from nova import utils -from nova.compute import disk from nova import models from nova.compute import power_state -from nova.compute.instance_types import INSTANCE_TYPES from nova.network import service as network_service -from nova.objectstore import image # for image_path flag from nova.virt import connection as virt_connection from nova.volume import service as volume_service @@ -107,14 +101,15 @@ class ComputeService(service.Service): @exception.wrap_exception def run_instance(self, instance_id, **_kwargs): """ launch a new instance with specified options """ - if str(instance_id) in self._conn.list_instances(): + inst = models.Instance.find(instance_id) + if inst.name in self._conn.list_instances(): raise exception.Error("Instance has already been created") logging.debug("Starting instance %s..." 
% (instance_id)) inst = models.Instance.find(instance_id) # NOTE(vish): passing network type allows us to express the # network without making a call to network to find # out which type of network to setup - network_service.setup_compute_network(inst) + network_service.setup_compute_network(inst.project_id) inst.node_name = FLAGS.node_name inst.save() diff --git a/nova/models.py b/nova/models.py index 88627ae06..5fc4ba1cf 100644 --- a/nova/models.py +++ b/nova/models.py @@ -1,11 +1,43 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +SQLAlchemy models for nova data +""" +import os + from sqlalchemy.orm import relationship, backref, validates, exc -from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey, DateTime, Boolean, Text +from sqlalchemy import Table, Column, Integer, String +from sqlalchemy import MetaData, ForeignKey, DateTime, Boolean, Text from sqlalchemy.ext.declarative import declarative_base + from nova import auth from nova import exception +from nova import flags + +FLAGS=flags.FLAGS Base = declarative_base() +flags.DEFINE_string('sql_connection', + 'sqlite:///%s/nova.sqlite' % os.path.abspath("./"), + 'connection string for sql database') + class NovaBase(object): created_at = Column(DateTime) updated_at = Column(DateTime) @@ -17,7 +49,7 @@ class NovaBase(object): if NovaBase._engine is not None: return NovaBase._engine from sqlalchemy import create_engine - NovaBase._engine = create_engine('sqlite:////root/nova.sqlite', echo=False) + NovaBase._engine = create_engine(FLAGS.sql_connection, echo=False) Base.metadata.create_all(NovaBase._engine) return NovaBase._engine @@ -34,6 +66,11 @@ class NovaBase(object): session = NovaBase.get_session() return session.query(cls).all() + @classmethod + def count(cls): + session = NovaBase.get_session() + return session.query(cls).count() + @classmethod def find(cls, obj_id): session = NovaBase.get_session() @@ -136,7 +173,6 @@ class Instance(Base, NovaBase): reservation_id = Column(String) mac_address = Column(String) - fixed_ip = Column(String) def set_state(self, state_code, state_description=None): from nova.compute import power_state @@ -209,7 +245,7 @@ class NetworkIndex(Base, NovaBase): id = Column(Integer, primary_key=True) index = Column(Integer) network_id = Column(Integer, ForeignKey('networks.id'), nullable=True) - network = relationship(Network, backref=backref('vpn', + network = relationship(Network, backref=backref('network_index', uselist=False)) @@ -220,8 +256,6 @@ class FixedIp(Base, NovaBase): 
ip_str = Column(String, unique=True) network_id = Column(Integer, ForeignKey('networks.id'), nullable=False) network = relationship(Network, backref=backref('fixed_ips')) - instance = relationship(Instance, backref=backref('fixed_ip', - uselist=False)) instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) instance = relationship(Instance, backref=backref('fixed_ip', uselist=False)) @@ -241,8 +275,8 @@ class ElasticIp(Base, NovaBase): __tablename__ = 'elastic_ips' id = Column(Integer, primary_key=True) ip_str = Column(String, unique=True) - fixed_ip_id = Column(Integer, ForeignKey('fixed_ip.id'), nullable=True) - fixed_ip = relationship(Network, backref=backref('elastic_ips')) + fixed_ip_id = Column(Integer, ForeignKey('fixed_ips.id'), nullable=True) + fixed_ip = relationship(FixedIp, backref=backref('elastic_ips')) project_id = Column(String) #, ForeignKey('projects.id'), nullable=False) # FIXME: should be physical_node_id = Column(Integer) diff --git a/nova/network/service.py b/nova/network/service.py index 8ddc4bc84..115a7fa98 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -56,7 +56,7 @@ flags.DEFINE_string('flat_network_dns', '8.8.4.4', 'Dns for simple network') flags.DEFINE_integer('vlan_start', 100, 'First VLAN for private networks') -flags.DEFINE_integer('vlan_end', 4093, 'Last VLAN for private networks') +flags.DEFINE_integer('num_networks', 1000, 'Number of networks to support') flags.DEFINE_string('vpn_ip', utils.get_my_ip(), 'Public IP for the cloudpipe VPN servers') flags.DEFINE_integer('vpn_start', 1000, 'First Vpn port for private networks') @@ -90,7 +90,7 @@ def get_network_for_project(project_id): """Get network allocated to project from datastore""" project = manager.AuthManager().get_project(project_id) if not project: - raise exception.NotFound() + raise exception.NotFound("Couldn't find project %s" % project_id) return project.network @@ -121,14 +121,15 @@ class BaseNetworkService(service.Service): def 
allocate_fixed_ip(self, project_id, instance_id, *args, **kwargs): """Gets fixed ip from the pool""" + print "allocating", project_id, instance_id + network = get_network_for_project(project_id) session = models.NovaBase.get_session() - query = session.query(models.FixedIp).filter_by(project_id=project_id) + query = session.query(models.FixedIp).filter_by(network_id=network.id) query = query.filter_by(allocated=False).filter_by(reserved=False) query = query.filter_by(leased=False) while(True): - try: - fixed_ip = query.first() - except exc.NoResultFound: + fixed_ip = query.first() + if not fixed_ip: raise network_exception.NoMoreAddresses() # FIXME will this set backreference? fixed_ip.instance_id = instance_id @@ -225,6 +226,18 @@ class FlatNetworkService(BaseNetworkService): class VlanNetworkService(BaseNetworkService): """Vlan network with dhcp""" + def __init__(self, *args, **kwargs): + super(VlanNetworkService, self).__init__(*args, **kwargs) + self._ensure_network_indexes() + + def _ensure_network_indexes(self): + # NOTE(vish): this should probably be removed and added via + # admin command or fixtures + if models.NetworkIndex.count() == 0: + for i in range(FLAGS.num_networks): + network_index = models.NetworkIndex() + network_index.index = i + network_index.save() def allocate_fixed_ip(self, project_id, instance_id, is_vpn=False, *args, **kwargs): @@ -285,9 +298,7 @@ class VlanNetworkService(BaseNetworkService): def _on_set_network_host(self, network): """Called when this host becomes the host for a project""" - # FIXME add indexes to datastore - # index = self._get_network_index(network) - index = 0 + index = self._get_network_index(network) private_net = IPy.IP(FLAGS.private_range) start = index * FLAGS.network_size # minus one for the gateway. 
@@ -296,21 +307,22 @@ class VlanNetworkService(BaseNetworkService): vlan = FLAGS.vlan_start + index project_net = IPy.IP(network_str) network.network_str = network_str - network.netmask = project_net.netmask() + network.netmask = str(project_net.netmask()) network.vlan = vlan network.bridge = 'br%s' % vlan - network.gateway = project_net.gateway() - network.broadcast = project_net.broadast() - network.vpn_private_ip_str = project_net[2] + network.gateway = str(project_net[1]) + network.broadcast = str(project_net.broadcast()) + network.vpn_private_ip_str = str(project_net[2]) network.vpn_public_ip_str = FLAGS.vpn_ip network.vpn_public_port = FLAGS.vpn_start + index # create network fixed ips BOTTOM_RESERVED = 3 - TOP_RESERVED = 1 + FLAGS.vpn_client_cnt - for i in range(len(project_net)): + TOP_RESERVED = 1 + FLAGS.cnt_vpn_clients + num_ips = len(project_net) + for i in range(num_ips): fixed_ip = models.FixedIp() - fixed_ip.ip_str = project_net[i] - if i < BOTTOM_RESERVED or i > TOP_RESERVED: + fixed_ip.ip_str = str(project_net[i]) + if i < BOTTOM_RESERVED or num_ips - i < TOP_RESERVED: fixed_ip.reserved = True fixed_ip.network = network fixed_ip.save() diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py index a7310fb26..ecbc65937 100644 --- a/nova/tests/fake_flags.py +++ b/nova/tests/fake_flags.py @@ -26,3 +26,4 @@ FLAGS.fake_rabbit = True FLAGS.fake_network = True FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver' FLAGS.verbose = True +FLAGS.sql_connection = 'sqlite:///:memory:' diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index 72dc88f27..8b7730d87 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -23,6 +23,7 @@ import os import logging from nova import flags +from nova import models from nova import test from nova import utils from nova.auth import manager @@ -47,16 +48,20 @@ class NetworkTestCase(test.TrialTestCase): self.manager = manager.AuthManager() self.user = 
self.manager.create_user('netuser', 'netuser', 'netuser') self.projects = [] - self.projects.append(self.manager.create_project('netuser', - 'netuser', - 'netuser')) + self.service = service.VlanNetworkService() for i in range(0, 6): name = 'project%s' % i self.projects.append(self.manager.create_project(name, 'netuser', name)) - vpn.NetworkData.create(self.projects[i].id) - self.service = service.VlanNetworkService() + # create the necessary network data for the project + self.service.set_network_host(self.projects[i].id) + instance = models.Instance() + instance.mac_address = utils.generate_mac() + instance.hostname = 'fake' + instance.image_id = 'fake' + instance.save() + self.instance = instance def tearDown(self): # pylint: disable=C0103 super(NetworkTestCase, self).tearDown() @@ -67,32 +72,34 @@ class NetworkTestCase(test.TrialTestCase): def test_public_network_allocation(self): """Makes sure that we can allocaate a public ip""" pubnet = IPy.IP(flags.FLAGS.public_range) - address = self.service.allocate_elastic_ip(self.user.id, - self.projects[0].id) + address = self.service.allocate_elastic_ip(self.projects[0].id) self.assertTrue(IPy.IP(address) in pubnet) def test_allocate_deallocate_fixed_ip(self): """Makes sure that we can allocate and deallocate a fixed ip""" - result = self.service.allocate_fixed_ip( - self.user.id, self.projects[0].id) - address = result['private_dns_name'] - mac = result['mac_address'] - net = model.get_project_network(self.projects[0].id, "default") + address = self.service.allocate_fixed_ip(self.projects[0].id, + self.instance.id) + net = service.get_project_network(self.projects[0].id) self.assertEqual(True, is_in_project(address, self.projects[0].id)) - hostname = "test-host" - issue_ip(mac, address, hostname, net.bridge_name) + issue_ip(self.instance.mac_address, + address, + self.instance.hostname, + net.bridge) self.service.deallocate_fixed_ip(address) # Doesn't go away until it's dhcp released self.assertEqual(True, 
is_in_project(address, self.projects[0].id)) - release_ip(mac, address, hostname, net.bridge_name) + release_ip(self.instance.mac_address, + address, + self.instance.hostname, + net.bridge) self.assertEqual(False, is_in_project(address, self.projects[0].id)) def test_side_effects(self): """Ensures allocating and releasing has no side effects""" hostname = "side-effect-host" - result = self.service.allocate_fixed_ip(self.user.id, + result = self.service.allocate_fixed_ip( self.projects[0].id) mac = result['mac_address'] address = result['private_dns_name'] @@ -101,8 +108,8 @@ class NetworkTestCase(test.TrialTestCase): secondmac = result['mac_address'] secondaddress = result['private_dns_name'] - net = model.get_project_network(self.projects[0].id, "default") - secondnet = model.get_project_network(self.projects[1].id, "default") + net = service.get_project_network(self.projects[0].id) + secondnet = service.get_project_network(self.projects[1].id) self.assertEqual(True, is_in_project(address, self.projects[0].id)) self.assertEqual(True, is_in_project(secondaddress, @@ -128,7 +135,7 @@ class NetworkTestCase(test.TrialTestCase): def test_subnet_edge(self): """Makes sure that private ips don't overlap""" - result = self.service.allocate_fixed_ip(self.user.id, + result = self.service.allocate_fixed_ip( self.projects[0].id) firstaddress = result['private_dns_name'] hostname = "toomany-hosts" @@ -146,7 +153,7 @@ class NetworkTestCase(test.TrialTestCase): self.user, project_id) mac3 = result['mac_address'] address3 = result['private_dns_name'] - net = model.get_project_network(project_id, "default") + net = service.get_project_network(project_id) issue_ip(mac, address, hostname, net.bridge_name) issue_ip(mac2, address2, hostname, net.bridge_name) issue_ip(mac3, address3, hostname, net.bridge_name) @@ -162,7 +169,7 @@ class NetworkTestCase(test.TrialTestCase): release_ip(mac, address, hostname, net.bridge_name) release_ip(mac2, address2, hostname, net.bridge_name) 
release_ip(mac3, address3, hostname, net.bridge_name) - net = model.get_project_network(self.projects[0].id, "default") + net = service.get_project_network(self.projects[0].id) self.service.deallocate_fixed_ip(firstaddress) release_ip(mac, firstaddress, hostname, net.bridge_name) @@ -184,12 +191,12 @@ class NetworkTestCase(test.TrialTestCase): def test_ips_are_reused(self): """Makes sure that ip addresses that are deallocated get reused""" result = self.service.allocate_fixed_ip( - self.user.id, self.projects[0].id) + self.projects[0].id) mac = result['mac_address'] address = result['private_dns_name'] hostname = "reuse-host" - net = model.get_project_network(self.projects[0].id, "default") + net = service.get_project_network(self.projects[0].id) issue_ip(mac, address, hostname, net.bridge_name) self.service.deallocate_fixed_ip(address) @@ -215,7 +222,7 @@ class NetworkTestCase(test.TrialTestCase): There are ips reserved at the bottom and top of the range. services (network, gateway, CloudPipe, broadcast) """ - net = model.get_project_network(self.projects[0].id, "default") + net = service.get_project_network(self.projects[0].id) num_preallocated_ips = len(net.assigned) net_size = flags.FLAGS.network_size num_available_ips = net_size - (net.num_bottom_reserved_ips + @@ -226,7 +233,7 @@ class NetworkTestCase(test.TrialTestCase): def test_too_many_addresses(self): """Test for a NoMoreAddresses exception when all fixed ips are used. 
""" - net = model.get_project_network(self.projects[0].id, "default") + net = service.get_project_network(self.projects[0].id) hostname = "toomany-hosts" macs = {} @@ -234,15 +241,17 @@ class NetworkTestCase(test.TrialTestCase): # Number of availaible ips is len of the available list num_available_ips = len(list(net.available)) for i in range(num_available_ips): - result = self.service.allocate_fixed_ip(self.user.id, + result = self.service.allocate_fixed_ip( self.projects[0].id) macs[i] = result['mac_address'] addresses[i] = result['private_dns_name'] issue_ip(macs[i], addresses[i], hostname, net.bridge_name) self.assertEqual(len(list(net.available)), 0) - self.assertRaises(NoMoreAddresses, self.service.allocate_fixed_ip, - self.user.id, self.projects[0].id) + self.assertRaises(NoMoreAddresses, + self.service.allocate_fixed_ip, + self.projects[0].id, + 0) for i in range(len(addresses)): self.service.deallocate_fixed_ip(addresses[i]) @@ -252,7 +261,7 @@ class NetworkTestCase(test.TrialTestCase): def is_in_project(address, project_id): """Returns true if address is in specified project""" - return address in model.get_project_network(project_id).assigned + return models.FixedIp.find_by_ip_str(address) == service.get_project_network(project_id) def binpath(script): diff --git a/nova/virt/fake.py b/nova/virt/fake.py index f7ee34695..060b53729 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -103,7 +103,7 @@ class FakeConnection(object): """ fake_instance = FakeInstance() - self.instances[instance.id] = fake_instance + self.instances[instance.name] = fake_instance fake_instance._state = power_state.RUNNING return defer.succeed(None) @@ -132,7 +132,7 @@ class FakeConnection(object): del self.instances[instance.name] return defer.succeed(None) - def get_info(self, instance_id): + def get_info(self, instance_name): """ Get a block of information about the given instance. 
This is returned as a dictionary containing 'state': The power_state of the instance, @@ -141,14 +141,14 @@ class FakeConnection(object): of virtual CPUs the instance has, 'cpu_time': The total CPU time used by the instance, in nanoseconds. """ - i = self.instances[instance_id] + i = self.instances[instance_name] return {'state': i._state, 'max_mem': 0, 'mem': 0, 'num_cpu': 2, 'cpu_time': 0} - def list_disks(self, instance_id): + def list_disks(self, instance_name): """ Return the IDs of all the virtual disks attached to the specified instance, as a list. These IDs are opaque to the caller (they are @@ -160,7 +160,7 @@ class FakeConnection(object): """ return ['A_DISK'] - def list_interfaces(self, instance_id): + def list_interfaces(self, instance_name): """ Return the IDs of all the virtual network interfaces attached to the specified instance, as a list. These IDs are opaque to the caller @@ -173,10 +173,10 @@ class FakeConnection(object): """ return ['A_VIF'] - def block_stats(self, instance_id, disk_id): + def block_stats(self, instance_name, disk_id): """ Return performance counters associated with the given disk_id on the - given instance_id. These are returned as [rd_req, rd_bytes, wr_req, + given instance_name. These are returned as [rd_req, rd_bytes, wr_req, wr_bytes, errs], where rd indicates read, wr indicates write, req is the total number of I/O requests made, bytes is the total number of bytes transferred, and errs is the number of requests held up due to a @@ -194,7 +194,7 @@ class FakeConnection(object): """ return [0L, 0L, 0L, 0L, null] - def interface_stats(self, instance_id, iface_id): + def interface_stats(self, instance_name, iface_id): """ Return performance counters associated with the given iface_id on the given instance_id. 
These are returned as [rx_bytes, rx_packets, -- cgit From 8eb531becb7e67169fddb8f7d1547589ab733dc7 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 17 Aug 2010 20:33:37 -0700 Subject: almost there --- bin/nova-dhcpbridge | 17 +++++----- nova/models.py | 9 +++--- nova/network/linux_net.py | 28 ++++++++++------- nova/network/service.py | 3 +- nova/tests/network_unittest.py | 71 ++++++++++++++++++++---------------------- 5 files changed, 65 insertions(+), 63 deletions(-) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index f70a4482c..593811598 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -34,7 +34,6 @@ from nova import flags from nova import rpc from nova import utils from nova.network import linux_net -from nova.network import model from nova.network import service FLAGS = flags.FLAGS @@ -43,11 +42,12 @@ FLAGS = flags.FLAGS def add_lease(_mac, ip, _hostname, _interface): """Set the IP that was assigned by the DHCP server.""" if FLAGS.fake_rabbit: + logging.debug("leasing_ip") service.VlanNetworkService().lease_ip(ip) else: rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name), {"method": "lease_ip", - "args": {"fixed_ip": ip}}) + "args": {"fixed_ip_str": ip}}) def old_lease(_mac, _ip, _hostname, _interface): @@ -58,20 +58,18 @@ def old_lease(_mac, _ip, _hostname, _interface): def del_lease(_mac, ip, _hostname, _interface): """Called when a lease expires.""" if FLAGS.fake_rabbit: + logging.debug("releasing_ip") service.VlanNetworkService().release_ip(ip) else: rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name), {"method": "release_ip", - "args": {"fixed_ip": ip}}) + "args": {"fixed_ip_str": ip}}) def init_leases(interface): """Get the list of hosts for an interface.""" - net = model.get_network_by_interface(interface) - res = "" - for address in net.assigned_objs: - res += "%s\n" % linux_net.host_dhcp(address) - return res + network = service.get_network_by_interface(interface) + return linux_net.get_dhcp_hosts(network) 
def main(): @@ -80,6 +78,9 @@ def main(): utils.default_flagfile(flagfile) argv = FLAGS(sys.argv) interface = os.environ.get('DNSMASQ_INTERFACE', 'br0') + LOG_FILENAME = 'example.log' + logging.basicConfig(filename=LOG_FILENAME,level=logging.DEBUG) + logging.debug("this is a test") if int(os.environ.get('TESTING', '0')): FLAGS.fake_rabbit = True FLAGS.redis_db = 8 diff --git a/nova/models.py b/nova/models.py index 5fc4ba1cf..110a4fc80 100644 --- a/nova/models.py +++ b/nova/models.py @@ -214,7 +214,6 @@ class Volume(Base, NovaBase): attach_time = Column(String) # FIXME datetime status = Column(String) # FIXME enum? attach_status = Column(String) # FIXME enum - delete_on_termination = Column(Boolean) class Network(Base, NovaBase): @@ -222,7 +221,7 @@ class Network(Base, NovaBase): id = Column(Integer, primary_key=True) kind = Column(String) - injected = Column(Boolean) + injected = Column(Boolean, default=False) network_str = Column(String) netmask = Column(String) bridge = Column(String) @@ -259,9 +258,9 @@ class FixedIp(Base, NovaBase): instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) instance = relationship(Instance, backref=backref('fixed_ip', uselist=False)) - allocated = Column(Boolean) - leased = Column(Boolean) - reserved = Column(Boolean) + allocated = Column(Boolean, default=False) + leased = Column(Boolean, default=False) + reserved = Column(Boolean, default=False) @classmethod def find_by_ip_str(cls, ip_str): diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 48d71f11e..6fa3bae73 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -122,11 +122,15 @@ def ensure_bridge(bridge, interface, network=None): _execute("sudo ifconfig %s up" % bridge) -def host_dhcp(fixed_ip): - """Return a host string for a fixed ip""" - return "%s,%s.novalocal,%s" % (fixed_ip.instance.mac_address, - fixed_ip.instance.host_name, - fixed_ip.ip_str) +def get_dhcp_hosts(network): + hosts = [] + # FIXME abstract this + 
session = models.NovaBase.get_session() + query = session.query(models.FixedIp).filter_by(allocated=True) + fixed_ips = query.filter_by(network_id=network.id) + for fixed_ip in network.fixed_ips: + hosts.append(_host_dhcp(fixed_ip)) + return '\n'.join(hosts) # TODO(ja): if the system has restarted or pid numbers have wrapped @@ -140,13 +144,8 @@ def update_dhcp(network): if a dnsmasq instance is already running then send a HUP signal causing it to reload, otherwise spawn a new instance """ - # FIXME abstract this - session = models.NovaBase.get_session() - query = session.query(models.FixedIp).filter_by(allocated=True) - fixed_ips = query.filter_by(network_id=network.id) with open(_dhcp_file(network['vlan'], 'conf'), 'w') as f: - for fixed_ip in fixed_ips: - f.write("%s\n" % host_dhcp(fixed_ip)) + f.write(get_dhcp_hosts(network)) pid = _dnsmasq_pid_for(network) @@ -166,6 +165,13 @@ def update_dhcp(network): _execute(_dnsmasq_cmd(network), addl_env=env) +def _host_dhcp(fixed_ip): + """Return a host string for a fixed ip""" + return "%s,%s.novalocal,%s" % (fixed_ip.instance.mac_address, + fixed_ip.instance.host_name, + fixed_ip.ip_str) + + def _execute(cmd, addl_env=None): """Wrapper around utils._execute for fake_network""" if FLAGS.fake_network: diff --git a/nova/network/service.py b/nova/network/service.py index 115a7fa98..8d676111a 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -121,11 +121,10 @@ class BaseNetworkService(service.Service): def allocate_fixed_ip(self, project_id, instance_id, *args, **kwargs): """Gets fixed ip from the pool""" - print "allocating", project_id, instance_id network = get_network_for_project(project_id) session = models.NovaBase.get_session() query = session.query(models.FixedIp).filter_by(network_id=network.id) - query = query.filter_by(allocated=False).filter_by(reserved=False) + query = query.filter_by(reserved=False).filter_by(allocated=False) query = query.filter_by(leased=False) while(True): fixed_ip = 
query.first() diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index 8b7730d87..657dd89d2 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -43,7 +43,8 @@ class NetworkTestCase(test.TrialTestCase): fake_storage=True, fake_network=True, auth_driver='nova.auth.ldapdriver.FakeLdapDriver', - network_size=32) + network_size=32, + num_networks=10) logging.getLogger().setLevel(logging.DEBUG) self.manager = manager.AuthManager() self.user = self.manager.create_user('netuser', 'netuser', 'netuser') @@ -79,22 +80,16 @@ class NetworkTestCase(test.TrialTestCase): """Makes sure that we can allocate and deallocate a fixed ip""" address = self.service.allocate_fixed_ip(self.projects[0].id, self.instance.id) - net = service.get_project_network(self.projects[0].id) - self.assertEqual(True, is_in_project(address, self.projects[0].id)) - issue_ip(self.instance.mac_address, - address, - self.instance.hostname, - net.bridge) + net = service.get_network_for_project(self.projects[0].id) + self.assertEqual(True, is_allocated_in_project(address, self.projects[0].id)) + issue_ip(address, net.bridge) self.service.deallocate_fixed_ip(address) # Doesn't go away until it's dhcp released - self.assertEqual(True, is_in_project(address, self.projects[0].id)) + self.assertEqual(True, is_allocated_in_project(address, self.projects[0].id)) - release_ip(self.instance.mac_address, - address, - self.instance.hostname, - net.bridge) - self.assertEqual(False, is_in_project(address, self.projects[0].id)) + release_ip(address, net.bridge) + self.assertEqual(False, is_allocated_in_project(address, self.projects[0].id)) def test_side_effects(self): """Ensures allocating and releasing has no side effects""" @@ -108,13 +103,13 @@ class NetworkTestCase(test.TrialTestCase): secondmac = result['mac_address'] secondaddress = result['private_dns_name'] - net = service.get_project_network(self.projects[0].id) - secondnet = 
service.get_project_network(self.projects[1].id) + net = service.get_network_for_project(self.projects[0].id) + secondnet = service.get_network_for_project(self.projects[1].id) - self.assertEqual(True, is_in_project(address, self.projects[0].id)) - self.assertEqual(True, is_in_project(secondaddress, + self.assertEqual(True, is_allocated_in_project(address, self.projects[0].id)) + self.assertEqual(True, is_allocated_in_project(secondaddress, self.projects[1].id)) - self.assertEqual(False, is_in_project(address, self.projects[1].id)) + self.assertEqual(False, is_allocated_in_project(address, self.projects[1].id)) # Addresses are allocated before they're issued issue_ip(mac, address, hostname, net.bridge_name) @@ -122,15 +117,15 @@ class NetworkTestCase(test.TrialTestCase): self.service.deallocate_fixed_ip(address) release_ip(mac, address, hostname, net.bridge_name) - self.assertEqual(False, is_in_project(address, self.projects[0].id)) + self.assertEqual(False, is_allocated_in_project(address, self.projects[0].id)) # First address release shouldn't affect the second - self.assertEqual(True, is_in_project(secondaddress, + self.assertEqual(True, is_allocated_in_project(secondaddress, self.projects[1].id)) self.service.deallocate_fixed_ip(secondaddress) release_ip(secondmac, secondaddress, hostname, secondnet.bridge_name) - self.assertEqual(False, is_in_project(secondaddress, + self.assertEqual(False, is_allocated_in_project(secondaddress, self.projects[1].id)) def test_subnet_edge(self): @@ -153,15 +148,15 @@ class NetworkTestCase(test.TrialTestCase): self.user, project_id) mac3 = result['mac_address'] address3 = result['private_dns_name'] - net = service.get_project_network(project_id) + net = service.get_network_for_project(project_id) issue_ip(mac, address, hostname, net.bridge_name) issue_ip(mac2, address2, hostname, net.bridge_name) issue_ip(mac3, address3, hostname, net.bridge_name) - self.assertEqual(False, is_in_project(address, + self.assertEqual(False, 
is_allocated_in_project(address, self.projects[0].id)) - self.assertEqual(False, is_in_project(address2, + self.assertEqual(False, is_allocated_in_project(address2, self.projects[0].id)) - self.assertEqual(False, is_in_project(address3, + self.assertEqual(False, is_allocated_in_project(address3, self.projects[0].id)) self.service.deallocate_fixed_ip(address) self.service.deallocate_fixed_ip(address2) @@ -169,7 +164,7 @@ class NetworkTestCase(test.TrialTestCase): release_ip(mac, address, hostname, net.bridge_name) release_ip(mac2, address2, hostname, net.bridge_name) release_ip(mac3, address3, hostname, net.bridge_name) - net = service.get_project_network(self.projects[0].id) + net = service.get_network_for_project(self.projects[0].id) self.service.deallocate_fixed_ip(firstaddress) release_ip(mac, firstaddress, hostname, net.bridge_name) @@ -196,7 +191,7 @@ class NetworkTestCase(test.TrialTestCase): address = result['private_dns_name'] hostname = "reuse-host" - net = service.get_project_network(self.projects[0].id) + net = service.get_network_for_project(self.projects[0].id) issue_ip(mac, address, hostname, net.bridge_name) self.service.deallocate_fixed_ip(address) @@ -222,7 +217,7 @@ class NetworkTestCase(test.TrialTestCase): There are ips reserved at the bottom and top of the range. services (network, gateway, CloudPipe, broadcast) """ - net = service.get_project_network(self.projects[0].id) + net = service.get_network_for_project(self.projects[0].id) num_preallocated_ips = len(net.assigned) net_size = flags.FLAGS.network_size num_available_ips = net_size - (net.num_bottom_reserved_ips + @@ -233,7 +228,7 @@ class NetworkTestCase(test.TrialTestCase): def test_too_many_addresses(self): """Test for a NoMoreAddresses exception when all fixed ips are used. 
""" - net = service.get_project_network(self.projects[0].id) + net = service.get_network_for_project(self.projects[0].id) hostname = "toomany-hosts" macs = {} @@ -259,9 +254,13 @@ class NetworkTestCase(test.TrialTestCase): self.assertEqual(len(list(net.available)), num_available_ips) -def is_in_project(address, project_id): +def is_allocated_in_project(address, project_id): """Returns true if address is in specified project""" - return models.FixedIp.find_by_ip_str(address) == service.get_project_network(project_id) + fixed_ip = models.FixedIp.find_by_ip_str(address) + project_net = service.get_network_for_project(project_id) + print fixed_ip.instance + # instance exists until release + return fixed_ip.instance and project_net == fixed_ip.network def binpath(script): @@ -269,10 +268,9 @@ def binpath(script): return os.path.abspath(os.path.join(__file__, "../../../bin", script)) -def issue_ip(mac, private_ip, hostname, interface): +def issue_ip(private_ip, interface): """Run add command on dhcpbridge""" - cmd = "%s add %s %s %s" % (binpath('nova-dhcpbridge'), - mac, private_ip, hostname) + cmd = "%s add %s fake fake" % (binpath('nova-dhcpbridge'), private_ip) env = {'DNSMASQ_INTERFACE': interface, 'TESTING': '1', 'FLAGFILE': FLAGS.dhcpbridge_flagfile} @@ -280,10 +278,9 @@ def issue_ip(mac, private_ip, hostname, interface): logging.debug("ISSUE_IP: %s, %s ", out, err) -def release_ip(mac, private_ip, hostname, interface): +def release_ip(private_ip, interface): """Run del command on dhcpbridge""" - cmd = "%s del %s %s %s" % (binpath('nova-dhcpbridge'), - mac, private_ip, hostname) + cmd = "%s del %s fake fake" % (binpath('nova-dhcpbridge'), private_ip) env = {'DNSMASQ_INTERFACE': interface, 'TESTING': '1', 'FLAGFILE': FLAGS.dhcpbridge_flagfile} -- cgit From 67ea462eadcc02ca2f8244062c786bd98871e9e8 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Tue, 17 Aug 2010 23:46:16 -0700 Subject: Added unittests for wsgi and api. 
--- nova/api/__init__.py | 5 +- nova/api/test.py | 70 +++++++++++++++++++++++++++ nova/wsgi.py | 17 ++++--- nova/wsgi_test.py | 133 +++++++++++++++++++++++++++++++++++++++++++++++++++ pylintrc | 14 ++++-- 5 files changed, 224 insertions(+), 15 deletions(-) create mode 100644 nova/api/test.py create mode 100644 nova/wsgi_test.py diff --git a/nova/api/__init__.py b/nova/api/__init__.py index a6bb93348..b9b9e3988 100644 --- a/nova/api/__init__.py +++ b/nova/api/__init__.py @@ -32,7 +32,6 @@ class API(wsgi.Router): def __init__(self): mapper = routes.Mapper() - mapper.connect(None, "/v1.0/{path_info:.*}", - controller=rackspace.API()) - mapper.connect(None, "/ec2/{path_info:.*}", controller=ec2.API()) + mapper.connect("/v1.0/{path_info:.*}", controller=rackspace.API()) + mapper.connect("/ec2/{path_info:.*}", controller=ec2.API()) super(API, self).__init__(mapper) diff --git a/nova/api/test.py b/nova/api/test.py new file mode 100644 index 000000000..09f79c02e --- /dev/null +++ b/nova/api/test.py @@ -0,0 +1,70 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Test for the root WSGI middleware for all API controllers. 
+""" + +import unittest + +import stubout + +from nova import api +from nova import wsgi_test + + +class Test(unittest.TestCase): + + def setUp(self): # pylint: disable-msg=C0103 + self.called = False + self.stubs = stubout.StubOutForTesting() + + def tearDown(self): # pylint: disable-msg=C0103 + self.stubs.UnsetAll() + + def test_rackspace(self): + self.stubs.Set(api.rackspace, 'API', get_api_stub(self)) + api.API()(wsgi_test.get_environ({'PATH_INFO': '/v1.0/cloud'}), + wsgi_test.start_response) + self.assertTrue(self.called) + + def test_ec2(self): + self.stubs.Set(api.ec2, 'API', get_api_stub(self)) + api.API()(wsgi_test.get_environ({'PATH_INFO': '/ec2/cloud'}), + wsgi_test.start_response) + self.assertTrue(self.called) + + def test_not_found(self): + self.stubs.Set(api.ec2, 'API', get_api_stub(self)) + self.stubs.Set(api.rackspace, 'API', get_api_stub(self)) + api.API()(wsgi_test.get_environ({'PATH_INFO': '/'}), + wsgi_test.start_response) + self.assertFalse(self.called) + + +def get_api_stub(test_object): + """Get a stub class that verifies next part of the request.""" + + class APIStub(object): + """Class to verify request and mark it was called.""" + test = test_object + + def __call__(self, environ, start_response): + self.test.assertEqual(environ['PATH_INFO'], '/cloud') + self.test.called = True + + return APIStub diff --git a/nova/wsgi.py b/nova/wsgi.py index a0a175dc7..baf6cccd9 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -83,7 +83,7 @@ class Application(object): raise NotImplementedError("You must implement __call__") -class Middleware(Application): # pylint: disable=W0223 +class Middleware(Application): """ Base WSGI middleware wrapper. These classes require an application to be initialized that will be called next. By default the middleware will @@ -91,11 +91,11 @@ class Middleware(Application): # pylint: disable=W0223 behavior. 
""" - def __init__(self, application): # pylint: disable=W0231 + def __init__(self, application): # pylint: disable-msg=W0231 self.application = application @webob.dec.wsgify - def __call__(self, req): + def __call__(self, req): # pylint: disable-msg=W0221 """Override to implement middleware behavior.""" return self.application @@ -113,7 +113,7 @@ class Debug(Middleware): resp = req.get_response(self.application) print ("*" * 40) + " RESPONSE HEADERS" - for (key, value) in resp.headers: + for (key, value) in resp.headers.iteritems(): print key, "=", value print @@ -127,7 +127,7 @@ class Debug(Middleware): Iterator that prints the contents of a wrapper string iterator when iterated. """ - print ("*" * 40) + "BODY" + print ("*" * 40) + " BODY" for part in app_iter: sys.stdout.write(part) sys.stdout.flush() @@ -176,8 +176,9 @@ class Router(object): """ return self._router + @staticmethod @webob.dec.wsgify - def _dispatch(self, req): + def _dispatch(req): """ Called by self._router after matching the incoming request to a route and putting the information into req.environ. Either returns 404 @@ -197,6 +198,7 @@ class Controller(object): must, in addition to their normal parameters, accept a 'req' argument which is the incoming webob.Request. 
""" + @webob.dec.wsgify def __call__(self, req): """ @@ -249,6 +251,7 @@ class Serializer(object): return repr(data) def _to_xml_node(self, doc, metadata, nodename, data): + """Recursive method to convert data members to XML nodes.""" result = doc.createElement(nodename) if type(data) is list: singular = metadata.get('plurals', {}).get(nodename, None) @@ -262,7 +265,7 @@ class Serializer(object): result.appendChild(node) elif type(data) is dict: attrs = metadata.get('attributes', {}).get(nodename, {}) - for k,v in data.items(): + for k, v in data.items(): if k in attrs: result.setAttribute(k, str(v)) else: diff --git a/nova/wsgi_test.py b/nova/wsgi_test.py new file mode 100644 index 000000000..02bf067d6 --- /dev/null +++ b/nova/wsgi_test.py @@ -0,0 +1,133 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Test WSGI basics and provide some helper functions for other WSGI tests. 
+""" + +import unittest + +import routes + +from nova import wsgi + + +class Test(unittest.TestCase): + + def setUp(self): # pylint: disable-msg=C0103 + self.called = False + + def test_debug(self): + + class Application(wsgi.Application): + """Dummy application to test debug.""" + test = self + + def __call__(self, environ, test_start_response): + test_start_response("200", [("X-Test", "checking")]) + self.test.called = True + return ['Test response'] + + app = wsgi.Debug(Application())(get_environ(), start_response) + self.assertTrue(self.called) + for _ in app: + pass + + def test_router(self): + + class Application(wsgi.Application): + """Test application to call from router.""" + test = self + + def __call__(self, environ, test_start_response): + test_start_response("200", []) + self.test.called = True + return [] + + class Router(wsgi.Router): + """Test router.""" + + def __init__(self): + mapper = routes.Mapper() + mapper.connect("/test", controller=Application()) + super(Router, self).__init__(mapper) + + Router()(get_environ({'PATH_INFO': '/test'}), start_response) + self.assertTrue(self.called) + self.called = False + Router()(get_environ({'PATH_INFO': '/bad'}), start_response) + self.assertFalse(self.called) + + def test_controller(self): + + class Controller(wsgi.Controller): + """Test controller to call from router.""" + test = self + + def show(self, **kwargs): + """Mark that this has been called.""" + self.test.called = True + self.test.assertEqual(kwargs['id'], '123') + return "Test" + + class Router(wsgi.Router): + """Test router.""" + + def __init__(self): + mapper = routes.Mapper() + mapper.resource("test", "tests", controller=Controller()) + super(Router, self).__init__(mapper) + + Router()(get_environ({'PATH_INFO': '/tests/123'}), start_response) + self.assertTrue(self.called) + self.called = False + Router()(get_environ({'PATH_INFO': '/test/123'}), start_response) + self.assertFalse(self.called) + + def test_serializer(self): + # TODO(eday): 
Placeholder for serializer testing. + pass + + +def get_environ(overwrite={}): # pylint: disable-msg=W0102 + """Get a WSGI environment, overwriting any entries given.""" + environ = {'SERVER_PROTOCOL': 'HTTP/1.1', + 'GATEWAY_INTERFACE': 'CGI/1.1', + 'wsgi.version': (1, 0), + 'SERVER_PORT': '443', + 'SERVER_NAME': '127.0.0.1', + 'REMOTE_ADDR': '127.0.0.1', + 'wsgi.run_once': False, + 'wsgi.errors': None, + 'wsgi.multiprocess': False, + 'SCRIPT_NAME': '', + 'wsgi.url_scheme': 'https', + 'wsgi.input': None, + 'REQUEST_METHOD': 'GET', + 'PATH_INFO': '/', + 'CONTENT_TYPE': 'text/plain', + 'wsgi.multithread': True, + 'QUERY_STRING': '', + 'eventlet.input': None} + return dict(environ, **overwrite) + + +def start_response(_status, _headers): + """Dummy start_response to use with WSGI tests.""" + pass diff --git a/pylintrc b/pylintrc index 6c799c7ea..36cc337e5 100644 --- a/pylintrc +++ b/pylintrc @@ -1,9 +1,7 @@ [Messages Control] -disable=C0103 -# TODOs in code comments are fine... -disable=W0511 -# *args and **kwargs are fine -disable=W0142 +# W0511: TODOs in code comments are fine. +# W0142: *args and **kwargs are fine. +disable-msg=W0511,W0142 [Basic] # Variables can be 1 to 31 characters long, with @@ -14,6 +12,12 @@ variable-rgx=[a-z_][a-z0-9_]{0,30}$ # and be lowecased with underscores method-rgx=[a-z_][a-z0-9_]{2,50}$ +# Module names matching nova-* are ok (files in bin/) +module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(nova-[a-z0-9_]+))$ + +# Don't require docstrings on tests. 
+no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$ + [Design] max-public-methods=100 min-public-methods=0 -- cgit From 62e3bab39fcd9628325c3a16d4b76b5e82e35099 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 18 Aug 2010 02:07:04 -0700 Subject: network tests pass --- bin/nova-dhcpbridge | 6 +- nova/auth/manager.py | 5 +- nova/network/exception.py | 5 + nova/network/service.py | 15 +-- nova/tests/network_unittest.py | 257 ++++++++++++++++++++++------------------- 5 files changed, 158 insertions(+), 130 deletions(-) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index 593811598..266fd70ce 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -81,13 +81,17 @@ def main(): LOG_FILENAME = 'example.log' logging.basicConfig(filename=LOG_FILENAME,level=logging.DEBUG) logging.debug("this is a test") + sqlfile = os.environ.get('SQL_DB', '') if int(os.environ.get('TESTING', '0')): + logging.debug("fake rabbit is true") FLAGS.fake_rabbit = True FLAGS.redis_db = 8 - FLAGS.network_size = 32 + FLAGS.network_size = 16 FLAGS.connection_type = 'fake' FLAGS.fake_network = True FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver' + FLAGS.num_networks = 5 + FLAGS.sql_connection = 'sqlite:///%s' % sqlfile action = argv[1] if action in ['add', 'del', 'old']: mac = argv[2] diff --git a/nova/auth/manager.py b/nova/auth/manager.py index d2d4d641b..69816882e 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -573,9 +573,10 @@ class AuthManager(object): # FIXME(vish): this shouldn't be messing with the datamodel directly if not isinstance(project, Project): project = self.get_project(project) - if not project.network: + if not project.network.vpn_public_port: raise exception.NotFound('project network data has not been set') - return (project.network.vpn_ip_str, project.network.vpn_port) + return (project.network.vpn_public_ip_str, + project.network.vpn_public_port) def delete_project(self, project): """Deletes a project""" diff --git 
a/nova/network/exception.py b/nova/network/exception.py index 2a3f5ec14..ad0dd404d 100644 --- a/nova/network/exception.py +++ b/nova/network/exception.py @@ -23,6 +23,11 @@ Exceptions for network errors. from nova import exception +class NoMoreNetworks(exception.Error): + """No More Networks are available""" + pass + + class NoMoreAddresses(exception.Error): """No More Addresses are available in the network""" pass diff --git a/nova/network/service.py b/nova/network/service.py index 8d676111a..9bbb833b7 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -165,9 +165,8 @@ class BaseNetworkService(service.Service): query = session.query(models.ElasticIp).filter_by(node_name=node_name) query = query.filter_by(fixed_ip_id=None) while(True): - try: - elastic_ip = query.first() - except exc.NoResultFound: + elastic_ip = query.first() + if not elastic_ip: raise network_exception.NoMoreAddresses() elastic_ip.project_id = project_id session.add(elastic_ip) @@ -180,7 +179,7 @@ class BaseNetworkService(service.Service): def associate_elastic_ip(self, elastic_ip_str, fixed_ip_str): """Associates an elastic ip to a fixed ip""" elastic_ip = models.ElasticIp.find_by_ip_str(elastic_ip_str) - fixed_ip = models.FixedIp.find_by_ip_str(elastic_ip_str) + fixed_ip = models.FixedIp.find_by_ip_str(fixed_ip_str) elastic_ip.fixed_ip = fixed_ip _driver.bind_elastic_ip(elastic_ip_str) _driver.ensure_elastic_forward(elastic_ip_str, fixed_ip_str) @@ -254,9 +253,11 @@ class VlanNetworkService(BaseNetworkService): network.vpn_public_port, network.vpn_private_ip_str) ip_str = fixed_ip.ip_str + logging.debug("Allocating vpn IP %s", ip_str) else: parent = super(VlanNetworkService, self) ip_str = parent.allocate_fixed_ip(project_id, instance_id) + logging.debug("sql %s", FLAGS.sql_connection) _driver.ensure_vlan_bridge(network.vlan, network.bridge) return ip_str @@ -273,6 +274,7 @@ class VlanNetworkService(BaseNetworkService): def lease_ip(self, fixed_ip_str): """Called by bridge 
when ip is leased""" + logging.debug("sql %s", FLAGS.sql_connection) fixed_ip = models.FixedIp.find_by_ip_str(fixed_ip_str) if not fixed_ip.allocated: raise network_exception.AddressNotAllocated(fixed_ip_str) @@ -333,9 +335,8 @@ class VlanNetworkService(BaseNetworkService): node_name = FLAGS.node_name query = session.query(models.NetworkIndex).filter_by(network_id=None) while(True): - try: - network_index = query.first() - except exc.NoResultFound: + network_index = query.first() + if not network_index: raise network_exception.NoMoreNetworks() network_index.network = network session.add(network_index) diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index 657dd89d2..00aaac346 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -21,6 +21,7 @@ Unit Tests for network code import IPy import os import logging +import tempfile from nova import flags from nova import models @@ -28,7 +29,7 @@ from nova import test from nova import utils from nova.auth import manager from nova.network import service -from nova.network.exception import NoMoreAddresses +from nova.network.exception import NoMoreAddresses, NoMoreNetworks FLAGS = flags.FLAGS @@ -39,18 +40,21 @@ class NetworkTestCase(test.TrialTestCase): super(NetworkTestCase, self).setUp() # NOTE(vish): if you change these flags, make sure to change the # flags in the corresponding section in nova-dhcpbridge + fd, sqlfile = tempfile.mkstemp() + self.sqlfile = os.path.abspath(sqlfile) self.flags(connection_type='fake', + sql_connection='sqlite:///%s' % self.sqlfile, fake_storage=True, fake_network=True, auth_driver='nova.auth.ldapdriver.FakeLdapDriver', - network_size=32, - num_networks=10) + network_size=16, + num_networks=5) logging.getLogger().setLevel(logging.DEBUG) self.manager = manager.AuthManager() self.user = self.manager.create_user('netuser', 'netuser', 'netuser') self.projects = [] self.service = service.VlanNetworkService() - for i in range(0, 6): + for i in 
range(5): name = 'project%s' % i self.projects.append(self.manager.create_project(name, 'netuser', @@ -62,149 +66,145 @@ class NetworkTestCase(test.TrialTestCase): instance.hostname = 'fake' instance.image_id = 'fake' instance.save() - self.instance = instance + self.instance_id = instance.id def tearDown(self): # pylint: disable=C0103 super(NetworkTestCase, self).tearDown() for project in self.projects: self.manager.delete_project(project) self.manager.delete_user(self.user) + os.unlink(self.sqlfile) - def test_public_network_allocation(self): + def test_public_network_association(self): """Makes sure that we can allocaate a public ip""" + # FIXME better way of adding elastic ips pubnet = IPy.IP(flags.FLAGS.public_range) - address = self.service.allocate_elastic_ip(self.projects[0].id) - self.assertTrue(IPy.IP(address) in pubnet) + elastic_ip = models.ElasticIp() + elastic_ip.ip_str = str(pubnet[0]) + elastic_ip.node_name = FLAGS.node_name + elastic_ip.save() + eaddress = self.service.allocate_elastic_ip(self.projects[0].id) + faddress = self.service.allocate_fixed_ip(self.projects[0].id, + self.instance_id) + self.assertEqual(eaddress, str(pubnet[0])) + self.service.associate_elastic_ip(eaddress, faddress) + # FIXME datamodel abstraction + self.assertEqual(elastic_ip.fixed_ip.ip_str, faddress) + self.service.disassociate_elastic_ip(eaddress) + self.assertEqual(elastic_ip.fixed_ip, None) + self.service.deallocate_elastic_ip(eaddress) + self.service.deallocate_fixed_ip(faddress) def test_allocate_deallocate_fixed_ip(self): """Makes sure that we can allocate and deallocate a fixed ip""" address = self.service.allocate_fixed_ip(self.projects[0].id, - self.instance.id) + self.instance_id) net = service.get_network_for_project(self.projects[0].id) - self.assertEqual(True, is_allocated_in_project(address, self.projects[0].id)) - issue_ip(address, net.bridge) + self.assertTrue(is_allocated_in_project(address, self.projects[0].id)) + issue_ip(address, net.bridge, 
self.sqlfile) self.service.deallocate_fixed_ip(address) # Doesn't go away until it's dhcp released - self.assertEqual(True, is_allocated_in_project(address, self.projects[0].id)) + self.assertTrue(is_allocated_in_project(address, self.projects[0].id)) - release_ip(address, net.bridge) - self.assertEqual(False, is_allocated_in_project(address, self.projects[0].id)) + release_ip(address, net.bridge, self.sqlfile) + self.assertFalse(is_allocated_in_project(address, self.projects[0].id)) def test_side_effects(self): """Ensures allocating and releasing has no side effects""" - hostname = "side-effect-host" - result = self.service.allocate_fixed_ip( - self.projects[0].id) - mac = result['mac_address'] - address = result['private_dns_name'] - result = self.service.allocate_fixed_ip(self.user, - self.projects[1].id) - secondmac = result['mac_address'] - secondaddress = result['private_dns_name'] + address = self.service.allocate_fixed_ip(self.projects[0].id, + self.instance_id) + address2 = self.service.allocate_fixed_ip(self.projects[1].id, + self.instance_id) net = service.get_network_for_project(self.projects[0].id) - secondnet = service.get_network_for_project(self.projects[1].id) + net2 = service.get_network_for_project(self.projects[1].id) - self.assertEqual(True, is_allocated_in_project(address, self.projects[0].id)) - self.assertEqual(True, is_allocated_in_project(secondaddress, - self.projects[1].id)) - self.assertEqual(False, is_allocated_in_project(address, self.projects[1].id)) + self.assertTrue(is_allocated_in_project(address, self.projects[0].id)) + self.assertTrue(is_allocated_in_project(address2, self.projects[1].id)) + self.assertFalse(is_allocated_in_project(address, self.projects[1].id)) # Addresses are allocated before they're issued - issue_ip(mac, address, hostname, net.bridge_name) - issue_ip(secondmac, secondaddress, hostname, secondnet.bridge_name) + issue_ip(address, net.bridge, self.sqlfile) + issue_ip(address2, net2.bridge, self.sqlfile) 
self.service.deallocate_fixed_ip(address) - release_ip(mac, address, hostname, net.bridge_name) - self.assertEqual(False, is_allocated_in_project(address, self.projects[0].id)) + release_ip(address, net.bridge, self.sqlfile) + self.assertFalse(is_allocated_in_project(address, self.projects[0].id)) # First address release shouldn't affect the second - self.assertEqual(True, is_allocated_in_project(secondaddress, - self.projects[1].id)) + self.assertTrue(is_allocated_in_project(address2, self.projects[1].id)) - self.service.deallocate_fixed_ip(secondaddress) - release_ip(secondmac, secondaddress, hostname, secondnet.bridge_name) - self.assertEqual(False, is_allocated_in_project(secondaddress, - self.projects[1].id)) + self.service.deallocate_fixed_ip(address2) + issue_ip(address2, net.bridge, self.sqlfile) + release_ip(address2, net2.bridge, self.sqlfile) + self.assertFalse(is_allocated_in_project(address2, self.projects[1].id)) def test_subnet_edge(self): """Makes sure that private ips don't overlap""" - result = self.service.allocate_fixed_ip( - self.projects[0].id) - firstaddress = result['private_dns_name'] - hostname = "toomany-hosts" + first = self.service.allocate_fixed_ip(self.projects[0].id, + self.instance_id) for i in range(1, 5): project_id = self.projects[i].id - result = self.service.allocate_fixed_ip( - self.user, project_id) - mac = result['mac_address'] - address = result['private_dns_name'] - result = self.service.allocate_fixed_ip( - self.user, project_id) - mac2 = result['mac_address'] - address2 = result['private_dns_name'] - result = self.service.allocate_fixed_ip( - self.user, project_id) - mac3 = result['mac_address'] - address3 = result['private_dns_name'] + address = self.service.allocate_fixed_ip(project_id, self.instance_id) + address2 = self.service.allocate_fixed_ip(project_id, self.instance_id) + address3 = self.service.allocate_fixed_ip(project_id, self.instance_id) net = service.get_network_for_project(project_id) - issue_ip(mac, 
address, hostname, net.bridge_name) - issue_ip(mac2, address2, hostname, net.bridge_name) - issue_ip(mac3, address3, hostname, net.bridge_name) - self.assertEqual(False, is_allocated_in_project(address, - self.projects[0].id)) - self.assertEqual(False, is_allocated_in_project(address2, - self.projects[0].id)) - self.assertEqual(False, is_allocated_in_project(address3, - self.projects[0].id)) + issue_ip(address, net.bridge, self.sqlfile) + issue_ip(address2, net.bridge, self.sqlfile) + issue_ip(address3, net.bridge, self.sqlfile) + self.assertFalse(is_allocated_in_project(address, + self.projects[0].id)) + self.assertFalse(is_allocated_in_project(address2, + self.projects[0].id)) + self.assertFalse(is_allocated_in_project(address3, + self.projects[0].id)) self.service.deallocate_fixed_ip(address) self.service.deallocate_fixed_ip(address2) self.service.deallocate_fixed_ip(address3) - release_ip(mac, address, hostname, net.bridge_name) - release_ip(mac2, address2, hostname, net.bridge_name) - release_ip(mac3, address3, hostname, net.bridge_name) + release_ip(address, net.bridge, self.sqlfile) + release_ip(address2, net.bridge, self.sqlfile) + release_ip(address3, net.bridge, self.sqlfile) net = service.get_network_for_project(self.projects[0].id) - self.service.deallocate_fixed_ip(firstaddress) - release_ip(mac, firstaddress, hostname, net.bridge_name) + self.service.deallocate_fixed_ip(first) def test_vpn_ip_and_port_looks_valid(self): """Ensure the vpn ip and port are reasonable""" self.assert_(self.projects[0].vpn_ip) - self.assert_(self.projects[0].vpn_port >= FLAGS.vpn_start_port) - self.assert_(self.projects[0].vpn_port <= FLAGS.vpn_end_port) + self.assert_(self.projects[0].vpn_port >= FLAGS.vpn_start) + self.assert_(self.projects[0].vpn_port <= FLAGS.vpn_start + + FLAGS.num_networks) - def test_too_many_vpns(self): + def test_too_many_networks(self): """Ensure error is raised if we run out of vpn ports""" - vpns = [] - for i in 
xrange(vpn.NetworkData.num_ports_for_ip(FLAGS.vpn_ip)): - vpns.append(vpn.NetworkData.create("vpnuser%s" % i)) - self.assertRaises(vpn.NoMorePorts, vpn.NetworkData.create, "boom") - for network_datum in vpns: - network_datum.destroy() + projects = [] + networks_left = FLAGS.num_networks - len(self.projects) + for i in range(networks_left): + project = self.manager.create_project('many%s' % i, self.user) + self.service.set_network_host(project.id) + projects.append(project) + project = self.manager.create_project('boom' , self.user) + self.assertRaises(NoMoreNetworks, + self.service.set_network_host, + project.id) + self.manager.delete_project(project) + for project in projects: + self.manager.delete_project(project) + def test_ips_are_reused(self): """Makes sure that ip addresses that are deallocated get reused""" - result = self.service.allocate_fixed_ip( - self.projects[0].id) - mac = result['mac_address'] - address = result['private_dns_name'] - - hostname = "reuse-host" + address = self.service.allocate_fixed_ip(self.projects[0].id, + self.instance_id) net = service.get_network_for_project(self.projects[0].id) - - issue_ip(mac, address, hostname, net.bridge_name) + issue_ip(address, net.bridge, self.sqlfile) self.service.deallocate_fixed_ip(address) - release_ip(mac, address, hostname, net.bridge_name) + release_ip(address, net.bridge, self.sqlfile) - result = self.service.allocate_fixed_ip( - self.user, self.projects[0].id) - secondmac = result['mac_address'] - secondaddress = result['private_dns_name'] - self.assertEqual(address, secondaddress) - issue_ip(secondmac, secondaddress, hostname, net.bridge_name) - self.service.deallocate_fixed_ip(secondaddress) - release_ip(secondmac, secondaddress, hostname, net.bridge_name) + address2 = self.service.allocate_fixed_ip(self.projects[0].id, + self.instance_id) + self.assertEqual(address, address2) + self.service.deallocate_fixed_ip(address2) def test_available_ips(self): """Make sure the number of available ips for 
the network is correct @@ -217,50 +217,65 @@ class NetworkTestCase(test.TrialTestCase): There are ips reserved at the bottom and top of the range. services (network, gateway, CloudPipe, broadcast) """ - net = service.get_network_for_project(self.projects[0].id) - num_preallocated_ips = len(net.assigned) + network = service.get_network_for_project(self.projects[0].id) net_size = flags.FLAGS.network_size - num_available_ips = net_size - (net.num_bottom_reserved_ips + - num_preallocated_ips + - net.num_top_reserved_ips) - self.assertEqual(num_available_ips, len(list(net.available))) + total_ips = (available_ips(network) + + reserved_ips(network) + + allocated_ips(network)) + self.assertEqual(total_ips, net_size) def test_too_many_addresses(self): """Test for a NoMoreAddresses exception when all fixed ips are used. """ - net = service.get_network_for_project(self.projects[0].id) + network = service.get_network_for_project(self.projects[0].id) - hostname = "toomany-hosts" - macs = {} - addresses = {} # Number of availaible ips is len of the available list - num_available_ips = len(list(net.available)) + + num_available_ips = available_ips(network) + addresses = [] for i in range(num_available_ips): - result = self.service.allocate_fixed_ip( - self.projects[0].id) - macs[i] = result['mac_address'] - addresses[i] = result['private_dns_name'] - issue_ip(macs[i], addresses[i], hostname, net.bridge_name) + project_id = self.projects[0].id + addresses.append(self.service.allocate_fixed_ip(project_id, + self.instance_id)) + issue_ip(addresses[i],network.bridge, self.sqlfile) - self.assertEqual(len(list(net.available)), 0) + self.assertEqual(available_ips(network), 0) self.assertRaises(NoMoreAddresses, self.service.allocate_fixed_ip, self.projects[0].id, - 0) + self.instance_id) for i in range(len(addresses)): self.service.deallocate_fixed_ip(addresses[i]) - release_ip(macs[i], addresses[i], hostname, net.bridge_name) - self.assertEqual(len(list(net.available)), 
num_available_ips) + release_ip(addresses[i],network.bridge, self.sqlfile) + self.assertEqual(available_ips(network), num_available_ips) + + +# FIXME move these to abstraction layer +def available_ips(network): + session = models.NovaBase.get_session() + query = session.query(models.FixedIp).filter_by(network_id=network.id) + query = query.filter_by(allocated=False).filter_by(reserved=False) + return query.count() + +def allocated_ips(network): + session = models.NovaBase.get_session() + query = session.query(models.FixedIp).filter_by(network_id=network.id) + query = query.filter_by(allocated=True) + return query.count() +def reserved_ips(network): + session = models.NovaBase.get_session() + query = session.query(models.FixedIp).filter_by(network_id=network.id) + query = query.filter_by(reserved=True) + return query.count() def is_allocated_in_project(address, project_id): """Returns true if address is in specified project""" fixed_ip = models.FixedIp.find_by_ip_str(address) project_net = service.get_network_for_project(project_id) - print fixed_ip.instance # instance exists until release - return fixed_ip.instance and project_net == fixed_ip.network + return fixed_ip.instance is not None and fixed_ip.network == project_net def binpath(script): @@ -268,20 +283,22 @@ def binpath(script): return os.path.abspath(os.path.join(__file__, "../../../bin", script)) -def issue_ip(private_ip, interface): +def issue_ip(private_ip, interface, sqlfile): """Run add command on dhcpbridge""" - cmd = "%s add %s fake fake" % (binpath('nova-dhcpbridge'), private_ip) + cmd = "%s add fake %s fake" % (binpath('nova-dhcpbridge'), private_ip) env = {'DNSMASQ_INTERFACE': interface, 'TESTING': '1', + 'SQL_DB': sqlfile, 'FLAGFILE': FLAGS.dhcpbridge_flagfile} (out, err) = utils.execute(cmd, addl_env=env) logging.debug("ISSUE_IP: %s, %s ", out, err) -def release_ip(private_ip, interface): +def release_ip(private_ip, interface, sqlfile): """Run del command on dhcpbridge""" - cmd = "%s del %s 
fake fake" % (binpath('nova-dhcpbridge'), private_ip) + cmd = "%s del fake %s fake" % (binpath('nova-dhcpbridge'), private_ip) env = {'DNSMASQ_INTERFACE': interface, + 'SQL_DB': sqlfile, 'TESTING': '1', 'FLAGFILE': FLAGS.dhcpbridge_flagfile} (out, err) = utils.execute(cmd, addl_env=env) -- cgit From 142f6f6d3e7ce63e0a34cf68c8473d047766e093 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 18 Aug 2010 04:08:32 -0700 Subject: tests pass --- nova/models.py | 10 +++++-- nova/network/service.py | 6 ++--- nova/tests/volume_unittest.py | 26 +++++++++++++----- nova/volume/service.py | 62 ++++++++++++++++++++++++------------------- 4 files changed, 65 insertions(+), 39 deletions(-) diff --git a/nova/models.py b/nova/models.py index 110a4fc80..6342a86c5 100644 --- a/nova/models.py +++ b/nova/models.py @@ -199,8 +199,6 @@ class Volume(Base, NovaBase): __tablename__ = 'volumes' id = Column(Integer, primary_key=True) volume_id = Column(String) - shelf_id = Column(Integer) - blade_id = Column(Integer) user_id = Column(String) #, ForeignKey('users.id'), nullable=False) project_id = Column(String) #, ForeignKey('projects.id')) @@ -215,6 +213,14 @@ class Volume(Base, NovaBase): status = Column(String) # FIXME enum? 
attach_status = Column(String) # FIXME enum +class ExportDevice(Base, NovaBase): + __tablename__ = 'export_devices' + id = Column(Integer, primary_key=True) + shelf_id = Column(Integer) + blade_id = Column(Integer) + volume_id = Column(Integer, ForeignKey('volumes.id'), nullable=True) + volume = relationship(Volume, backref=backref('export_device', + uselist=False)) class Network(Base, NovaBase): __tablename__ = 'networks' diff --git a/nova/network/service.py b/nova/network/service.py index 9bbb833b7..26ceaca25 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -135,7 +135,7 @@ class BaseNetworkService(service.Service): fixed_ip.allocated = True session.add(fixed_ip) try: - fixed_ip.save() + session.commit() return fixed_ip.ip_str except exc.ConcurrentModificationError: pass @@ -171,7 +171,7 @@ class BaseNetworkService(service.Service): elastic_ip.project_id = project_id session.add(elastic_ip) try: - elastic_ip.save() + session.commit() return elastic_ip.ip_str except exc.ConcurrentModificationError: pass @@ -341,7 +341,7 @@ class VlanNetworkService(BaseNetworkService): network_index.network = network session.add(network_index) try: - network_index.save() + session.commit() return network_index.index except exc.ConcurrentModificationError: pass diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py index 91706580f..f29464cab 100644 --- a/nova/tests/volume_unittest.py +++ b/nova/tests/volume_unittest.py @@ -39,6 +39,20 @@ class VolumeTestCase(test.TrialTestCase): self.flags(connection_type='fake', fake_storage=True) self.volume = volume_service.VolumeService() + self.total_slots = 10 + # FIXME this should test actual creation method + self.devices = [] + for i in xrange(self.total_slots): + export_device = models.ExportDevice() + export_device.shelf_id = 0 + export_device.blade_id = i + export_device.save() + self.devices.append(export_device) + + def tearDown(self): + super(VolumeTestCase, self).tearDown() + for device in 
self.devices: + device.delete() @defer.inlineCallbacks def test_run_create_volume(self): @@ -68,14 +82,11 @@ class VolumeTestCase(test.TrialTestCase): vol_size = '1' user_id = 'fake' project_id = 'fake' - num_shelves = FLAGS.last_shelf_id - FLAGS.first_shelf_id + 1 - total_slots = FLAGS.blades_per_shelf * num_shelves vols = [] - from nova import datastore - redis = datastore.Redis.instance() - for i in xrange(total_slots): + for i in xrange(self.total_slots): vid = yield self.volume.create_volume(vol_size, user_id, project_id) vols.append(vid) + print models.Volume.find(vid).export_device.volume_id self.assertFailure(self.volume.create_volume(vol_size, user_id, project_id), @@ -127,13 +138,14 @@ class VolumeTestCase(test.TrialTestCase): shelf_blades = [] def _check(volume_id): vol = models.Volume.find(volume_id) - shelf_blade = '%s.%s' % (vol.shelf_id, vol.blade_id) + shelf_blade = '%s.%s' % (vol.export_device.shelf_id, + vol.export_device.blade_id) self.assert_(shelf_blade not in shelf_blades) shelf_blades.append(shelf_blade) logging.debug("got %s" % shelf_blade) vol.delete() deferreds = [] - for i in range(5): + for i in range(self.total_slots): d = self.volume.create_volume(vol_size, user_id, project_id) d.addCallback(_check) d.addErrback(self.fail) diff --git a/nova/volume/service.py b/nova/volume/service.py index 4d959aadb..c056e5513 100644 --- a/nova/volume/service.py +++ b/nova/volume/service.py @@ -25,6 +25,7 @@ Currently uses Ata-over-Ethernet. 
import logging from twisted.internet import defer +from sqlalchemy.orm import exc from nova import exception from nova import flags @@ -42,12 +43,6 @@ flags.DEFINE_string('volume_group', 'nova-volumes', 'Name for the VG that will contain exported volumes') flags.DEFINE_string('aoe_eth_dev', 'eth0', 'Which device to export the volumes on') -flags.DEFINE_integer('first_shelf_id', - utils.last_octet(utils.get_my_ip()) * 10, - 'AoE starting shelf_id for this service') -flags.DEFINE_integer('last_shelf_id', - utils.last_octet(utils.get_my_ip()) * 10 + 9, - 'AoE starting shelf_id for this service') flags.DEFINE_string('aoe_export_dir', '/var/lib/vblade-persist/vblades', 'AoE directory where exports are created') @@ -120,7 +115,7 @@ class VolumeService(service.Service): @defer.inlineCallbacks def _exec_create_volume(self, vol): if FLAGS.fake_storage: - return + defer.returnValue(None) if str(vol.size) == '0': sizestr = '100M' else: @@ -134,39 +129,52 @@ class VolumeService(service.Service): @defer.inlineCallbacks def _exec_delete_volume(self, vol): if FLAGS.fake_storage: - return + defer.returnValue(None) yield process.simple_execute( "sudo lvremove -f %s/%s" % (FLAGS.volume_group, vol.volume_id), error_ok=1) @defer.inlineCallbacks def _setup_export(self, vol): - # FIXME: device needs to be a pool - device = "1.1" - if not device: - raise NoMoreBlades() - (shelf_id, blade_id) = device.split('.') - vol.aoe_device = "e%s.%s" % (shelf_id, blade_id) - vol.shelf_id = shelf_id - vol.blade_id = blade_id + # FIXME: abstract this. 
also remove vol.export_device.xxx cheat + session = models.NovaBase.get_session() + query = session.query(models.ExportDevice) + query = query.filter_by(volume=None) + print 'free devices', query.count() + while(True): + export_device = query.first() + if not export_device: + raise NoMoreBlades() + print 'volume id', vol.id + export_device.volume_id = vol.id + session.add(export_device) + try: + session.commit() + break + except exc.ConcurrentModificationError: + print 'concur' + pass + vol.aoe_device = "e%s.%s" % (export_device.shelf_id, + export_device.blade_id) + print 'id is', vol.export_device.volume_id vol.save() yield self._exec_setup_export(vol) @defer.inlineCallbacks def _exec_setup_export(self, vol): if FLAGS.fake_storage: - return + defer.returnValue(None) yield process.simple_execute( "sudo vblade-persist setup %s %s %s /dev/%s/%s" % - (self, vol['shelf_id'], - vol.blade_id, + (self, vol.export_device.shelf_id, + vol.export_device.blade_id, FLAGS.aoe_eth_dev, FLAGS.volume_group, vol.volume_id), error_ok=1) @defer.inlineCallbacks def _remove_export(self, vol): - if not vol.shelf_id or not vol.blade_id: + if not vol.export_device: defer.returnValue(False) yield self._exec_remove_export(vol) defer.returnValue(True) @@ -174,17 +182,17 @@ class VolumeService(service.Service): @defer.inlineCallbacks def _exec_remove_export(self, vol): if FLAGS.fake_storage: - return + defer.returnValue(None) yield process.simple_execute( - "sudo vblade-persist stop %s %s" % (self, vol.shelf_id, - vol.blade_id), error_ok=1) + "sudo vblade-persist stop %s %s" % (self, vol.export_device.shelf_id, + vol.export_device.blade_id), error_ok=1) yield process.simple_execute( - "sudo vblade-persist destroy %s %s" % (self, vol.shelf_id, - vol.blade_id), error_ok=1) + "sudo vblade-persist destroy %s %s" % (self, vol.export_device.shelf_id, + vol.export_device.blade_id), error_ok=1) @defer.inlineCallbacks def _exec_ensure_exports(self): if FLAGS.fake_storage: - return + 
defer.returnValue(None) # NOTE(vish): these commands sometimes sends output to stderr for warnings yield process.simple_execute("sudo vblade-persist auto all", error_ok=1) yield process.simple_execute("sudo vblade-persist start all", error_ok=1) @@ -192,7 +200,7 @@ class VolumeService(service.Service): @defer.inlineCallbacks def _exec_init_volumes(self): if FLAGS.fake_storage: - return + defer.returnValue(None) yield process.simple_execute( "sudo pvcreate %s" % (FLAGS.storage_dev)) yield process.simple_execute( -- cgit From 23e9600fc69541e132f36e27296104442df7ba41 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 18 Aug 2010 10:09:11 -0400 Subject: Fix pep8 violation --- tools/install_venv.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/install_venv.py b/tools/install_venv.py index 4e775eb33..f8c47ff04 100644 --- a/tools/install_venv.py +++ b/tools/install_venv.py @@ -96,8 +96,8 @@ def install_dependencies(venv=VENV): # Tell the virtual env how to "import nova" - pathfile=os.path.join(venv, "lib", "python2.6", "site-packages", "nova.pth") - f=open(pathfile, 'w') + pthfile = os.path.join(venv, "lib", "python2.6", "site-packages", "nova.pth") + f=open(pthfile, 'w') f.write("%s\n" % ROOT) -- cgit From 738bcb7d381a67b0884d861c7ad48fa08e37106a Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 18 Aug 2010 10:39:59 -0400 Subject: Newest pylint supports 'disable=', not 'disable-msg=' --- bin/nova-rsapi | 2 +- nova/test.py | 10 +++++----- nova/tests/objectstore_unittest.py | 16 ++++++++-------- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/bin/nova-rsapi b/bin/nova-rsapi index e2722422e..9ad6f9e94 100755 --- a/bin/nova-rsapi +++ b/bin/nova-rsapi @@ -1,5 +1,5 @@ #!/usr/bin/env python -# pylint: disable-msg=C0103 +# pylint: disable=C0103 # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the diff --git a/nova/test.py b/nova/test.py index c392c8a84..a75e0de1a 
100644 --- a/nova/test.py +++ b/nova/test.py @@ -53,7 +53,7 @@ def skip_if_fake(func): class TrialTestCase(unittest.TestCase): """Test case base class for all unit tests""" - def setUp(self): # pylint: disable-msg=C0103 + def setUp(self): # pylint: disable=C0103 """Run before each test method to initialize test environment""" super(TrialTestCase, self).setUp() @@ -63,7 +63,7 @@ class TrialTestCase(unittest.TestCase): self.stubs = stubout.StubOutForTesting() self.flag_overrides = {} - def tearDown(self): # pylint: disable-msg=C0103 + def tearDown(self): # pylint: disable=C0103 """Runs after each test method to finalize/tear down test environment""" super(TrialTestCase, self).tearDown() self.reset_flags() @@ -94,7 +94,7 @@ class TrialTestCase(unittest.TestCase): class BaseTestCase(TrialTestCase): # TODO(jaypipes): Can this be moved into the TrialTestCase class? """Base test case class for all unit tests.""" - def setUp(self): # pylint: disable-msg=C0103 + def setUp(self): # pylint: disable=C0103 """Run before each test method to initialize test environment""" super(BaseTestCase, self).setUp() # TODO(termie): we could possibly keep a more global registry of @@ -106,7 +106,7 @@ class BaseTestCase(TrialTestCase): self._done_waiting = False self._timed_out = False - def tearDown(self):# pylint: disable-msg=C0103 + def tearDown(self):# pylint: disable=C0103 """Runs after each test method to finalize/tear down test environment""" super(BaseTestCase, self).tearDown() for x in self.injected: @@ -137,7 +137,7 @@ class BaseTestCase(TrialTestCase): if self._waiting: try: self.ioloop.remove_timeout(self._waiting) - except Exception: # pylint: disable-msg=W0703 + except Exception: # pylint: disable=W0703 # TODO(jaypipes): This produces a pylint warning. Should # we really be catching Exception and then passing here? 
pass diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py index dece4b5d5..5b956fccf 100644 --- a/nova/tests/objectstore_unittest.py +++ b/nova/tests/objectstore_unittest.py @@ -56,7 +56,7 @@ os.makedirs(os.path.join(OSS_TEMPDIR, 'buckets')) class ObjectStoreTestCase(test.BaseTestCase): """Test objectstore API directly.""" - def setUp(self): # pylint: disable-msg=C0103 + def setUp(self): # pylint: disable=C0103 """Setup users and projects.""" super(ObjectStoreTestCase, self).setUp() self.flags(buckets_path=os.path.join(OSS_TEMPDIR, 'buckets'), @@ -78,7 +78,7 @@ class ObjectStoreTestCase(test.BaseTestCase): self.context = Context() - def tearDown(self): # pylint: disable-msg=C0103 + def tearDown(self): # pylint: disable=C0103 """Tear down users and projects.""" self.auth_manager.delete_project('proj1') self.auth_manager.delete_project('proj2') @@ -168,7 +168,7 @@ class ObjectStoreTestCase(test.BaseTestCase): class TestHTTPChannel(http.HTTPChannel): """Dummy site required for twisted.web""" - def checkPersistence(self, _, __): # pylint: disable-msg=C0103 + def checkPersistence(self, _, __): # pylint: disable=C0103 """Otherwise we end up with an unclean reactor.""" return False @@ -181,7 +181,7 @@ class TestSite(server.Site): class S3APITestCase(test.TrialTestCase): """Test objectstore through S3 API.""" - def setUp(self): # pylint: disable-msg=C0103 + def setUp(self): # pylint: disable=C0103 """Setup users, projects, and start a test server.""" super(S3APITestCase, self).setUp() @@ -198,7 +198,7 @@ class S3APITestCase(test.TrialTestCase): root = S3() self.site = TestSite(root) - # pylint: disable-msg=E1101 + # pylint: disable=E1101 self.listening_port = reactor.listenTCP(0, self.site, interface='127.0.0.1') # pylint: enable-msg=E1101 @@ -221,11 +221,11 @@ class S3APITestCase(test.TrialTestCase): self.conn.get_http_connection = get_http_connection - def _ensure_no_buckets(self, buckets): # pylint: disable-msg=C0111 + def 
_ensure_no_buckets(self, buckets): # pylint: disable=C0111 self.assertEquals(len(buckets), 0, "Bucket list was not empty") return True - def _ensure_one_bucket(self, buckets, name): # pylint: disable-msg=C0111 + def _ensure_one_bucket(self, buckets, name): # pylint: disable=C0111 self.assertEquals(len(buckets), 1, "Bucket list didn't have exactly one element in it") self.assertEquals(buckets[0].name, name, "Wrong name") @@ -296,7 +296,7 @@ class S3APITestCase(test.TrialTestCase): deferred.addCallback(self._ensure_no_buckets) return deferred - def tearDown(self): # pylint: disable-msg=C0103 + def tearDown(self): # pylint: disable=C0103 """Tear down auth and test server.""" self.auth_manager.delete_user('admin') self.auth_manager.delete_project('admin') -- cgit From 24a6fd40f657896fb20249392be6ed41c30ca679 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 18 Aug 2010 11:19:40 -0400 Subject: Image API work --- nova/endpoint/newapi.py | 4 --- nova/endpoint/rackspace/controllers/base.py | 9 +++++ nova/endpoint/rackspace/controllers/images.py | 48 ++++++++++++++++++++++++++- 3 files changed, 56 insertions(+), 5 deletions(-) diff --git a/nova/endpoint/newapi.py b/nova/endpoint/newapi.py index 9aae933af..7836be582 100644 --- a/nova/endpoint/newapi.py +++ b/nova/endpoint/newapi.py @@ -41,11 +41,7 @@ class APIVersionRouter(wsgi.Router): def __init__(self): mapper = routes.Mapper() - rsapi = rackspace.API() mapper.connect(None, "/v1.0/{path_info:.*}", controller=rsapi) - mapper.connect(None, "/ec2/{path_info:.*}", controller=aws.API()) - super(APIVersionRouter, self).__init__(mapper) - diff --git a/nova/endpoint/rackspace/controllers/base.py b/nova/endpoint/rackspace/controllers/base.py index 8cd44f62e..88922280b 100644 --- a/nova/endpoint/rackspace/controllers/base.py +++ b/nova/endpoint/rackspace/controllers/base.py @@ -7,3 +7,12 @@ class BaseController(wsgi.Controller): return { cls.entity_name : cls.render(instance) } else: return { "TODO": "TODO" } + + def 
serialize(self, data, request): + """ + Serialize the given dict to the response type requested in request. + Uses self._serialization_metadata if it exists, which is a dict mapping + MIME types to information needed to serialize to that type. + """ + _metadata = getattr(type(self), "_serialization_metadata", {}) + return Serializer(request.environ, _metadata).to_content_type(data) diff --git a/nova/endpoint/rackspace/controllers/images.py b/nova/endpoint/rackspace/controllers/images.py index ae2a08849..197d8375c 100644 --- a/nova/endpoint/rackspace/controllers/images.py +++ b/nova/endpoint/rackspace/controllers/images.py @@ -1 +1,47 @@ -class ImagesController(object): pass +from nova.endpoint.rackspace.controllers.base import BaseController +from nova.endpoint import images +from webob import exc + +#TODO(gundlach): Serialize return values +class ImagesController(BaseController): + + _serialization_metadata = { + 'application/xml': { + "attributes": { + "image": [ "id", "name", "updated", "created", "status", + "serverId", "progress" ] + } + } + } + + def index(self, req): + context = req.environ['nova.api_request_context'] + return images.list(context) + + def show(self, req, id): + context = req.environ['nova.api_request_context'] + return images.list(context, filter_list=[id]) + + def delete(self, req, id): + context = req.environ['nova.api_request_context'] + # TODO(gundlach): make sure it's an image they may delete? + return images.deregister(context, id) + + def create(self, **kwargs): + # TODO(gundlach): no idea how to hook this up. code below + # is from servers.py. + inst = self.build_server_instance(kwargs['server']) + rpc.cast( + FLAGS.compute_topic, { + "method": "run_instance", + "args": {"instance_id": inst.instance_id}}) + + def update(self, **kwargs): + # TODO (gundlach): no idea how to hook this up. code below + # is from servers.py. 
+ instance_id = kwargs['id'] + instance = compute.InstanceDirectory().get(instance_id) + if not instance: + raise ServerNotFound("The requested server was not found") + instance.update(kwargs['server']) + instance.save() -- cgit From 43d2310f87a2f78f342b171de403f3db74a98295 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Wed, 18 Aug 2010 08:39:28 -0700 Subject: Fixed typo. --- nova/api/rackspace/controllers/servers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/rackspace/controllers/servers.py b/nova/api/rackspace/controllers/servers.py index 1911d5abf..1d0221ea8 100644 --- a/nova/api/rackspace/controllers/servers.py +++ b/nova/api/rackspace/controllers/servers.py @@ -24,7 +24,7 @@ class Controller(base.Controller): entity_name = 'servers' def index(self, **kwargs): - instanmces = [] + instances = [] for inst in compute.InstanceDirectory().all: instances.append(instance_details(inst)) -- cgit From b380e4a93f6d8ebc772c3989d27f9549b730eee5 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 18 Aug 2010 11:44:24 -0400 Subject: Changed our minds: keep pylint equal to Ubuntu Lucid version, and use disable-msg throughout. 
--- bin/nova-rsapi | 2 +- nova/network/linux_net.py | 4 ++-- nova/network/model.py | 8 ++++---- nova/network/service.py | 2 +- nova/network/vpn.py | 2 +- nova/rpc.py | 8 ++++---- nova/test.py | 10 +++++----- nova/tests/network_unittest.py | 4 ++-- nova/tests/objectstore_unittest.py | 16 ++++++++-------- nova/tests/rpc_unittest.py | 2 +- nova/wsgi.py | 4 ++-- tools/pip-requires | 2 +- 12 files changed, 32 insertions(+), 32 deletions(-) diff --git a/bin/nova-rsapi b/bin/nova-rsapi index 9ad6f9e94..e2722422e 100755 --- a/bin/nova-rsapi +++ b/bin/nova-rsapi @@ -1,5 +1,5 @@ #!/usr/bin/env python -# pylint: disable=C0103 +# pylint: disable-msg=C0103 # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index a5014b2cb..9e5aabd97 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -154,7 +154,7 @@ def start_dnsmasq(network): try: os.kill(pid, signal.SIGHUP) return - except Exception as exc: # pylint: disable=W0703 + except Exception as exc: # pylint: disable-msg=W0703 logging.debug("Hupping dnsmasq threw %s", exc) # FLAGFILE and DNSMASQ_INTERFACE in env @@ -170,7 +170,7 @@ def stop_dnsmasq(network): if pid: try: os.kill(pid, signal.SIGTERM) - except Exception as exc: # pylint: disable=W0703 + except Exception as exc: # pylint: disable-msg=W0703 logging.debug("Killing dnsmasq threw %s", exc) diff --git a/nova/network/model.py b/nova/network/model.py index d3a6a6552..6e4fcc47e 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -59,7 +59,7 @@ logging.getLogger().setLevel(logging.DEBUG) class Vlan(datastore.BasicModel): """Tracks vlans assigned to project it the datastore""" - def __init__(self, project, vlan): # pylint: disable=W0231 + def __init__(self, project, vlan): # pylint: disable-msg=W0231 """ Since we don't want to try and find a vlan by its identifier, but by a project id, we don't call super-init. 
@@ -161,7 +161,7 @@ class FixedIp(datastore.BasicModel): 'state': 'none'} @classmethod - # pylint: disable=R0913 + # pylint: disable-msg=R0913 def create(cls, user_id, project_id, address, mac, hostname, network_id): """Creates an FixedIp object""" addr = cls(address) @@ -215,7 +215,7 @@ class BaseNetwork(datastore.BasicModel): return {'network_id': self.network_id, 'network_str': self.network_str} @classmethod - # pylint: disable=R0913 + # pylint: disable-msg=R0913 def create(cls, user_id, project_id, security_group, vlan, network_str): """Create a BaseNetwork object""" network_id = "%s:%s" % (project_id, security_group) @@ -268,7 +268,7 @@ class BaseNetwork(datastore.BasicModel): """Returns the project associated with this network""" return manager.AuthManager().get_project(self['project_id']) - # pylint: disable=R0913 + # pylint: disable-msg=R0913 def _add_host(self, user_id, project_id, ip_address, mac, hostname): """Add a host to the datastore""" self.address_class.create(user_id, project_id, ip_address, diff --git a/nova/network/service.py b/nova/network/service.py index da102a056..d3aa1c46f 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -195,7 +195,7 @@ class VlanNetworkService(BaseNetworkService): # simplified and improved. Also there it may be useful # to support vlans separately from dhcp, instead of having # both of them together in this class. 
- # pylint: disable=W0221 + # pylint: disable-msg=W0221 def allocate_fixed_ip(self, user_id, project_id, diff --git a/nova/network/vpn.py b/nova/network/vpn.py index cf2579e61..85366ed89 100644 --- a/nova/network/vpn.py +++ b/nova/network/vpn.py @@ -105,7 +105,7 @@ class NetworkData(datastore.BasicModel): return datastore.Redis.instance().scard('ip:%s:ports' % vpn_ip) @property - def ip(self): # pylint: disable=C0103 + def ip(self): # pylint: disable-msg=C0103 """The ip assigned to the project""" return self['ip'] diff --git a/nova/rpc.py b/nova/rpc.py index 824a66b5b..84a9b5590 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -59,7 +59,7 @@ class Connection(carrot_connection.BrokerConnection): params['backend_cls'] = fakerabbit.Backend # NOTE(vish): magic is fun! - # pylint: disable=W0142 + # pylint: disable-msg=W0142 cls._instance = cls(**params) return cls._instance @@ -104,7 +104,7 @@ class Consumer(messaging.Consumer): if self.failed_connection: # NOTE(vish): conn is defined in the parent class, we can # recreate it as long as we create the backend too - # pylint: disable=W0201 + # pylint: disable-msg=W0201 self.conn = Connection.recreate() self.backend = self.conn.create_backend() super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks) @@ -114,7 +114,7 @@ class Consumer(messaging.Consumer): # NOTE(vish): This is catching all errors because we really don't # exceptions to be logged 10 times a second if some # persistent failure occurs. - except Exception: # pylint: disable=W0703 + except Exception: # pylint: disable-msg=W0703 if not self.failed_connection: logging.exception("Failed to fetch message from queue") self.failed_connection = True @@ -178,7 +178,7 @@ class AdapterConsumer(TopicConsumer): node_func = getattr(self.proxy, str(method)) node_args = dict((str(k), v) for k, v in args.iteritems()) # NOTE(vish): magic is fun! 
- # pylint: disable=W0142 + # pylint: disable-msg=W0142 d = defer.maybeDeferred(node_func, **node_args) if msg_id: d.addCallback(lambda rval: msg_reply(msg_id, rval, None)) diff --git a/nova/test.py b/nova/test.py index a75e0de1a..c392c8a84 100644 --- a/nova/test.py +++ b/nova/test.py @@ -53,7 +53,7 @@ def skip_if_fake(func): class TrialTestCase(unittest.TestCase): """Test case base class for all unit tests""" - def setUp(self): # pylint: disable=C0103 + def setUp(self): # pylint: disable-msg=C0103 """Run before each test method to initialize test environment""" super(TrialTestCase, self).setUp() @@ -63,7 +63,7 @@ class TrialTestCase(unittest.TestCase): self.stubs = stubout.StubOutForTesting() self.flag_overrides = {} - def tearDown(self): # pylint: disable=C0103 + def tearDown(self): # pylint: disable-msg=C0103 """Runs after each test method to finalize/tear down test environment""" super(TrialTestCase, self).tearDown() self.reset_flags() @@ -94,7 +94,7 @@ class TrialTestCase(unittest.TestCase): class BaseTestCase(TrialTestCase): # TODO(jaypipes): Can this be moved into the TrialTestCase class? 
"""Base test case class for all unit tests.""" - def setUp(self): # pylint: disable=C0103 + def setUp(self): # pylint: disable-msg=C0103 """Run before each test method to initialize test environment""" super(BaseTestCase, self).setUp() # TODO(termie): we could possibly keep a more global registry of @@ -106,7 +106,7 @@ class BaseTestCase(TrialTestCase): self._done_waiting = False self._timed_out = False - def tearDown(self):# pylint: disable=C0103 + def tearDown(self):# pylint: disable-msg=C0103 """Runs after each test method to finalize/tear down test environment""" super(BaseTestCase, self).tearDown() for x in self.injected: @@ -137,7 +137,7 @@ class BaseTestCase(TrialTestCase): if self._waiting: try: self.ioloop.remove_timeout(self._waiting) - except Exception: # pylint: disable=W0703 + except Exception: # pylint: disable-msg=W0703 # TODO(jaypipes): This produces a pylint warning. Should # we really be catching Exception and then passing here? pass diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index 039509809..993bfacc2 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -36,7 +36,7 @@ FLAGS = flags.FLAGS class NetworkTestCase(test.TrialTestCase): """Test cases for network code""" - def setUp(self): # pylint: disable=C0103 + def setUp(self): # pylint: disable-msg=C0103 super(NetworkTestCase, self).setUp() # NOTE(vish): if you change these flags, make sure to change the # flags in the corresponding section in nova-dhcpbridge @@ -60,7 +60,7 @@ class NetworkTestCase(test.TrialTestCase): vpn.NetworkData.create(self.projects[i].id) self.service = service.VlanNetworkService() - def tearDown(self): # pylint: disable=C0103 + def tearDown(self): # pylint: disable-msg=C0103 super(NetworkTestCase, self).tearDown() for project in self.projects: self.manager.delete_project(project) diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py index 5b956fccf..dece4b5d5 100644 --- 
a/nova/tests/objectstore_unittest.py +++ b/nova/tests/objectstore_unittest.py @@ -56,7 +56,7 @@ os.makedirs(os.path.join(OSS_TEMPDIR, 'buckets')) class ObjectStoreTestCase(test.BaseTestCase): """Test objectstore API directly.""" - def setUp(self): # pylint: disable=C0103 + def setUp(self): # pylint: disable-msg=C0103 """Setup users and projects.""" super(ObjectStoreTestCase, self).setUp() self.flags(buckets_path=os.path.join(OSS_TEMPDIR, 'buckets'), @@ -78,7 +78,7 @@ class ObjectStoreTestCase(test.BaseTestCase): self.context = Context() - def tearDown(self): # pylint: disable=C0103 + def tearDown(self): # pylint: disable-msg=C0103 """Tear down users and projects.""" self.auth_manager.delete_project('proj1') self.auth_manager.delete_project('proj2') @@ -168,7 +168,7 @@ class ObjectStoreTestCase(test.BaseTestCase): class TestHTTPChannel(http.HTTPChannel): """Dummy site required for twisted.web""" - def checkPersistence(self, _, __): # pylint: disable=C0103 + def checkPersistence(self, _, __): # pylint: disable-msg=C0103 """Otherwise we end up with an unclean reactor.""" return False @@ -181,7 +181,7 @@ class TestSite(server.Site): class S3APITestCase(test.TrialTestCase): """Test objectstore through S3 API.""" - def setUp(self): # pylint: disable=C0103 + def setUp(self): # pylint: disable-msg=C0103 """Setup users, projects, and start a test server.""" super(S3APITestCase, self).setUp() @@ -198,7 +198,7 @@ class S3APITestCase(test.TrialTestCase): root = S3() self.site = TestSite(root) - # pylint: disable=E1101 + # pylint: disable-msg=E1101 self.listening_port = reactor.listenTCP(0, self.site, interface='127.0.0.1') # pylint: enable-msg=E1101 @@ -221,11 +221,11 @@ class S3APITestCase(test.TrialTestCase): self.conn.get_http_connection = get_http_connection - def _ensure_no_buckets(self, buckets): # pylint: disable=C0111 + def _ensure_no_buckets(self, buckets): # pylint: disable-msg=C0111 self.assertEquals(len(buckets), 0, "Bucket list was not empty") return True - def 
_ensure_one_bucket(self, buckets, name): # pylint: disable=C0111 + def _ensure_one_bucket(self, buckets, name): # pylint: disable-msg=C0111 self.assertEquals(len(buckets), 1, "Bucket list didn't have exactly one element in it") self.assertEquals(buckets[0].name, name, "Wrong name") @@ -296,7 +296,7 @@ class S3APITestCase(test.TrialTestCase): deferred.addCallback(self._ensure_no_buckets) return deferred - def tearDown(self): # pylint: disable=C0103 + def tearDown(self): # pylint: disable-msg=C0103 """Tear down auth and test server.""" self.auth_manager.delete_user('admin') self.auth_manager.delete_project('admin') diff --git a/nova/tests/rpc_unittest.py b/nova/tests/rpc_unittest.py index 764a97416..e12a28fbc 100644 --- a/nova/tests/rpc_unittest.py +++ b/nova/tests/rpc_unittest.py @@ -32,7 +32,7 @@ FLAGS = flags.FLAGS class RpcTestCase(test.BaseTestCase): """Test cases for rpc""" - def setUp(self): # pylint: disable=C0103 + def setUp(self): # pylint: disable-msg=C0103 super(RpcTestCase, self).setUp() self.conn = rpc.Connection.instance() self.receiver = TestReceiver() diff --git a/nova/wsgi.py b/nova/wsgi.py index a0a175dc7..fd87afe6e 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -83,7 +83,7 @@ class Application(object): raise NotImplementedError("You must implement __call__") -class Middleware(Application): # pylint: disable=W0223 +class Middleware(Application): # pylint: disable-msg=W0223 """ Base WSGI middleware wrapper. These classes require an application to be initialized that will be called next. By default the middleware will @@ -91,7 +91,7 @@ class Middleware(Application): # pylint: disable=W0223 behavior. 
""" - def __init__(self, application): # pylint: disable=W0231 + def __init__(self, application): # pylint: disable-msg=W0231 self.application = application @webob.dec.wsgify diff --git a/tools/pip-requires b/tools/pip-requires index 28af7bcb9..13e8e5f45 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -1,5 +1,5 @@ pep8==0.5.0 -pylint==0.21.1 +pylint==0.19 IPy==0.70 M2Crypto==0.20.2 amqplib==0.6.1 -- cgit From ad3bda4b1a81ee60230869a3d207141f7315a3ca Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 18 Aug 2010 11:53:41 -0400 Subject: pep8 typo --- tools/install_venv.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/install_venv.py b/tools/install_venv.py index f8c47ff04..e108c29a1 100644 --- a/tools/install_venv.py +++ b/tools/install_venv.py @@ -97,7 +97,7 @@ def install_dependencies(venv=VENV): # Tell the virtual env how to "import nova" pthfile = os.path.join(venv, "lib", "python2.6", "site-packages", "nova.pth") - f=open(pthfile, 'w') + f = open(pthfile, 'w') f.write("%s\n" % ROOT) -- cgit From 4e5e72da2e3242026d757c8d5143e16f9d00cb6a Mon Sep 17 00:00:00 2001 From: Eric Day Date: Wed, 18 Aug 2010 08:56:33 -0700 Subject: Removed the 'controllers' directory under 'rackspace' due to full class name redundancy. 
--- nova/api/rackspace/__init__.py | 8 +-- nova/api/rackspace/base.py | 30 +++++++++ nova/api/rackspace/controllers/__init__.py | 0 nova/api/rackspace/controllers/base.py | 30 --------- nova/api/rackspace/controllers/flavors.py | 18 ----- nova/api/rackspace/controllers/images.py | 18 ----- nova/api/rackspace/controllers/servers.py | 83 ------------------------ nova/api/rackspace/controllers/sharedipgroups.py | 18 ----- nova/api/rackspace/flavors.py | 18 +++++ nova/api/rackspace/images.py | 18 +++++ nova/api/rackspace/servers.py | 83 ++++++++++++++++++++++++ nova/api/rackspace/sharedipgroups.py | 18 +++++ 12 files changed, 171 insertions(+), 171 deletions(-) create mode 100644 nova/api/rackspace/base.py delete mode 100644 nova/api/rackspace/controllers/__init__.py delete mode 100644 nova/api/rackspace/controllers/base.py delete mode 100644 nova/api/rackspace/controllers/flavors.py delete mode 100644 nova/api/rackspace/controllers/images.py delete mode 100644 nova/api/rackspace/controllers/servers.py delete mode 100644 nova/api/rackspace/controllers/sharedipgroups.py create mode 100644 nova/api/rackspace/flavors.py create mode 100644 nova/api/rackspace/images.py create mode 100644 nova/api/rackspace/servers.py create mode 100644 nova/api/rackspace/sharedipgroups.py diff --git a/nova/api/rackspace/__init__.py b/nova/api/rackspace/__init__.py index 662cbe495..27e78f801 100644 --- a/nova/api/rackspace/__init__.py +++ b/nova/api/rackspace/__init__.py @@ -29,10 +29,10 @@ import webob.exc from nova import flags from nova import wsgi -from nova.api.rackspace.controllers import flavors -from nova.api.rackspace.controllers import images -from nova.api.rackspace.controllers import servers -from nova.api.rackspace.controllers import sharedipgroups +from nova.api.rackspace import flavors +from nova.api.rackspace import images +from nova.api.rackspace import servers +from nova.api.rackspace import sharedipgroups from nova.auth import manager diff --git 
a/nova/api/rackspace/base.py b/nova/api/rackspace/base.py new file mode 100644 index 000000000..dd2c6543c --- /dev/null +++ b/nova/api/rackspace/base.py @@ -0,0 +1,30 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova import wsgi + + +class Controller(wsgi.Controller): + """TODO(eday): Base controller for all rackspace controllers. What is this + for? Is this just Rackspace specific? """ + + @classmethod + def render(cls, instance): + if isinstance(instance, list): + return {cls.entity_name: cls.render(instance)} + else: + return {"TODO": "TODO"} diff --git a/nova/api/rackspace/controllers/__init__.py b/nova/api/rackspace/controllers/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/nova/api/rackspace/controllers/base.py b/nova/api/rackspace/controllers/base.py deleted file mode 100644 index dd2c6543c..000000000 --- a/nova/api/rackspace/controllers/base.py +++ /dev/null @@ -1,30 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from nova import wsgi - - -class Controller(wsgi.Controller): - """TODO(eday): Base controller for all rackspace controllers. What is this - for? Is this just Rackspace specific? """ - - @classmethod - def render(cls, instance): - if isinstance(instance, list): - return {cls.entity_name: cls.render(instance)} - else: - return {"TODO": "TODO"} diff --git a/nova/api/rackspace/controllers/flavors.py b/nova/api/rackspace/controllers/flavors.py deleted file mode 100644 index 986f11434..000000000 --- a/nova/api/rackspace/controllers/flavors.py +++ /dev/null @@ -1,18 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -class Controller(object): pass diff --git a/nova/api/rackspace/controllers/images.py b/nova/api/rackspace/controllers/images.py deleted file mode 100644 index 986f11434..000000000 --- a/nova/api/rackspace/controllers/images.py +++ /dev/null @@ -1,18 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. 
-# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -class Controller(object): pass diff --git a/nova/api/rackspace/controllers/servers.py b/nova/api/rackspace/controllers/servers.py deleted file mode 100644 index 1d0221ea8..000000000 --- a/nova/api/rackspace/controllers/servers.py +++ /dev/null @@ -1,83 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from nova import rpc -from nova.compute import model as compute -from nova.api.rackspace.controllers import base - - -class Controller(base.Controller): - entity_name = 'servers' - - def index(self, **kwargs): - instances = [] - for inst in compute.InstanceDirectory().all: - instances.append(instance_details(inst)) - - def show(self, **kwargs): - instance_id = kwargs['id'] - return compute.InstanceDirectory().get(instance_id) - - def delete(self, **kwargs): - instance_id = kwargs['id'] - instance = compute.InstanceDirectory().get(instance_id) - if not instance: - raise ServerNotFound("The requested server was not found") - instance.destroy() - return True - - def create(self, **kwargs): - inst = self.build_server_instance(kwargs['server']) - rpc.cast( - FLAGS.compute_topic, { - "method": "run_instance", - "args": {"instance_id": inst.instance_id}}) - - def update(self, **kwargs): - instance_id = kwargs['id'] - instance = compute.InstanceDirectory().get(instance_id) - if not instance: - raise ServerNotFound("The requested server was not found") - instance.update(kwargs['server']) - instance.save() - - def build_server_instance(self, env): - """Build instance data structure and save it to the data store.""" - reservation = utils.generate_uid('r') - ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) - inst = self.instdir.new() - inst['name'] = env['server']['name'] - inst['image_id'] = env['server']['imageId'] - inst['instance_type'] = env['server']['flavorId'] - inst['user_id'] = env['user']['id'] - inst['project_id'] = env['project']['id'] - inst['reservation_id'] = reservation - inst['launch_time'] = ltime - inst['mac_address'] = utils.generate_mac() - address = self.network.allocate_ip( - inst['user_id'], - inst['project_id'], - mac=inst['mac_address']) - inst['private_dns_name'] = str(address) - inst['bridge_name'] = network.BridgedNetwork.get_network_for_project( - inst['user_id'], - inst['project_id'], - 'default')['bridge_name'] - # key_data, 
key_name, ami_launch_index - # TODO(todd): key data or root password - inst.save() - return inst diff --git a/nova/api/rackspace/controllers/sharedipgroups.py b/nova/api/rackspace/controllers/sharedipgroups.py deleted file mode 100644 index 986f11434..000000000 --- a/nova/api/rackspace/controllers/sharedipgroups.py +++ /dev/null @@ -1,18 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -class Controller(object): pass diff --git a/nova/api/rackspace/flavors.py b/nova/api/rackspace/flavors.py new file mode 100644 index 000000000..986f11434 --- /dev/null +++ b/nova/api/rackspace/flavors.py @@ -0,0 +1,18 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +class Controller(object): pass diff --git a/nova/api/rackspace/images.py b/nova/api/rackspace/images.py new file mode 100644 index 000000000..986f11434 --- /dev/null +++ b/nova/api/rackspace/images.py @@ -0,0 +1,18 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +class Controller(object): pass diff --git a/nova/api/rackspace/servers.py b/nova/api/rackspace/servers.py new file mode 100644 index 000000000..25d1fe9c8 --- /dev/null +++ b/nova/api/rackspace/servers.py @@ -0,0 +1,83 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from nova import rpc +from nova.compute import model as compute +from nova.api.rackspace import base + + +class Controller(base.Controller): + entity_name = 'servers' + + def index(self, **kwargs): + instances = [] + for inst in compute.InstanceDirectory().all: + instances.append(instance_details(inst)) + + def show(self, **kwargs): + instance_id = kwargs['id'] + return compute.InstanceDirectory().get(instance_id) + + def delete(self, **kwargs): + instance_id = kwargs['id'] + instance = compute.InstanceDirectory().get(instance_id) + if not instance: + raise ServerNotFound("The requested server was not found") + instance.destroy() + return True + + def create(self, **kwargs): + inst = self.build_server_instance(kwargs['server']) + rpc.cast( + FLAGS.compute_topic, { + "method": "run_instance", + "args": {"instance_id": inst.instance_id}}) + + def update(self, **kwargs): + instance_id = kwargs['id'] + instance = compute.InstanceDirectory().get(instance_id) + if not instance: + raise ServerNotFound("The requested server was not found") + instance.update(kwargs['server']) + instance.save() + + def build_server_instance(self, env): + """Build instance data structure and save it to the data store.""" + reservation = utils.generate_uid('r') + ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) + inst = self.instdir.new() + inst['name'] = env['server']['name'] + inst['image_id'] = env['server']['imageId'] + inst['instance_type'] = env['server']['flavorId'] + inst['user_id'] = env['user']['id'] + inst['project_id'] = env['project']['id'] + inst['reservation_id'] = reservation + inst['launch_time'] = ltime + inst['mac_address'] = utils.generate_mac() + address = self.network.allocate_ip( + inst['user_id'], + inst['project_id'], + mac=inst['mac_address']) + inst['private_dns_name'] = str(address) + inst['bridge_name'] = network.BridgedNetwork.get_network_for_project( + inst['user_id'], + inst['project_id'], + 'default')['bridge_name'] + # key_data, key_name, 
ami_launch_index + # TODO(todd): key data or root password + inst.save() + return inst diff --git a/nova/api/rackspace/sharedipgroups.py b/nova/api/rackspace/sharedipgroups.py new file mode 100644 index 000000000..986f11434 --- /dev/null +++ b/nova/api/rackspace/sharedipgroups.py @@ -0,0 +1,18 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +class Controller(object): pass -- cgit From 7cd16b5754a38257d6b492bc29e6f99f2537f11a Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 18 Aug 2010 12:09:29 -0400 Subject: Missed one --- pylintrc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pylintrc b/pylintrc index 6c799c7ea..943eeac36 100644 --- a/pylintrc +++ b/pylintrc @@ -1,9 +1,9 @@ [Messages Control] -disable=C0103 +disable-msg=C0103 # TODOs in code comments are fine... 
-disable=W0511 +disable-msg=W0511 # *args and **kwargs are fine -disable=W0142 +disable-msg=W0142 [Basic] # Variables can be 1 to 31 characters long, with -- cgit From 50b8aea8c775a2a16da579291f69daf313441a81 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 18 Aug 2010 13:11:49 -0700 Subject: use with_lockmode for concurrency issues --- nova/models.py | 87 ++++++++++++++++++++++--------------------- nova/network/service.py | 85 ++++++++++++++++++++---------------------- nova/tests/volume_unittest.py | 8 +++- nova/volume/service.py | 28 +++++--------- 4 files changed, 101 insertions(+), 107 deletions(-) diff --git a/nova/models.py b/nova/models.py index 6342a86c5..aa9f3da09 100644 --- a/nova/models.py +++ b/nova/models.py @@ -39,6 +39,7 @@ flags.DEFINE_string('sql_connection', 'connection string for sql database') class NovaBase(object): + __table_args__ = {'mysql_engine':'InnoDB'} created_at = Column(DateTime) updated_at = Column(DateTime) @@ -96,17 +97,17 @@ class NovaBase(object): class Image(Base, NovaBase): __tablename__ = 'images' - user_id = Column(String)#, ForeignKey('users.id'), nullable=False) - project_id = Column(String)#, ForeignKey('projects.id'), nullable=False) + id = Column(Integer, primary_key=True) + user_id = Column(String(255))#, ForeignKey('users.id'), nullable=False) + project_id = Column(String(255))#, ForeignKey('projects.id'), nullable=False) - id = Column(String, primary_key=True) - image_type = Column(String) + image_type = Column(String(255)) public = Column(Boolean, default=False) - state = Column(String) - location = Column(String) - arch = Column(String) - default_kernel_id = Column(String) - default_ramdisk_id = Column(String) + state = Column(String(255)) + location = Column(String(255)) + arch = Column(String(255)) + default_kernel_id = Column(String(255)) + default_ramdisk_id = Column(String(255)) @validates('image_type') def validate_image_type(self, key, image_type): @@ -135,8 +136,8 @@ class Instance(Base, 
NovaBase): __tablename__ = 'instances' id = Column(Integer, primary_key=True) - user_id = Column(String) #, ForeignKey('users.id'), nullable=False) - project_id = Column(String) #, ForeignKey('projects.id')) + user_id = Column(String(255)) #, ForeignKey('users.id'), nullable=False) + project_id = Column(String(255)) #, ForeignKey('projects.id')) @property def user(self): @@ -153,26 +154,26 @@ class Instance(Base, NovaBase): image_id = Column(Integer, ForeignKey('images.id'), nullable=False) - kernel_id = Column(String, ForeignKey('images.id'), nullable=True) - ramdisk_id = Column(String, ForeignKey('images.id'), nullable=True) + kernel_id = Column(Integer, ForeignKey('images.id'), nullable=True) + ramdisk_id = Column(Integer, ForeignKey('images.id'), nullable=True) launch_index = Column(Integer) - key_name = Column(String) + key_name = Column(String(255)) key_data = Column(Text) - security_group = Column(String) + security_group = Column(String(255)) state = Column(Integer) - state_description = Column(String) + state_description = Column(String(255)) - hostname = Column(String) + hostname = Column(String(255)) physical_node_id = Column(Integer) instance_type = Column(Integer) user_data = Column(Text) - reservation_id = Column(String) - mac_address = Column(String) + reservation_id = Column(String(255)) + mac_address = Column(String(255)) def set_state(self, state_code, state_description=None): from nova.compute import power_state @@ -198,20 +199,20 @@ class Instance(Base, NovaBase): class Volume(Base, NovaBase): __tablename__ = 'volumes' id = Column(Integer, primary_key=True) - volume_id = Column(String) + volume_id = Column(String(255)) - user_id = Column(String) #, ForeignKey('users.id'), nullable=False) - project_id = Column(String) #, ForeignKey('projects.id')) + user_id = Column(String(255)) #, ForeignKey('users.id'), nullable=False) + project_id = Column(String(255)) #, ForeignKey('projects.id')) # FIXME: should be physical_node_id = Column(Integer) - 
node_name = Column(String) + node_name = Column(String(255)) size = Column(Integer) - alvailability_zone = Column(String) # FIXME foreign key? + alvailability_zone = Column(String(255)) # FIXME foreign key? instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) - mountpoint = Column(String) - attach_time = Column(String) # FIXME datetime - status = Column(String) # FIXME enum? - attach_status = Column(String) # FIXME enum + mountpoint = Column(String(255)) + attach_time = Column(String(255)) # FIXME datetime + status = Column(String(255)) # FIXME enum? + attach_status = Column(String(255)) # FIXME enum class ExportDevice(Base, NovaBase): __tablename__ = 'export_devices' @@ -225,24 +226,24 @@ class ExportDevice(Base, NovaBase): class Network(Base, NovaBase): __tablename__ = 'networks' id = Column(Integer, primary_key=True) - kind = Column(String) + kind = Column(String(255)) injected = Column(Boolean, default=False) - network_str = Column(String) - netmask = Column(String) - bridge = Column(String) - gateway = Column(String) - broadcast = Column(String) - dns = Column(String) + network_str = Column(String(255)) + netmask = Column(String(255)) + bridge = Column(String(255)) + gateway = Column(String(255)) + broadcast = Column(String(255)) + dns = Column(String(255)) vlan = Column(Integer) - vpn_public_ip_str = Column(String) + vpn_public_ip_str = Column(String(255)) vpn_public_port = Column(Integer) - vpn_private_ip_str = Column(String) + vpn_private_ip_str = Column(String(255)) - project_id = Column(String) #, ForeignKey('projects.id'), nullable=False) + project_id = Column(String(255)) #, ForeignKey('projects.id'), nullable=False) # FIXME: should be physical_node_id = Column(Integer) - node_name = Column(String) + node_name = Column(String(255)) class NetworkIndex(Base, NovaBase): @@ -258,7 +259,7 @@ class NetworkIndex(Base, NovaBase): class FixedIp(Base, NovaBase): __tablename__ = 'fixed_ips' id = Column(Integer, primary_key=True) - ip_str = 
Column(String, unique=True) + ip_str = Column(String(255), unique=True) network_id = Column(Integer, ForeignKey('networks.id'), nullable=False) network = relationship(Network, backref=backref('fixed_ips')) instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) @@ -279,13 +280,13 @@ class FixedIp(Base, NovaBase): class ElasticIp(Base, NovaBase): __tablename__ = 'elastic_ips' id = Column(Integer, primary_key=True) - ip_str = Column(String, unique=True) + ip_str = Column(String(255), unique=True) fixed_ip_id = Column(Integer, ForeignKey('fixed_ips.id'), nullable=True) fixed_ip = relationship(FixedIp, backref=backref('elastic_ips')) - project_id = Column(String) #, ForeignKey('projects.id'), nullable=False) + project_id = Column(String(255)) #, ForeignKey('projects.id'), nullable=False) # FIXME: should be physical_node_id = Column(Integer) - node_name = Column(String) + node_name = Column(String(255)) @classmethod def find_by_ip_str(cls, ip_str): diff --git a/nova/network/service.py b/nova/network/service.py index 26ceaca25..938d7832b 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -107,38 +107,42 @@ class BaseNetworkService(service.Service): def set_network_host(self, project_id): """Safely sets the host of the projects network""" - network = get_network_for_project(project_id) + # FIXME abstract this + session = models.NovaBase.get_session() + # FIXME will a second request fail or wait for first to finish? 
+ query = session.query(models.Network).filter_by(project_id=project_id) + network = query.with_lockmode("update").first() + if not network: + raise exception.NotFound("Couldn't find network for %s" % + project_id) + # NOTE(vish): if with_lockmode isn't supported, as in sqlite, + # then this has concurrency issues if network.node_name: return network.node_name network.node_name = FLAGS.node_name network.kind = FLAGS.network_type - try: - network.save() - self._on_set_network_host(network) - except exc.ConcurrentModificationError: - network.refresh() # FIXME is this implemented? - return network.node_name + session.add(network) + session.commit() + self._on_set_network_host(network) def allocate_fixed_ip(self, project_id, instance_id, *args, **kwargs): """Gets fixed ip from the pool""" + # FIXME abstract this network = get_network_for_project(project_id) session = models.NovaBase.get_session() query = session.query(models.FixedIp).filter_by(network_id=network.id) query = query.filter_by(reserved=False).filter_by(allocated=False) - query = query.filter_by(leased=False) - while(True): - fixed_ip = query.first() - if not fixed_ip: - raise network_exception.NoMoreAddresses() - # FIXME will this set backreference? - fixed_ip.instance_id = instance_id - fixed_ip.allocated = True - session.add(fixed_ip) - try: - session.commit() - return fixed_ip.ip_str - except exc.ConcurrentModificationError: - pass + fixed_ip = query.filter_by(leased=False).with_lockmode("update").first + # NOTE(vish): if with_lockmode isn't supported, as in sqlite, + # then this has concurrency issues + if not fixed_ip: + raise network_exception.NoMoreAddresses() + # FIXME will this set backreference? 
+ fixed_ip.instance_id = instance_id + fixed_ip.allocated = True + session.add(fixed_ip) + session.commit() + return fixed_ip.ip_str def deallocate_fixed_ip(self, fixed_ip_str, *args, **kwargs): """Returns a fixed ip to the pool""" @@ -160,21 +164,18 @@ class BaseNetworkService(service.Service): def allocate_elastic_ip(self, project_id): """Gets an elastic ip from the pool""" # FIXME: add elastic ips through manage command + # FIXME: abstract this session = models.NovaBase.get_session() node_name = FLAGS.node_name query = session.query(models.ElasticIp).filter_by(node_name=node_name) - query = query.filter_by(fixed_ip_id=None) - while(True): - elastic_ip = query.first() - if not elastic_ip: - raise network_exception.NoMoreAddresses() - elastic_ip.project_id = project_id - session.add(elastic_ip) - try: - session.commit() - return elastic_ip.ip_str - except exc.ConcurrentModificationError: - pass + query = query.filter_by(fixed_ip_id=None).with_lockmode("update") + elastic_ip = query.first() + if not elastic_ip: + raise network_exception.NoMoreAddresses() + elastic_ip.project_id = project_id + session.add(elastic_ip) + session.commit() + return elastic_ip.ip_str def associate_elastic_ip(self, elastic_ip_str, fixed_ip_str): """Associates an elastic ip to a fixed ip""" @@ -334,17 +335,13 @@ class VlanNetworkService(BaseNetworkService): session = models.NovaBase.get_session() node_name = FLAGS.node_name query = session.query(models.NetworkIndex).filter_by(network_id=None) - while(True): - network_index = query.first() - if not network_index: - raise network_exception.NoMoreNetworks() - network_index.network = network - session.add(network_index) - try: - session.commit() - return network_index.index - except exc.ConcurrentModificationError: - pass + network_index = query.with_lockmode("update").first() + if not network_index: + raise network_exception.NoMoreNetworks() + network_index.network = network + session.add(network_index) + session.commit() + return 
network_index.index @classmethod diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py index f29464cab..62ea2a26c 100644 --- a/nova/tests/volume_unittest.py +++ b/nova/tests/volume_unittest.py @@ -131,19 +131,20 @@ class VolumeTestCase(test.TrialTestCase): volume_id) @defer.inlineCallbacks - def test_multiple_volume_race_condition(self): + def test_concurrent_volumes_get_different_blades(self): vol_size = "5" user_id = "fake" project_id = 'fake' shelf_blades = [] + volume_ids = [] def _check(volume_id): + volume_ids.append(volume_id) vol = models.Volume.find(volume_id) shelf_blade = '%s.%s' % (vol.export_device.shelf_id, vol.export_device.blade_id) self.assert_(shelf_blade not in shelf_blades) shelf_blades.append(shelf_blade) logging.debug("got %s" % shelf_blade) - vol.delete() deferreds = [] for i in range(self.total_slots): d = self.volume.create_volume(vol_size, user_id, project_id) @@ -151,6 +152,9 @@ class VolumeTestCase(test.TrialTestCase): d.addErrback(self.fail) deferreds.append(d) yield defer.DeferredList(deferreds) + for volume_id in volume_ids: + vol = models.Volume.find(volume_id) + vol.delete() def test_multi_node(self): # TODO(termie): Figure out how to test with two nodes, diff --git a/nova/volume/service.py b/nova/volume/service.py index c056e5513..c04f85145 100644 --- a/nova/volume/service.py +++ b/nova/volume/service.py @@ -25,7 +25,6 @@ Currently uses Ata-over-Ethernet. import logging from twisted.internet import defer -from sqlalchemy.orm import exc from nova import exception from nova import flags @@ -138,25 +137,18 @@ class VolumeService(service.Service): def _setup_export(self, vol): # FIXME: abstract this. 
also remove vol.export_device.xxx cheat session = models.NovaBase.get_session() - query = session.query(models.ExportDevice) - query = query.filter_by(volume=None) - print 'free devices', query.count() - while(True): - export_device = query.first() - if not export_device: - raise NoMoreBlades() - print 'volume id', vol.id - export_device.volume_id = vol.id - session.add(export_device) - try: - session.commit() - break - except exc.ConcurrentModificationError: - print 'concur' - pass + query = session.query(models.ExportDevice).filter_by(volume=None) + export_device = query.with_lockmode("update").first() + # NOTE(vish): if with_lockmode isn't supported, as in sqlite, + # then this has concurrency issues + if not export_device: + raise NoMoreBlades() + export_device.volume_id = vol.id + session.add(export_device) + session.commit() + # FIXME: aoe_device is redundant, should be turned into a method vol.aoe_device = "e%s.%s" % (export_device.shelf_id, export_device.blade_id) - print 'id is', vol.export_device.volume_id vol.save() yield self._exec_setup_export(vol) -- cgit From 7e403e381612e5678aa8f2b9e714d472ba4b3ef0 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Wed, 18 Aug 2010 22:19:39 +0100 Subject: Fix to better reflect (my believed intent) as to the meaning of error_ok (ignore stderr vs accept failure) --- nova/volume/service.py | 10 +++++----- tools/install_venv.py | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/nova/volume/service.py b/nova/volume/service.py index bf803eaf6..be62f621d 100644 --- a/nova/volume/service.py +++ b/nova/volume/service.py @@ -249,14 +249,14 @@ class Volume(datastore.BasicModel): "sudo lvcreate -L %s -n %s %s" % (sizestr, self['volume_id'], FLAGS.volume_group), - check_exit_code=True) + terminate_on_stderr=False) @defer.inlineCallbacks def _delete_lv(self): yield process.simple_execute( "sudo lvremove -f %s/%s" % (FLAGS.volume_group, self['volume_id']), - check_exit_code=True) + 
terminate_on_stderr=False) @property def __devices_key(self): @@ -285,7 +285,7 @@ class Volume(datastore.BasicModel): FLAGS.aoe_eth_dev, FLAGS.volume_group, self['volume_id']), - check_exit_code=True) + terminate_on_stderr=False) @defer.inlineCallbacks def _remove_export(self): @@ -299,11 +299,11 @@ class Volume(datastore.BasicModel): yield process.simple_execute( "sudo vblade-persist stop %s %s" % (self['shelf_id'], self['blade_id']), - check_exit_code=True) + terminate_on_stderr=False) yield process.simple_execute( "sudo vblade-persist destroy %s %s" % (self['shelf_id'], self['blade_id']), - check_exit_code=True) + terminate_on_stderr=False) class FakeVolume(Volume): diff --git a/tools/install_venv.py b/tools/install_venv.py index a9154fc33..1f0fa3cc7 100644 --- a/tools/install_venv.py +++ b/tools/install_venv.py @@ -37,7 +37,7 @@ def die(message, *args): sys.exit(1) -def run_command(cmd, redirect_output=True, check_exit_code=False): +def run_command(cmd, redirect_output=True, check_exit_code=True): """ Runs a command in an out-of-process shell, returning the output of that command. Working directory is ROOT. 
-- cgit From e5a448a616173cd391aaf458f5e0e5ff94a42c89 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Wed, 18 Aug 2010 22:33:11 +0100 Subject: Fix unit test bug this uncovered: don't release_ip that we haven't got from issue_ip --- nova/tests/network_unittest.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index 993bfacc2..34b68f1ed 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -166,7 +166,6 @@ class NetworkTestCase(test.TrialTestCase): release_ip(mac3, address3, hostname, net.bridge_name) net = model.get_project_network(self.projects[0].id, "default") self.service.deallocate_fixed_ip(firstaddress) - release_ip(mac, firstaddress, hostname, net.bridge_name) def test_vpn_ip_and_port_looks_valid(self): """Ensure the vpn ip and port are reasonable""" -- cgit From 02592d584cc21e536574d20b01d8dbf82474bcd3 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Wed, 18 Aug 2010 15:00:20 -0700 Subject: Updated the tests to use webob, removed the 'called' thing and just use return values instead. --- nova/api/test.py | 43 ++++++++++++---------------- nova/wsgi_test.py | 83 +++++++++++++++---------------------------------------- 2 files changed, 40 insertions(+), 86 deletions(-) diff --git a/nova/api/test.py b/nova/api/test.py index 09f79c02e..51b114b8e 100644 --- a/nova/api/test.py +++ b/nova/api/test.py @@ -22,49 +22,40 @@ Test for the root WSGI middleware for all API controllers. 
import unittest import stubout +import webob +import webob.dec from nova import api -from nova import wsgi_test class Test(unittest.TestCase): def setUp(self): # pylint: disable-msg=C0103 - self.called = False self.stubs = stubout.StubOutForTesting() def tearDown(self): # pylint: disable-msg=C0103 self.stubs.UnsetAll() def test_rackspace(self): - self.stubs.Set(api.rackspace, 'API', get_api_stub(self)) - api.API()(wsgi_test.get_environ({'PATH_INFO': '/v1.0/cloud'}), - wsgi_test.start_response) - self.assertTrue(self.called) + self.stubs.Set(api.rackspace, 'API', APIStub) + result = webob.Request.blank('/v1.0/cloud').get_response(api.API()) + self.assertEqual(result.body, "/cloud") def test_ec2(self): - self.stubs.Set(api.ec2, 'API', get_api_stub(self)) - api.API()(wsgi_test.get_environ({'PATH_INFO': '/ec2/cloud'}), - wsgi_test.start_response) - self.assertTrue(self.called) + self.stubs.Set(api.ec2, 'API', APIStub) + result = webob.Request.blank('/ec2/cloud').get_response(api.API()) + self.assertEqual(result.body, "/cloud") def test_not_found(self): - self.stubs.Set(api.ec2, 'API', get_api_stub(self)) - self.stubs.Set(api.rackspace, 'API', get_api_stub(self)) - api.API()(wsgi_test.get_environ({'PATH_INFO': '/'}), - wsgi_test.start_response) - self.assertFalse(self.called) + self.stubs.Set(api.ec2, 'API', APIStub) + self.stubs.Set(api.rackspace, 'API', APIStub) + result = webob.Request.blank('/test/cloud').get_response(api.API()) + self.assertNotEqual(result.body, "/cloud") -def get_api_stub(test_object): - """Get a stub class that verifies next part of the request.""" +class APIStub(object): + """Class to verify request and mark it was called.""" - class APIStub(object): - """Class to verify request and mark it was called.""" - test = test_object - - def __call__(self, environ, start_response): - self.test.assertEqual(environ['PATH_INFO'], '/cloud') - self.test.called = True - - return APIStub + @webob.dec.wsgify + def __call__(self, req): + return req.path_info 
diff --git a/nova/wsgi_test.py b/nova/wsgi_test.py index 02bf067d6..786dc1bce 100644 --- a/nova/wsgi_test.py +++ b/nova/wsgi_test.py @@ -24,41 +24,34 @@ Test WSGI basics and provide some helper functions for other WSGI tests. import unittest import routes +import webob from nova import wsgi class Test(unittest.TestCase): - def setUp(self): # pylint: disable-msg=C0103 - self.called = False - def test_debug(self): class Application(wsgi.Application): """Dummy application to test debug.""" - test = self - def __call__(self, environ, test_start_response): - test_start_response("200", [("X-Test", "checking")]) - self.test.called = True - return ['Test response'] + def __call__(self, environ, start_response): + start_response("200", [("X-Test", "checking")]) + return ['Test result'] - app = wsgi.Debug(Application())(get_environ(), start_response) - self.assertTrue(self.called) - for _ in app: - pass + application = wsgi.Debug(Application()) + result = webob.Request.blank('/').get_response(application) + self.assertEqual(result.body, "Test result") def test_router(self): class Application(wsgi.Application): """Test application to call from router.""" - test = self - def __call__(self, environ, test_start_response): - test_start_response("200", []) - self.test.called = True - return [] + def __call__(self, environ, start_response): + start_response("200", []) + return ['Router result'] class Router(wsgi.Router): """Test router.""" @@ -68,11 +61,10 @@ class Test(unittest.TestCase): mapper.connect("/test", controller=Application()) super(Router, self).__init__(mapper) - Router()(get_environ({'PATH_INFO': '/test'}), start_response) - self.assertTrue(self.called) - self.called = False - Router()(get_environ({'PATH_INFO': '/bad'}), start_response) - self.assertFalse(self.called) + result = webob.Request.blank('/test').get_response(Router()) + self.assertEqual(result.body, "Router result") + result = webob.Request.blank('/bad').get_response(Router()) + 
self.assertNotEqual(result.body, "Router result") def test_controller(self): @@ -80,11 +72,11 @@ class Test(unittest.TestCase): """Test controller to call from router.""" test = self - def show(self, **kwargs): - """Mark that this has been called.""" - self.test.called = True - self.test.assertEqual(kwargs['id'], '123') - return "Test" + def show(self, req, id): # pylint: disable-msg=W0622,C0103 + """Default action called for requests with an ID.""" + self.test.assertEqual(req.path_info, '/tests/123') + self.test.assertEqual(id, '123') + return id class Router(wsgi.Router): """Test router.""" @@ -94,40 +86,11 @@ class Test(unittest.TestCase): mapper.resource("test", "tests", controller=Controller()) super(Router, self).__init__(mapper) - Router()(get_environ({'PATH_INFO': '/tests/123'}), start_response) - self.assertTrue(self.called) - self.called = False - Router()(get_environ({'PATH_INFO': '/test/123'}), start_response) - self.assertFalse(self.called) + result = webob.Request.blank('/tests/123').get_response(Router()) + self.assertEqual(result.body, "123") + result = webob.Request.blank('/test/123').get_response(Router()) + self.assertNotEqual(result.body, "123") def test_serializer(self): # TODO(eday): Placeholder for serializer testing. 
pass - - -def get_environ(overwrite={}): # pylint: disable-msg=W0102 - """Get a WSGI environment, overwriting any entries given.""" - environ = {'SERVER_PROTOCOL': 'HTTP/1.1', - 'GATEWAY_INTERFACE': 'CGI/1.1', - 'wsgi.version': (1, 0), - 'SERVER_PORT': '443', - 'SERVER_NAME': '127.0.0.1', - 'REMOTE_ADDR': '127.0.0.1', - 'wsgi.run_once': False, - 'wsgi.errors': None, - 'wsgi.multiprocess': False, - 'SCRIPT_NAME': '', - 'wsgi.url_scheme': 'https', - 'wsgi.input': None, - 'REQUEST_METHOD': 'GET', - 'PATH_INFO': '/', - 'CONTENT_TYPE': 'text/plain', - 'wsgi.multithread': True, - 'QUERY_STRING': '', - 'eventlet.input': None} - return dict(environ, **overwrite) - - -def start_response(_status, _headers): - """Dummy start_response to use with WSGI tests.""" - pass -- cgit From bde9618560665392b00dd320b22804020d411b8a Mon Sep 17 00:00:00 2001 From: Eric Day Date: Wed, 18 Aug 2010 17:38:00 -0700 Subject: Added '-' as possible charater in module rgx. --- pylintrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pylintrc b/pylintrc index 36cc337e5..334d49f8e 100644 --- a/pylintrc +++ b/pylintrc @@ -13,7 +13,7 @@ variable-rgx=[a-z_][a-z0-9_]{0,30}$ method-rgx=[a-z_][a-z0-9_]{2,50}$ # Module names matching nova-* are ok (files in bin/) -module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(nova-[a-z0-9_]+))$ +module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(nova-[a-z0-9_-]+))$ # Don't require docstrings on tests. 
no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$ -- cgit From f7c556324d52095323ec18296c4064e5bb626c96 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 18 Aug 2010 17:38:51 -0700 Subject: fixing more network issues --- bin/nova-dhcpbridge | 10 ++-- nova/auth/manager.py | 20 ++++--- nova/models.py | 125 ++++++++++++++++++++++++++--------------- nova/network/service.py | 20 +++++-- nova/service.py | 23 ++++++-- nova/tests/auth_unittest.py | 1 - nova/tests/network_unittest.py | 28 +++++---- nova/tests/volume_unittest.py | 1 - run_tests.py | 2 +- 9 files changed, 147 insertions(+), 83 deletions(-) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index 266fd70ce..bd8fd9785 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -35,6 +35,7 @@ from nova import rpc from nova import utils from nova.network import linux_net from nova.network import service +from nova import datastore # for redis_db flag FLAGS = flags.FLAGS @@ -43,6 +44,8 @@ def add_lease(_mac, ip, _hostname, _interface): """Set the IP that was assigned by the DHCP server.""" if FLAGS.fake_rabbit: logging.debug("leasing_ip") + print FLAGS.redis_db + print FLAGS.sql_connection service.VlanNetworkService().lease_ip(ip) else: rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name), @@ -78,12 +81,8 @@ def main(): utils.default_flagfile(flagfile) argv = FLAGS(sys.argv) interface = os.environ.get('DNSMASQ_INTERFACE', 'br0') - LOG_FILENAME = 'example.log' - logging.basicConfig(filename=LOG_FILENAME,level=logging.DEBUG) - logging.debug("this is a test") sqlfile = os.environ.get('SQL_DB', '') if int(os.environ.get('TESTING', '0')): - logging.debug("fake rabbit is true") FLAGS.fake_rabbit = True FLAGS.redis_db = 8 FLAGS.network_size = 16 @@ -91,7 +90,8 @@ def main(): FLAGS.fake_network = True FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver' FLAGS.num_networks = 5 - FLAGS.sql_connection = 'sqlite:///%s' % sqlfile + FLAGS.sql_connection = 'mysql://root@localhost/test' + 
#FLAGS.sql_connection = 'sqlite:///%s' % sqlfile action = argv[1] if action in ['add', 'del', 'old']: mac = argv[2] diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 69816882e..eed67d8c3 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -529,11 +529,9 @@ class AuthManager(object): member_users) if project_dict: project = Project(**project_dict) - # FIXME(ja): EVIL HACK - this should poll from a pool - session = models.create_session() - net = models.Network(project_id=project.id, kind='vlan') - session.add(net) - session.commit() + # FIXME(ja): EVIL HACK + net = models.Network(project_id=project.id) + net.save() return project def add_to_project(self, user, project): @@ -580,6 +578,10 @@ class AuthManager(object): def delete_project(self, project): """Deletes a project""" + # FIXME(ja): EVIL HACK + if not isinstance(project, Project): + project = self.get_project(project) + project.network.delete() with self.driver() as drv: return drv.delete_project(Project.safe_id(project)) @@ -714,15 +716,15 @@ class AuthManager(object): zippy.writestr(FLAGS.credential_key_file, private_key) zippy.writestr(FLAGS.credential_cert_file, signed_cert) - network_data = vpn.NetworkData.lookup(pid) - if network_data: + (vpn_ip, vpn_port) = self.get_project_vpn_data(project) + if vpn_ip: configfile = open(FLAGS.vpn_client_template,"r") s = string.Template(configfile.read()) configfile.close() config = s.substitute(keyfile=FLAGS.credential_key_file, certfile=FLAGS.credential_cert_file, - ip=network_data.ip, - port=network_data.port) + ip=vpn_ip, + port=vpn_port) zippy.writestr(FLAGS.credential_vpn_file, config) else: logging.warn("No vpn data for project %s" % diff --git a/nova/models.py b/nova/models.py index aa9f3da09..70010eab1 100644 --- a/nova/models.py +++ b/nova/models.py @@ -65,19 +65,24 @@ class NovaBase(object): @classmethod def all(cls): session = NovaBase.get_session() - return session.query(cls).all() + result = session.query(cls).all() + 
session.commit() + return result @classmethod def count(cls): session = NovaBase.get_session() - return session.query(cls).count() + result = session.query(cls).count() + session.commit() + return result @classmethod def find(cls, obj_id): session = NovaBase.get_session() - #print cls try: - return session.query(cls).filter_by(id=obj_id).one() + result = session.query(cls).filter_by(id=obj_id).one() + session.commit() + return result except exc.NoResultFound: raise exception.NotFound("No model for id %s" % obj_id) @@ -89,12 +94,13 @@ class NovaBase(object): def delete(self): session = NovaBase.get_session() session.delete(self) - session.flush() + session.commit() def refresh(self): session = NovaBase.get_session() session.refresh(self) + class Image(Base, NovaBase): __tablename__ = 'images' id = Column(Integer, primary_key=True) @@ -128,9 +134,29 @@ class Image(Base, NovaBase): assert(val is None) -class PhysicalNode(Base): +class PhysicalNode(Base, NovaBase): __tablename__ = 'physical_nodes' + id = Column(String(255), primary_key=True) + +class Daemon(Base, NovaBase): + __tablename__ = 'daemons' id = Column(Integer, primary_key=True) + node_name = Column(String(255)) #, ForeignKey('physical_node.id')) + binary = Column(String(255)) + report_count = Column(Integer) + + @classmethod + def find_by_args(cls, node_name, binary): + session = NovaBase.get_session() + try: + query = session.query(cls).filter_by(node_name=node_name) + result = query.filter_by(binary=binary).one() + session.commit() + return result + except exc.NoResultFound: + raise exception.NotFound("No model for %s, %s" % (node_name, + binary)) + class Instance(Base, NovaBase): __tablename__ = 'instances' @@ -153,7 +179,7 @@ class Instance(Base, NovaBase): return "i-%s" % self.id - image_id = Column(Integer, ForeignKey('images.id'), nullable=False) + image_id = Column(Integer, ForeignKey('images.id'), nullable=True) kernel_id = Column(Integer, ForeignKey('images.id'), nullable=True) ramdisk_id = 
Column(Integer, ForeignKey('images.id'), nullable=True) @@ -204,8 +230,7 @@ class Volume(Base, NovaBase): user_id = Column(String(255)) #, ForeignKey('users.id'), nullable=False) project_id = Column(String(255)) #, ForeignKey('projects.id')) - # FIXME: should be physical_node_id = Column(Integer) - node_name = Column(String(255)) + node_name = Column(String(255)) #, ForeignKey('physical_node.id')) size = Column(Integer) alvailability_zone = Column(String(255)) # FIXME foreign key? instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) @@ -223,37 +248,6 @@ class ExportDevice(Base, NovaBase): volume = relationship(Volume, backref=backref('export_device', uselist=False)) -class Network(Base, NovaBase): - __tablename__ = 'networks' - id = Column(Integer, primary_key=True) - kind = Column(String(255)) - - injected = Column(Boolean, default=False) - network_str = Column(String(255)) - netmask = Column(String(255)) - bridge = Column(String(255)) - gateway = Column(String(255)) - broadcast = Column(String(255)) - dns = Column(String(255)) - - vlan = Column(Integer) - vpn_public_ip_str = Column(String(255)) - vpn_public_port = Column(Integer) - vpn_private_ip_str = Column(String(255)) - - project_id = Column(String(255)) #, ForeignKey('projects.id'), nullable=False) - # FIXME: should be physical_node_id = Column(Integer) - node_name = Column(String(255)) - - -class NetworkIndex(Base, NovaBase): - __tablename__ = 'network_indexes' - id = Column(Integer, primary_key=True) - index = Column(Integer) - network_id = Column(Integer, ForeignKey('networks.id'), nullable=True) - network = relationship(Network, backref=backref('network_index', - uselist=False)) - #FIXME can these both come from the same baseclass? 
class FixedIp(Base, NovaBase): @@ -261,7 +255,6 @@ class FixedIp(Base, NovaBase): id = Column(Integer, primary_key=True) ip_str = Column(String(255), unique=True) network_id = Column(Integer, ForeignKey('networks.id'), nullable=False) - network = relationship(Network, backref=backref('fixed_ips')) instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) instance = relationship(Instance, backref=backref('fixed_ip', uselist=False)) @@ -273,10 +266,13 @@ class FixedIp(Base, NovaBase): def find_by_ip_str(cls, ip_str): session = NovaBase.get_session() try: - return session.query(cls).filter_by(ip_str=ip_str).one() + result = session.query(cls).filter_by(ip_str=ip_str).one() + session.commit() + return result except exc.NoResultFound: raise exception.NotFound("No model for ip str %s" % ip_str) + class ElasticIp(Base, NovaBase): __tablename__ = 'elastic_ips' id = Column(Integer, primary_key=True) @@ -285,18 +281,57 @@ class ElasticIp(Base, NovaBase): fixed_ip = relationship(FixedIp, backref=backref('elastic_ips')) project_id = Column(String(255)) #, ForeignKey('projects.id'), nullable=False) - # FIXME: should be physical_node_id = Column(Integer) - node_name = Column(String(255)) + node_name = Column(String(255)) #, ForeignKey('physical_node.id')) @classmethod def find_by_ip_str(cls, ip_str): session = NovaBase.get_session() try: - return session.query(cls).filter_by(ip_str=ip_str).one() + result = session.query(cls).filter_by(ip_str=ip_str).one() + session.commit() + return result except exc.NoResultFound: raise exception.NotFound("No model for ip str %s" % ip_str) +class Network(Base, NovaBase): + __tablename__ = 'networks' + id = Column(Integer, primary_key=True) + kind = Column(String(255)) + + injected = Column(Boolean, default=False) + network_str = Column(String(255)) + netmask = Column(String(255)) + bridge = Column(String(255)) + gateway = Column(String(255)) + broadcast = Column(String(255)) + dns = Column(String(255)) + + vlan = Column(Integer) 
+ vpn_public_ip_str = Column(String(255)) + vpn_public_port = Column(Integer) + vpn_private_ip_str = Column(String(255)) + + project_id = Column(String(255)) #, ForeignKey('projects.id'), nullable=False) + node_name = Column(String(255)) #, ForeignKey('physical_node.id')) + + fixed_ips = relationship(FixedIp, + single_parent=True, + backref=backref('network'), + cascade='all, delete, delete-orphan') + + +class NetworkIndex(Base, NovaBase): + __tablename__ = 'network_indexes' + id = Column(Integer, primary_key=True) + index = Column(Integer) + network_id = Column(Integer, ForeignKey('networks.id'), nullable=True) + network = relationship(Network, backref=backref('network_index', + uselist=False)) + + + + def create_session(engine=None): return NovaBase.get_session() diff --git a/nova/network/service.py b/nova/network/service.py index 938d7832b..45bcf58ad 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -118,12 +118,14 @@ class BaseNetworkService(service.Service): # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if network.node_name: + session.commit() return network.node_name network.node_name = FLAGS.node_name network.kind = FLAGS.network_type session.add(network) session.commit() self._on_set_network_host(network) + return network.node_name def allocate_fixed_ip(self, project_id, instance_id, *args, **kwargs): """Gets fixed ip from the pool""" @@ -132,7 +134,8 @@ class BaseNetworkService(service.Service): session = models.NovaBase.get_session() query = session.query(models.FixedIp).filter_by(network_id=network.id) query = query.filter_by(reserved=False).filter_by(allocated=False) - fixed_ip = query.filter_by(leased=False).with_lockmode("update").first + query = query.filter_by(leased=False).with_lockmode("update") + fixed_ip = query.first() # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if not fixed_ip: @@ -233,16 +236,19 @@ class 
VlanNetworkService(BaseNetworkService): # NOTE(vish): this should probably be removed and added via # admin command or fixtures if models.NetworkIndex.count() == 0: + session = models.NovaBase.get_session() for i in range(FLAGS.num_networks): network_index = models.NetworkIndex() network_index.index = i - network_index.save() + session.add(network_index) + session.commit() def allocate_fixed_ip(self, project_id, instance_id, is_vpn=False, *args, **kwargs): """Gets a fixed ip from the pool""" network = get_network_for_project(project_id) if is_vpn: + # FIXME concurrency issue? fixed_ip = models.FixedIp.find_by_ip_str(network.vpn_private_ip_str) if fixed_ip.allocated: raise network_exception.AddressAlreadyAllocated() @@ -258,7 +264,6 @@ class VlanNetworkService(BaseNetworkService): else: parent = super(VlanNetworkService, self) ip_str = parent.allocate_fixed_ip(project_id, instance_id) - logging.debug("sql %s", FLAGS.sql_connection) _driver.ensure_vlan_bridge(network.vlan, network.bridge) return ip_str @@ -275,13 +280,16 @@ class VlanNetworkService(BaseNetworkService): def lease_ip(self, fixed_ip_str): """Called by bridge when ip is leased""" - logging.debug("sql %s", FLAGS.sql_connection) fixed_ip = models.FixedIp.find_by_ip_str(fixed_ip_str) if not fixed_ip.allocated: raise network_exception.AddressNotAllocated(fixed_ip_str) logging.debug("Leasing IP %s", fixed_ip_str) fixed_ip.leased = True fixed_ip.save() + print fixed_ip.allocated + print fixed_ip.leased + print fixed_ip.instance_id + print 'ip %s leased' % fixed_ip_str def release_ip(self, fixed_ip_str): """Called by bridge when ip is released""" @@ -321,13 +329,15 @@ class VlanNetworkService(BaseNetworkService): BOTTOM_RESERVED = 3 TOP_RESERVED = 1 + FLAGS.cnt_vpn_clients num_ips = len(project_net) + session = models.NovaBase.get_session() for i in range(num_ips): fixed_ip = models.FixedIp() fixed_ip.ip_str = str(project_net[i]) if i < BOTTOM_RESERVED or num_ips - i < TOP_RESERVED: fixed_ip.reserved = True 
fixed_ip.network = network - fixed_ip.save() + session.add(fixed_ip) + session.commit() def _get_network_index(self, network): diff --git a/nova/service.py b/nova/service.py index 96281bc6b..4c35bdefa 100644 --- a/nova/service.py +++ b/nova/service.py @@ -28,10 +28,10 @@ from twisted.internet import defer from twisted.internet import task from twisted.application import service -from nova import datastore +from nova import exception from nova import flags +from nova import models from nova import rpc -from nova.compute import model FLAGS = flags.FLAGS @@ -87,17 +87,28 @@ class Service(object, service.Service): return application @defer.inlineCallbacks - def report_state(self, nodename, daemon): + def report_state(self, node_name, binary): + """Update the state of this daemon in the datastore""" # TODO(termie): make this pattern be more elegant. -todd try: - record = model.Daemon(nodename, daemon) - record.heartbeat() + try: + #FIXME abstract this + daemon = models.find_by_args(node_name, binary) + except exception.NotFound(): + daemon = models.Daemon(node_name=node_name, + binary=binary) + self._update_daemon() + self.commit() if getattr(self, "model_disconnected", False): self.model_disconnected = False logging.error("Recovered model server connection!") - except datastore.ConnectionError, ex: + except Exception, ex: #FIXME this should only be connection error if not getattr(self, "model_disconnected", False): self.model_disconnected = True logging.exception("model server went away") yield + + def _update_daemon(daemon): + """Set any extra daemon data here""" + daemon.report_count = daemon.report_count + 1 diff --git a/nova/tests/auth_unittest.py b/nova/tests/auth_unittest.py index 0b404bfdc..59a81818c 100644 --- a/nova/tests/auth_unittest.py +++ b/nova/tests/auth_unittest.py @@ -32,7 +32,6 @@ FLAGS = flags.FLAGS class AuthTestCase(test.BaseTestCase): - flush_db = False def setUp(self): super(AuthTestCase, self).setUp() self.flags(connection_type='fake', diff 
--git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index 00aaac346..c94c81f72 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -23,6 +23,7 @@ import os import logging import tempfile +from nova import exception from nova import flags from nova import models from nova import test @@ -40,10 +41,10 @@ class NetworkTestCase(test.TrialTestCase): super(NetworkTestCase, self).setUp() # NOTE(vish): if you change these flags, make sure to change the # flags in the corresponding section in nova-dhcpbridge - fd, sqlfile = tempfile.mkstemp() - self.sqlfile = os.path.abspath(sqlfile) + self.sqlfile = 'test.sqlite' self.flags(connection_type='fake', - sql_connection='sqlite:///%s' % self.sqlfile, + #sql_connection='sqlite:///%s' % self.sqlfile, + sql_connection='mysql://root@localhost/test', fake_storage=True, fake_network=True, auth_driver='nova.auth.ldapdriver.FakeLdapDriver', @@ -53,6 +54,7 @@ class NetworkTestCase(test.TrialTestCase): self.manager = manager.AuthManager() self.user = self.manager.create_user('netuser', 'netuser', 'netuser') self.projects = [] + print FLAGS.sql_connection self.service = service.VlanNetworkService() for i in range(5): name = 'project%s' % i @@ -64,7 +66,6 @@ class NetworkTestCase(test.TrialTestCase): instance = models.Instance() instance.mac_address = utils.generate_mac() instance.hostname = 'fake' - instance.image_id = 'fake' instance.save() self.instance_id = instance.id @@ -73,16 +74,19 @@ class NetworkTestCase(test.TrialTestCase): for project in self.projects: self.manager.delete_project(project) self.manager.delete_user(self.user) - os.unlink(self.sqlfile) def test_public_network_association(self): """Makes sure that we can allocaate a public ip""" # FIXME better way of adding elastic ips pubnet = IPy.IP(flags.FLAGS.public_range) - elastic_ip = models.ElasticIp() - elastic_ip.ip_str = str(pubnet[0]) - elastic_ip.node_name = FLAGS.node_name - elastic_ip.save() + ip_str = str(pubnet[0]) 
+ try: + elastic_ip = models.ElasticIp.find_by_ip_str(ip_str) + except exception.NotFound: + elastic_ip = models.ElasticIp() + elastic_ip.ip_str = ip_str + elastic_ip.node_name = FLAGS.node_name + elastic_ip.save() eaddress = self.service.allocate_elastic_ip(self.projects[0].id) faddress = self.service.allocate_fixed_ip(self.projects[0].id, self.instance_id) @@ -101,7 +105,11 @@ class NetworkTestCase(test.TrialTestCase): self.instance_id) net = service.get_network_for_project(self.projects[0].id) self.assertTrue(is_allocated_in_project(address, self.projects[0].id)) + print 'I just got allocated' issue_ip(address, net.bridge, self.sqlfile) + obj = models.FixedIp.find_by_ip_str(address) + obj.refresh() + print obj.leased self.service.deallocate_fixed_ip(address) # Doesn't go away until it's dhcp released @@ -178,7 +186,7 @@ class NetworkTestCase(test.TrialTestCase): def test_too_many_networks(self): """Ensure error is raised if we run out of vpn ports""" projects = [] - networks_left = FLAGS.num_networks - len(self.projects) + networks_left = FLAGS.num_networks - models.Network.count() for i in range(networks_left): project = self.manager.create_project('many%s' % i, self.user) self.service.set_network_host(project.id) diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py index 62ea2a26c..82f71901a 100644 --- a/nova/tests/volume_unittest.py +++ b/nova/tests/volume_unittest.py @@ -86,7 +86,6 @@ class VolumeTestCase(test.TrialTestCase): for i in xrange(self.total_slots): vid = yield self.volume.create_volume(vol_size, user_id, project_id) vols.append(vid) - print models.Volume.find(vid).export_device.volume_id self.assertFailure(self.volume.create_volume(vol_size, user_id, project_id), diff --git a/run_tests.py b/run_tests.py index 77aa9088a..82c1aa9cf 100644 --- a/run_tests.py +++ b/run_tests.py @@ -55,7 +55,7 @@ from nova.tests.api_unittest import * from nova.tests.cloud_unittest import * from nova.tests.compute_unittest import * from 
nova.tests.flags_unittest import * -from nova.tests.model_unittest import * +#from nova.tests.model_unittest import * from nova.tests.network_unittest import * from nova.tests.objectstore_unittest import * from nova.tests.process_unittest import * -- cgit From b8747fb38eb1234744cdda85cb20bd27cd7fa9e8 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Wed, 18 Aug 2010 17:39:12 -0700 Subject: More bin/ pep8/pylint cleanup. --- bin/nova-compute | 2 +- bin/nova-dhcpbridge | 14 +++++++------- bin/nova-import-canonical-imagestore | 4 ++-- bin/nova-instancemonitor | 5 +++-- bin/nova-manage | 6 +++--- bin/nova-network | 1 + bin/nova-objectstore | 2 +- bin/nova-volume | 2 +- 8 files changed, 19 insertions(+), 17 deletions(-) diff --git a/bin/nova-compute b/bin/nova-compute index e0c12354f..ed9a55565 100755 --- a/bin/nova-compute +++ b/bin/nova-compute @@ -29,4 +29,4 @@ if __name__ == '__main__': twistd.serve(__file__) if __name__ == '__builtin__': - application = service.ComputeService.create() + application = service.ComputeService.create() # pylint: disable-msg=C0103 diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index f70a4482c..1f2ed4f89 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -40,29 +40,29 @@ from nova.network import service FLAGS = flags.FLAGS -def add_lease(_mac, ip, _hostname, _interface): +def add_lease(_mac, ip_address, _hostname, _interface): """Set the IP that was assigned by the DHCP server.""" if FLAGS.fake_rabbit: - service.VlanNetworkService().lease_ip(ip) + service.VlanNetworkService().lease_ip(ip_address) else: rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name), {"method": "lease_ip", - "args": {"fixed_ip": ip}}) + "args": {"fixed_ip": ip_address}}) -def old_lease(_mac, _ip, _hostname, _interface): +def old_lease(_mac, _ip_address, _hostname, _interface): """Do nothing, just an old lease update.""" logging.debug("Adopted old lease or got a change of mac/hostname") -def del_lease(_mac, ip, _hostname, _interface): +def 
del_lease(_mac, ip_address, _hostname, _interface): """Called when a lease expires.""" if FLAGS.fake_rabbit: - service.VlanNetworkService().release_ip(ip) + service.VlanNetworkService().release_ip(ip_address) else: rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name), {"method": "release_ip", - "args": {"fixed_ip": ip}}) + "args": {"fixed_ip": ip_address}}) def init_leases(interface): diff --git a/bin/nova-import-canonical-imagestore b/bin/nova-import-canonical-imagestore index 5165109b2..e6931d9db 100755 --- a/bin/nova-import-canonical-imagestore +++ b/bin/nova-import-canonical-imagestore @@ -35,12 +35,12 @@ from nova.objectstore import image FLAGS = flags.FLAGS -api_url = 'https://imagestore.canonical.com/api/dashboard' +API_URL = 'https://imagestore.canonical.com/api/dashboard' def get_images(): """Get a list of the images from the imagestore URL.""" - images = json.load(urllib2.urlopen(api_url))['images'] + images = json.load(urllib2.urlopen(API_URL))['images'] images = [img for img in images if img['title'].find('amd64') > -1] return images diff --git a/bin/nova-instancemonitor b/bin/nova-instancemonitor index 911fb6f42..fbac58889 100755 --- a/bin/nova-instancemonitor +++ b/bin/nova-instancemonitor @@ -35,9 +35,10 @@ if __name__ == '__main__': if __name__ == '__builtin__': logging.warn('Starting instance monitor') - m = monitor.InstanceMonitor() + # pylint: disable-msg=C0103 + monitor = monitor.InstanceMonitor() # This is the parent service that twistd will be looking for when it # parses this file, return it so that we can get it into globals below application = service.Application('nova-instancemonitor') - m.setServiceParent(application) + monitor.setServiceParent(application) diff --git a/bin/nova-manage b/bin/nova-manage index 071436b13..33141a49e 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -211,7 +211,7 @@ class ProjectCommands(object): f.write(zip_file) -categories = [ +CATEGORIES = [ ('user', UserCommands), ('project', ProjectCommands), 
('role', RoleCommands), @@ -258,11 +258,11 @@ def main(): if len(argv) < 1: print script_name + " category action []" print "Available categories:" - for k, _ in categories: + for k, _ in CATEGORIES: print "\t%s" % k sys.exit(2) category = argv.pop(0) - matches = lazy_match(category, categories) + matches = lazy_match(category, CATEGORIES) # instantiate the command group object category, fn = matches[0] command_object = fn() diff --git a/bin/nova-network b/bin/nova-network index ba9063f56..5753aafbe 100755 --- a/bin/nova-network +++ b/bin/nova-network @@ -33,4 +33,5 @@ if __name__ == '__main__': twistd.serve(__file__) if __name__ == '__builtin__': + # pylint: disable-msg=C0103 application = service.type_to_class(FLAGS.network_type).create() diff --git a/bin/nova-objectstore b/bin/nova-objectstore index 02f2bcb48..afcf13e24 100755 --- a/bin/nova-objectstore +++ b/bin/nova-objectstore @@ -35,4 +35,4 @@ if __name__ == '__main__': if __name__ == '__builtin__': utils.default_flagfile() - application = handler.get_application() + application = handler.get_application() # pylint: disable-msg=C0103 diff --git a/bin/nova-volume b/bin/nova-volume index f7a8fad37..8ef006ebc 100755 --- a/bin/nova-volume +++ b/bin/nova-volume @@ -29,4 +29,4 @@ if __name__ == '__main__': twistd.serve(__file__) if __name__ == '__builtin__': - application = service.VolumeService.create() + application = service.VolumeService.create() # pylint: disable-msg=C0103 -- cgit From 9ab034f8b0cb0946e1fdf44937cce58b53e7530b Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 18 Aug 2010 18:03:58 -0700 Subject: last few test fixes --- bin/nova-dhcpbridge | 14 ++++++++---- nova/network/service.py | 9 ++++---- nova/tests/fake_flags.py | 3 ++- nova/tests/network_unittest.py | 48 +++++++++++++++++------------------------- 4 files changed, 35 insertions(+), 39 deletions(-) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index bd8fd9785..b17a56e6e 100755 --- a/bin/nova-dhcpbridge +++ 
b/bin/nova-dhcpbridge @@ -44,7 +44,9 @@ def add_lease(_mac, ip, _hostname, _interface): """Set the IP that was assigned by the DHCP server.""" if FLAGS.fake_rabbit: logging.debug("leasing_ip") - print FLAGS.redis_db + from nova import models + print models.FixedIp.count() + print models.Network.count() print FLAGS.sql_connection service.VlanNetworkService().lease_ip(ip) else: @@ -81,7 +83,6 @@ def main(): utils.default_flagfile(flagfile) argv = FLAGS(sys.argv) interface = os.environ.get('DNSMASQ_INTERFACE', 'br0') - sqlfile = os.environ.get('SQL_DB', '') if int(os.environ.get('TESTING', '0')): FLAGS.fake_rabbit = True FLAGS.redis_db = 8 @@ -90,8 +91,13 @@ def main(): FLAGS.fake_network = True FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver' FLAGS.num_networks = 5 - FLAGS.sql_connection = 'mysql://root@localhost/test' - #FLAGS.sql_connection = 'sqlite:///%s' % sqlfile + path = os.path.abspath(os.path.join(os.path.dirname(__file__), + '..', + '_trial_temp', + 'nova.sqlite')) + print path + FLAGS.sql_connection = 'sqlite:///%s' % path + #FLAGS.sql_connection = 'mysql://root@localhost/test' action = argv[1] if action in ['add', 'del', 'old']: mac = argv[2] diff --git a/nova/network/service.py b/nova/network/service.py index 45bcf58ad..16ecfbf3e 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -23,7 +23,6 @@ Network Hosts are responsible for allocating ips and setting up network import logging import IPy -from sqlalchemy.orm import exc from nova import exception from nova import flags @@ -72,6 +71,10 @@ _driver = linux_net def type_to_class(network_type): """Convert a network_type string into an actual Python class""" + if not network_type: + logging.warn("Network type couldn't be determined, using %s" % + FLAGS.network_type) + network_type = FLAGS.network_type if network_type == 'flat': return FlatNetworkService elif network_type == 'vlan': @@ -286,10 +289,6 @@ class VlanNetworkService(BaseNetworkService): logging.debug("Leasing IP %s", 
fixed_ip_str) fixed_ip.leased = True fixed_ip.save() - print fixed_ip.allocated - print fixed_ip.leased - print fixed_ip.instance_id - print 'ip %s leased' % fixed_ip_str def release_ip(self, fixed_ip_str): """Called by bridge when ip is released""" diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py index ecbc65937..7fc83babc 100644 --- a/nova/tests/fake_flags.py +++ b/nova/tests/fake_flags.py @@ -26,4 +26,5 @@ FLAGS.fake_rabbit = True FLAGS.fake_network = True FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver' FLAGS.verbose = True -FLAGS.sql_connection = 'sqlite:///:memory:' +FLAGS.sql_connection = 'sqlite:///nova.sqlite' +#FLAGS.sql_connection = 'mysql://root@localhost/test' diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index c94c81f72..0f2ce060d 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -41,10 +41,7 @@ class NetworkTestCase(test.TrialTestCase): super(NetworkTestCase, self).setUp() # NOTE(vish): if you change these flags, make sure to change the # flags in the corresponding section in nova-dhcpbridge - self.sqlfile = 'test.sqlite' self.flags(connection_type='fake', - #sql_connection='sqlite:///%s' % self.sqlfile, - sql_connection='mysql://root@localhost/test', fake_storage=True, fake_network=True, auth_driver='nova.auth.ldapdriver.FakeLdapDriver', @@ -54,7 +51,6 @@ class NetworkTestCase(test.TrialTestCase): self.manager = manager.AuthManager() self.user = self.manager.create_user('netuser', 'netuser', 'netuser') self.projects = [] - print FLAGS.sql_connection self.service = service.VlanNetworkService() for i in range(5): name = 'project%s' % i @@ -105,17 +101,13 @@ class NetworkTestCase(test.TrialTestCase): self.instance_id) net = service.get_network_for_project(self.projects[0].id) self.assertTrue(is_allocated_in_project(address, self.projects[0].id)) - print 'I just got allocated' - issue_ip(address, net.bridge, self.sqlfile) - obj = 
models.FixedIp.find_by_ip_str(address) - obj.refresh() - print obj.leased + issue_ip(address, net.bridge) self.service.deallocate_fixed_ip(address) # Doesn't go away until it's dhcp released self.assertTrue(is_allocated_in_project(address, self.projects[0].id)) - release_ip(address, net.bridge, self.sqlfile) + release_ip(address, net.bridge) self.assertFalse(is_allocated_in_project(address, self.projects[0].id)) def test_side_effects(self): @@ -133,19 +125,19 @@ class NetworkTestCase(test.TrialTestCase): self.assertFalse(is_allocated_in_project(address, self.projects[1].id)) # Addresses are allocated before they're issued - issue_ip(address, net.bridge, self.sqlfile) - issue_ip(address2, net2.bridge, self.sqlfile) + issue_ip(address, net.bridge) + issue_ip(address2, net2.bridge) self.service.deallocate_fixed_ip(address) - release_ip(address, net.bridge, self.sqlfile) + release_ip(address, net.bridge) self.assertFalse(is_allocated_in_project(address, self.projects[0].id)) # First address release shouldn't affect the second self.assertTrue(is_allocated_in_project(address2, self.projects[1].id)) self.service.deallocate_fixed_ip(address2) - issue_ip(address2, net.bridge, self.sqlfile) - release_ip(address2, net2.bridge, self.sqlfile) + issue_ip(address2, net.bridge) + release_ip(address2, net2.bridge) self.assertFalse(is_allocated_in_project(address2, self.projects[1].id)) def test_subnet_edge(self): @@ -158,9 +150,9 @@ class NetworkTestCase(test.TrialTestCase): address2 = self.service.allocate_fixed_ip(project_id, self.instance_id) address3 = self.service.allocate_fixed_ip(project_id, self.instance_id) net = service.get_network_for_project(project_id) - issue_ip(address, net.bridge, self.sqlfile) - issue_ip(address2, net.bridge, self.sqlfile) - issue_ip(address3, net.bridge, self.sqlfile) + issue_ip(address, net.bridge) + issue_ip(address2, net.bridge) + issue_ip(address3, net.bridge) self.assertFalse(is_allocated_in_project(address, self.projects[0].id)) 
self.assertFalse(is_allocated_in_project(address2, @@ -170,9 +162,9 @@ class NetworkTestCase(test.TrialTestCase): self.service.deallocate_fixed_ip(address) self.service.deallocate_fixed_ip(address2) self.service.deallocate_fixed_ip(address3) - release_ip(address, net.bridge, self.sqlfile) - release_ip(address2, net.bridge, self.sqlfile) - release_ip(address3, net.bridge, self.sqlfile) + release_ip(address, net.bridge) + release_ip(address2, net.bridge) + release_ip(address3, net.bridge) net = service.get_network_for_project(self.projects[0].id) self.service.deallocate_fixed_ip(first) @@ -205,9 +197,9 @@ class NetworkTestCase(test.TrialTestCase): address = self.service.allocate_fixed_ip(self.projects[0].id, self.instance_id) net = service.get_network_for_project(self.projects[0].id) - issue_ip(address, net.bridge, self.sqlfile) + issue_ip(address, net.bridge) self.service.deallocate_fixed_ip(address) - release_ip(address, net.bridge, self.sqlfile) + release_ip(address, net.bridge) address2 = self.service.allocate_fixed_ip(self.projects[0].id, self.instance_id) @@ -245,7 +237,7 @@ class NetworkTestCase(test.TrialTestCase): project_id = self.projects[0].id addresses.append(self.service.allocate_fixed_ip(project_id, self.instance_id)) - issue_ip(addresses[i],network.bridge, self.sqlfile) + issue_ip(addresses[i],network.bridge) self.assertEqual(available_ips(network), 0) self.assertRaises(NoMoreAddresses, @@ -255,7 +247,7 @@ class NetworkTestCase(test.TrialTestCase): for i in range(len(addresses)): self.service.deallocate_fixed_ip(addresses[i]) - release_ip(addresses[i],network.bridge, self.sqlfile) + release_ip(addresses[i],network.bridge) self.assertEqual(available_ips(network), num_available_ips) @@ -291,22 +283,20 @@ def binpath(script): return os.path.abspath(os.path.join(__file__, "../../../bin", script)) -def issue_ip(private_ip, interface, sqlfile): +def issue_ip(private_ip, interface): """Run add command on dhcpbridge""" cmd = "%s add fake %s fake" % 
(binpath('nova-dhcpbridge'), private_ip) env = {'DNSMASQ_INTERFACE': interface, 'TESTING': '1', - 'SQL_DB': sqlfile, 'FLAGFILE': FLAGS.dhcpbridge_flagfile} (out, err) = utils.execute(cmd, addl_env=env) logging.debug("ISSUE_IP: %s, %s ", out, err) -def release_ip(private_ip, interface, sqlfile): +def release_ip(private_ip, interface): """Run del command on dhcpbridge""" cmd = "%s del fake %s fake" % (binpath('nova-dhcpbridge'), private_ip) env = {'DNSMASQ_INTERFACE': interface, - 'SQL_DB': sqlfile, 'TESTING': '1', 'FLAGFILE': FLAGS.dhcpbridge_flagfile} (out, err) = utils.execute(cmd, addl_env=env) -- cgit From 47e98cdae2a6233cb475c34207758a29c0ef7a4c Mon Sep 17 00:00:00 2001 From: Eric Day Date: Wed, 18 Aug 2010 18:25:16 -0700 Subject: Removed old cloud_topic queue setup, it is no longer used. --- bin/nova-api | 8 -------- nova/endpoint/cloud.py | 1 - nova/flags.py | 1 - nova/tests/cloud_unittest.py | 4 ---- 4 files changed, 14 deletions(-) diff --git a/bin/nova-api b/bin/nova-api index 13baf22a7..a3ad5a0e1 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -26,7 +26,6 @@ from tornado import httpserver from tornado import ioloop from nova import flags -from nova import rpc from nova import server from nova import utils from nova.endpoint import admin @@ -43,14 +42,7 @@ def main(_argv): 'Admin': admin.AdminController()} _app = api.APIServerApplication(controllers) - conn = rpc.Connection.instance() - consumer = rpc.AdapterConsumer(connection=conn, - topic=FLAGS.cloud_topic, - proxy=controllers['Cloud']) - io_inst = ioloop.IOLoop.instance() - _injected = consumer.attach_to_tornado(io_inst) - http_server = httpserver.HTTPServer(_app) http_server.listen(FLAGS.cc_port) logging.debug('Started HTTP server on %s', FLAGS.cc_port) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 30634429d..8e2beb1e3 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -45,7 +45,6 @@ from nova.volume import service FLAGS = flags.FLAGS 
-flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') def _gen_key(user_id, key_name): diff --git a/nova/flags.py b/nova/flags.py index e3feb252d..f46017f77 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -168,7 +168,6 @@ def DECLARE(name, module_string, flag_values=FLAGS): DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake') DEFINE_integer('s3_port', 3333, 's3 port') DEFINE_string('s3_host', '127.0.0.1', 's3 host') -#DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on') DEFINE_string('volume_topic', 'volume', 'the topic volume nodes listen on') DEFINE_string('network_topic', 'network', 'the topic network nodes listen on') diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py index 3501771cc..900ff5a97 100644 --- a/nova/tests/cloud_unittest.py +++ b/nova/tests/cloud_unittest.py @@ -47,10 +47,6 @@ class CloudTestCase(test.BaseTestCase): # set up our cloud self.cloud = cloud.CloudController() - self.cloud_consumer = rpc.AdapterConsumer(connection=self.conn, - topic=FLAGS.cloud_topic, - proxy=self.cloud) - self.injected.append(self.cloud_consumer.attach_to_tornado(self.ioloop)) # set up a service self.compute = service.ComputeService() -- cgit From a74f2a3ca4e26c451a002f9a89f3ba4ac4a083c4 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 18 Aug 2010 18:32:08 -0700 Subject: fix report state --- nova/compute/service.py | 26 -------------------------- nova/models.py | 2 +- nova/service.py | 13 +++++++------ 3 files changed, 8 insertions(+), 33 deletions(-) diff --git a/nova/compute/service.py b/nova/compute/service.py index 708134072..3909c8245 100644 --- a/nova/compute/service.py +++ b/nova/compute/service.py @@ -81,22 +81,6 @@ class ComputeService(service.Service): pass return defer.succeed(len(self._instances)) - @defer.inlineCallbacks - def report_state(self, nodename, daemon): - # TODO(termie): make this 
pattern be more elegant. -todd - try: - record = model.Daemon(nodename, daemon) - record.heartbeat() - if getattr(self, "model_disconnected", False): - self.model_disconnected = False - logging.error("Recovered model server connection!") - - except model.ConnectionError, ex: - if not getattr(self, "model_disconnected", False): - self.model_disconnected = True - logging.exception("model server went away") - yield - @defer.inlineCallbacks @exception.wrap_exception def run_instance(self, instance_id, **_kwargs): @@ -214,13 +198,3 @@ class ComputeService(service.Service): "sudo virsh detach-disk %s %s " % (instance_id, target)) volume.finish_detach() defer.returnValue(True) - - -class Group(object): - def __init__(self, group_id): - self.group_id = group_id - - -class ProductCode(object): - def __init__(self, product_code): - self.product_code = product_code diff --git a/nova/models.py b/nova/models.py index 70010eab1..d0b66d9b7 100644 --- a/nova/models.py +++ b/nova/models.py @@ -143,7 +143,7 @@ class Daemon(Base, NovaBase): id = Column(Integer, primary_key=True) node_name = Column(String(255)) #, ForeignKey('physical_node.id')) binary = Column(String(255)) - report_count = Column(Integer) + report_count = Column(Integer, nullable=False, default=0) @classmethod def find_by_args(cls, node_name, binary): diff --git a/nova/service.py b/nova/service.py index 4c35bdefa..29f47e833 100644 --- a/nova/service.py +++ b/nova/service.py @@ -93,12 +93,13 @@ class Service(object, service.Service): try: try: #FIXME abstract this - daemon = models.find_by_args(node_name, binary) - except exception.NotFound(): + daemon = models.Daemon.find_by_args(node_name, binary) + except exception.NotFound: daemon = models.Daemon(node_name=node_name, - binary=binary) - self._update_daemon() - self.commit() + binary=binary, + report_count=0) + self._update_daemon(daemon) + daemon.save() if getattr(self, "model_disconnected", False): self.model_disconnected = False logging.error("Recovered model 
server connection!") @@ -109,6 +110,6 @@ class Service(object, service.Service): logging.exception("model server went away") yield - def _update_daemon(daemon): + def _update_daemon(self, daemon): """Set any extra daemon data here""" daemon.report_count = daemon.report_count + 1 -- cgit From 24c7080249113fc6c87a58d97405f5d32c6db5e2 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Wed, 18 Aug 2010 18:38:34 -0700 Subject: More pylintrc updates. --- pylintrc | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/pylintrc b/pylintrc index 334d49f8e..6702ca895 100644 --- a/pylintrc +++ b/pylintrc @@ -4,10 +4,12 @@ disable-msg=W0511,W0142 [Basic] -# Variables can be 1 to 31 characters long, with -# lowercase and underscores +# Variable names can be 1 to 31 characters long, with lowercase and underscores variable-rgx=[a-z_][a-z0-9_]{0,30}$ +# Argument names can be 2 to 31 characters long, with lowercase and underscores +argument-rgx=[a-z_][a-z0-9_]{1,30}$ + # Method names should be at least 3 characters long # and be lowecased with underscores method-rgx=[a-z_][a-z0-9_]{2,50}$ @@ -21,3 +23,4 @@ no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$ [Design] max-public-methods=100 min-public-methods=0 +max-args=6 -- cgit From 59c43ba5b8213e39f726acbe2b137998cae39a26 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Wed, 18 Aug 2010 22:14:34 -0700 Subject: Cleaned up pep8/pylint style issues in nova/auth. There are still a few pylint warnings in manager.py, but the patch is already fairly large. 
--- nova/auth/fakeldap.py | 37 +++++++++++++----------- nova/auth/ldapdriver.py | 62 ++++++++++++++++++++-------------------- nova/auth/manager.py | 76 ++++++++++++++++++++++++++----------------------- nova/auth/rbac.py | 38 +++++++++++++++++-------- nova/auth/signer.py | 51 ++++++++++++++++++++------------- 5 files changed, 149 insertions(+), 115 deletions(-) diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py index bc744fa01..bfc3433c5 100644 --- a/nova/auth/fakeldap.py +++ b/nova/auth/fakeldap.py @@ -30,20 +30,23 @@ from nova import datastore SCOPE_BASE = 0 SCOPE_ONELEVEL = 1 # not implemented -SCOPE_SUBTREE = 2 +SCOPE_SUBTREE = 2 MOD_ADD = 0 MOD_DELETE = 1 -class NO_SUCH_OBJECT(Exception): +class NO_SUCH_OBJECT(Exception): # pylint: disable-msg=C0103 + """Duplicate exception class from real LDAP module.""" pass -class OBJECT_CLASS_VIOLATION(Exception): +class OBJECT_CLASS_VIOLATION(Exception): # pylint: disable-msg=C0103 + """Duplicate exception class from real LDAP module.""" pass -def initialize(uri): +def initialize(_uri): + """Opens a fake connection with an LDAP server.""" return FakeLDAP() @@ -68,7 +71,7 @@ def _match_query(query, attrs): # cut off the ! 
and the nested parentheses return not _match_query(query[2:-1], attrs) - (k, sep, v) = inner.partition('=') + (k, _sep, v) = inner.partition('=') return _match(k, v, attrs) @@ -85,20 +88,20 @@ def _paren_groups(source): if source[pos] == ')': count -= 1 if count == 0: - result.append(source[start:pos+1]) + result.append(source[start:pos + 1]) return result -def _match(k, v, attrs): +def _match(key, value, attrs): """Match a given key and value against an attribute list.""" - if k not in attrs: + if key not in attrs: return False - if k != "objectclass": - return v in attrs[k] + if key != "objectclass": + return value in attrs[key] # it is an objectclass check, so check subclasses - values = _subs(v) - for value in values: - if value in attrs[k]: + values = _subs(value) + for v in values: + if v in attrs[key]: return True return False @@ -145,6 +148,7 @@ def _to_json(unencoded): class FakeLDAP(object): #TODO(vish): refactor this class to use a wrapper instead of accessing # redis directly + """Fake LDAP connection.""" def simple_bind_s(self, dn, password): """This method is ignored, but provided for compatibility.""" @@ -207,6 +211,7 @@ class FakeLDAP(object): # get the attributes from redis attrs = redis.hgetall(key) # turn the values from redis into lists + # pylint: disable-msg=E1103 attrs = dict([(k, _from_json(v)) for k, v in attrs.iteritems()]) # filter the objects by query @@ -215,12 +220,12 @@ class FakeLDAP(object): attrs = dict([(k, v) for k, v in attrs.iteritems() if not fields or k in fields]) objects.append((key[len(self.__redis_prefix):], attrs)) + # pylint: enable-msg=E1103 if objects == []: raise NO_SUCH_OBJECT() return objects @property - def __redis_prefix(self): + def __redis_prefix(self): # pylint: disable-msg=R0201 + """Get the prefix to use for all redis keys.""" return 'ldap:' - - diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 6bf7fcd1e..74ba011b5 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -34,7 
+34,7 @@ from nova import flags FLAGS = flags.FLAGS flags.DEFINE_string('ldap_url', 'ldap://localhost', 'Point this at your ldap server') -flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password') +flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password') flags.DEFINE_string('ldap_user_dn', 'cn=Manager,dc=example,dc=com', 'DN of admin user') flags.DEFINE_string('ldap_user_unit', 'Users', 'OID for Users') @@ -63,14 +63,18 @@ flags.DEFINE_string('ldap_developer', # to define a set interface for AuthDrivers. I'm delaying # creating this now because I'm expecting an auth refactor # in which we may want to change the interface a bit more. + + class LdapDriver(object): """Ldap Auth driver Defines enter and exit and therefore supports the with/as syntax. """ + def __init__(self): """Imports the LDAP module""" self.ldap = __import__('ldap') + self.conn = None def __enter__(self): """Creates the connection to LDAP""" @@ -78,7 +82,7 @@ class LdapDriver(object): self.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) return self - def __exit__(self, type, value, traceback): + def __exit__(self, exc_type, exc_value, traceback): """Destroys the connection to LDAP""" self.conn.unbind_s() return False @@ -123,11 +127,11 @@ class LdapDriver(object): def get_projects(self, uid=None): """Retrieve list of projects""" - filter = '(objectclass=novaProject)' + pattern = '(objectclass=novaProject)' if uid: - filter = "(&%s(member=%s))" % (filter, self.__uid_to_dn(uid)) + pattern = "(&%s(member=%s))" % (pattern, self.__uid_to_dn(uid)) attrs = self.__find_objects(FLAGS.ldap_project_subtree, - filter) + pattern) return [self.__to_project(attr) for attr in attrs] def create_user(self, name, access_key, secret_key, is_admin): @@ -194,8 +198,7 @@ class LdapDriver(object): ('cn', [name]), ('description', [description]), ('projectManager', [manager_dn]), - ('member', members) - ] + ('member', members)] self.conn.add_s('cn=%s,%s' % (name, FLAGS.ldap_project_subtree), 
attr) return self.__to_project(dict(attr)) @@ -287,7 +290,6 @@ class LdapDriver(object): def __key_pair_exists(self, uid, key_name): """Check if key pair exists""" - return self.get_user(uid) != None return self.get_key_pair(uid, key_name) != None def __project_exists(self, project_id): @@ -310,7 +312,7 @@ class LdapDriver(object): except self.ldap.NO_SUCH_OBJECT: return [] # just return the DNs - return [dn for dn, attributes in res] + return [dn for dn, _attributes in res] def __find_objects(self, dn, query=None, scope=None): """Find objects by query""" @@ -346,7 +348,8 @@ class LdapDriver(object): for key in keys: self.delete_key_pair(uid, key['name']) - def __role_to_dn(self, role, project_id=None): + @staticmethod + def __role_to_dn(role, project_id=None): """Convert role to corresponding dn""" if project_id == None: return FLAGS.__getitem__("ldap_%s" % role).value @@ -356,7 +359,7 @@ class LdapDriver(object): FLAGS.ldap_project_subtree) def __create_group(self, group_dn, name, uid, - description, member_uids = None): + description, member_uids=None): """Create a group""" if self.__group_exists(group_dn): raise exception.Duplicate("Group can't be created because " @@ -375,8 +378,7 @@ class LdapDriver(object): ('objectclass', ['groupOfNames']), ('cn', [name]), ('description', [description]), - ('member', members) - ] + ('member', members)] self.conn.add_s(group_dn, attr) def __is_in_group(self, uid, group_dn): @@ -402,9 +404,7 @@ class LdapDriver(object): if self.__is_in_group(uid, group_dn): raise exception.Duplicate("User %s is already a member of " "the group %s" % (uid, group_dn)) - attr = [ - (self.ldap.MOD_ADD, 'member', self.__uid_to_dn(uid)) - ] + attr = [(self.ldap.MOD_ADD, 'member', self.__uid_to_dn(uid))] self.conn.modify_s(group_dn, attr) def __remove_from_group(self, uid, group_dn): @@ -432,7 +432,7 @@ class LdapDriver(object): self.conn.modify_s(group_dn, attr) except self.ldap.OBJECT_CLASS_VIOLATION: logging.debug("Attempted to remove the last 
member of a group. " - "Deleting the group at %s instead." % group_dn ) + "Deleting the group at %s instead.", group_dn) self.__delete_group(group_dn) def __remove_from_all(self, uid): @@ -440,7 +440,6 @@ class LdapDriver(object): if not self.__user_exists(uid): raise exception.NotFound("User %s can't be removed from all " "because the user doesn't exist" % (uid,)) - dn = self.__uid_to_dn(uid) role_dns = self.__find_group_dns_with_member( FLAGS.role_project_subtree, uid) for role_dn in role_dns: @@ -448,7 +447,7 @@ class LdapDriver(object): project_dns = self.__find_group_dns_with_member( FLAGS.ldap_project_subtree, uid) for project_dn in project_dns: - self.__safe_remove_from_group(uid, role_dn) + self.__safe_remove_from_group(uid, project_dn) def __delete_group(self, group_dn): """Delete Group""" @@ -461,7 +460,8 @@ class LdapDriver(object): for role_dn in self.__find_role_dns(project_dn): self.__delete_group(role_dn) - def __to_user(self, attr): + @staticmethod + def __to_user(attr): """Convert ldap attributes to User object""" if attr == None: return None @@ -470,10 +470,10 @@ class LdapDriver(object): 'name': attr['cn'][0], 'access': attr['accessKey'][0], 'secret': attr['secretKey'][0], - 'admin': (attr['isAdmin'][0] == 'TRUE') - } + 'admin': (attr['isAdmin'][0] == 'TRUE')} - def __to_key_pair(self, owner, attr): + @staticmethod + def __to_key_pair(owner, attr): """Convert ldap attributes to KeyPair object""" if attr == None: return None @@ -482,8 +482,7 @@ class LdapDriver(object): 'name': attr['cn'][0], 'owner_id': owner, 'public_key': attr['sshPublicKey'][0], - 'fingerprint': attr['keyFingerprint'][0], - } + 'fingerprint': attr['keyFingerprint'][0]} def __to_project(self, attr): """Convert ldap attributes to Project object""" @@ -495,21 +494,22 @@ class LdapDriver(object): 'name': attr['cn'][0], 'project_manager_id': self.__dn_to_uid(attr['projectManager'][0]), 'description': attr.get('description', [None])[0], - 'member_ids': [self.__dn_to_uid(x) for x in 
member_dns] - } + 'member_ids': [self.__dn_to_uid(x) for x in member_dns]} - def __dn_to_uid(self, dn): + @staticmethod + def __dn_to_uid(dn): """Convert user dn to uid""" return dn.split(',')[0].split('=')[1] - def __uid_to_dn(self, dn): + @staticmethod + def __uid_to_dn(dn): """Convert uid to dn""" return 'uid=%s,%s' % (dn, FLAGS.ldap_user_subtree) class FakeLdapDriver(LdapDriver): """Fake Ldap Auth driver""" - def __init__(self): + + def __init__(self): # pylint: disable-msg=W0231 __import__('nova.auth.fakeldap') self.ldap = sys.modules['nova.auth.fakeldap'] - diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 80ee78896..284b29502 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -23,7 +23,7 @@ Nova authentication management import logging import os import shutil -import string +import string # pylint: disable-msg=W0402 import tempfile import uuid import zipfile @@ -194,12 +194,12 @@ class Project(AuthBase): @property def vpn_ip(self): - ip, port = AuthManager().get_project_vpn_data(self) + ip, _port = AuthManager().get_project_vpn_data(self) return ip @property def vpn_port(self): - ip, port = AuthManager().get_project_vpn_data(self) + _ip, port = AuthManager().get_project_vpn_data(self) return port def has_manager(self, user): @@ -221,11 +221,9 @@ class Project(AuthBase): return AuthManager().get_credentials(user, self) def __repr__(self): - return "Project('%s', '%s', '%s', '%s', %s)" % (self.id, - self.name, - self.project_manager_id, - self.description, - self.member_ids) + return "Project('%s', '%s', '%s', '%s', %s)" % \ + (self.id, self.name, self.project_manager_id, self.description, + self.member_ids) class AuthManager(object): @@ -297,7 +295,7 @@ class AuthManager(object): @return: User and project that the request represents. 
""" # TODO(vish): check for valid timestamp - (access_key, sep, project_id) = access.partition(':') + (access_key, _sep, project_id) = access.partition(':') logging.info('Looking up user: %r', access_key) user = self.get_user_from_access_key(access_key) @@ -320,7 +318,8 @@ class AuthManager(object): raise exception.NotFound('User %s is not a member of project %s' % (user.id, project.id)) if check_type == 's3': - expected_signature = signer.Signer(user.secret.encode()).s3_authorization(headers, verb, path) + sign = signer.Signer(user.secret.encode()) + expected_signature = sign.s3_authorization(headers, verb, path) logging.debug('user.secret: %s', user.secret) logging.debug('expected_signature: %s', expected_signature) logging.debug('signature: %s', signature) @@ -465,7 +464,8 @@ class AuthManager(object): with self.driver() as drv: drv.remove_role(User.safe_id(user), role, Project.safe_id(project)) - def get_roles(self, project_roles=True): + @staticmethod + def get_roles(project_roles=True): """Get list of allowed roles""" if project_roles: return list(set(FLAGS.allowed_roles) - set(FLAGS.global_roles)) @@ -518,10 +518,10 @@ class AuthManager(object): if member_users: member_users = [User.safe_id(u) for u in member_users] with self.driver() as drv: - project_dict = drv.create_project(name, - User.safe_id(manager_user), - description, - member_users) + project_dict = drv.create_project(name, + User.safe_id(manager_user), + description, + member_users) if project_dict: return Project(**project_dict) @@ -549,7 +549,8 @@ class AuthManager(object): return drv.remove_from_project(User.safe_id(user), Project.safe_id(project)) - def get_project_vpn_data(self, project): + @staticmethod + def get_project_vpn_data(project): """Gets vpn ip and port for project @type project: Project or project_id @@ -613,8 +614,10 @@ class AuthManager(object): @rtype: User @return: The new user. 
""" - if access == None: access = str(uuid.uuid4()) - if secret == None: secret = str(uuid.uuid4()) + if access == None: + access = str(uuid.uuid4()) + if secret == None: + secret = str(uuid.uuid4()) with self.driver() as drv: user_dict = drv.create_user(name, access, secret, admin) if user_dict: @@ -656,10 +659,10 @@ class AuthManager(object): def create_key_pair(self, user, key_name, public_key, fingerprint): """Creates a key pair for user""" with self.driver() as drv: - kp_dict = drv.create_key_pair(User.safe_id(user), - key_name, - public_key, - fingerprint) + kp_dict = drv.create_key_pair(User.safe_id(user), + key_name, + public_key, + fingerprint) if kp_dict: return KeyPair(**kp_dict) @@ -702,7 +705,7 @@ class AuthManager(object): network_data = vpn.NetworkData.lookup(pid) if network_data: - configfile = open(FLAGS.vpn_client_template,"r") + configfile = open(FLAGS.vpn_client_template, "r") s = string.Template(configfile.read()) configfile.close() config = s.substitute(keyfile=FLAGS.credential_key_file, @@ -717,10 +720,10 @@ class AuthManager(object): zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(user.id)) zippy.close() with open(zf, 'rb') as f: - buffer = f.read() + read_buffer = f.read() shutil.rmtree(tmpdir) - return buffer + return read_buffer def get_environment_rc(self, user, project=None): """Get credential zip for user in project""" @@ -731,18 +734,18 @@ class AuthManager(object): pid = Project.safe_id(project) return self.__generate_rc(user.access, user.secret, pid) - def __generate_rc(self, access, secret, pid): + @staticmethod + def __generate_rc(access, secret, pid): """Generate rc file for user""" rc = open(FLAGS.credentials_template).read() - rc = rc % { 'access': access, - 'project': pid, - 'secret': secret, - 'ec2': FLAGS.ec2_url, - 's3': 'http://%s:%s' % (FLAGS.s3_host, FLAGS.s3_port), - 'nova': FLAGS.ca_file, - 'cert': FLAGS.credential_cert_file, - 'key': FLAGS.credential_key_file, - } + rc = rc % {'access': access, + 'project': pid, + 
'secret': secret, + 'ec2': FLAGS.ec2_url, + 's3': 'http://%s:%s' % (FLAGS.s3_host, FLAGS.s3_port), + 'nova': FLAGS.ca_file, + 'cert': FLAGS.credential_cert_file, + 'key': FLAGS.credential_key_file} return rc def _generate_x509_cert(self, uid, pid): @@ -753,6 +756,7 @@ class AuthManager(object): signed_cert = crypto.sign_csr(csr, pid) return (private_key, signed_cert) - def __cert_subject(self, uid): + @staticmethod + def __cert_subject(uid): """Helper to generate cert subject""" return FLAGS.credential_cert_subject % (uid, utils.isotime()) diff --git a/nova/auth/rbac.py b/nova/auth/rbac.py index 1446e4e27..d157f44b3 100644 --- a/nova/auth/rbac.py +++ b/nova/auth/rbac.py @@ -16,40 +16,54 @@ # License for the specific language governing permissions and limitations # under the License. +"""Role-based access control decorators to use fpr wrapping other +methods with.""" + from nova import exception -from nova.auth import manager def allow(*roles): - def wrap(f): - def wrapped_f(self, context, *args, **kwargs): + """Allow the given roles access the wrapped function.""" + + def wrap(func): # pylint: disable-msg=C0111 + + def wrapped_func(self, context, *args, + **kwargs): # pylint: disable-msg=C0111 if context.user.is_superuser(): - return f(self, context, *args, **kwargs) + return func(self, context, *args, **kwargs) for role in roles: if __matches_role(context, role): - return f(self, context, *args, **kwargs) + return func(self, context, *args, **kwargs) raise exception.NotAuthorized() - return wrapped_f + + return wrapped_func + return wrap def deny(*roles): - def wrap(f): - def wrapped_f(self, context, *args, **kwargs): + """Deny the given roles access the wrapped function.""" + + def wrap(func): # pylint: disable-msg=C0111 + + def wrapped_func(self, context, *args, + **kwargs): # pylint: disable-msg=C0111 if context.user.is_superuser(): - return f(self, context, *args, **kwargs) + return func(self, context, *args, **kwargs) for role in roles: if 
__matches_role(context, role): raise exception.NotAuthorized() - return f(self, context, *args, **kwargs) - return wrapped_f + return func(self, context, *args, **kwargs) + + return wrapped_func + return wrap def __matches_role(context, role): + """Check if a role is allowed.""" if role == 'all': return True if role == 'none': return False return context.project.has_role(context.user.id, role) - diff --git a/nova/auth/signer.py b/nova/auth/signer.py index 8334806d2..f7d29f534 100644 --- a/nova/auth/signer.py +++ b/nova/auth/signer.py @@ -50,15 +50,15 @@ import logging import urllib # NOTE(vish): for new boto -import boto +import boto # NOTE(vish): for old boto -import boto.utils +import boto.utils from nova.exception import Error class Signer(object): - """ hacked up code from boto/connection.py """ + """Hacked up code from boto/connection.py""" def __init__(self, secret_key): self.hmac = hmac.new(secret_key, digestmod=hashlib.sha1) @@ -66,22 +66,27 @@ class Signer(object): self.hmac_256 = hmac.new(secret_key, digestmod=hashlib.sha256) def s3_authorization(self, headers, verb, path): + """Generate S3 authorization string.""" c_string = boto.utils.canonical_string(verb, path, headers) - hmac = self.hmac.copy() - hmac.update(c_string) - b64_hmac = base64.encodestring(hmac.digest()).strip() + hmac_copy = self.hmac.copy() + hmac_copy.update(c_string) + b64_hmac = base64.encodestring(hmac_copy.digest()).strip() return b64_hmac def generate(self, params, verb, server_string, path): + """Generate auth string according to what SignatureVersion is given.""" if params['SignatureVersion'] == '0': return self._calc_signature_0(params) if params['SignatureVersion'] == '1': return self._calc_signature_1(params) if params['SignatureVersion'] == '2': return self._calc_signature_2(params, verb, server_string, path) - raise Error('Unknown Signature Version: %s' % self.SignatureVersion) + raise Error('Unknown Signature Version: %s' % + params['SignatureVersion']) - def 
_get_utf8_value(self, value): + @staticmethod + def _get_utf8_value(value): + """Get the UTF8-encoded version of a value.""" if not isinstance(value, str) and not isinstance(value, unicode): value = str(value) if isinstance(value, unicode): @@ -90,10 +95,11 @@ class Signer(object): return value def _calc_signature_0(self, params): + """Generate AWS signature version 0 string.""" s = params['Action'] + params['Timestamp'] self.hmac.update(s) keys = params.keys() - keys.sort(cmp = lambda x, y: cmp(x.lower(), y.lower())) + keys.sort(cmp=lambda x, y: cmp(x.lower(), y.lower())) pairs = [] for key in keys: val = self._get_utf8_value(params[key]) @@ -101,8 +107,9 @@ class Signer(object): return base64.b64encode(self.hmac.digest()) def _calc_signature_1(self, params): + """Generate AWS signature version 1 string.""" keys = params.keys() - keys.sort(cmp = lambda x, y: cmp(x.lower(), y.lower())) + keys.sort(cmp=lambda x, y: cmp(x.lower(), y.lower())) pairs = [] for key in keys: self.hmac.update(key) @@ -112,30 +119,34 @@ class Signer(object): return base64.b64encode(self.hmac.digest()) def _calc_signature_2(self, params, verb, server_string, path): + """Generate AWS signature version 2 string.""" logging.debug('using _calc_signature_2') string_to_sign = '%s\n%s\n%s\n' % (verb, server_string, path) if self.hmac_256: - hmac = self.hmac_256 + current_hmac = self.hmac_256 params['SignatureMethod'] = 'HmacSHA256' else: - hmac = self.hmac + current_hmac = self.hmac params['SignatureMethod'] = 'HmacSHA1' keys = params.keys() keys.sort() pairs = [] for key in keys: val = self._get_utf8_value(params[key]) - pairs.append(urllib.quote(key, safe='') + '=' + urllib.quote(val, safe='-_~')) + val = urllib.quote(val, safe='-_~') + pairs.append(urllib.quote(key, safe='') + '=' + val) qs = '&'.join(pairs) - logging.debug('query string: %s' % qs) + logging.debug('query string: %s', qs) string_to_sign += qs - logging.debug('string_to_sign: %s' % string_to_sign) - hmac.update(string_to_sign) - 
b64 = base64.b64encode(hmac.digest()) - logging.debug('len(b64)=%d' % len(b64)) - logging.debug('base64 encoded digest: %s' % b64) + logging.debug('string_to_sign: %s', string_to_sign) + current_hmac.update(string_to_sign) + b64 = base64.b64encode(current_hmac.digest()) + logging.debug('len(b64)=%d', len(b64)) + logging.debug('base64 encoded digest: %s', b64) return b64 if __name__ == '__main__': - print Signer('foo').generate({"SignatureMethod": 'HmacSHA256', 'SignatureVersion': '2'}, "get", "server", "/foo") + print Signer('foo').generate({'SignatureMethod': 'HmacSHA256', + 'SignatureVersion': '2'}, + 'get', 'server', '/foo') -- cgit From f996ec188776ffcae62bcafc1925653a1602880f Mon Sep 17 00:00:00 2001 From: andy Date: Thu, 19 Aug 2010 11:12:44 +0200 Subject: this file isn't being used --- nova/auth.py | 741 ----------------------------------------------------------- 1 file changed, 741 deletions(-) delete mode 100644 nova/auth.py diff --git a/nova/auth.py b/nova/auth.py deleted file mode 100644 index 199a887e1..000000000 --- a/nova/auth.py +++ /dev/null @@ -1,741 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Nova authentication management -""" - -import logging -import os -import shutil -import string -import tempfile -import uuid -import zipfile - -from nova import crypto -from nova import exception -from nova import flags -from nova import utils -from nova.auth import signer -from nova.network import vpn -from nova.models import User - -#unused imports -#from nova import datastore -#from nova.auth import ldapdriver # for flags -#from nova import objectstore # for flags - -FLAGS = flags.FLAGS - -# NOTE(vish): a user with one of these roles will be a superuser and -# have access to all api commands -flags.DEFINE_list('superuser_roles', ['cloudadmin'], - 'Roles that ignore rbac checking completely') - -# NOTE(vish): a user with one of these roles will have it for every -# project, even if he or she is not a member of the project -flags.DEFINE_list('global_roles', ['cloudadmin', 'itsec'], - 'Roles that apply to all projects') - - -flags.DEFINE_string('credentials_template', - utils.abspath('auth/novarc.template'), - 'Template for creating users rc file') -flags.DEFINE_string('vpn_client_template', - utils.abspath('cloudpipe/client.ovpn.template'), - 'Template for creating users vpn file') -flags.DEFINE_string('credential_vpn_file', 'nova-vpn.conf', - 'Filename of certificate in credentials zip') -flags.DEFINE_string('credential_key_file', 'pk.pem', - 'Filename of private key in credentials zip') -flags.DEFINE_string('credential_cert_file', 'cert.pem', - 'Filename of certificate in credentials zip') -flags.DEFINE_string('credential_rc_file', 'novarc', - 'Filename of rc in credentials zip') - -flags.DEFINE_string('credential_cert_subject', - '/C=US/ST=California/L=MountainView/O=AnsoLabs/' - 'OU=NovaDev/CN=%s-%s', - 'Subject for certificate for users') - -flags.DEFINE_string('auth_driver', 'nova.auth.ldapdriver.FakeLdapDriver', - 'Driver that auth manager uses') - -class AuthBase(object): - """Base class for objects relating to auth - - Objects derived from this 
class should be stupid data objects with - an id member. They may optionally contain methods that delegate to - AuthManager, but should not implement logic themselves. - """ - @classmethod - def safe_id(cls, obj): - """Safe get object id - - This method will return the id of the object if the object - is of this class, otherwise it will return the original object. - This allows methods to accept objects or ids as paramaters. - - """ - if isinstance(obj, cls): - return obj.id - else: - return obj - - -# anthony - the User class has moved to nova.models -#class User(AuthBase): -# """Object representing a user""" -# def __init__(self, id, name, access, secret, admin): -# AuthBase.__init__(self) -# self.id = id -# self.name = name -# self.access = access -# self.secret = secret -# self.admin = admin -# -# def is_superuser(self): -# return AuthManager().is_superuser(self) -# -# def is_admin(self): -# return AuthManager().is_admin(self) -# -# def has_role(self, role): -# return AuthManager().has_role(self, role) -# -# def add_role(self, role): -# return AuthManager().add_role(self, role) -# -# def remove_role(self, role): -# return AuthManager().remove_role(self, role) -# -# def is_project_member(self, project): -# return AuthManager().is_project_member(self, project) -# -# def is_project_manager(self, project): -# return AuthManager().is_project_manager(self, project) -# -# def generate_key_pair(self, name): -# return AuthManager().generate_key_pair(self.id, name) -# -# def create_key_pair(self, name, public_key, fingerprint): -# return AuthManager().create_key_pair(self.id, -# name, -# public_key, -# fingerprint) -# -# def get_key_pair(self, name): -# return AuthManager().get_key_pair(self.id, name) -# -# def delete_key_pair(self, name): -# return AuthManager().delete_key_pair(self.id, name) -# -# def get_key_pairs(self): -# return AuthManager().get_key_pairs(self.id) -# -# def __repr__(self): -# return "User('%s', '%s', '%s', '%s', %s)" % (self.id, -# self.name, -# 
self.access, -# self.secret, -# self.admin) - - -class KeyPair(AuthBase): - """Represents an ssh key returned from the datastore - - Even though this object is named KeyPair, only the public key and - fingerprint is stored. The user's private key is not saved. - """ - def __init__(self, id, name, owner_id, public_key, fingerprint): - AuthBase.__init__(self) - self.id = id - self.name = name - self.owner_id = owner_id - self.public_key = public_key - self.fingerprint = fingerprint - - def __repr__(self): - return "KeyPair('%s', '%s', '%s', '%s', '%s')" % (self.id, - self.name, - self.owner_id, - self.public_key, - self.fingerprint) - - -class Project(AuthBase): - """Represents a Project returned from the datastore""" - def __init__(self, id, name, project_manager_id, description, member_ids): - AuthBase.__init__(self) - self.id = id - self.name = name - self.project_manager_id = project_manager_id - self.description = description - self.member_ids = member_ids - - @property - def project_manager(self): - return AuthManager().get_user(self.project_manager_id) - - @property - def vpn_ip(self): - ip, port = AuthManager().get_project_vpn_data(self) - return ip - - @property - def vpn_port(self): - ip, port = AuthManager().get_project_vpn_data(self) - return port - - def has_manager(self, user): - return AuthManager().is_project_manager(user, self) - - def has_member(self, user): - return AuthManager().is_project_member(user, self) - - def add_role(self, user, role): - return AuthManager().add_role(user, role, self) - - def remove_role(self, user, role): - return AuthManager().remove_role(user, role, self) - - def has_role(self, user, role): - return AuthManager().has_role(user, role, self) - - def get_credentials(self, user): - return AuthManager().get_credentials(user, self) - - def __repr__(self): - return "Project('%s', '%s', '%s', '%s', %s)" % (self.id, - self.name, - self.project_manager_id, - self.description, - self.member_ids) - - - -class AuthManager(object): - 
"""Manager Singleton for dealing with Users, Projects, and Keypairs - - Methods accept objects or ids. - - AuthManager uses a driver object to make requests to the data backend. - See ldapdriver for reference. - - AuthManager also manages associated data related to Auth objects that - need to be more accessible, such as vpn ips and ports. - """ - _instance = None - def __new__(cls, *args, **kwargs): - """Returns the AuthManager singleton""" - if not cls._instance: - cls._instance = super(AuthManager, cls).__new__(cls) - return cls._instance - - def __init__(self, driver=None, *args, **kwargs): - """Inits the driver from parameter or flag - - __init__ is run every time AuthManager() is called, so we only - reset the driver if it is not set or a new driver is specified. - """ - if driver or not getattr(self, 'driver', None): - self.driver = utils.import_class(driver or FLAGS.auth_driver) - - def authenticate(self, access, signature, params, verb='GET', - server_string='127.0.0.1:8773', path='/', - check_type='ec2', headers=None): - """Authenticates AWS request using access key and signature - - If the project is not specified, attempts to authenticate to - a project with the same name as the user. This way, older tools - that have no project knowledge will still work. - - @type access: str - @param access: Access key for user in the form "access:project". - - @type signature: str - @param signature: Signature of the request. - - @type params: list of str - @param params: Web paramaters used for the signature. - - @type verb: str - @param verb: Web request verb ('GET' or 'POST'). - - @type server_string: str - @param server_string: Web request server string. - - @type path: str - @param path: Web request path. - - @type check_type: str - @param check_type: Type of signature to check. 'ec2' for EC2, 's3' for - S3. Any other value will cause signature not to be - checked. 
- - @type headers: list - @param headers: HTTP headers passed with the request (only needed for - s3 signature checks) - - @rtype: tuple (User, Project) - @return: User and project that the request represents. - """ - # TODO(vish): check for valid timestamp - (access_key, sep, project_id) = access.partition(':') - - logging.info('Looking up user: %r', access_key) - user = self.get_user_from_access_key(access_key) - logging.info('user: %r', user) - if user == None: - raise exception.NotFound('No user found for access key %s' % - access_key) - - # NOTE(vish): if we stop using project name as id we need better - # logic to find a default project for user - if project_id is '': - project_id = user.name - - project = self.get_project(project_id) - if project == None: - raise exception.NotFound('No project called %s could be found' % - project_id) - if not self.is_admin(user) and not self.is_project_member(user, - project): - raise exception.NotFound('User %s is not a member of project %s' % - (user.id, project.id)) - if check_type == 's3': - expected_signature = signer.Signer(user.secret.encode()).s3_authorization(headers, verb, path) - logging.debug('user.secret: %s', user.secret) - logging.debug('expected_signature: %s', expected_signature) - logging.debug('signature: %s', signature) - if signature != expected_signature: - raise exception.NotAuthorized('Signature does not match') - elif check_type == 'ec2': - # NOTE(vish): hmac can't handle unicode, so encode ensures that - # secret isn't unicode - expected_signature = signer.Signer(user.secret.encode()).generate( - params, verb, server_string, path) - logging.debug('user.secret: %s', user.secret) - logging.debug('expected_signature: %s', expected_signature) - logging.debug('signature: %s', signature) - if signature != expected_signature: - raise exception.NotAuthorized('Signature does not match') - return (user, project) - - def get_access_key(self, user, project): - """Get an access key that includes user and 
project""" - if not isinstance(user, User): - user = self.get_user(user) - return "%s:%s" % (user.access, Project.safe_id(project)) - - def is_superuser(self, user): - """Checks for superuser status, allowing user to bypass rbac - - @type user: User or uid - @param user: User to check. - - @rtype: bool - @return: True for superuser. - """ - if not isinstance(user, User): - user = self.get_user(user) - # NOTE(vish): admin flag on user represents superuser - if user.admin: - return True - for role in FLAGS.superuser_roles: - if self.has_role(user, role): - return True - - def is_admin(self, user): - """Checks for admin status, allowing user to access all projects - - @type user: User or uid - @param user: User to check. - - @rtype: bool - @return: True for admin. - """ - if not isinstance(user, User): - user = self.get_user(user) - if self.is_superuser(user): - return True - for role in FLAGS.global_roles: - if self.has_role(user, role): - return True - - def has_role(self, user, role, project=None): - """Checks existence of role for user - - If project is not specified, checks for a global role. If project - is specified, checks for the union of the global role and the - project role. - - Role 'projectmanager' only works for projects and simply checks to - see if the user is the project_manager of the specified project. It - is the same as calling is_project_manager(user, project). - - @type user: User or uid - @param user: User to check. - - @type role: str - @param role: Role to check. - - @type project: Project or project_id - @param project: Project in which to look for local role. - - @rtype: bool - @return: True if the user has the role. 
- """ - with self.driver() as drv: - if role == 'projectmanager': - if not project: - raise exception.Error("Must specify project") - return self.is_project_manager(user, project) - - global_role = drv.has_role(User.safe_id(user), - role, - None) - if not global_role: - return global_role - - if not project or role in FLAGS.global_roles: - return global_role - - return drv.has_role(User.safe_id(user), - role, - Project.safe_id(project)) - - def add_role(self, user, role, project=None): - """Adds role for user - - If project is not specified, adds a global role. If project - is specified, adds a local role. - - The 'projectmanager' role is special and can't be added or removed. - - @type user: User or uid - @param user: User to which to add role. - - @type role: str - @param role: Role to add. - - @type project: Project or project_id - @param project: Project in which to add local role. - """ - with self.driver() as drv: - drv.add_role(User.safe_id(user), role, Project.safe_id(project)) - - def remove_role(self, user, role, project=None): - """Removes role for user - - If project is not specified, removes a global role. If project - is specified, removes a local role. - - The 'projectmanager' role is special and can't be added or removed. - - @type user: User or uid - @param user: User from which to remove role. - - @type role: str - @param role: Role to remove. - - @type project: Project or project_id - @param project: Project in which to remove local role. 
- """ - with self.driver() as drv: - drv.remove_role(User.safe_id(user), role, Project.safe_id(project)) - - def get_project(self, pid): - """Get project object by id""" - with self.driver() as drv: - project_dict = drv.get_project(pid) - if project_dict: - return Project(**project_dict) - - def get_projects(self, user=None): - """Retrieves list of projects, optionally filtered by user""" - with self.driver() as drv: - project_list = drv.get_projects(User.safe_id(user)) - if not project_list: - return [] - return [Project(**project_dict) for project_dict in project_list] - - def create_project(self, name, manager_user, - description=None, member_users=None): - """Create a project - - @type name: str - @param name: Name of the project to create. The name will also be - used as the project id. - - @type manager_user: User or uid - @param manager_user: This user will be the project manager. - - @type description: str - @param project: Description of the project. If no description is - specified, the name of the project will be used. - - @type member_users: list of User or uid - @param: Initial project members. The project manager will always be - added as a member, even if he isn't specified in this list. - - @rtype: Project - @return: The new project. 
- """ - if member_users: - member_users = [User.safe_id(u) for u in member_users] - with self.driver() as drv: - project_dict = drv.create_project(name, - User.safe_id(manager_user), - description, - member_users) - if project_dict: - return Project(**project_dict) - - def add_to_project(self, user, project): - """Add user to project""" - with self.driver() as drv: - return drv.add_to_project(User.safe_id(user), - Project.safe_id(project)) - - def is_project_manager(self, user, project): - """Checks if user is project manager""" - if not isinstance(project, Project): - project = self.get_project(project) - return User.safe_id(user) == project.project_manager_id - - def is_project_member(self, user, project): - """Checks to see if user is a member of project""" - if not isinstance(project, Project): - project = self.get_project(project) - return User.safe_id(user) in project.member_ids - - def remove_from_project(self, user, project): - """Removes a user from a project""" - with self.driver() as drv: - return drv.remove_from_project(User.safe_id(user), - Project.safe_id(project)) - - def get_project_vpn_data(self, project): - """Gets vpn ip and port for project - - @type project: Project or project_id - @param project: Project from which to get associated vpn data - - @rvalue: tuple of (str, str) - @return: A tuple containing (ip, port) or None, None if vpn has - not been allocated for user. 
- """ - network_data = vpn.NetworkData.lookup(Project.safe_id(project)) - if not network_data: - raise exception.NotFound('project network data has not been set') - return (network_data.ip, network_data.port) - - def delete_project(self, project): - """Deletes a project""" - with self.driver() as drv: - return drv.delete_project(Project.safe_id(project)) - - def get_user(self, uid): - """Retrieves a user by id""" - with self.driver() as drv: - user_dict = drv.get_user(uid) - if user_dict: - return User(**user_dict) - - def get_user_from_access_key(self, access_key): - """Retrieves a user by access key""" - with self.driver() as drv: - user_dict = drv.get_user_from_access_key(access_key) - if user_dict: - return User(**user_dict) - - def get_users(self): - """Retrieves a list of all users""" - with self.driver() as drv: - user_list = drv.get_users() - if not user_list: - return [] - return [User(**user_dict) for user_dict in user_list] - - def create_user(self, name, access=None, secret=None, admin=False): - """Creates a user - - @type name: str - @param name: Name of the user to create. - - @type access: str - @param access: Access Key (defaults to a random uuid) - - @type secret: str - @param secret: Secret Key (defaults to a random uuid) - - @type admin: bool - @param admin: Whether to set the admin flag. The admin flag gives - superuser status regardless of roles specifed for the user. - - @type create_project: bool - @param: Whether to create a project for the user with the same name. - - @rtype: User - @return: The new user. 
- """ - if access == None: access = str(uuid.uuid4()) - if secret == None: secret = str(uuid.uuid4()) - with self.driver() as drv: - user_dict = drv.create_user(name, access, secret, admin) - if user_dict: - return User(**user_dict) - - def delete_user(self, user): - """Deletes a user""" - with self.driver() as drv: - drv.delete_user(User.safe_id(user)) - - def generate_key_pair(self, user, key_name): - """Generates a key pair for a user - - Generates a public and private key, stores the public key using the - key_name, and returns the private key and fingerprint. - - @type user: User or uid - @param user: User for which to create key pair. - - @type key_name: str - @param key_name: Name to use for the generated KeyPair. - - @rtype: tuple (private_key, fingerprint) - @return: A tuple containing the private_key and fingerprint. - """ - # NOTE(vish): generating key pair is slow so check for legal - # creation before creating keypair - uid = User.safe_id(user) - with self.driver() as drv: - if not drv.get_user(uid): - raise exception.NotFound("User %s doesn't exist" % user) - if drv.get_key_pair(uid, key_name): - raise exception.Duplicate("The keypair %s already exists" - % key_name) - private_key, public_key, fingerprint = crypto.generate_key_pair() - self.create_key_pair(uid, key_name, public_key, fingerprint) - return private_key, fingerprint - - def create_key_pair(self, user, key_name, public_key, fingerprint): - """Creates a key pair for user""" - with self.driver() as drv: - kp_dict = drv.create_key_pair(User.safe_id(user), - key_name, - public_key, - fingerprint) - if kp_dict: - return KeyPair(**kp_dict) - - def get_key_pair(self, user, key_name): - """Retrieves a key pair for user""" - with self.driver() as drv: - kp_dict = drv.get_key_pair(User.safe_id(user), key_name) - if kp_dict: - return KeyPair(**kp_dict) - - def get_key_pairs(self, user): - """Retrieves all key pairs for user""" - with self.driver() as drv: - kp_list = 
drv.get_key_pairs(User.safe_id(user)) - if not kp_list: - return [] - return [KeyPair(**kp_dict) for kp_dict in kp_list] - - def delete_key_pair(self, user, key_name): - """Deletes a key pair for user""" - with self.driver() as drv: - drv.delete_key_pair(User.safe_id(user), key_name) - - def get_credentials(self, user, project=None): - """Get credential zip for user in project""" - if not isinstance(user, User): - user = self.get_user(user) - if project is None: - project = user.id - pid = Project.safe_id(project) - rc = self.__generate_rc(user.access, user.secret, pid) - private_key, signed_cert = self._generate_x509_cert(user.id, pid) - - tmpdir = tempfile.mkdtemp() - zf = os.path.join(tmpdir, "temp.zip") - zippy = zipfile.ZipFile(zf, 'w') - zippy.writestr(FLAGS.credential_rc_file, rc) - zippy.writestr(FLAGS.credential_key_file, private_key) - zippy.writestr(FLAGS.credential_cert_file, signed_cert) - - network_data = vpn.NetworkData.lookup(pid) - if network_data: - configfile = open(FLAGS.vpn_client_template,"r") - s = string.Template(configfile.read()) - configfile.close() - config = s.substitute(keyfile=FLAGS.credential_key_file, - certfile=FLAGS.credential_cert_file, - ip=network_data.ip, - port=network_data.port) - zippy.writestr(FLAGS.credential_vpn_file, config) - else: - logging.warn("No vpn data for project %s" % - pid) - - zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(user.id)) - zippy.close() - with open(zf, 'rb') as f: - buffer = f.read() - - shutil.rmtree(tmpdir) - return buffer - - def get_environment_rc(self, user, project=None): - """Get credential zip for user in project""" - if not isinstance(user, User): - user = self.get_user(user) - if project is None: - project = user.id - pid = Project.safe_id(project) - return self.__generate_rc(user.access, user.secret, pid) - - def __generate_rc(self, access, secret, pid): - """Generate rc file for user""" - rc = open(FLAGS.credentials_template).read() - rc = rc % { 'access': access, - 'project': pid, - 
'secret': secret, - 'ec2': FLAGS.ec2_url, - 's3': 'http://%s:%s' % (FLAGS.s3_host, FLAGS.s3_port), - 'nova': FLAGS.ca_file, - 'cert': FLAGS.credential_cert_file, - 'key': FLAGS.credential_key_file, - } - return rc - - def _generate_x509_cert(self, uid, pid): - """Generate x509 cert for user""" - (private_key, csr) = crypto.generate_x509_cert( - self.__cert_subject(uid)) - # TODO(joshua): This should be async call back to the cloud controller - signed_cert = crypto.sign_csr(csr, pid) - return (private_key, signed_cert) - - def __cert_subject(self, uid): - """Helper to generate cert subject""" - return FLAGS.credential_cert_subject % (uid, utils.isotime()) -- cgit From a92465922fb74ca2c9b392e1c1b7ed5b5e306a76 Mon Sep 17 00:00:00 2001 From: andy Date: Thu, 19 Aug 2010 12:28:45 +0200 Subject: Data abstraction for compute service --- nova/compute/service.py | 144 +++++++++++++++++++++-------------------- nova/db/__init__.py | 3 + nova/db/api.py | 53 +++++++++++++++ nova/db/sqlalchemy/__init__.py | 0 nova/db/sqlalchemy/api.py | 43 ++++++++++++ nova/models.py | 6 ++ nova/utils.py | 33 ++++++++++ 7 files changed, 211 insertions(+), 71 deletions(-) create mode 100644 nova/db/__init__.py create mode 100644 nova/db/api.py create mode 100644 nova/db/sqlalchemy/__init__.py create mode 100644 nova/db/sqlalchemy/api.py diff --git a/nova/compute/service.py b/nova/compute/service.py index 3909c8245..7a2cb277d 100644 --- a/nova/compute/service.py +++ b/nova/compute/service.py @@ -30,6 +30,7 @@ import os from twisted.internet import defer +from nova import db from nova import exception from nova import flags from nova import process @@ -44,7 +45,7 @@ from nova.volume import service as volume_service FLAGS = flags.FLAGS flags.DEFINE_string('instances_path', utils.abspath('../instances'), - 'where instances are stored on disk') + 'where instances are stored on disk') class ComputeService(service.Service): @@ -52,109 +53,107 @@ class ComputeService(service.Service): Manages the running 
instances. """ def __init__(self): - """ load configuration options for this node and connect to the hypervisor""" + """Load configuration options and connect to the hypervisor.""" super(ComputeService, self).__init__() self._instances = {} self._conn = virt_connection.get_connection() - # TODO(joshua): This needs to ensure system state, specifically: modprobe aoe + # TODO(joshua): This needs to ensure system state, specifically + # modprobe aoe def noop(self): - """ simple test of an AMQP message call """ + """Simple test of an AMQP message call.""" return defer.succeed('PONG') - def update_state(self, instance_id): - inst = models.Instance.find(instance_id) + def update_state(self, instance_id, context): # FIXME(ja): include other fields from state? - inst.state = self._conn.get_info(inst.name)['state'] - inst.save() - - @exception.wrap_exception - def adopt_instances(self): - """ if there are instances already running, adopt them """ - return defer.succeed(0) - instance_names = self._conn.list_instances() - for name in instance_names: - try: - new_inst = Instance.fromName(self._conn, name) - new_inst.update_state() - except: - pass - return defer.succeed(len(self._instances)) + instance_ref = db.instance_get(context, instance_id) + state = self._conn.get_info(instance_ref.name)['state'] + db.instance_state(context, instance_id, state) @defer.inlineCallbacks @exception.wrap_exception - def run_instance(self, instance_id, **_kwargs): - """ launch a new instance with specified options """ - inst = models.Instance.find(instance_id) - if inst.name in self._conn.list_instances(): + def run_instance(self, instance_id, context=None, **_kwargs): + """Launch a new instance with specified options.""" + instance_ref = db.instance_get(context, instance_id) + if instance_ref['name'] in self._conn.list_instances(): raise exception.Error("Instance has already been created") logging.debug("Starting instance %s..." 
% (instance_id)) - inst = models.Instance.find(instance_id) + # NOTE(vish): passing network type allows us to express the # network without making a call to network to find # out which type of network to setup - network_service.setup_compute_network(inst.project_id) - inst.node_name = FLAGS.node_name - inst.save() + network_service.setup_compute_network(instance_ref['project_id']) + db.instance_update(context, instance_id, {'node_name': FLAGS.node_name}) # TODO(vish) check to make sure the availability zone matches - inst.set_state(power_state.NOSTATE, 'spawning') + db.instance_state(context, instance_id, power_state.NOSTATE, 'spawning') try: - yield self._conn.spawn(inst) + yield self._conn.spawn(instance_ref) except: - logging.exception("Failed to spawn instance %s" % inst.name) - inst.set_state(power_state.SHUTDOWN) + logging.exception("Failed to spawn instance %s" % + instance_ref['name']) + db.instance_state(context, instance_id, power_state.SHUTDOWN) - self.update_state(instance_id) + self.update_state(instance_id, context) @defer.inlineCallbacks @exception.wrap_exception - def terminate_instance(self, instance_id): - """ terminate an instance on this machine """ + def terminate_instance(self, instance_id, context=None): + """Terminate an instance on this machine.""" logging.debug("Got told to terminate instance %s" % instance_id) - inst = models.Instance.find(instance_id) + instance_ref = db.instance_get(context, instance_id) - if inst.state == power_state.SHUTOFF: - # self.datamodel.destroy() FIXME: RE-ADD ????? + if instance_ref['state'] == power_state.SHUTOFF: + # self.datamodel.destroy() FIXME: RE-ADD? 
raise exception.Error('trying to destroy already destroyed' ' instance: %s' % instance_id) - inst.set_state(power_state.NOSTATE, 'shutting_down') - yield self._conn.destroy(inst) + db.instance_state( + context, instance_id, power_state.NOSTATE, 'shutting_down') + yield self._conn.destroy(instance_ref) + # FIXME(ja): should we keep it in a terminated state for a bit? - inst.delete() + db.instance_destroy(context, instance_id) @defer.inlineCallbacks @exception.wrap_exception - def reboot_instance(self, instance_id): - """ reboot an instance on this server - KVM doesn't support reboot, so we terminate and restart """ - self.update_state(instance_id) - instance = models.Instance.find(instance_id) + def reboot_instance(self, instance_id, context=None): + """Reboot an instance on this server. + + KVM doesn't support reboot, so we terminate and restart. + + """ + self.update_state(instance_id, context) + instance_ref = db.instance_get(context, instance_id) # FIXME(ja): this is only checking the model state - not state on disk? 
- if instance.state != power_state.RUNNING: + if instance_ref['state'] != power_state.RUNNING: raise exception.Error( 'trying to reboot a non-running' - 'instance: %s (state: %s excepted: %s)' % (instance.name, instance.state, power_state.RUNNING)) + 'instance: %s (state: %s excepted: %s)' % + (instance_ref['name'], + instance_ref['state'], + power_state.RUNNING)) - logging.debug('rebooting instance %s' % instance.name) - instance.set_state(power_state.NOSTATE, 'rebooting') - yield self._conn.reboot(instance) - self.update_state(instance_id) + logging.debug('rebooting instance %s' % instance_ref['name']) + db.instance_state( + context, instance_id, power_state.NOSTATE, 'rebooting') + yield self._conn.reboot(instance_ref) + self.update_state(instance_id, context) @exception.wrap_exception - def get_console_output(self, instance_id): - """ send the console output for an instance """ + def get_console_output(self, instance_id, context=None): + """Send the console output for an instance.""" # FIXME: Abstract this for Xen logging.debug("Getting console output for %s" % (instance_id)) - inst = models.Instance.find(instance_id) + instance_ref = db.instance_get(context, instance_id) if FLAGS.connection_type == 'libvirt': - fname = os.path.abspath( - os.path.join(FLAGS.instances_path, inst.name, 'console.log')) + fname = os.path.abspath(os.path.join(FLAGS.instances_path, + instance_ref['name'], + 'console.log')) with open(fname, 'r') as f: output = f.read() else: @@ -169,32 +168,35 @@ class ComputeService(service.Service): @defer.inlineCallbacks @exception.wrap_exception - def attach_volume(self, instance_id = None, - volume_id = None, mountpoint = None): - volume = volume_service.get_volume(volume_id) + def attach_volume(self, instance_id=None, volume_id=None, mountpoint=None, + context=None): + """Attach a volume to an instance.""" + # TODO(termie): check that instance_id exists + volume_ref = volume_get(context, volume_id) yield self._init_aoe() yield 
process.simple_execute( "sudo virsh attach-disk %s /dev/etherd/%s %s" % (instance_id, volume['aoe_device'], mountpoint.rpartition('/dev/')[2])) - volume.finish_attach() + volume_attached(context, volume_id) defer.returnValue(True) - @defer.inlineCallbacks - def _init_aoe(self): - yield process.simple_execute("sudo aoe-discover") - yield process.simple_execute("sudo aoe-stat") - @defer.inlineCallbacks @exception.wrap_exception - def detach_volume(self, instance_id, volume_id): - """ detach a volume from an instance """ + def detach_volume(self, instance_id, volume_id, context=None): + """Detach a volume from an instance.""" # despite the documentation, virsh detach-disk just wants the device # name without the leading /dev/ - volume = volume_service.get_volume(volume_id) + # TODO(termie): check that instance_id exists + volume_ref = volume_get(context, volume_id) target = volume['mountpoint'].rpartition('/dev/')[2] yield process.simple_execute( "sudo virsh detach-disk %s %s " % (instance_id, target)) - volume.finish_detach() + volume_detached(context, volume_id) defer.returnValue(True) + + @defer.inlineCallbacks + def _init_aoe(self): + yield process.simple_execute("sudo aoe-discover") + yield process.simple_execute("sudo aoe-stat") diff --git a/nova/db/__init__.py b/nova/db/__init__.py new file mode 100644 index 000000000..2d893cb36 --- /dev/null +++ b/nova/db/__init__.py @@ -0,0 +1,3 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +from nova.db.api import * diff --git a/nova/db/api.py b/nova/db/api.py new file mode 100644 index 000000000..c1b2dee0d --- /dev/null +++ b/nova/db/api.py @@ -0,0 +1,53 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +from nova import flags +from nova import utils + + +FLAGS = flags.FLAGS +flags.DEFINE_string('db_backend', 'sqlalchemy', + 'The backend to use for db') + + +_impl = utils.LazyPluggable(FLAGS['db_backend'], + sqlalchemy='nova.db.sqlalchemy.api') + + +def instance_destroy(context, instance_id): + """Destroy the instance 
or raise if it does not exist.""" + return _impl.instance_destroy(context, instance_id) + + +def instance_get(context, instance_id): + """Get an instance or raise if it does not exist.""" + return _impl.instance_get(context, instance_id) + + +def instance_state(context, instance_id, state, description=None): + """Set the state of an instance.""" + return _impl.instance_state(context, instance_id, state, description) + + +def instance_update(context, instance_id, new_values): + """Set the given properties on an instance and update it. + + Raises if instance does not exist. + + """ + return _impl.instance_update(context, instance_id, new_values) + + +def volume_get(context, volume_id): + """Get a volume or raise if it does not exist.""" + return _impl.volume_get(context, volume_id) + + +def volume_attached(context, volume_id): + """Ensure that a volume is set as attached.""" + return _impl.volume_attached(context, volume_id) + + +def volume_detached(context, volume_id): + """Ensure that a volume is set as detached.""" + return _impl.volume_detached(context, volume_id) + diff --git a/nova/db/sqlalchemy/__init__.py b/nova/db/sqlalchemy/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py new file mode 100644 index 000000000..6d9f5fe5f --- /dev/null +++ b/nova/db/sqlalchemy/api.py @@ -0,0 +1,43 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +from nova import models + + +def instance_destroy(context, instance_id): + instance_ref = instance_get(context, instance_id) + instance_ref.delete() + + +def instance_get(context, instance_id): + return models.Instance.find(instance_id) + + +def instance_state(context, instance_id, state, description=None): + instance_ref = instance_get(context, instance_id) + instance_ref.set_state(state, description) + + +def instance_update(context, instance_id, properties): + instance_ref = instance_get(context, instance_id) + for k, v in properties.iteritems(): + 
instance_ref[k] = v + instance_ref.save() + + +def volume_get(context, volume_id): + return models.Volume.find(volume_id) + + +def volume_attached(context, volume_id): + volume_ref = volume_get(context, volume_id) + volume_ref['attach_status'] = 'attached' + volume_ref.save() + + +def volume_detached(context, volume_id): + volume_ref = volume_get(context, volume_id) + volume_ref['instance_id'] = None + volume_ref['mountpoint'] = None + volume_ref['status'] = 'available' + volume_ref['attach_status'] = 'detached' + volume_ref.save() diff --git a/nova/models.py b/nova/models.py index d0b66d9b7..ea529713c 100644 --- a/nova/models.py +++ b/nova/models.py @@ -100,6 +100,12 @@ class NovaBase(object): session = NovaBase.get_session() session.refresh(self) + def __setitem__(self, key, value): + setattr(self, key, value) + + def __getitem__(self, key): + return getattr(self, key) + class Image(Base, NovaBase): __tablename__ = 'images' diff --git a/nova/utils.py b/nova/utils.py index e826f9b71..9e12a5301 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -142,3 +142,36 @@ def isotime(at=None): def parse_isotime(timestr): return datetime.datetime.strptime(timestr, TIME_FORMAT) + + + +class LazyPluggable(object): + """A pluggable backend loaded lazily based on some value.""" + + def __init__(self, pivot, **backends): + self.__backends = backends + self.__pivot = pivot + self.__backend = None + + def __get_backend(self): + if not self.__backend: + backend_name = self.__pivot.value + if backend_name not in self.__backends: + raise exception.Error('Invalid backend: %s' % backend_name) + + backend = self.__backends[backend_name] + if type(backend) == type(tuple()): + name = backend[0] + fromlist = backend[1] + else: + name = backend + fromlist = backend + + self.__backend = __import__(name, None, None, fromlist) + logging.error('backend %s', self.__backend) + return self.__backend + + def __getattr__(self, key): + backend = self.__get_backend() + return getattr(backend, key) + -- 
cgit From 567aa0ac862f0cb18786f20d949ab75bd800c3c7 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Thu, 19 Aug 2010 15:05:13 +0100 Subject: Remove whitespace to match style guide. --- nova/virt/xenapi.py | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index aed4c4fb5..f0bbbbe1f 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -149,7 +149,6 @@ class XenAPIConnection(object): yield self._call_xenapi('VM.start', vm_ref, False, False) logging.info('Spawning VM %s created %s.', instance.name, vm_ref) - @defer.inlineCallbacks def _create_vm(self, instance, kernel, ramdisk): """Create a VM record. Returns a Deferred that gives the new @@ -191,7 +190,6 @@ class XenAPIConnection(object): logging.debug('Created VM %s as %s.', instance.name, vm_ref) defer.returnValue(vm_ref) - @defer.inlineCallbacks def _create_vbd(self, vm_ref, vdi_ref, userdevice, bootable): """Create a VBD record. Returns a Deferred that gives the new @@ -216,7 +214,6 @@ class XenAPIConnection(object): vdi_ref) defer.returnValue(vbd_ref) - @defer.inlineCallbacks def _create_vif(self, vm_ref, network_ref, mac_address): """Create a VIF record. 
Returns a Deferred that gives the new @@ -238,7 +235,6 @@ class XenAPIConnection(object): vm_ref, network_ref) defer.returnValue(vif_ref) - @defer.inlineCallbacks def _find_network_with_bridge(self, bridge): expr = 'field "bridge" = "%s"' % bridge @@ -251,7 +247,6 @@ class XenAPIConnection(object): else: raise Exception('Found no network for bridge %s' % bridge) - @defer.inlineCallbacks def _fetch_image(self, image, user, project, use_sr): """use_sr: True to put the image as a VDI in an SR, False to place @@ -273,7 +268,6 @@ class XenAPIConnection(object): uuid = yield self._wait_for_task(task) defer.returnValue(uuid) - @defer.inlineCallbacks def reboot(self, instance): vm = yield self._lookup(instance.name) @@ -282,7 +276,6 @@ class XenAPIConnection(object): task = yield self._call_xenapi('Async.VM.clean_reboot', vm) yield self._wait_for_task(task) - @defer.inlineCallbacks def destroy(self, instance): vm = yield self._lookup(instance.name) @@ -291,7 +284,6 @@ class XenAPIConnection(object): task = yield self._call_xenapi('Async.VM.destroy', vm) yield self._wait_for_task(task) - def get_info(self, instance_id): vm = self._lookup_blocking(instance_id) if vm is None: @@ -303,12 +295,10 @@ class XenAPIConnection(object): 'num_cpu': rec['VCPUs_max'], 'cpu_time': 0} - @deferredToThread def _lookup(self, i): return self._lookup_blocking(i) - def _lookup_blocking(self, i): vms = self._conn.xenapi.VM.get_by_name_label(i) n = len(vms) @@ -319,7 +309,6 @@ class XenAPIConnection(object): else: return vms[0] - def _wait_for_task(self, task): """Return a Deferred that will give the result of the given task. 
The task is polled until it completes.""" @@ -327,7 +316,6 @@ class XenAPIConnection(object): reactor.callLater(0, self._poll_task, task, d) return d - @deferredToThread def _poll_task(self, task, deferred): """Poll the given XenAPI task, and fire the given Deferred if we @@ -352,7 +340,6 @@ class XenAPIConnection(object): logging.warn(exn) deferred.errback(exn) - @deferredToThread def _call_xenapi(self, method, *args): """Call the specified XenAPI method on a background thread. Returns @@ -362,7 +349,6 @@ class XenAPIConnection(object): f = f.__getattr__(m) return f(*args) - @deferredToThread def _async_call_plugin(self, plugin, fn, args): """Call Async.host.call_plugin on a background thread. Returns a @@ -371,7 +357,6 @@ class XenAPIConnection(object): self._conn.xenapi.Async.host.call_plugin, self._get_xenapi_host(), plugin, fn, args) - def _get_xenapi_host(self): return self._conn.xenapi.session.get_this_host(self._conn.handle) -- cgit From 4a23d5d9091823e9b4dc364383a14b566af80cd6 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Thu, 19 Aug 2010 15:12:46 +0100 Subject: Move deferredToThread into utils, as suggested by termie. 
--- nova/utils.py | 8 ++++++++ nova/virt/xenapi.py | 18 ++++++------------ 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/nova/utils.py b/nova/utils.py index e826f9b71..b0d07af79 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -29,6 +29,8 @@ import subprocess import socket import sys +from twisted.internet.threads import deferToThread + from nova import exception from nova import flags @@ -142,3 +144,9 @@ def isotime(at=None): def parse_isotime(timestr): return datetime.datetime.strptime(timestr, TIME_FORMAT) + + +def deferredToThread(f): + def g(*args, **kwargs): + return deferToThread(f, *args, **kwargs) + return g diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index f0bbbbe1f..b44ac383a 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -19,7 +19,7 @@ A connection to XenServer or Xen Cloud Platform. The concurrency model for this class is as follows: -All XenAPI calls are on a thread (using t.i.t.deferToThread, or the decorator +All XenAPI calls are on a thread (using t.i.t.deferToThread, via the decorator deferredToThread). They are remote calls, and so may hang for the usual reasons. They should not be allowed to block the reactor thread. 
@@ -41,10 +41,10 @@ import xmlrpclib from twisted.internet import defer from twisted.internet import reactor from twisted.internet import task -from twisted.internet.threads import deferToThread from nova import flags from nova import process +from nova import utils from nova.auth.manager import AuthManager from nova.compute import power_state from nova.virt import images @@ -97,12 +97,6 @@ def get_connection(_): return XenAPIConnection(url, username, password) -def deferredToThread(f): - def g(*args, **kwargs): - return deferToThread(f, *args, **kwargs) - return g - - class XenAPIConnection(object): def __init__(self, url, user, pw): self._conn = XenAPI.Session(url) @@ -295,7 +289,7 @@ class XenAPIConnection(object): 'num_cpu': rec['VCPUs_max'], 'cpu_time': 0} - @deferredToThread + @utils.deferredToThread def _lookup(self, i): return self._lookup_blocking(i) @@ -316,7 +310,7 @@ class XenAPIConnection(object): reactor.callLater(0, self._poll_task, task, d) return d - @deferredToThread + @utils.deferredToThread def _poll_task(self, task, deferred): """Poll the given XenAPI task, and fire the given Deferred if we get a result.""" @@ -340,7 +334,7 @@ class XenAPIConnection(object): logging.warn(exn) deferred.errback(exn) - @deferredToThread + @utils.deferredToThread def _call_xenapi(self, method, *args): """Call the specified XenAPI method on a background thread. Returns a Deferred for the result.""" @@ -349,7 +343,7 @@ class XenAPIConnection(object): f = f.__getattr__(m) return f(*args) - @deferredToThread + @utils.deferredToThread def _async_call_plugin(self, plugin, fn, args): """Call Async.host.call_plugin on a background thread. 
Returns a Deferred with the task reference.""" -- cgit From b651008e7e4f60f2ccb07497c27d866814156209 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Thu, 19 Aug 2010 16:05:27 -0400 Subject: Complete the Image API against a LocalImageService until Glance's API exists (at which point we'll make a GlanceImageService and make the choice of ImageService plugin configurable.) --- nova/api/rackspace/images.py | 83 +++++++++++++++++++++++++++++-------------- nova/api/rackspace/notes.txt | 23 ++++++++++++ nova/api/services/__init__.py | 0 nova/api/services/image.py | 72 +++++++++++++++++++++++++++++++++++++ 4 files changed, 151 insertions(+), 27 deletions(-) create mode 100644 nova/api/rackspace/notes.txt create mode 100644 nova/api/services/__init__.py create mode 100644 nova/api/services/image.py diff --git a/nova/api/rackspace/images.py b/nova/api/rackspace/images.py index 57c03894a..e29f737a5 100644 --- a/nova/api/rackspace/images.py +++ b/nova/api/rackspace/images.py @@ -15,12 +15,13 @@ # License for the specific language governing permissions and limitations # under the License. -from nova.endpoint.rackspace.controllers.base import BaseController -from nova.endpoint import images +from nova import datastore +from nova.api.rackspace import base +from nova.api.services.image import ImageService from webob import exc #TODO(gundlach): Serialize return values -class Controller(BaseController): +class Controller(base.Controller): _serialization_metadata = { 'application/xml': { @@ -31,34 +32,62 @@ class Controller(BaseController): } } + def __init__(self): + self._svc = ImageService.load() + self._id_xlator = RackspaceApiImageIdTranslator() + + def _to_rs_id(self, image_id): + """ + Convert an image id from the format of our ImageService strategy + to the Rackspace API format (an int). 
+ """ + strategy = self._svc.__class__.__name__ + return self._id_xlator.to_rs_id(strategy, image_id) + def index(self, req): - context = req.environ['nova.api_request_context'] - return images.list(context) + """Return all public images.""" + data = self._svc.list() + for img in data: + img['id'] = self._to_rs_id(img['id']) + return dict(images=result) def show(self, req, id): - context = req.environ['nova.api_request_context'] - return images.list(context, filter_list=[id]) + """Return data about the given image id.""" + img = self._svc.show(id) + img['id'] = self._to_rs_id(img['id']) + return dict(image=img) def delete(self, req, id): - context = req.environ['nova.api_request_context'] - # TODO(gundlach): make sure it's an image they may delete? - return images.deregister(context, id) + # Only public images are supported for now. + raise exc.HTTPNotFound() + + def create(self, req): + # Only public images are supported for now, so a request to + # make a backup of a server cannot be supproted. + raise exc.HTTPNotFound() + + def update(self, req, id): + # Users may not modify public images, and that's all that + # we support for now. + raise exc.HTTPNotFound() + + +class RackspaceApiImageIdTranslator(object): + """ + Converts Rackspace API image ids to and from the id format for a given + strategy. + """ - def create(self, **kwargs): - # TODO(gundlach): no idea how to hook this up. code below - # is from servers.py. - inst = self.build_server_instance(kwargs['server']) - rpc.cast( - FLAGS.compute_topic, { - "method": "run_instance", - "args": {"instance_id": inst.instance_id}}) + def __init__(self): + self._store = datastore.Redis.instance() - def update(self, **kwargs): - # TODO (gundlach): no idea how to hook this up. code below - # is from servers.py. 
- instance_id = kwargs['id'] - instance = compute.InstanceDirectory().get(instance_id) - if not instance: - raise ServerNotFound("The requested server was not found") - instance.update(kwargs['server']) - instance.save() + def to_rs_id(self, strategy_name, opaque_id): + """Convert an id from a strategy-specific one to a Rackspace one.""" + key = "rsapi.idstrategies.image.%s" % strategy_name + result = self._store.hget(key, str(opaque_id)) + if result: # we have a mapping from opaque to RS for this strategy + return int(result) + else: + nextid = self._store.incr("%s.lastid" % key) + self._store.hsetnx(key, str(opaque_id), nextid) + return nextid diff --git a/nova/api/rackspace/notes.txt b/nova/api/rackspace/notes.txt new file mode 100644 index 000000000..e133bf5ea --- /dev/null +++ b/nova/api/rackspace/notes.txt @@ -0,0 +1,23 @@ +We will need: + +ImageService +a service that can do crud on image information. not user-specific. opaque +image ids. + +GlanceImageService(ImageService): +image ids are URIs. + +LocalImageService(ImageService): +image ids are random strings. + +RackspaceAPITranslationStore: +translates RS server/images/flavor/etc ids into formats required +by a given ImageService strategy. + +api.rackspace.images.Controller: +uses an ImageService strategy behind the scenes to do its fetching; it just +converts int image id into a strategy-specific image id. + +who maintains the mapping from user to [images he owns]? nobody, because +we have no way of enforcing access to his images, without kryptex which +won't be in Austin. 
diff --git a/nova/api/services/__init__.py b/nova/api/services/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/nova/api/services/image.py b/nova/api/services/image.py new file mode 100644 index 000000000..c5ea15ba1 --- /dev/null +++ b/nova/api/services/image.py @@ -0,0 +1,72 @@ +import cPickle as pickle +import os.path +import string + +class ImageService(object): + """Provides storage and retrieval of disk image objects.""" + + @staticmethod + def load(): + """Factory method to return image service.""" + #TODO(gundlach): read from config. + class_ = LocalImageService + return class_() + + def index(self): + """ + Return a list of image data dicts. Each dict will contain an + id key whose value is an opaque image id. + """ + + def show(self, id): + """ + Returns a dict containing image data for the given opaque image id. + """ + + +class GlanceImageService(ImageService): + """Provides storage and retrieval of disk image objects within Glance.""" + # TODO(gundlach): once Glance has an API, build this. + pass + + +class LocalImageService(ImageService): + """Image service storing images to local disk.""" + + def __init__(self): + self._path = "/tmp/nova/images" + try: + os.makedirs(self._path) + except OSError: # exists + pass + + def _path_to(self, image_id=''): + return os.path.join(self._path, image_id) + + def _ids(self): + """The list of all image ids.""" + return os.path.listdir(self._path) + + def index(self): + return [ self.show(id) for id in self._ids() ] + + def show(self, id): + return pickle.load(open(self._path_to(id))) + + def create(self, data): + """ + Store the image data and return the new image id. + """ + id = ''.join(random.choice(string.letters) for _ in range(20)) + self.update(id, data) + return id + + def update(self, image_id, data): + """Replace the contents of the given image with the new data.""" + pickle.dump(data, open(self._path_to(image_id), 'w')) + + def delete(self, image_id): + """ + Delete the given image. 
Raises OSError if the image does not exist. + """ + os.unlink(self._path_to(image_id)) -- cgit From a5a1ba53fdd122f85e61d74756d19d732805a357 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 19 Aug 2010 13:58:43 -0700 Subject: move volume code into datalayer and cleanup --- nova/compute/service.py | 17 +++-- nova/db/api.py | 91 ++++++++++++++++++++++++--- nova/db/sqlalchemy/api.py | 114 ++++++++++++++++++++++++++++++++-- nova/models.py | 1 - nova/tests/volume_unittest.py | 11 ++-- nova/volume/service.py | 140 +++++++++++++----------------------------- 6 files changed, 247 insertions(+), 127 deletions(-) diff --git a/nova/compute/service.py b/nova/compute/service.py index 7a2cb277d..dd16484fe 100644 --- a/nova/compute/service.py +++ b/nova/compute/service.py @@ -36,11 +36,9 @@ from nova import flags from nova import process from nova import service from nova import utils -from nova import models from nova.compute import power_state from nova.network import service as network_service from nova.virt import connection as virt_connection -from nova.volume import service as volume_service FLAGS = flags.FLAGS @@ -122,7 +120,7 @@ class ComputeService(service.Service): """Reboot an instance on this server. KVM doesn't support reboot, so we terminate and restart. 
- + """ self.update_state(instance_id, context) instance_ref = db.instance_get(context, instance_id) @@ -172,14 +170,14 @@ class ComputeService(service.Service): context=None): """Attach a volume to an instance.""" # TODO(termie): check that instance_id exists - volume_ref = volume_get(context, volume_id) + volume_ref = db.volume_get(context, volume_id) yield self._init_aoe() yield process.simple_execute( "sudo virsh attach-disk %s /dev/etherd/%s %s" % (instance_id, - volume['aoe_device'], + volume_ref['aoe_device'], mountpoint.rpartition('/dev/')[2])) - volume_attached(context, volume_id) + db.volume_attached(context, volume_id) defer.returnValue(True) @defer.inlineCallbacks @@ -189,14 +187,15 @@ class ComputeService(service.Service): # despite the documentation, virsh detach-disk just wants the device # name without the leading /dev/ # TODO(termie): check that instance_id exists - volume_ref = volume_get(context, volume_id) - target = volume['mountpoint'].rpartition('/dev/')[2] + volume_ref = db.volume_get(context, volume_id) + target = volume_ref['mountpoint'].rpartition('/dev/')[2] yield process.simple_execute( "sudo virsh detach-disk %s %s " % (instance_id, target)) - volume_detached(context, volume_id) + db.volume_detached(context, volume_id) defer.returnValue(True) @defer.inlineCallbacks def _init_aoe(self): + # TODO(vish): these shell calls should move into a different layer. yield process.simple_execute("sudo aoe-discover") yield process.simple_execute("sudo aoe-stat") diff --git a/nova/db/api.py b/nova/db/api.py index c1b2dee0d..63783075a 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -1,5 +1,21 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + from nova import flags from nova import utils @@ -23,18 +39,28 @@ def instance_get(context, instance_id): return _impl.instance_get(context, instance_id) +def instance_update(context, instance_id, values): + """Set the given properties on an instance and update it. + + Raises NotFound if instance does not exist. + + """ + return _impl.instance_update(context, instance_id, values) + + +def instance_create(context, values): + """Create an instance from the values dictionary.""" + return _impl.instance_create(context, values) + + def instance_state(context, instance_id, state, description=None): """Set the state of an instance.""" return _impl.instance_state(context, instance_id, state, description) -def instance_update(context, instance_id, new_values): - """Set the given properties on an instance and update it. - - Raises if instance does not exist. 
- - """ - return _impl.instance_update(context, instance_id, new_values) +def volume_destroy(context, volume_id): + """Destroy the volume or raise if it does not exist.""" + return _impl.volume_destroy(context, volume_id) def volume_get(context, volume_id): @@ -42,12 +68,59 @@ def volume_get(context, volume_id): return _impl.volume_get(context, volume_id) -def volume_attached(context, volume_id): +def volume_attached(context, volume_id, instance_id, mountpoint): """Ensure that a volume is set as attached.""" - return _impl.volume_attached(context, volume_id) + return _impl.volume_attached(context, volume_id, instance_id, mountpoint) def volume_detached(context, volume_id): """Ensure that a volume is set as detached.""" return _impl.volume_detached(context, volume_id) + +def volume_update(context, volume_id, values): + """Set the given properties on an volume and update it. + + Raises NotFound if volume does not exist. + + """ + return _impl.volume_update(context, volume_id, values) + + +def volume_create(context, values): + """Create a volume from the values dictionary.""" + return _impl.volume_create(context, values) + + +def volume_allocate_shelf_and_blade(context, volume_id): + """Allocate a free shelf and blace from the pool.""" + return _impl.volume_allocate_shelf_and_blade(context, volume_id) + + +def volume_get_shelf_and_blade(context, volume_id): + """Get the shelf and blade allocated to the volume.""" + return _impl.volume_get_shelf_and_blade(context, volume_id) + + +def network_destroy(context, network_id): + """Destroy the network or raise if it does not exist.""" + return _impl.network_destroy(context, network_id) + + +def network_get(context, network_id): + """Get an network or raise if it does not exist.""" + return _impl.network_get(context, network_id) + + +def network_update(context, network_id, values): + """Set the given properties on an network and update it. + + Raises NotFound if network does not exist. 
+ + """ + return _impl.network_update(context, network_id, values) + + +def network_create(context, values): + """Create a network from the values dictionary.""" + return _impl.network_create(context, values) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 6d9f5fe5f..1b76eb42a 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1,5 +1,22 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from nova import exception from nova import models @@ -12,24 +29,40 @@ def instance_get(context, instance_id): return models.Instance.find(instance_id) +def instance_update(context, instance_id, values): + instance_ref = instance_get(context, instance_id) + for (key, value) in values.iteritems(): + instance_ref[key] = value + instance_ref.save() + + +def instance_create(context, values): + instance_ref = models.Instance() + for (key, value) in values.iteritems(): + instance_ref[key] = value + instance_ref.save() + return instance_ref.id + + def instance_state(context, instance_id, state, description=None): instance_ref = instance_get(context, instance_id) instance_ref.set_state(state, description) -def instance_update(context, instance_id, properties): - instance_ref = instance_get(context, instance_id) - for k, v in properties.iteritems(): - instance_ref[k] = v - instance_ref.save() +def volume_destroy(context, volume_id): + volume_ref = volume_get(context, volume_id) + volume_ref.delete() def volume_get(context, volume_id): return models.Volume.find(volume_id) -def volume_attached(context, volume_id): +def volume_attached(context, volume_id, instance_id, mountpoint): volume_ref = volume_get(context, volume_id) + volume_ref.instance_id = instance_id + volume_ref['status'] = 'in-use' + volume_ref['mountpoint'] = mountpoint volume_ref['attach_status'] = 'attached' volume_ref.save() @@ -41,3 +74,72 @@ def volume_detached(context, volume_id): volume_ref['status'] = 'available' volume_ref['attach_status'] = 'detached' volume_ref.save() + + +def volume_update(context, volume_id, values): + volume_ref = volume_get(context, volume_id) + for (key, value) in values.iteritems(): + volume_ref[key] = value + volume_ref.save() + + +def volume_create(context, values): + volume_ref = models.Volume() + for (key, value) in values.iteritems(): + volume_ref[key] = value + volume_ref.save() + return volume_ref.id + + +class NoMoreBlades(exception.Error): + pass + + +# FIXME should 
we just do this in the constructor automatically +# and return the shelf and blade id with volume data in +# volume_get? +def volume_allocate_shelf_and_blade(context, volume_id): + session = models.NovaBase.get_session() + query = session.query(models.ExportDevice).filter_by(volume=None) + export_device = query.with_lockmode("update").first() + # NOTE(vish): if with_lockmode isn't supported, as in sqlite, + # then this has concurrency issues + if not export_device: + # FIXME where should this exception go? + raise NoMoreBlades() + export_device.volume_id = volume_id + session.add(export_device) + session.commit() + return (export_device.shelf_id, export_device.blade_id) + + +def volume_get_shelf_and_blade(context, volume_id): + # FIXME: should probably do this in one call + volume_ref = volume_get(context, volume_id) + export_device = volume_ref.export_device + if not export_device: + raise exception.NotFound() + return (export_device.shelf_id, export_device.blade_id) + +def network_destroy(context, network_id): + network_ref = network_get(context, network_id) + network_ref.delete() + + +def network_get(context, network_id): + return models.Instance.find(network_id) + + +def network_update(context, network_id, values): + network_ref = network_get(context, network_id) + for (key, value) in values.iteritems(): + network_ref[key] = value + network_ref.save() + + +def network_create(context, values): + network_ref = models.Network() + for (key, value) in values.iteritems(): + network_ref[key] = value + network_ref.save() + return network_ref.id diff --git a/nova/models.py b/nova/models.py index ea529713c..ef10398e8 100644 --- a/nova/models.py +++ b/nova/models.py @@ -231,7 +231,6 @@ class Instance(Base, NovaBase): class Volume(Base, NovaBase): __tablename__ = 'volumes' id = Column(Integer, primary_key=True) - volume_id = Column(String(255)) user_id = Column(String(255)) #, ForeignKey('users.id'), nullable=False) project_id = Column(String(255)) #, 
ForeignKey('projects.id')) diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py index 82f71901a..90cd04c65 100644 --- a/nova/tests/volume_unittest.py +++ b/nova/tests/volume_unittest.py @@ -21,6 +21,7 @@ import logging from twisted.internet import defer from nova import exception +from nova import db from nova import flags from nova import models from nova import test @@ -89,7 +90,7 @@ class VolumeTestCase(test.TrialTestCase): self.assertFailure(self.volume.create_volume(vol_size, user_id, project_id), - volume_service.NoMoreBlades) + db.sqlalchemy.api.NoMoreBlades) for id in vols: yield self.volume.delete_volume(id) @@ -102,23 +103,21 @@ class VolumeTestCase(test.TrialTestCase): project_id = 'fake' mountpoint = "/dev/sdf" volume_id = yield self.volume.create_volume(vol_size, user_id, project_id) - vol = models.Volume.find(volume_id) - self.volume.start_attach(volume_id, instance_id, mountpoint) if FLAGS.fake_tests: - self.volume.finish_attach(volume_id) + db.volume_attached(None, volume_id, instance_id, mountpoint) else: rv = yield self.compute.attach_volume(instance_id, volume_id, mountpoint) + vol = db.volume_get(None, volume_id) self.assertEqual(vol.status, "in-use") self.assertEqual(vol.attach_status, "attached") self.assertEqual(vol.instance_id, instance_id) self.assertEqual(vol.mountpoint, mountpoint) self.assertFailure(self.volume.delete_volume(volume_id), exception.Error) - self.volume.start_detach(volume_id) if FLAGS.fake_tests: - self.volume.finish_detach(volume_id) + db.volume_detached(None, volume_id) else: rv = yield self.volume.detach_volume(instance_id, volume_id) diff --git a/nova/volume/service.py b/nova/volume/service.py index c04f85145..34c938aa9 100644 --- a/nova/volume/service.py +++ b/nova/volume/service.py @@ -26,12 +26,11 @@ import logging from twisted.internet import defer +from nova import db from nova import exception from nova import flags -from nova import models from nova import process from nova import service 
-from nova import utils from nova import validate @@ -55,10 +54,6 @@ flags.DEFINE_boolean('fake_storage', False, 'Should we make real storage volumes to attach?') -class NoMoreBlades(exception.Error): - pass - - class VolumeService(service.Service): """ There is one VolumeNode running on each host. @@ -71,7 +66,7 @@ class VolumeService(service.Service): @defer.inlineCallbacks @validate.rangetest(size=(0, 1000)) - def create_volume(self, size, user_id, project_id): + def create_volume(self, size, user_id, project_id, context=None): """ Creates an exported volume (fake or real), restarts exports to make it available. @@ -79,108 +74,88 @@ class VolumeService(service.Service): """ logging.debug("Creating volume of size: %s" % (size)) - vol = models.Volume() - vol.volume_id = utils.generate_uid('vol') - vol.node_name = FLAGS.node_name - vol.size = size - vol.user_id = user_id - vol.project_id = project_id - vol.availability_zone = FLAGS.storage_availability_zone - vol.status = "creating" # creating | available | in-use - vol.attach_status = "detached" # attaching | attached | detaching | detached - vol.save() - yield self._exec_create_volume(vol) - yield self._setup_export(vol) + vol = {} + vol['node_name'] = FLAGS.node_name + vol['size'] = size + vol['user_id'] = user_id + vol['project_id'] = project_id + vol['availability_zone'] = FLAGS.storage_availability_zone + vol['status'] = "creating" # creating | available | in-use + # attaching | attached | detaching | detached + vol['attach_status'] = "detached" + volume_id = db.volume_create(context, vol) + yield self._exec_create_volume(volume_id, size) + (shelf_id, blade_id) = db.volume_allocate_shelf_and_blade(context, + volume_id) + yield self._exec_create_export(volume_id, shelf_id, blade_id) # TODO(joshua): We need to trigger a fanout message # for aoe-discover on all the nodes - vol.status = "available" - vol.save() - logging.debug("restarting exports") yield self._exec_ensure_exports() - defer.returnValue(vol.id) + 
db.volume_update(context, volume_id, {'status': 'available'}) + logging.debug("restarting exports") + defer.returnValue(volume_id) @defer.inlineCallbacks - def delete_volume(self, volume_id): + def delete_volume(self, volume_id, context=None): logging.debug("Deleting volume with id of: %s" % (volume_id)) - vol = models.Volume.find(volume_id) - if vol.attach_status == "attached": + volume_ref = db.volume_get(context, volume_id) + if volume_ref['attach_status'] == "attached": raise exception.Error("Volume is still attached") - if vol.node_name != FLAGS.node_name: + if volume_ref['node_name'] != FLAGS.node_name: raise exception.Error("Volume is not local to this node") - yield self._exec_delete_volume(vol) - yield vol.delete() + shelf_id, blade_id = db.volume_get_shelf_and_blade(context, + volume_id) + yield self._exec_remove_export(volume_id, shelf_id, blade_id) + yield self._exec_delete_volume(volume_id) + db.volume_destroy(context, volume_id) defer.returnValue(True) @defer.inlineCallbacks - def _exec_create_volume(self, vol): + def _exec_create_volume(self, volume_id, size): if FLAGS.fake_storage: defer.returnValue(None) - if str(vol.size) == '0': + if int(size) == 0: sizestr = '100M' else: - sizestr = '%sG' % vol.size + sizestr = '%sG' % size yield process.simple_execute( "sudo lvcreate -L %s -n %s %s" % (sizestr, - vol.volume_id, + volume_id, FLAGS.volume_group), error_ok=1) @defer.inlineCallbacks - def _exec_delete_volume(self, vol): + def _exec_delete_volume(self, volume_id): if FLAGS.fake_storage: defer.returnValue(None) yield process.simple_execute( "sudo lvremove -f %s/%s" % (FLAGS.volume_group, - vol.volume_id), error_ok=1) + volume_id), error_ok=1) @defer.inlineCallbacks - def _setup_export(self, vol): - # FIXME: abstract this. 
also remove vol.export_device.xxx cheat - session = models.NovaBase.get_session() - query = session.query(models.ExportDevice).filter_by(volume=None) - export_device = query.with_lockmode("update").first() - # NOTE(vish): if with_lockmode isn't supported, as in sqlite, - # then this has concurrency issues - if not export_device: - raise NoMoreBlades() - export_device.volume_id = vol.id - session.add(export_device) - session.commit() - # FIXME: aoe_device is redundant, should be turned into a method - vol.aoe_device = "e%s.%s" % (export_device.shelf_id, - export_device.blade_id) - vol.save() - yield self._exec_setup_export(vol) - - @defer.inlineCallbacks - def _exec_setup_export(self, vol): + def _exec_create_export(self, volume_id, shelf_id, blade_id): if FLAGS.fake_storage: defer.returnValue(None) yield process.simple_execute( "sudo vblade-persist setup %s %s %s /dev/%s/%s" % - (self, vol.export_device.shelf_id, - vol.export_device.blade_id, + (self, + shelf_id, + blade_id, FLAGS.aoe_eth_dev, FLAGS.volume_group, - vol.volume_id), error_ok=1) + volume_id), error_ok=1) - @defer.inlineCallbacks - def _remove_export(self, vol): - if not vol.export_device: - defer.returnValue(False) - yield self._exec_remove_export(vol) - defer.returnValue(True) @defer.inlineCallbacks - def _exec_remove_export(self, vol): + def _exec_remove_export(self, _volume_id, shelf_id, blade_id): if FLAGS.fake_storage: defer.returnValue(None) yield process.simple_execute( - "sudo vblade-persist stop %s %s" % (self, vol.export_device.shelf_id, - vol.export_device.blade_id), error_ok=1) + "sudo vblade-persist stop %s %s" % (self, shelf_id, + blade_id), error_ok=1) yield process.simple_execute( - "sudo vblade-persist destroy %s %s" % (self, vol.export_device.shelf_id, - vol.export_device.blade_id), error_ok=1) + "sudo vblade-persist destroy %s %s" % (self, shelf_id, + blade_id), error_ok=1) @defer.inlineCallbacks def _exec_ensure_exports(self): if FLAGS.fake_storage: @@ -198,30 +173,3 @@ class 
VolumeService(service.Service): yield process.simple_execute( "sudo vgcreate %s %s" % (FLAGS.volume_group, FLAGS.storage_dev)) - - def start_attach(self, volume_id, instance_id, mountpoint): - vol = models.Volume.find(volume_id) - vol.instance_id = instance_id - vol.mountpoint = mountpoint - vol.status = "in-use" - vol.attach_status = "attaching" - vol.attach_time = utils.isotime() - vol.save() - - def finish_attach(self, volume_id): - vol = models.Volume.find(volume_id) - vol.attach_status = "attached" - vol.save() - - def start_detach(self, volume_id): - vol = models.Volume.find(volume_id) - vol.attach_status = "detaching" - vol.save() - - def finish_detach(self, volume_id): - vol = models.Volume.find(volume_id) - vol.instance_id = None - vol.mountpoint = None - vol.status = "available" - vol.attach_status = "detached" - vol.save() -- cgit From a39a155342ad5aa9d8c7b115fb6fe7498ef00f23 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 20 Aug 2010 10:08:05 -0700 Subject: small fixes to network --- nova/network/service.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/nova/network/service.py b/nova/network/service.py index d3aa1c46f..3dba0a9ef 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -26,7 +26,7 @@ from nova import flags from nova import service from nova import utils from nova.auth import manager -from nova.network import exception +from nova.network import exception as network_exception from nova.network import model from nova.network import vpn @@ -64,8 +64,7 @@ def type_to_class(network_type): def setup_compute_network(network_type, user_id, project_id, security_group): """Sets up the network on a compute host""" srv = type_to_class(network_type) - srv.setup_compute_network(network_type, - user_id, + srv.setup_compute_network(user_id, project_id, security_group) @@ -170,7 +169,7 @@ class FlatNetworkService(BaseNetworkService): redis.sadd('ips', fixed_ip) fixed_ip = redis.spop('ips') if not 
fixed_ip: - raise exception.NoMoreAddresses() + raise network_exception.NoMoreAddresses() # TODO(vish): some sort of dns handling for hostname should # probably be done here. return {'inject_network': True, -- cgit From 70112ea9941b92aa98e32c0c37f0208877953557 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 20 Aug 2010 13:05:46 -0700 Subject: fix concurrency issue with multiple instances getting the same ip --- nova/network/model.py | 39 +++++++++++++++++++++------------------ 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/nova/network/model.py b/nova/network/model.py index 6e4fcc47e..6c12836b7 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -231,7 +231,8 @@ class BaseNetwork(datastore.BasicModel): self.network_id = network_id self.network_str = network_str super(BaseNetwork, self).__init__() - self.save() + if self.is_new_record(): + self._create_assigned_set() @property def network(self): @@ -278,6 +279,16 @@ class BaseNetwork(datastore.BasicModel): """Remove a host from the datastore""" self.address_class(ip_address).destroy() + def _create_assigned_set(self): + for idx in range(self.num_bottom_reserved_ips, + len(self.network) - self.num_top_reserved_ips): + redis = datastore.Redis.instance() + redis.sadd(self._available_key, str(self.network[idx])) + + @property + def _available_key(self): + return 'available:%s' % self.identifier + @property def assigned(self): """Returns a list of all assigned addresses""" @@ -294,15 +305,6 @@ class BaseNetwork(datastore.BasicModel): return self.address_class(ip_address) return None - @property - def available(self): - """Returns a list of all available addresses in the network""" - for idx in range(self.num_bottom_reserved_ips, - len(self.network) - self.num_top_reserved_ips): - address = str(self.network[idx]) - if not address in self.assigned: - yield address - @property def num_bottom_reserved_ips(self): """Returns number of ips reserved at the bottom of the range""" @@ 
-315,13 +317,14 @@ class BaseNetwork(datastore.BasicModel): def allocate_ip(self, user_id, project_id, mac, hostname=None): """Allocates an ip to a mac address""" - for address in self.available: - logging.debug("Allocating IP %s to %s", address, project_id) - self._add_host(user_id, project_id, address, mac, hostname) - self.express(address=address) - return address - raise exception.NoMoreAddresses("Project %s with network %s" % - (project_id, str(self.network))) + address = datastore.Redis.instance().spop(self._available_key) + if not address: + raise exception.NoMoreAddresses("Project %s with network %s" % + (project_id, str(self.network))) + logging.debug("Allocating IP %s to %s", address, project_id) + self._add_host(user_id, project_id, address, mac, hostname) + self.express(address=address) + return address def lease_ip(self, ip_str): """Called when DHCP lease is activated""" @@ -342,6 +345,7 @@ class BaseNetwork(datastore.BasicModel): logging.debug("Releasing IP %s", ip_str) self._rem_host(ip_str) self.deexpress(address=ip_str) + datastore.Redis.instance().sadd(self._available_key, ip_str) def deallocate_ip(self, ip_str): """Deallocates an allocated ip""" @@ -400,7 +404,6 @@ class BridgedNetwork(BaseNetwork): def __init__(self, *args, **kwargs): super(BridgedNetwork, self).__init__(*args, **kwargs) self['bridge_dev'] = FLAGS.bridge_dev - self.save() def express(self, address=None): super(BridgedNetwork, self).express(address=address) -- cgit From 0d61264b578fe4be91828cd13d93372835ff8764 Mon Sep 17 00:00:00 2001 From: andy Date: Sat, 21 Aug 2010 12:47:21 +0200 Subject: Alphabetize the methods in the db layer. There are enough of them in there that it is probably useful to keep them organized. Also moved the NoMoreBlades to db, it is likely to be shared by any implementation. 
--- nova/db/api.py | 100 ++++++++++++++++++------------- nova/db/sqlalchemy/api.py | 135 +++++++++++++++++++++++------------------- nova/models.py | 12 ++-- nova/tests/volume_unittest.py | 2 +- nova/volume/service.py | 2 +- 5 files changed, 139 insertions(+), 112 deletions(-) diff --git a/nova/db/api.py b/nova/db/api.py index 63783075a..e8a1dd9d0 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -16,6 +16,7 @@ # License for the specific language governing permissions and limitations # under the License. +from nova import exception from nova import flags from nova import utils @@ -29,6 +30,18 @@ _impl = utils.LazyPluggable(FLAGS['db_backend'], sqlalchemy='nova.db.sqlalchemy.api') +class NoMoreBlades(exception.Error): + pass + + +################### + + +def instance_create(context, values): + """Create an instance from the values dictionary.""" + return _impl.instance_create(context, values) + + def instance_destroy(context, instance_id): """Destroy the instance or raise if it does not exist.""" return _impl.instance_destroy(context, instance_id) @@ -39,6 +52,11 @@ def instance_get(context, instance_id): return _impl.instance_get(context, instance_id) +def instance_state(context, instance_id, state, description=None): + """Set the state of an instance.""" + return _impl.instance_state(context, instance_id, state, description) + + def instance_update(context, instance_id, values): """Set the given properties on an instance and update it. 
@@ -48,43 +66,44 @@ def instance_update(context, instance_id, values): return _impl.instance_update(context, instance_id, values) -def instance_create(context, values): - """Create an instance from the values dictionary.""" - return _impl.instance_create(context, values) +#################### -def instance_state(context, instance_id, state, description=None): - """Set the state of an instance.""" - return _impl.instance_state(context, instance_id, state, description) +def network_create(context, values): + """Create a network from the values dictionary.""" + return _impl.network_create(context, values) -def volume_destroy(context, volume_id): - """Destroy the volume or raise if it does not exist.""" - return _impl.volume_destroy(context, volume_id) +def network_destroy(context, network_id): + """Destroy the network or raise if it does not exist.""" + return _impl.network_destroy(context, network_id) -def volume_get(context, volume_id): - """Get a volume or raise if it does not exist.""" - return _impl.volume_get(context, volume_id) +def network_get(context, network_id): + """Get an network or raise if it does not exist.""" + return _impl.network_get(context, network_id) -def volume_attached(context, volume_id, instance_id, mountpoint): - """Ensure that a volume is set as attached.""" - return _impl.volume_attached(context, volume_id, instance_id, mountpoint) +def network_update(context, network_id, values): + """Set the given properties on an network and update it. + Raises NotFound if network does not exist. -def volume_detached(context, volume_id): - """Ensure that a volume is set as detached.""" - return _impl.volume_detached(context, volume_id) + """ + return _impl.network_update(context, network_id, values) -def volume_update(context, volume_id, values): - """Set the given properties on an volume and update it. +################### - Raises NotFound if volume does not exist. 
- """ - return _impl.volume_update(context, volume_id, values) +def volume_allocate_shelf_and_blade(context, volume_id): + """Atomically allocate a free shelf and blade from the pool.""" + return _impl.volume_allocate_shelf_and_blade(context, volume_id) + + +def volume_attached(context, volume_id, instance_id, mountpoint): + """Ensure that a volume is set as attached.""" + return _impl.volume_attached(context, volume_id, instance_id, mountpoint) def volume_create(context, values): @@ -92,35 +111,32 @@ def volume_create(context, values): return _impl.volume_create(context, values) -def volume_allocate_shelf_and_blade(context, volume_id): - """Allocate a free shelf and blace from the pool.""" - return _impl.volume_allocate_shelf_and_blade(context, volume_id) +def volume_destroy(context, volume_id): + """Destroy the volume or raise if it does not exist.""" + return _impl.volume_destroy(context, volume_id) -def volume_get_shelf_and_blade(context, volume_id): - """Get the shelf and blade allocated to the volume.""" - return _impl.volume_get_shelf_and_blade(context, volume_id) +def volume_detached(context, volume_id): + """Ensure that a volume is set as detached.""" + return _impl.volume_detached(context, volume_id) -def network_destroy(context, network_id): - """Destroy the network or raise if it does not exist.""" - return _impl.network_destroy(context, network_id) +def volume_get(context, volume_id): + """Get a volume or raise if it does not exist.""" + return _impl.volume_get(context, volume_id) -def network_get(context, network_id): - """Get an network or raise if it does not exist.""" - return _impl.network_get(context, network_id) +def volume_get_shelf_and_blade(context, volume_id): + """Get the shelf and blade allocated to the volume.""" + return _impl.volume_get_shelf_and_blade(context, volume_id) -def network_update(context, network_id, values): - """Set the given properties on an network and update it. 
+def volume_update(context, volume_id, values): + """Set the given properties on an volume and update it. - Raises NotFound if network does not exist. + Raises NotFound if volume does not exist. """ - return _impl.network_update(context, network_id, values) + return _impl.volume_update(context, volume_id, values) -def network_create(context, values): - """Create a network from the values dictionary.""" - return _impl.network_create(context, values) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 1b76eb42a..7a2402690 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -16,10 +16,19 @@ # License for the specific language governing permissions and limitations # under the License. +from nova import db from nova import exception from nova import models +def instance_create(context, values): + instance_ref = models.Instance() + for (key, value) in values.iteritems(): + instance_ref[key] = value + instance_ref.save() + return instance_ref.id + + def instance_destroy(context, instance_id): instance_ref = instance_get(context, instance_id) instance_ref.delete() @@ -29,6 +38,11 @@ def instance_get(context, instance_id): return models.Instance.find(instance_id) +def instance_state(context, instance_id, state, description=None): + instance_ref = instance_get(context, instance_id) + instance_ref.set_state(state, description) + + def instance_update(context, instance_id, values): instance_ref = instance_get(context, instance_id) for (key, value) in values.iteritems(): @@ -36,26 +50,48 @@ def instance_update(context, instance_id, values): instance_ref.save() -def instance_create(context, values): - instance_ref = models.Instance() +##################### + + +def network_create(context, values): + network_ref = models.Network() for (key, value) in values.iteritems(): - instance_ref[key] = value - instance_ref.save() - return instance_ref.id + network_ref[key] = value + network_ref.save() + return network_ref.id -def 
instance_state(context, instance_id, state, description=None): - instance_ref = instance_get(context, instance_id) - instance_ref.set_state(state, description) +def network_destroy(context, network_id): + network_ref = network_get(context, network_id) + network_ref.delete() -def volume_destroy(context, volume_id): - volume_ref = volume_get(context, volume_id) - volume_ref.delete() +def network_get(context, network_id): + return models.Instance.find(network_id) -def volume_get(context, volume_id): - return models.Volume.find(volume_id) +def network_update(context, network_id, values): + network_ref = network_get(context, network_id) + for (key, value) in values.iteritems(): + network_ref[key] = value + network_ref.save() + + +###################### + + +def volume_allocate_shelf_and_blade(context, volume_id): + session = models.NovaBase.get_session() + query = session.query(models.ExportDevice).filter_by(volume=None) + export_device = query.with_lockmode("update").first() + # NOTE(vish): if with_lockmode isn't supported, as in sqlite, + # then this has concurrency issues + if not export_device: + raise db.NoMoreBlades() + export_device.volume_id = volume_id + session.add(export_device) + session.commit() + return (export_device.shelf_id, export_device.blade_id) def volume_attached(context, volume_id, instance_id, mountpoint): @@ -67,6 +103,19 @@ def volume_attached(context, volume_id, instance_id, mountpoint): volume_ref.save() +def volume_create(context, values): + volume_ref = models.Volume() + for (key, value) in values.iteritems(): + volume_ref[key] = value + volume_ref.save() + return volume_ref.id + + +def volume_destroy(context, volume_id): + volume_ref = volume_get(context, volume_id) + volume_ref.delete() + + def volume_detached(context, volume_id): volume_ref = volume_get(context, volume_id) volume_ref['instance_id'] = None @@ -76,70 +125,32 @@ def volume_detached(context, volume_id): volume_ref.save() -def volume_update(context, volume_id, values): +def 
volume_get(context, volume_id): + return models.Volume.find(volume_id) + + +def volume_get_shelf_and_blade(context, volume_id): volume_ref = volume_get(context, volume_id) - for (key, value) in values.iteritems(): - volume_ref[key] = value - volume_ref.save() + export_device = volume_ref.export_device + if not export_device: + raise exception.NotFound() + return (export_device.shelf_id, export_device.blade_id) -def volume_create(context, values): - volume_ref = models.Volume() +def volume_update(context, volume_id, values): + volume_ref = volume_get(context, volume_id) for (key, value) in values.iteritems(): volume_ref[key] = value volume_ref.save() - return volume_ref.id -class NoMoreBlades(exception.Error): - pass -# FIXME should we just do this in the constructor automatically -# and return the shelf and blade id with volume data in -# volume_get? -def volume_allocate_shelf_and_blade(context, volume_id): - session = models.NovaBase.get_session() - query = session.query(models.ExportDevice).filter_by(volume=None) - export_device = query.with_lockmode("update").first() - # NOTE(vish): if with_lockmode isn't supported, as in sqlite, - # then this has concurrency issues - if not export_device: - # FIXME where should this exception go? 
- raise NoMoreBlades() - export_device.volume_id = volume_id - session.add(export_device) - session.commit() - return (export_device.shelf_id, export_device.blade_id) -def volume_get_shelf_and_blade(context, volume_id): - # FIXME: should probably do this in one call - volume_ref = volume_get(context, volume_id) - export_device = volume_ref.export_device - if not export_device: - raise exception.NotFound() - return (export_device.shelf_id, export_device.blade_id) - -def network_destroy(context, network_id): - network_ref = network_get(context, network_id) - network_ref.delete() -def network_get(context, network_id): - return models.Instance.find(network_id) -def network_update(context, network_id, values): - network_ref = network_get(context, network_id) - for (key, value) in values.iteritems(): - network_ref[key] = value - network_ref.save() -def network_create(context, values): - network_ref = models.Network() - for (key, value) in values.iteritems(): - network_ref[key] = value - network_ref.save() - return network_ref.id diff --git a/nova/models.py b/nova/models.py index ef10398e8..e4cd37336 100644 --- a/nova/models.py +++ b/nova/models.py @@ -179,7 +179,7 @@ class Instance(Base, NovaBase): def project(self): return auth.manager.AuthManager().get_project(self.project_id) - # FIXME: make this opaque somehow + # TODO(vish): make this opaque somehow @property def name(self): return "i-%s" % self.id @@ -237,12 +237,12 @@ class Volume(Base, NovaBase): node_name = Column(String(255)) #, ForeignKey('physical_node.id')) size = Column(Integer) - alvailability_zone = Column(String(255)) # FIXME foreign key? + availability_zone = Column(String(255)) # TODO(vish) foreign key? instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) mountpoint = Column(String(255)) - attach_time = Column(String(255)) # FIXME datetime - status = Column(String(255)) # FIXME enum? 
- attach_status = Column(String(255)) # FIXME enum + attach_time = Column(String(255)) # TODO(vish) datetime + status = Column(String(255)) # TODO(vish) enum? + attach_status = Column(String(255)) # TODO(vish) enum class ExportDevice(Base, NovaBase): __tablename__ = 'export_devices' @@ -254,7 +254,7 @@ class ExportDevice(Base, NovaBase): uselist=False)) -#FIXME can these both come from the same baseclass? +# TODO(vish): can these both come from the same baseclass? class FixedIp(Base, NovaBase): __tablename__ = 'fixed_ips' id = Column(Integer, primary_key=True) diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py index 90cd04c65..37ee6c72b 100644 --- a/nova/tests/volume_unittest.py +++ b/nova/tests/volume_unittest.py @@ -90,7 +90,7 @@ class VolumeTestCase(test.TrialTestCase): self.assertFailure(self.volume.create_volume(vol_size, user_id, project_id), - db.sqlalchemy.api.NoMoreBlades) + db.NoMoreBlades) for id in vols: yield self.volume.delete_volume(id) diff --git a/nova/volume/service.py b/nova/volume/service.py index 34c938aa9..513c5edae 100644 --- a/nova/volume/service.py +++ b/nova/volume/service.py @@ -145,7 +145,6 @@ class VolumeService(service.Service): FLAGS.volume_group, volume_id), error_ok=1) - @defer.inlineCallbacks def _exec_remove_export(self, _volume_id, shelf_id, blade_id): if FLAGS.fake_storage: @@ -156,6 +155,7 @@ class VolumeService(service.Service): yield process.simple_execute( "sudo vblade-persist destroy %s %s" % (self, shelf_id, blade_id), error_ok=1) + @defer.inlineCallbacks def _exec_ensure_exports(self): if FLAGS.fake_storage: -- cgit From 6f5aa18747384f46f8d89ac0d6c82a710849ce59 Mon Sep 17 00:00:00 2001 From: andy Date: Sat, 21 Aug 2010 14:10:36 +0200 Subject: Add db abstraction and unittets for service.py. Also cleans up some style pieces. 
--- nova/db/api.py | 15 +++++ nova/db/sqlalchemy/api.py | 28 ++++++++- nova/service.py | 55 +++++++++------- nova/tests/service_unittest.py | 139 +++++++++++++++++++++++++++++++++++++++++ run_tests.py | 1 + 5 files changed, 213 insertions(+), 25 deletions(-) create mode 100644 nova/tests/service_unittest.py diff --git a/nova/db/api.py b/nova/db/api.py index e8a1dd9d0..e76e6b057 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -37,6 +37,21 @@ class NoMoreBlades(exception.Error): ################### +def daemon_get(context, node_name, binary): + return _impl.daemon_get(context, node_name, binary) + + +def daemon_create(context, values): + return _impl.daemon_create(context, values) + + +def daemon_update(context, values): + return _impl.daemon_update(context, values) + + +################### + + def instance_create(context, values): """Create an instance from the values dictionary.""" return _impl.instance_create(context, values) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 7a2402690..d80c03c19 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -21,6 +21,30 @@ from nova import exception from nova import models +################### + + +def daemon_get(context, node_name, binary): + return None + return models.Daemon.find_by_args(node_name, binary) + + +def daemon_create(context, values): + daemon_ref = models.Daemon(**values) + daemon_ref.save() + return daemon_ref + + +def daemon_update(context, node_name, binary, values): + daemon_ref = daemon_get(context, node_name, binary) + for (key, value) in values.iteritems(): + daemon_ref[key] = value + daemon_ref.save() + + +################### + + def instance_create(context, values): instance_ref = models.Instance() for (key, value) in values.iteritems(): @@ -50,7 +74,7 @@ def instance_update(context, instance_id, values): instance_ref.save() -##################### +################### def network_create(context, values): @@ -77,7 +101,7 @@ def network_update(context, 
network_id, values): network_ref.save() -###################### +################### def volume_allocate_shelf_and_blade(context, volume_id): diff --git a/nova/service.py b/nova/service.py index 29f47e833..65016d717 100644 --- a/nova/service.py +++ b/nova/service.py @@ -28,31 +28,34 @@ from twisted.internet import defer from twisted.internet import task from twisted.application import service +from nova import db from nova import exception from nova import flags -from nova import models from nova import rpc FLAGS = flags.FLAGS - flags.DEFINE_integer('report_interval', 10, 'seconds between nodes reporting state to cloud', lower_bound=1) + class Service(object, service.Service): - """Base class for workers that run on hosts""" + """Base class for workers that run on hosts.""" @classmethod - def create(cls, - report_interval=None, # defaults to flag - bin_name=None, # defaults to basename of executable - topic=None): # defaults to basename - "nova-" part - """Instantiates class and passes back application object""" + def create(cls, report_interval=None, bin_name=None, topic=None): + """Instantiates class and passes back application object. + + Args: + report_interval, defaults to flag + bin_name, defaults to basename of executable + topic, defaults to basename - "nova-" part + + """ if not report_interval: - # NOTE(vish): set here because if it is set to flag in the - # parameter list, it wrongly uses the default report_interval = FLAGS.report_interval + # NOTE(vish): magic to automatically determine bin_name and topic if not bin_name: bin_name = os.path.basename(inspect.stack()[-1][1]) @@ -81,25 +84,27 @@ class Service(object, service.Service): consumer_node.attach_to_twisted() # This is the parent service that twistd will be looking for when it - # parses this file, return it so that we can get it into globals below + # parses this file, return it so that we can get it into globals. 
application = service.Application(bin_name) node_instance.setServiceParent(application) return application @defer.inlineCallbacks - def report_state(self, node_name, binary): - """Update the state of this daemon in the datastore""" - # TODO(termie): make this pattern be more elegant. -todd + def report_state(self, node_name, binary, context=None): + """Update the state of this daemon in the datastore.""" try: try: - #FIXME abstract this - daemon = models.Daemon.find_by_args(node_name, binary) + daemon_ref = db.daemon_get(context, node_name, binary) except exception.NotFound: - daemon = models.Daemon(node_name=node_name, - binary=binary, - report_count=0) - self._update_daemon(daemon) - daemon.save() + daemon_ref = db.daemon_create(context, {'node_name': node_name, + 'binary': binary, + 'report_count': 0}) + + # TODO(termie): I don't think this is really needed, consider + # removing it. + self._update_daemon(daemon_ref, context) + + # TODO(termie): make this pattern be more elegant. if getattr(self, "model_disconnected", False): self.model_disconnected = False logging.error("Recovered model server connection!") @@ -110,6 +115,10 @@ class Service(object, service.Service): logging.exception("model server went away") yield - def _update_daemon(self, daemon): + def _update_daemon(self, daemon_ref, context): """Set any extra daemon data here""" - daemon.report_count = daemon.report_count + 1 + # FIXME(termie): the following is in no way atomic + db.daemon_update(context, + daemon_ref['node_name'], + daemon_ref['binary'], + {'report_count': daemon_ref['report_count'] + 1}) diff --git a/nova/tests/service_unittest.py b/nova/tests/service_unittest.py new file mode 100644 index 000000000..449494201 --- /dev/null +++ b/nova/tests/service_unittest.py @@ -0,0 +1,139 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Unit Tests for remote procedure calls using queue +""" + +import logging + +import mox +from twisted.internet import defer + +from nova import exception +from nova import flags +from nova import rpc +from nova import test +from nova import service + + +FLAGS = flags.FLAGS + + +class ServiceTestCase(test.BaseTestCase): + """Test cases for rpc""" + def setUp(self): # pylint: disable=C0103 + super(ServiceTestCase, self).setUp() + self.mox.StubOutWithMock(service, 'db') + + def test_create(self): + self.mox.StubOutWithMock(rpc, 'AdapterConsumer', use_mock_anything=True) + rpc.AdapterConsumer(connection=mox.IgnoreArg(), + topic='run_tests.py', + proxy=mox.IsA(service.Service) + ).AndReturn(rpc.AdapterConsumer) + + rpc.AdapterConsumer(connection=mox.IgnoreArg(), + topic='run_tests.py.%s' % FLAGS.node_name, + proxy=mox.IsA(service.Service) + ).AndReturn(rpc.AdapterConsumer) + rpc.AdapterConsumer.attach_to_twisted() + rpc.AdapterConsumer.attach_to_twisted() + self.mox.ReplayAll() + + app = service.Service.create() + self.assert_(app) + + # We're testing sort of weird behavior in how report_state decides + # whether it is disconnected, it looks for a variable on itself called + # 'model_disconnected' and report_state doesn't really do much so this + # these are mostly just for coverage + + def test_report_state(self): + node_name = 'foo' + binary = 'bar' + daemon_ref = {'node_name': node_name, + 'binary': binary, + 
'report_count': 0 + } + + service.db.daemon_get(None, node_name, binary).AndReturn(daemon_ref) + service.db.daemon_update(None, node_name, binary, + mox.ContainsKeyValue('report_count', 1)) + + self.mox.ReplayAll() + s = service.Service() + rv = yield s.report_state(node_name, binary) + + + def test_report_state_no_daemon(self): + node_name = 'foo' + binary = 'bar' + daemon_ref = {'node_name': node_name, + 'binary': binary, + 'report_count': 0 + } + + service.db.daemon_get(None, node_name, binary).AndRaise( + exception.NotFound()) + service.db.daemon_create(None, daemon_ref).AndReturn(daemon_ref) + service.db.daemon_update(None, node_name, binary, + mox.ContainsKeyValue('report_count', 1)) + + self.mox.ReplayAll() + s = service.Service() + rv = yield s.report_state(node_name, binary) + + + def test_report_state_newly_disconnected(self): + node_name = 'foo' + binary = 'bar' + daemon_ref = {'node_name': node_name, + 'binary': binary, + 'report_count': 0 + } + + service.db.daemon_get(None, node_name, binary).AndRaise( + Exception()) + + self.mox.ReplayAll() + s = service.Service() + rv = yield s.report_state(node_name, binary) + + self.assert_(s.model_disconnected) + + + def test_report_state_newly_connected(self): + node_name = 'foo' + binary = 'bar' + daemon_ref = {'node_name': node_name, + 'binary': binary, + 'report_count': 0 + } + + service.db.daemon_get(None, node_name, binary).AndReturn(daemon_ref) + service.db.daemon_update(None, node_name, binary, + mox.ContainsKeyValue('report_count', 1)) + + self.mox.ReplayAll() + s = service.Service() + s.model_disconnected = True + rv = yield s.report_state(node_name, binary) + + self.assert_(not s.model_disconnected) + diff --git a/run_tests.py b/run_tests.py index 82c1aa9cf..c47cbe2ec 100644 --- a/run_tests.py +++ b/run_tests.py @@ -60,6 +60,7 @@ from nova.tests.network_unittest import * from nova.tests.objectstore_unittest import * from nova.tests.process_unittest import * from nova.tests.rpc_unittest import * +from 
nova.tests.service_unittest import * from nova.tests.validator_unittest import * from nova.tests.volume_unittest import * -- cgit From 152baf34247c5a4b76f643cac0d33c0158de0bfa Mon Sep 17 00:00:00 2001 From: andy Date: Sat, 21 Aug 2010 15:37:00 +0200 Subject: Moves auth.manager to the data layer. A couple weird things are going on, I added a try-except in Manager.delete_project because it seems to have an issue finding the network to delete, I think something is probably deleting it before the tests get a chance to. Also stubbed out task.LoopingCall in service_unittest because there wasn't a good way to kill the task from outside of service.Service.create() --- nova/auth/manager.py | 35 +++++++++++++++++++---------------- nova/db/api.py | 8 ++++++++ nova/db/sqlalchemy/api.py | 13 ++++++++++++- nova/network/service.py | 7 ++++--- nova/tests/network_unittest.py | 4 ++++ nova/tests/service_unittest.py | 11 +++++++++++ 6 files changed, 58 insertions(+), 20 deletions(-) diff --git a/nova/auth/manager.py b/nova/auth/manager.py index eed67d8c3..070c5508a 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -29,6 +29,7 @@ import uuid import zipfile from nova import crypto +from nova import db from nova import exception from nova import flags from nova import models @@ -202,11 +203,6 @@ class Project(AuthBase): ip, port = AuthManager().get_project_vpn_data(self) return port - @property - def network(self): - session = models.create_session() - return session.query(models.Network).filter_by(project_id=self.id).first() - def has_manager(self, user): return AuthManager().is_project_manager(user, self) @@ -498,8 +494,8 @@ class AuthManager(object): return [] return [Project(**project_dict) for project_dict in project_list] - def create_project(self, name, manager_user, - description=None, member_users=None): + def create_project(self, name, manager_user, description=None, + member_users=None, context=None): """Create a project @type name: str @@ -530,8 +526,7 @@ class 
AuthManager(object): if project_dict: project = Project(**project_dict) # FIXME(ja): EVIL HACK - net = models.Network(project_id=project.id) - net.save() + db.network_create(context, {'project_id': project.id}) return project def add_to_project(self, user, project): @@ -558,7 +553,7 @@ class AuthManager(object): return drv.remove_from_project(User.safe_id(user), Project.safe_id(project)) - def get_project_vpn_data(self, project): + def get_project_vpn_data(self, project, context=None): """Gets vpn ip and port for project @type project: Project or project_id @@ -571,19 +566,27 @@ class AuthManager(object): # FIXME(vish): this shouldn't be messing with the datamodel directly if not isinstance(project, Project): project = self.get_project(project) - if not project.network.vpn_public_port: + + network_ref = db.project_get_network(context, project.id) + + if not network_ref['vpn_public_port']: raise exception.NotFound('project network data has not been set') - return (project.network.vpn_public_ip_str, - project.network.vpn_public_port) + return (network_ref['vpn_public_ip_str'], + network_ref['vpn_public_port']) - def delete_project(self, project): + def delete_project(self, project, context=None): """Deletes a project""" # FIXME(ja): EVIL HACK if not isinstance(project, Project): project = self.get_project(project) - project.network.delete() + network_ref = db.project_get_network(context, project.id) + try: + db.network_destroy(context, network_ref['id']) + except: + logging.exception('Could not destroy network: %s', + network_ref['id']) with self.driver() as drv: - return drv.delete_project(Project.safe_id(project)) + drv.delete_project(Project.safe_id(project)) def get_user(self, uid): """Retrieves a user by id""" diff --git a/nova/db/api.py b/nova/db/api.py index e76e6b057..bbd69ec65 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -111,6 +111,14 @@ def network_update(context, network_id, values): ################### +def project_get_network(context, 
project_id): + """Return the network associated with the project.""" + return _impl.project_get_network(context, project_id) + + +################### + + def volume_allocate_shelf_and_blade(context, volume_id): """Atomically allocate a free shelf and blade from the pool.""" return _impl.volume_allocate_shelf_and_blade(context, volume_id) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index d80c03c19..e883e14cb 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -82,7 +82,7 @@ def network_create(context, values): for (key, value) in values.iteritems(): network_ref[key] = value network_ref.save() - return network_ref.id + return network_ref def network_destroy(context, network_id): @@ -104,6 +104,17 @@ def network_update(context, network_id, values): ################### +def project_get_network(context, project_id): + session = models.create_session() + rv = session.query(models.Network).filter_by(project_id=project_id).first() + if not rv: + raise exception.NotFound('No network for project: %s' % project_id) + return rv + + +################### + + def volume_allocate_shelf_and_blade(context, volume_id): session = models.NovaBase.get_session() query = session.query(models.ExportDevice).filter_by(volume=None) diff --git a/nova/network/service.py b/nova/network/service.py index 16ecfbf3e..e47f07ef0 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -24,6 +24,7 @@ import logging import IPy +from nova import db from nova import exception from nova import flags from nova import models @@ -89,12 +90,12 @@ def setup_compute_network(project_id): srv.setup_compute_network(network) -def get_network_for_project(project_id): +def get_network_for_project(project_id, context=None): """Get network allocated to project from datastore""" project = manager.AuthManager().get_project(project_id) if not project: raise exception.NotFound("Couldn't find project %s" % project_id) - return project.network + return 
db.project_get_network(context, project_id) def get_host_for_project(project_id): @@ -246,7 +247,7 @@ class VlanNetworkService(BaseNetworkService): session.add(network_index) session.commit() - def allocate_fixed_ip(self, project_id, instance_id, is_vpn=False, + def allocate_fixed_ip(self, project_id, instance_id, is_vpn=False, *args, **kwargs): """Gets a fixed ip from the pool""" network = get_network_for_project(project_id) diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index 0f2ce060d..76c76edbf 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -67,6 +67,8 @@ class NetworkTestCase(test.TrialTestCase): def tearDown(self): # pylint: disable=C0103 super(NetworkTestCase, self).tearDown() + # TODO(termie): this should really be instantiating clean datastores + # in between runs, one failure kills all the tests for project in self.projects: self.manager.delete_project(project) self.manager.delete_user(self.user) @@ -275,6 +277,8 @@ def is_allocated_in_project(address, project_id): fixed_ip = models.FixedIp.find_by_ip_str(address) project_net = service.get_network_for_project(project_id) # instance exists until release + logging.error('fixed_ip.instance: %s', fixed_ip.instance) + logging.error('project_net: %s', project_net) return fixed_ip.instance is not None and fixed_ip.network == project_net diff --git a/nova/tests/service_unittest.py b/nova/tests/service_unittest.py index 449494201..482988465 100644 --- a/nova/tests/service_unittest.py +++ b/nova/tests/service_unittest.py @@ -43,6 +43,8 @@ class ServiceTestCase(test.BaseTestCase): def test_create(self): self.mox.StubOutWithMock(rpc, 'AdapterConsumer', use_mock_anything=True) + self.mox.StubOutWithMock( + service.task, 'LoopingCall', use_mock_anything=True) rpc.AdapterConsumer(connection=mox.IgnoreArg(), topic='run_tests.py', proxy=mox.IsA(service.Service) @@ -52,6 +54,15 @@ class ServiceTestCase(test.BaseTestCase): topic='run_tests.py.%s' % 
FLAGS.node_name, proxy=mox.IsA(service.Service) ).AndReturn(rpc.AdapterConsumer) + + # Stub out looping call a bit needlessly since we don't have an easy + # way to cancel it (yet) when the tests finishes + service.task.LoopingCall( + mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn( + service.task.LoopingCall) + service.task.LoopingCall.start(interval=mox.IgnoreArg(), + now=mox.IgnoreArg()) + rpc.AdapterConsumer.attach_to_twisted() rpc.AdapterConsumer.attach_to_twisted() self.mox.ReplayAll() -- cgit From d38f21e0fb382bd8f01cfbc79cb34ea8710cd639 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Mon, 23 Aug 2010 10:27:59 -0400 Subject: License --- nova/api/services/image.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/nova/api/services/image.py b/nova/api/services/image.py index c5ea15ba1..bda50fc66 100644 --- a/nova/api/services/image.py +++ b/nova/api/services/image.py @@ -1,3 +1,20 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ import cPickle as pickle import os.path import string -- cgit From e3727d6d88a0631d3b896c4fcdcfec05510dad36 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Mon, 23 Aug 2010 12:07:20 -0400 Subject: Support opaque id to rs int id as well --- nova/api/rackspace/images.py | 42 ++++++++++++++++++++++++++++++++---------- nova/api/services/image.py | 8 ++++---- 2 files changed, 36 insertions(+), 14 deletions(-) diff --git a/nova/api/rackspace/images.py b/nova/api/rackspace/images.py index e29f737a5..c9cc8e85d 100644 --- a/nova/api/rackspace/images.py +++ b/nova/api/rackspace/images.py @@ -44,18 +44,24 @@ class Controller(base.Controller): strategy = self._svc.__class__.__name__ return self._id_xlator.to_rs_id(strategy, image_id) + def _from_rs_id(self, rs_image_id): + """ + Convert an image id from the Rackspace API format (an int) to the + format of our ImageService strategy. + """ + strategy = self._svc.__class__.__name__ + return self._id_xlator.from_rs_id(strategy, rs_image_id) + def index(self, req): """Return all public images.""" - data = self._svc.list() - for img in data: - img['id'] = self._to_rs_id(img['id']) - return dict(images=result) + data = dict((self._to_rs_id(id), val) + for id, val in self._svc.index().iteritems()) + return dict(images=data) def show(self, req, id): """Return data about the given image id.""" - img = self._svc.show(id) - img['id'] = self._to_rs_id(img['id']) - return dict(image=img) + opaque_id = self._from_rs_id(id) + return dict(image=self._svc.show(opaque_id)) def delete(self, req, id): # Only public images are supported for now. 
@@ -80,14 +86,30 @@ class RackspaceApiImageIdTranslator(object): def __init__(self): self._store = datastore.Redis.instance() + self._key_template = "rsapi.idstrategies.image.%s.%s" def to_rs_id(self, strategy_name, opaque_id): """Convert an id from a strategy-specific one to a Rackspace one.""" - key = "rsapi.idstrategies.image.%s" % strategy_name + key = self._key_template % (strategy_name, "fwd") result = self._store.hget(key, str(opaque_id)) if result: # we have a mapping from opaque to RS for this strategy return int(result) else: + # Store the mapping. nextid = self._store.incr("%s.lastid" % key) - self._store.hsetnx(key, str(opaque_id), nextid) - return nextid + if self._store.hsetnx(key, str(opaque_id), nextid): + # If someone else didn't beat us to it, store the reverse + # mapping as well. + key = self._key_template % (strategy_name, "rev") + self._store.hset(key, nextid, str(opaque_id)) + return nextid + else: + # Someone beat us to it; use their number instead, and + # discard nextid (which is OK -- we don't require that + # every int id be used.) + return int(self._store.hget(key, str(opaque_id))) + + def from_rs_id(self, strategy_name, rs_id): + """Convert a Rackspace id to a strategy-specific one.""" + key = self._key_template % (strategy_name, "rev") + return self._store.hget(key, rs_id) diff --git a/nova/api/services/image.py b/nova/api/services/image.py index bda50fc66..11e19804a 100644 --- a/nova/api/services/image.py +++ b/nova/api/services/image.py @@ -17,6 +17,7 @@ import cPickle as pickle import os.path +import random import string class ImageService(object): @@ -31,8 +32,7 @@ class ImageService(object): def index(self): """ - Return a list of image data dicts. Each dict will contain an - id key whose value is an opaque image id. + Return a dict from opaque image id to image data. 
""" def show(self, id): @@ -62,10 +62,10 @@ class LocalImageService(ImageService): def _ids(self): """The list of all image ids.""" - return os.path.listdir(self._path) + return os.listdir(self._path) def index(self): - return [ self.show(id) for id in self._ids() ] + return dict((id, self.show(id)) for id in self._ids()) def show(self, id): return pickle.load(open(self._path_to(id))) -- cgit From 030d01fd10f7f65cdafbea49e04f3b6b147a7348 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Mon, 23 Aug 2010 12:46:29 -0400 Subject: Serialize properly --- nova/api/rackspace/base.py | 3 ++- nova/api/rackspace/images.py | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/nova/api/rackspace/base.py b/nova/api/rackspace/base.py index c85fd7b8e..b995d9acc 100644 --- a/nova/api/rackspace/base.py +++ b/nova/api/rackspace/base.py @@ -36,4 +36,5 @@ class Controller(wsgi.Controller): MIME types to information needed to serialize to that type. """ _metadata = getattr(type(self), "_serialization_metadata", {}) - return Serializer(request.environ, _metadata).to_content_type(data) + serializer = wsgi.Serializer(request.environ, _metadata) + return serializer.to_content_type(data) diff --git a/nova/api/rackspace/images.py b/nova/api/rackspace/images.py index c9cc8e85d..62e0b24c5 100644 --- a/nova/api/rackspace/images.py +++ b/nova/api/rackspace/images.py @@ -56,12 +56,12 @@ class Controller(base.Controller): """Return all public images.""" data = dict((self._to_rs_id(id), val) for id, val in self._svc.index().iteritems()) - return dict(images=data) + return self.serialize(dict(images=data), req) def show(self, req, id): """Return data about the given image id.""" opaque_id = self._from_rs_id(id) - return dict(image=self._svc.show(opaque_id)) + return self.serialize(dict(image=self._svc.show(opaque_id)), req) def delete(self, req, id): # Only public images are supported for now. 
-- cgit From a50a200bc2547439a3da17e695224d3d434e14dd Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Mon, 23 Aug 2010 12:55:57 -0400 Subject: Move serialize() to wsgi.Controller so __call__ can serialize() action return values if they are dicts. --- nova/api/rackspace/base.py | 10 ---------- nova/wsgi.py | 16 ++++++++++++++-- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/nova/api/rackspace/base.py b/nova/api/rackspace/base.py index b995d9acc..51841925e 100644 --- a/nova/api/rackspace/base.py +++ b/nova/api/rackspace/base.py @@ -28,13 +28,3 @@ class Controller(wsgi.Controller): return {cls.entity_name: cls.render(instance)} else: return { "TODO": "TODO" } - - def serialize(self, data, request): - """ - Serialize the given dict to the response type requested in request. - Uses self._serialization_metadata if it exists, which is a dict mapping - MIME types to information needed to serialize to that type. - """ - _metadata = getattr(type(self), "_serialization_metadata", {}) - serializer = wsgi.Serializer(request.environ, _metadata) - return serializer.to_content_type(data) diff --git a/nova/wsgi.py b/nova/wsgi.py index baf6cccd9..d52bf855d 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -196,7 +196,8 @@ class Controller(object): WSGI app that reads routing information supplied by RoutesMiddleware and calls the requested action method upon itself. All action methods must, in addition to their normal parameters, accept a 'req' argument - which is the incoming webob.Request. + which is the incoming webob.Request. They raise a webob.exc exception, + or return a dict which will be serialized by requested content type. 
""" @webob.dec.wsgify @@ -210,7 +211,18 @@ class Controller(object): del arg_dict['controller'] del arg_dict['action'] arg_dict['req'] = req - return method(**arg_dict) + result = method(**arg_dict) + return self._serialize(result, req) if type(result) is dict else result + + def _serialize(self, data, request): + """ + Serialize the given dict to the response type requested in request. + Uses self._serialization_metadata if it exists, which is a dict mapping + MIME types to information needed to serialize to that type. + """ + _metadata = getattr(type(self), "_serialization_metadata", {}) + serializer = wsgi.Serializer(request.environ, _metadata) + return serializer.to_content_type(data) class Serializer(object): -- cgit From f5c03fdd78a3bb8233e465c7624ed1fdb8f400fe Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Mon, 23 Aug 2010 13:06:40 -0400 Subject: Don't serialize in Controller subclass now that wsgi.Controller handles it for us --- nova/api/rackspace/images.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/nova/api/rackspace/images.py b/nova/api/rackspace/images.py index 62e0b24c5..070100143 100644 --- a/nova/api/rackspace/images.py +++ b/nova/api/rackspace/images.py @@ -20,7 +20,6 @@ from nova.api.rackspace import base from nova.api.services.image import ImageService from webob import exc -#TODO(gundlach): Serialize return values class Controller(base.Controller): _serialization_metadata = { @@ -56,12 +55,12 @@ class Controller(base.Controller): """Return all public images.""" data = dict((self._to_rs_id(id), val) for id, val in self._svc.index().iteritems()) - return self.serialize(dict(images=data), req) + return dict(images=data) def show(self, req, id): """Return data about the given image id.""" opaque_id = self._from_rs_id(id) - return self.serialize(dict(image=self._svc.show(opaque_id)), req) + return dict(image=self._svc.show(opaque_id)) def delete(self, req, id): # Only public images are supported for now. 
-- cgit From c49c725e43cfbc9d90b5e9ebbf93a32e71c7e6a9 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Mon, 23 Aug 2010 13:07:43 -0400 Subject: Typo --- nova/wsgi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/wsgi.py b/nova/wsgi.py index d52bf855d..096d5843f 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -221,7 +221,7 @@ class Controller(object): MIME types to information needed to serialize to that type. """ _metadata = getattr(type(self), "_serialization_metadata", {}) - serializer = wsgi.Serializer(request.environ, _metadata) + serializer = Serializer(request.environ, _metadata) return serializer.to_content_type(data) -- cgit From 35a08780c41ece1b47b2ded98c061b103a400fea Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Mon, 23 Aug 2010 13:26:10 -0400 Subject: Get the output formatting correct. --- nova/api/rackspace/images.py | 9 ++++++--- nova/api/services/image.py | 3 ++- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/nova/api/rackspace/images.py b/nova/api/rackspace/images.py index 070100143..7d32fa099 100644 --- a/nova/api/rackspace/images.py +++ b/nova/api/rackspace/images.py @@ -53,14 +53,17 @@ class Controller(base.Controller): def index(self, req): """Return all public images.""" - data = dict((self._to_rs_id(id), val) - for id, val in self._svc.index().iteritems()) + data = self._svc.index() + for img in data: + img['id'] = self._to_rs_id(img['id']) return dict(images=data) def show(self, req, id): """Return data about the given image id.""" opaque_id = self._from_rs_id(id) - return dict(image=self._svc.show(opaque_id)) + img = self._svc.show(opaque_id) + img['id'] = id + return dict(image=img) def delete(self, req, id): # Only public images are supported for now. 
diff --git a/nova/api/services/image.py b/nova/api/services/image.py index 11e19804a..1a7a258b7 100644 --- a/nova/api/services/image.py +++ b/nova/api/services/image.py @@ -65,7 +65,7 @@ class LocalImageService(ImageService): return os.listdir(self._path) def index(self): - return dict((id, self.show(id)) for id in self._ids()) + return [ self.show(id) for id in self._ids() ] def show(self, id): return pickle.load(open(self._path_to(id))) @@ -75,6 +75,7 @@ class LocalImageService(ImageService): Store the image data and return the new image id. """ id = ''.join(random.choice(string.letters) for _ in range(20)) + data['id'] = id self.update(id, data) return id -- cgit From 41e2e91ccfb1409f1ea47d92a9d15f47ab37e65d Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Mon, 23 Aug 2010 16:43:25 -0400 Subject: Merge fail --- nova/api/rackspace/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/rackspace/base.py b/nova/api/rackspace/base.py index 51841925e..dd2c6543c 100644 --- a/nova/api/rackspace/base.py +++ b/nova/api/rackspace/base.py @@ -27,4 +27,4 @@ class Controller(wsgi.Controller): if isinstance(instance, list): return {cls.entity_name: cls.render(instance)} else: - return { "TODO": "TODO" } + return {"TODO": "TODO"} -- cgit From 78c2175898a468ae734e27dfbc8f5b70f90fd477 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 23 Aug 2010 13:55:16 -0700 Subject: Refactored network model access into data abstraction layer. Also changed the name to floating_ip. 
--- bin/nova-dhcpbridge | 23 +-- nova/db/api.py | 112 +++++++++++++- nova/db/sqlalchemy/api.py | 187 ++++++++++++++++++++++- nova/endpoint/cloud.py | 28 ++-- nova/models.py | 8 +- nova/network/linux_net.py | 24 +-- nova/network/service.py | 328 ++++++++++++++++------------------------- nova/tests/network_unittest.py | 70 ++++----- 8 files changed, 497 insertions(+), 283 deletions(-) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index b17a56e6e..8008100f6 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -25,9 +25,9 @@ import logging import os import sys -#TODO(joshua): there is concern that the user dnsmasq runs under will not -# have nova in the path. This should be verified and if it is -# not true the ugly line below can be removed +# TODO(joshua): there is concern that the user dnsmasq runs under will not +# have nova in the path. This should be verified and if it is +# not true the ugly line below can be removed sys.path.append(os.path.abspath(os.path.join(__file__, "../../"))) from nova import flags @@ -36,6 +36,7 @@ from nova import utils from nova.network import linux_net from nova.network import service from nova import datastore # for redis_db flag +from nova.auth import manager # for auth flags FLAGS = flags.FLAGS @@ -43,16 +44,16 @@ FLAGS = flags.FLAGS def add_lease(_mac, ip, _hostname, _interface): """Set the IP that was assigned by the DHCP server.""" if FLAGS.fake_rabbit: - logging.debug("leasing_ip") + logging.debug("leasing ip") from nova import models print models.FixedIp.count() print models.Network.count() print FLAGS.sql_connection - service.VlanNetworkService().lease_ip(ip) + service.VlanNetworkService().lease_fixed_ip(ip) else: rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name), - {"method": "lease_ip", - "args": {"fixed_ip_str": ip}}) + {"method": "lease_fixed_ip", + "args": {"address": ip}}) def old_lease(_mac, _ip, _hostname, _interface): @@ -63,12 +64,12 @@ def old_lease(_mac, _ip, _hostname, _interface): def 
del_lease(_mac, ip, _hostname, _interface): """Called when a lease expires.""" if FLAGS.fake_rabbit: - logging.debug("releasing_ip") - service.VlanNetworkService().release_ip(ip) + logging.debug("releasing ip") + service.VlanNetworkService().release_fixed_ip(ip) else: rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name), - {"method": "release_ip", - "args": {"fixed_ip_str": ip}}) + {"method": "release_fixed_ip", + "args": {"address": ip}}) def init_leases(interface): diff --git a/nova/db/api.py b/nova/db/api.py index bbd69ec65..a0e2b3715 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -30,16 +30,24 @@ _impl = utils.LazyPluggable(FLAGS['db_backend'], sqlalchemy='nova.db.sqlalchemy.api') +class NoMoreAddresses(exception.Error): + pass + + class NoMoreBlades(exception.Error): pass +class NoMoreNetworks(exception.Error): + pass + + ################### def daemon_get(context, node_name, binary): return _impl.daemon_get(context, node_name, binary) - + def daemon_create(context, values): return _impl.daemon_create(context, values) @@ -52,6 +60,78 @@ def daemon_update(context, values): ################### +def floating_ip_allocate_address(context, node_name, project_id): + """Allocate free floating ip and return the address. + + Raises if one is not available. + """ + return _impl.floating_ip_allocate_address(context, node_name, project_id) + + +def floating_ip_fixed_ip_associate(context, floating_address, fixed_address): + """Associate an floating ip to a fixed_ip by address.""" + return _impl.floating_ip_fixed_ip_associate(context, + floating_address, + fixed_address) + + +def floating_ip_disassociate(context, address): + """Disassociate an floating ip from a fixed ip by address. + + Returns the address of the existing fixed ip. 
+ """ + return _impl.floating_ip_disassociate(context, address) + + +def floating_ip_deallocate(context, address): + """Deallocate an floating ip by address""" + return _impl.floating_ip_deallocate(context, address) + + +#################### + + +def fixed_ip_allocate_address(context, network_id): + """Allocate free fixed ip and return the address. + + Raises if one is not available. + """ + return _impl.fixed_ip_allocate_address(context, network_id) + + +def fixed_ip_get_by_address(context, address): + """Get a fixed ip by address.""" + return _impl.fixed_ip_get_by_address(context, address) + + +def fixed_ip_lease(context, address): + """Lease a fixed ip by address.""" + return _impl.fixed_ip_lease(context, address) + + +def fixed_ip_release(context, address): + """Un-Lease a fixed ip by address.""" + return _impl.fixed_ip_release(context, address) + + +def fixed_ip_deallocate(context, address): + """Deallocate a fixed ip by address.""" + return _impl.fixed_ip_deallocate(context, address) + + +def fixed_ip_instance_associate(context, address, instance_id): + """Associate a fixed ip to an instance by address.""" + return _impl.fixed_ip_instance_associate(context, address, instance_id) + + +def fixed_ip_instance_disassociate(context, address): + """Disassociate a fixed ip from an instance by address.""" + return _impl.fixed_ip_instance_disassociate(context, address) + + +#################### + + def instance_create(context, values): """Create an instance from the values dictionary.""" return _impl.instance_create(context, values) @@ -89,16 +169,46 @@ def network_create(context, values): return _impl.network_create(context, values) +def network_create_fixed_ips(context, network_id, num_vpn_clients): + """Create the ips for the network, reserving sepecified ips.""" + return _impl.network_create_fixed_ips(context, network_id, num_vpn_clients) + + def network_destroy(context, network_id): """Destroy the network or raise if it does not exist.""" return 
_impl.network_destroy(context, network_id) +def network_ensure_indexes(context, num_networks): + """Ensure that network indexes exist, creating them if necessary.""" + return _impl.network_ensure_indexes(context, num_networks) + + def network_get(context, network_id): """Get an network or raise if it does not exist.""" return _impl.network_get(context, network_id) +def network_get_host(context, network_id): + """Get host assigned to network or raise""" + return _impl.network_get_host(context, network_id) + + +def network_get_index(context, network_id): + """Gets non-conflicting index for network""" + return _impl.network_get_index(context, network_id) + + +def network_set_cidr(context, network_id, cidr): + """Set the Classless Inner Domain Routing for the network""" + return _impl.network_set_cidr(context, network_id, cidr) + + +def network_set_host(context, network_id, host_id): + """Safely set the host for network""" + return _impl.network_set_host(context, network_id, host_id) + + def network_update(context, network_id, values): """Set the given properties on an network and update it. diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index e883e14cb..a3a5ff8de 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -16,6 +16,8 @@ # License for the specific language governing permissions and limitations # under the License. 
+import IPy + from nova import db from nova import exception from nova import models @@ -27,7 +29,7 @@ from nova import models def daemon_get(context, node_name, binary): return None return models.Daemon.find_by_args(node_name, binary) - + def daemon_create(context, values): daemon_ref = models.Daemon(**values) @@ -45,6 +47,99 @@ def daemon_update(context, node_name, binary, values): ################### +def floating_ip_allocate_address(context, node_name, project_id): + session = models.NovaBase.get_session() + query = session.query(models.FloatingIp).filter_by(node_name=node_name) + query = query.filter_by(fixed_ip_id=None).with_lockmode("update") + floating_ip_ref = query.first() + # NOTE(vish): if with_lockmode isn't supported, as in sqlite, + # then this has concurrency issues + if not floating_ip_ref: + raise db.NoMoreAddresses() + floating_ip_ref['project_id'] = project_id + session.add(floating_ip_ref) + session.commit() + return floating_ip_ref['ip_str'] + + +def floating_ip_fixed_ip_associate(context, floating_address, fixed_address): + floating_ip_ref = models.FloatingIp.find_by_ip_str(floating_address) + fixed_ip_ref = models.FixedIp.find_by_ip_str(fixed_address) + floating_ip_ref.fixed_ip = fixed_ip_ref + floating_ip_ref.save() + + +def floating_ip_disassociate(context, address): + floating_ip_ref = models.FloatingIp.find_by_ip_str(address) + fixed_ip_address = floating_ip_ref.fixed_ip['ip_str'] + floating_ip_ref['fixed_ip'] = None + floating_ip_ref.save() + return fixed_ip_address + +def floating_ip_deallocate(context, address): + floating_ip_ref = models.FloatingIp.find_by_ip_str(address) + floating_ip_ref['project_id'] = None + floating_ip_ref.save() + +################### + + +def fixed_ip_allocate_address(context, network_id): + session = models.NovaBase.get_session() + query = session.query(models.FixedIp).filter_by(network_id=network_id) + query = query.filter_by(reserved=False).filter_by(allocated=False) + query = 
query.filter_by(leased=False).with_lockmode("update") + fixed_ip_ref = query.first() + # NOTE(vish): if with_lockmode isn't supported, as in sqlite, + # then this has concurrency issues + if not fixed_ip_ref: + raise db.NoMoreAddresses() + fixed_ip_ref['allocated'] = True + session.add(fixed_ip_ref) + session.commit() + return fixed_ip_ref['ip_str'] + + +def fixed_ip_get_by_address(context, address): + return models.FixedIp.find_by_ip_str(address) + + +def fixed_ip_lease(context, address): + fixed_ip_ref = fixed_ip_get_by_address(context, address) + if not fixed_ip_ref['allocated']: + raise db.AddressNotAllocated(address) + fixed_ip_ref['leased'] = True + fixed_ip_ref.save() + + +def fixed_ip_release(context, address): + fixed_ip_ref = fixed_ip_get_by_address(context, address) + fixed_ip_ref['allocated'] = False + fixed_ip_ref['leased'] = False + fixed_ip_ref.save() + + +def fixed_ip_deallocate(context, address): + fixed_ip_ref = fixed_ip_get_by_address(context, address) + fixed_ip_ref['allocated'] = False + fixed_ip_ref.save() + + +def fixed_ip_instance_associate(context, address, instance_id): + fixed_ip_ref = fixed_ip_get_by_address(context, address) + fixed_ip_ref.instance = instance_get(context, instance_id) + fixed_ip_ref.save() + + +def fixed_ip_instance_disassociate(context, address): + fixed_ip_ref = fixed_ip_get_by_address(context, address) + fixed_ip_ref.instance = None + fixed_ip_ref.save() + + +################### + + def instance_create(context, values): instance_ref = models.Instance() for (key, value) in values.iteritems(): @@ -85,13 +180,99 @@ def network_create(context, values): return network_ref +def network_create_fixed_ips(context, network_id, num_vpn_clients): + network_ref = network_get(context, network_id) + # NOTE(vish): should these be properties of the network as opposed + # to constants? 
+ BOTTOM_RESERVED = 3 + TOP_RESERVED = 1 + num_vpn_clients + project_net = IPy.IP(network_ref['cidr']) + num_ips = len(project_net) + session = models.NovaBase.get_session() + for i in range(num_ips): + fixed_ip = models.FixedIp() + fixed_ip.ip_str = str(project_net[i]) + if i < BOTTOM_RESERVED or num_ips - i < TOP_RESERVED: + fixed_ip['reserved'] = True + fixed_ip['network'] = network_get(context, network_id) + session.add(fixed_ip) + session.commit() + + +def network_ensure_indexes(context, num_networks): + if models.NetworkIndex.count() == 0: + session = models.NovaBase.get_session() + for i in range(num_networks): + network_index = models.NetworkIndex() + network_index.index = i + session.add(network_index) + session.commit() + + def network_destroy(context, network_id): network_ref = network_get(context, network_id) network_ref.delete() def network_get(context, network_id): - return models.Instance.find(network_id) + return models.Network.find(network_id) + + +def network_get_vpn_ip(context, network_id): + # TODO(vish): possible concurrency issue here + network = network_get(context, network_id) + address = network['vpn_private_ip_str'] + fixed_ip = fixed_ip_get_by_address(context, address) + if fixed_ip['allocated']: + raise db.AddressAlreadyAllocated() + db.fixed_ip_allocate(context, {'allocated': True}) + + +def network_get_host(context, network_id): + network_ref = network_get(context, network_id) + return network_ref['node_name'] + + +def network_get_index(context, network_id): + session = models.NovaBase.get_session() + query = session.query(models.NetworkIndex).filter_by(network_id=None) + network_index = query.with_lockmode("update").first() + if not network_index: + raise db.NoMoreNetworks() + network_index['network'] = network_get(context, network_id) + session.add(network_index) + session.commit() + return network_index['index'] + + +def network_set_cidr(context, network_id, cidr): + network_ref = network_get(context, network_id) + project_net = 
IPy.IP(cidr) + network_ref['cidr'] = cidr + # FIXME we can turn these into properties + network_ref['netmask'] = str(project_net.netmask()) + network_ref['gateway'] = str(project_net[1]) + network_ref['broadcast'] = str(project_net.broadcast()) + network_ref['vpn_private_ip_str'] = str(project_net[2]) + + +def network_set_host(context, network_id, host_id): + session = models.NovaBase.get_session() + # FIXME will a second request fail or wait for first to finish? + query = session.query(models.Network).filter_by(id=network_id) + network = query.with_lockmode("update").first() + if not network: + raise exception.NotFound("Couldn't find network with %s" % + network_id) + # NOTE(vish): if with_lockmode isn't supported, as in sqlite, + # then this has concurrency issues + if network.node_name: + session.commit() + return network['node_name'] + network['node_name'] = host_id + session.add(network) + session.commit() + return network['node_name'] def network_update(context, network_id, values): @@ -110,7 +291,7 @@ def project_get_network(context, project_id): if not rv: raise exception.NotFound('No network for project: %s' % project_id) return rv - + ################### diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index e5d4661df..e64005c2e 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -311,7 +311,7 @@ class CloudController(object): def _get_address(self, context, public_ip): # FIXME(vish) this should move into network.py - address = network_model.ElasticIp.lookup(public_ip) + address = network_model.FloatingIp.lookup(public_ip) if address and (context.user.is_admin() or address['project_id'] == context.project.id): return address raise exception.NotFound("Address at ip %s not found" % public_ip) @@ -459,7 +459,7 @@ class CloudController(object): def format_addresses(self, context): addresses = [] - for address in network_model.ElasticIp.all(): + for address in network_model.FloatingIp.all(): # TODO(vish): implement a by_project 
iterator for addresses if (context.user.is_admin() or address['project_id'] == context.project.id): @@ -481,7 +481,7 @@ class CloudController(object): def allocate_address(self, context, **kwargs): network_topic = yield self._get_network_topic(context) public_ip = yield rpc.call(network_topic, - {"method": "allocate_elastic_ip", + {"method": "allocate_floating_ip", "args": {"user_id": context.user.id, "project_id": context.project.id}}) defer.returnValue({'addressSet': [{'publicIp': public_ip}]}) @@ -492,8 +492,8 @@ class CloudController(object): # NOTE(vish): Should we make sure this works? network_topic = yield self._get_network_topic(context) rpc.cast(network_topic, - {"method": "deallocate_elastic_ip", - "args": {"elastic_ip": public_ip}}) + {"method": "deallocate_floating_ip", + "args": {"floating_ip": public_ip}}) defer.returnValue({'releaseResponse': ["Address released."]}) @rbac.allow('netadmin') @@ -503,8 +503,8 @@ class CloudController(object): address = self._get_address(context, public_ip) network_topic = yield self._get_network_topic(context) rpc.cast(network_topic, - {"method": "associate_elastic_ip", - "args": {"elastic_ip": address['address'], + {"method": "associate_floating_ip", + "args": {"floating_ip": address['address'], "fixed_ip": instance['private_dns_name'], "instance_id": instance['instance_id']}}) defer.returnValue({'associateResponse': ["Address associated."]}) @@ -515,8 +515,8 @@ class CloudController(object): address = self._get_address(context, public_ip) network_topic = yield self._get_network_topic(context) rpc.cast(network_topic, - {"method": "disassociate_elastic_ip", - "args": {"elastic_ip": address['address']}}) + {"method": "disassociate_floating_ip", + "args": {"floating_ip": address['address']}}) defer.returnValue({'disassociateResponse': ["Address disassociated."]}) @defer.inlineCallbacks @@ -617,15 +617,15 @@ class CloudController(object): logging.warning("Instance %s was not found during terminate" % i) continue - 
elastic_ip = network_model.get_public_ip_for_instance(i) - if elastic_ip: - logging.debug("Disassociating address %s" % elastic_ip) + floating_ip = network_model.get_public_ip_for_instance(i) + if floating_ip: + logging.debug("Disassociating address %s" % floating_ip) # NOTE(vish): Right now we don't really care if the ip is # disassociated. We may need to worry about # checking this later. Perhaps in the scheduler? rpc.cast(network_topic, - {"method": "disassociate_elastic_ip", - "args": {"elastic_ip": elastic_ip}}) + {"method": "disassociate_floating_ip", + "args": {"floating_ip": floating_ip}}) fixed_ip = instance.get('private_dns_name', None) if fixed_ip: diff --git a/nova/models.py b/nova/models.py index e4cd37336..70caeff76 100644 --- a/nova/models.py +++ b/nova/models.py @@ -278,12 +278,12 @@ class FixedIp(Base, NovaBase): raise exception.NotFound("No model for ip str %s" % ip_str) -class ElasticIp(Base, NovaBase): - __tablename__ = 'elastic_ips' +class FloatingIp(Base, NovaBase): + __tablename__ = 'floating_ips' id = Column(Integer, primary_key=True) ip_str = Column(String(255), unique=True) fixed_ip_id = Column(Integer, ForeignKey('fixed_ips.id'), nullable=True) - fixed_ip = relationship(FixedIp, backref=backref('elastic_ips')) + fixed_ip = relationship(FixedIp, backref=backref('floating_ips')) project_id = Column(String(255)) #, ForeignKey('projects.id'), nullable=False) node_name = Column(String(255)) #, ForeignKey('physical_node.id')) @@ -305,7 +305,7 @@ class Network(Base, NovaBase): kind = Column(String(255)) injected = Column(Boolean, default=False) - network_str = Column(String(255)) + cidr = Column(String(255)) netmask = Column(String(255)) bridge = Column(String(255)) gateway = Column(String(255)) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 6fa3bae73..4a57a8393 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -40,15 +40,15 @@ flags.DEFINE_string('public_interface', 'vlan1', 
flags.DEFINE_string('bridge_dev', 'eth0', 'network device for bridges') -def bind_elastic_ip(elastic_ip): +def bind_floating_ip(floating_ip): """Bind ip to public interface""" - _execute("sudo ip addr add %s dev %s" % (elastic_ip, + _execute("sudo ip addr add %s dev %s" % (floating_ip, FLAGS.public_interface)) -def unbind_elastic_ip(elastic_ip): +def unbind_floating_ip(floating_ip): """Unbind a public ip from public interface""" - _execute("sudo ip addr del %s dev %s" % (elastic_ip, + _execute("sudo ip addr del %s dev %s" % (floating_ip, FLAGS.public_interface)) @@ -61,12 +61,12 @@ def ensure_vlan_forward(public_ip, port, private_ip): DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)] -def ensure_elastic_forward(elastic_ip, fixed_ip): - """Ensure elastic ip forwarding rule""" +def ensure_floating_forward(floating_ip, fixed_ip): + """Ensure floating ip forwarding rule""" _confirm_rule("PREROUTING -t nat -d %s -j DNAT --to %s" - % (elastic_ip, fixed_ip)) + % (floating_ip, fixed_ip)) _confirm_rule("POSTROUTING -t nat -s %s -j SNAT --to %s" - % (fixed_ip, elastic_ip)) + % (fixed_ip, floating_ip)) # TODO(joshua): Get these from the secgroup datastore entries _confirm_rule("FORWARD -d %s -p icmp -j ACCEPT" % (fixed_ip)) @@ -75,12 +75,12 @@ def ensure_elastic_forward(elastic_ip, fixed_ip): "FORWARD -d %s -p %s --dport %s -j ACCEPT" % (fixed_ip, protocol, port)) -def remove_elastic_forward(elastic_ip, fixed_ip): - """Remove forwarding for elastic ip""" +def remove_floating_forward(floating_ip, fixed_ip): + """Remove forwarding for floating ip""" _remove_rule("PREROUTING -t nat -d %s -j DNAT --to %s" - % (elastic_ip, fixed_ip)) + % (floating_ip, fixed_ip)) _remove_rule("POSTROUTING -t nat -s %s -j SNAT --to %s" - % (fixed_ip, elastic_ip)) + % (fixed_ip, floating_ip)) _remove_rule("FORWARD -d %s -p icmp -j ACCEPT" % (fixed_ip)) for (protocol, port) in DEFAULT_PORTS: diff --git a/nova/network/service.py b/nova/network/service.py index e47f07ef0..bb2e4ae8a 
100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -21,17 +21,15 @@ Network Hosts are responsible for allocating ips and setting up network """ import logging +import math import IPy from nova import db from nova import exception from nova import flags -from nova import models from nova import service from nova import utils -from nova.auth import manager -from nova.network import exception as network_exception from nova.network import linux_net @@ -67,9 +65,19 @@ flags.DEFINE_string('private_range', '10.0.0.0/8', 'Private IP address block') flags.DEFINE_integer('cnt_vpn_clients', 5, 'Number of addresses reserved for vpn clients') + +class AddressAlreadyAllocated(exception.Error): + pass + + +class AddressNotAllocated(exception.Error): + pass + + # TODO(vish): some better type of dependency injection? _driver = linux_net + def type_to_class(network_type): """Convert a network_type string into an actual Python class""" if not network_type: @@ -85,22 +93,14 @@ def type_to_class(network_type): def setup_compute_network(project_id): """Sets up the network on a compute host""" - network = get_network_for_project(project_id) + network = db.project_get_network(None, project_id) srv = type_to_class(network.kind) srv.setup_compute_network(network) -def get_network_for_project(project_id, context=None): - """Get network allocated to project from datastore""" - project = manager.AuthManager().get_project(project_id) - if not project: - raise exception.NotFound("Couldn't find project %s" % project_id) - return db.project_get_network(context, project_id) - - def get_host_for_project(project_id): """Get host allocated to project from datastore""" - return get_network_for_project(project_id).node_name + return db.project_get_network(None, project_id).node_name class BaseNetworkService(service.Service): @@ -109,57 +109,35 @@ class BaseNetworkService(service.Service): This class must be subclassed. 
""" - def set_network_host(self, project_id): + def set_network_host(self, project_id, context=None): """Safely sets the host of the projects network""" - # FIXME abstract this - session = models.NovaBase.get_session() - # FIXME will a second request fail or wait for first to finish? - query = session.query(models.Network).filter_by(project_id=project_id) - network = query.with_lockmode("update").first() - if not network: - raise exception.NotFound("Couldn't find network for %s" % - project_id) - # NOTE(vish): if with_lockmode isn't supported, as in sqlite, - # then this has concurrency issues - if network.node_name: - session.commit() - return network.node_name - network.node_name = FLAGS.node_name - network.kind = FLAGS.network_type - session.add(network) - session.commit() - self._on_set_network_host(network) - return network.node_name - - def allocate_fixed_ip(self, project_id, instance_id, *args, **kwargs): + network_ref = db.project_get_network(context, project_id) + # TODO(vish): can we minimize db access by just getting the + # id here instead of the ref? + network_id = network_ref['id'] + host = db.network_set_host(context, + network_id, + FLAGS.node_name) + self._on_set_network_host(context, network_id) + return host + + def allocate_fixed_ip(self, project_id, instance_id, context=None, + *args, **kwargs): """Gets fixed ip from the pool""" - # FIXME abstract this - network = get_network_for_project(project_id) - session = models.NovaBase.get_session() - query = session.query(models.FixedIp).filter_by(network_id=network.id) - query = query.filter_by(reserved=False).filter_by(allocated=False) - query = query.filter_by(leased=False).with_lockmode("update") - fixed_ip = query.first() - # NOTE(vish): if with_lockmode isn't supported, as in sqlite, - # then this has concurrency issues - if not fixed_ip: - raise network_exception.NoMoreAddresses() - # FIXME will this set backreference? 
- fixed_ip.instance_id = instance_id - fixed_ip.allocated = True - session.add(fixed_ip) - session.commit() - return fixed_ip.ip_str - - def deallocate_fixed_ip(self, fixed_ip_str, *args, **kwargs): + network_ref = db.project_get_network(context, project_id) + address = db.fixed_ip_allocate_address(context, network_ref['id']) + db.fixed_ip_instance_associate(context, + address, + instance_id) + return address + + def deallocate_fixed_ip(self, address, context=None): """Returns a fixed ip to the pool""" - fixed_ip = models.FixedIp.find_by_ip_str(fixed_ip_str) - fixed_ip.instance = None - fixed_ip.allocated = False - fixed_ip.save() + db.fixed_ip_deallocate(context, address) + db.fixed_ip_instance_disassociate(context, address) - def _on_set_network_host(self, network, *args, **kwargs): + def _on_set_network_host(self, context, network_id): """Called when this host becomes the host for a project""" pass @@ -168,45 +146,32 @@ class BaseNetworkService(service.Service): """Sets up matching network for compute hosts""" raise NotImplementedError() - def allocate_elastic_ip(self, project_id): - """Gets an elastic ip from the pool""" - # FIXME: add elastic ips through manage command - # FIXME: abstract this - session = models.NovaBase.get_session() - node_name = FLAGS.node_name - query = session.query(models.ElasticIp).filter_by(node_name=node_name) - query = query.filter_by(fixed_ip_id=None).with_lockmode("update") - elastic_ip = query.first() - if not elastic_ip: - raise network_exception.NoMoreAddresses() - elastic_ip.project_id = project_id - session.add(elastic_ip) - session.commit() - return elastic_ip.ip_str - - def associate_elastic_ip(self, elastic_ip_str, fixed_ip_str): - """Associates an elastic ip to a fixed ip""" - elastic_ip = models.ElasticIp.find_by_ip_str(elastic_ip_str) - fixed_ip = models.FixedIp.find_by_ip_str(fixed_ip_str) - elastic_ip.fixed_ip = fixed_ip - _driver.bind_elastic_ip(elastic_ip_str) - _driver.ensure_elastic_forward(elastic_ip_str, 
fixed_ip_str) - elastic_ip.save() - - def disassociate_elastic_ip(self, elastic_ip_str): - """Disassociates a elastic ip""" - elastic_ip = models.ElasticIp.find_by_ip_str(elastic_ip_str) - fixed_ip_str = elastic_ip.fixed_ip.ip_str - elastic_ip.fixed_ip = None - _driver.unbind_elastic_ip(elastic_ip_str) - _driver.remove_elastic_forward(elastic_ip_str, fixed_ip_str) - elastic_ip.save() - - def deallocate_elastic_ip(self, elastic_ip_str): - """Returns an elastic ip to the pool""" - elastic_ip = models.ElasticIp.find_by_ip_str(elastic_ip_str) - elastic_ip.project_id = None - elastic_ip.save() + def allocate_floating_ip(self, project_id, context=None): + """Gets an floating ip from the pool""" + # TODO(vish): add floating ips through manage command + return db.floating_ip_allocate_address(context, + FLAGS.node_name, + project_id) + + def associate_floating_ip(self, floating_address, fixed_address, + context=None): + """Associates an floating ip to a fixed ip""" + db.floating_ip_fixed_ip_associate(context, + floating_address, + fixed_address) + _driver.bind_floating_ip(floating_address) + _driver.ensure_floating_forward(floating_address, fixed_address) + + def disassociate_floating_ip(self, floating_address, context=None): + """Disassociates a floating ip""" + fixed_address = db.floating_ip_disassociate(context, + floating_address) + _driver.unbind_floating_ip(floating_address) + _driver.remove_floating_forward(floating_address, fixed_address) + + def deallocate_floating_ip(self, floating_address, context=None): + """Returns an floating ip to the pool""" + db.floating_ip_deallocate(context, floating_address) class FlatNetworkService(BaseNetworkService): @@ -217,141 +182,96 @@ class FlatNetworkService(BaseNetworkService): """Network is created manually""" pass - def _on_set_network_host(self, network, *args, **kwargs): + def _on_set_network_host(self, context, network_id): """Called when this host becomes the host for a project""" - # FIXME should there be two types of 
network objects in the database? - network.injected = True - network.network_str=FLAGS.flat_network_network - network.netmask=FLAGS.flat_network_netmask - network.bridge=FLAGS.flat_network_bridge - network.gateway=FLAGS.flat_network_gateway - network.broadcast=FLAGS.flat_network_broadcast - network.dns=FLAGS.flat_network_dns - network.save() - # FIXME add public ips from flags to the datastore + # NOTE(vish): should there be two types of network objects + # in the database? + net = {} + net['injected'] = True + net['kind'] = FLAGS.network_type + net['network_str']=FLAGS.flat_network_network + net['netmask']=FLAGS.flat_network_netmask + net['bridge']=FLAGS.flat_network_bridge + net['gateway']=FLAGS.flat_network_gateway + net['broadcast']=FLAGS.flat_network_broadcast + net['dns']=FLAGS.flat_network_dns + db.network_update(context, network_id, net) + # TODO(vish): add public ips from flags to the datastore class VlanNetworkService(BaseNetworkService): """Vlan network with dhcp""" def __init__(self, *args, **kwargs): super(VlanNetworkService, self).__init__(*args, **kwargs) - self._ensure_network_indexes() - - def _ensure_network_indexes(self): # NOTE(vish): this should probably be removed and added via # admin command or fixtures - if models.NetworkIndex.count() == 0: - session = models.NovaBase.get_session() - for i in range(FLAGS.num_networks): - network_index = models.NetworkIndex() - network_index.index = i - session.add(network_index) - session.commit() + db.network_ensure_indexes(None, FLAGS.num_networks) def allocate_fixed_ip(self, project_id, instance_id, is_vpn=False, - *args, **kwargs): + context=None, *args, **kwargs): """Gets a fixed ip from the pool""" - network = get_network_for_project(project_id) + network_ref = db.project_get_network(context, project_id) if is_vpn: - # FIXME concurrency issue? 
- fixed_ip = models.FixedIp.find_by_ip_str(network.vpn_private_ip_str) - if fixed_ip.allocated: - raise network_exception.AddressAlreadyAllocated() - # FIXME will this set backreference? - fixed_ip.instance_id = instance_id - fixed_ip.allocated = True - fixed_ip.save() - _driver.ensure_vlan_forward(network.vpn_public_ip_str, - network.vpn_public_port, - network.vpn_private_ip_str) - ip_str = fixed_ip.ip_str - logging.debug("Allocating vpn IP %s", ip_str) + address = db.network_get_vpn_ip_address(context, + network_ref['id']) + logging.debug("Allocating vpn IP %s", address) + db.fixed_ip_instance_associate(context, + address, + instance_id) + _driver.ensure_vlan_forward(network_ref['vpn_public_ip_str'], + network_ref['vpn_public_port'], + network_ref['vpn_private_ip_str']) else: parent = super(VlanNetworkService, self) - ip_str = parent.allocate_fixed_ip(project_id, instance_id) - _driver.ensure_vlan_bridge(network.vlan, network.bridge) - return ip_str - - def deallocate_fixed_ip(self, fixed_ip_str): + address = parent.allocate_fixed_ip(project_id, + instance_id, + context) + _driver.ensure_vlan_bridge(network_ref['vlan'], + network_ref['bridge']) + return address + + def deallocate_fixed_ip(self, address, context=None): """Returns an ip to the pool""" - fixed_ip = models.FixedIp.find_by_ip_str(fixed_ip_str) - if fixed_ip.leased: - logging.debug("Deallocating IP %s", fixed_ip_str) - fixed_ip.allocated = False - # keep instance id until release occurs - fixed_ip.save() + fixed_ip_ref = db.fixed_ip_get_by_address(context, address) + if fixed_ip_ref['leased']: + logging.debug("Deallocating IP %s", address) + db.fixed_ip_deallocate(context, address) + # NOTE(vish): we keep instance id until release occurs else: - self.release_ip(fixed_ip_str) + self.release_fixed_ip(address, context) - def lease_ip(self, fixed_ip_str): + def lease_fixed_ip(self, address, context=None): """Called by bridge when ip is leased""" - fixed_ip = models.FixedIp.find_by_ip_str(fixed_ip_str) - if 
not fixed_ip.allocated: - raise network_exception.AddressNotAllocated(fixed_ip_str) - logging.debug("Leasing IP %s", fixed_ip_str) - fixed_ip.leased = True - fixed_ip.save() - - def release_ip(self, fixed_ip_str): - """Called by bridge when ip is released""" - fixed_ip = models.FixedIp.find_by_ip_str(fixed_ip_str) - logging.debug("Releasing IP %s", fixed_ip_str) - fixed_ip.leased = False - fixed_ip.allocated = False - fixed_ip.instance = None - fixed_ip.save() + logging.debug("Leasing IP %s", address) + db.fixed_ip_lease(context, address) + def release_fixed_ip(self, address, context=None): + """Called by bridge when ip is released""" + logging.debug("Releasing IP %s", address) + db.fixed_ip_release(context, address) + db.fixed_ip_instance_disassociate(context, address) def restart_nets(self): """Ensure the network for each user is enabled""" # FIXME pass - def _on_set_network_host(self, network): + def _on_set_network_host(self, context, network_id): """Called when this host becomes the host for a project""" - index = self._get_network_index(network) + index = db.network_get_index(context, network_id) private_net = IPy.IP(FLAGS.private_range) start = index * FLAGS.network_size - # minus one for the gateway. 
- network_str = "%s-%s" % (private_net[start], - private_net[start + FLAGS.network_size - 1]) + significant_bits = 32 - int(math.log(FLAGS.network_size, 2)) + cidr = "%s/%s" % (private_net[start], significant_bits) + db.network_set_cidr(context, network_id, cidr) vlan = FLAGS.vlan_start + index - project_net = IPy.IP(network_str) - network.network_str = network_str - network.netmask = str(project_net.netmask()) - network.vlan = vlan - network.bridge = 'br%s' % vlan - network.gateway = str(project_net[1]) - network.broadcast = str(project_net.broadcast()) - network.vpn_private_ip_str = str(project_net[2]) - network.vpn_public_ip_str = FLAGS.vpn_ip - network.vpn_public_port = FLAGS.vpn_start + index - # create network fixed ips - BOTTOM_RESERVED = 3 - TOP_RESERVED = 1 + FLAGS.cnt_vpn_clients - num_ips = len(project_net) - session = models.NovaBase.get_session() - for i in range(num_ips): - fixed_ip = models.FixedIp() - fixed_ip.ip_str = str(project_net[i]) - if i < BOTTOM_RESERVED or num_ips - i < TOP_RESERVED: - fixed_ip.reserved = True - fixed_ip.network = network - session.add(fixed_ip) - session.commit() - - - def _get_network_index(self, network): - """Get non-conflicting index for network""" - session = models.NovaBase.get_session() - node_name = FLAGS.node_name - query = session.query(models.NetworkIndex).filter_by(network_id=None) - network_index = query.with_lockmode("update").first() - if not network_index: - raise network_exception.NoMoreNetworks() - network_index.network = network - session.add(network_index) - session.commit() - return network_index.index + net = {} + net['kind'] = FLAGS.network_type + net['vlan'] = vlan + net['bridge'] = 'br%s' % vlan + net['vpn_public_ip_str'] = FLAGS.vpn_ip + net['vpn_public_port'] = FLAGS.vpn_start + index + db.network_update(context, network_id, net) + db.network_create_fixed_ips(context, network_id, FLAGS.cnt_vpn_clients) @classmethod diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py 
index 76c76edbf..c4c496219 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -21,8 +21,8 @@ Unit Tests for network code import IPy import os import logging -import tempfile +from nova import db from nova import exception from nova import flags from nova import models @@ -30,7 +30,6 @@ from nova import test from nova import utils from nova.auth import manager from nova.network import service -from nova.network.exception import NoMoreAddresses, NoMoreNetworks FLAGS = flags.FLAGS @@ -59,49 +58,52 @@ class NetworkTestCase(test.TrialTestCase): name)) # create the necessary network data for the project self.service.set_network_host(self.projects[i].id) - instance = models.Instance() - instance.mac_address = utils.generate_mac() - instance.hostname = 'fake' - instance.save() - self.instance_id = instance.id + instance_id = db.instance_create(None, + {'mac_address': utils.generate_mac()}) + self.instance_id = instance_id + instance_id = db.instance_create(None, + {'mac_address': utils.generate_mac()}) + self.instance2_id = instance_id def tearDown(self): # pylint: disable=C0103 super(NetworkTestCase, self).tearDown() # TODO(termie): this should really be instantiating clean datastores # in between runs, one failure kills all the tests + db.instance_destroy(None, self.instance_id) + db.instance_destroy(None, self.instance2_id) for project in self.projects: self.manager.delete_project(project) self.manager.delete_user(self.user) def test_public_network_association(self): """Makes sure that we can allocaate a public ip""" - # FIXME better way of adding elastic ips + # TODO(vish): better way of adding floating ips pubnet = IPy.IP(flags.FLAGS.public_range) ip_str = str(pubnet[0]) try: - elastic_ip = models.ElasticIp.find_by_ip_str(ip_str) + floating_ip = models.FloatingIp.find_by_ip_str(ip_str) except exception.NotFound: - elastic_ip = models.ElasticIp() - elastic_ip.ip_str = ip_str - elastic_ip.node_name = FLAGS.node_name - elastic_ip.save() - 
eaddress = self.service.allocate_elastic_ip(self.projects[0].id) + floating_ip = models.FloatingIp() + floating_ip.ip_str = ip_str + floating_ip.node_name = FLAGS.node_name + floating_ip.save() + eaddress = self.service.allocate_floating_ip(self.projects[0].id) faddress = self.service.allocate_fixed_ip(self.projects[0].id, self.instance_id) self.assertEqual(eaddress, str(pubnet[0])) - self.service.associate_elastic_ip(eaddress, faddress) + self.service.associate_floating_ip(eaddress, faddress) # FIXME datamodel abstraction - self.assertEqual(elastic_ip.fixed_ip.ip_str, faddress) - self.service.disassociate_elastic_ip(eaddress) - self.assertEqual(elastic_ip.fixed_ip, None) - self.service.deallocate_elastic_ip(eaddress) + self.assertEqual(floating_ip.fixed_ip.ip_str, faddress) + self.service.disassociate_floating_ip(eaddress) + self.assertEqual(floating_ip.fixed_ip, None) + self.service.deallocate_floating_ip(eaddress) self.service.deallocate_fixed_ip(faddress) def test_allocate_deallocate_fixed_ip(self): """Makes sure that we can allocate and deallocate a fixed ip""" address = self.service.allocate_fixed_ip(self.projects[0].id, self.instance_id) - net = service.get_network_for_project(self.projects[0].id) + net = db.project_get_network(None, self.projects[0].id) self.assertTrue(is_allocated_in_project(address, self.projects[0].id)) issue_ip(address, net.bridge) self.service.deallocate_fixed_ip(address) @@ -117,10 +119,10 @@ class NetworkTestCase(test.TrialTestCase): address = self.service.allocate_fixed_ip(self.projects[0].id, self.instance_id) address2 = self.service.allocate_fixed_ip(self.projects[1].id, - self.instance_id) + self.instance2_id) - net = service.get_network_for_project(self.projects[0].id) - net2 = service.get_network_for_project(self.projects[1].id) + net = db.project_get_network(None, self.projects[0].id) + net2 = db.project_get_network(None, self.projects[1].id) self.assertTrue(is_allocated_in_project(address, self.projects[0].id)) 
self.assertTrue(is_allocated_in_project(address2, self.projects[1].id)) @@ -151,7 +153,7 @@ class NetworkTestCase(test.TrialTestCase): address = self.service.allocate_fixed_ip(project_id, self.instance_id) address2 = self.service.allocate_fixed_ip(project_id, self.instance_id) address3 = self.service.allocate_fixed_ip(project_id, self.instance_id) - net = service.get_network_for_project(project_id) + net = db.project_get_network(None, project_id) issue_ip(address, net.bridge) issue_ip(address2, net.bridge) issue_ip(address3, net.bridge) @@ -167,7 +169,7 @@ class NetworkTestCase(test.TrialTestCase): release_ip(address, net.bridge) release_ip(address2, net.bridge) release_ip(address3, net.bridge) - net = service.get_network_for_project(self.projects[0].id) + net = db.project_get_network(None, self.projects[0].id) self.service.deallocate_fixed_ip(first) def test_vpn_ip_and_port_looks_valid(self): @@ -186,7 +188,7 @@ class NetworkTestCase(test.TrialTestCase): self.service.set_network_host(project.id) projects.append(project) project = self.manager.create_project('boom' , self.user) - self.assertRaises(NoMoreNetworks, + self.assertRaises(db.NoMoreNetworks, self.service.set_network_host, project.id) self.manager.delete_project(project) @@ -198,7 +200,7 @@ class NetworkTestCase(test.TrialTestCase): """Makes sure that ip addresses that are deallocated get reused""" address = self.service.allocate_fixed_ip(self.projects[0].id, self.instance_id) - net = service.get_network_for_project(self.projects[0].id) + net = db.project_get_network(None, self.projects[0].id) issue_ip(address, net.bridge) self.service.deallocate_fixed_ip(address) release_ip(address, net.bridge) @@ -219,7 +221,7 @@ class NetworkTestCase(test.TrialTestCase): There are ips reserved at the bottom and top of the range. 
services (network, gateway, CloudPipe, broadcast) """ - network = service.get_network_for_project(self.projects[0].id) + network = db.project_get_network(None, self.projects[0].id) net_size = flags.FLAGS.network_size total_ips = (available_ips(network) + reserved_ips(network) + @@ -229,7 +231,7 @@ class NetworkTestCase(test.TrialTestCase): def test_too_many_addresses(self): """Test for a NoMoreAddresses exception when all fixed ips are used. """ - network = service.get_network_for_project(self.projects[0].id) + network = db.project_get_network(None, self.projects[0].id) # Number of availaible ips is len of the available list @@ -242,7 +244,7 @@ class NetworkTestCase(test.TrialTestCase): issue_ip(addresses[i],network.bridge) self.assertEqual(available_ips(network), 0) - self.assertRaises(NoMoreAddresses, + self.assertRaises(db.NoMoreAddresses, self.service.allocate_fixed_ip, self.projects[0].id, self.instance_id) @@ -274,11 +276,11 @@ def reserved_ips(network): def is_allocated_in_project(address, project_id): """Returns true if address is in specified project""" - fixed_ip = models.FixedIp.find_by_ip_str(address) - project_net = service.get_network_for_project(project_id) + fixed_ip = db.fixed_ip_get_by_address(None, address) + project_net = db.project_get_network(None, project_id) # instance exists until release - logging.error('fixed_ip.instance: %s', fixed_ip.instance) - logging.error('project_net: %s', project_net) + logging.debug('fixed_ip.instance: %s', fixed_ip.instance) + logging.debug('project_net: %s', project_net) return fixed_ip.instance is not None and fixed_ip.network == project_net -- cgit From 4b5c1b9137f46f811be8f7e55cc540c5898b3369 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 23 Aug 2010 20:39:19 -0700 Subject: fix daemons and move network code --- nova/auth/manager.py | 8 ++++--- nova/db/api.py | 25 +++++++++++++++++---- nova/db/sqlalchemy/api.py | 51 ++++++++++++++++++++++++++++-------------- nova/network/service.py | 16 
+------------ nova/service.py | 21 +++++------------ nova/tests/network_unittest.py | 11 +++++---- 6 files changed, 72 insertions(+), 60 deletions(-) diff --git a/nova/auth/manager.py b/nova/auth/manager.py index fc9aec071..e4d4afb7b 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -524,8 +524,11 @@ class AuthManager(object): member_users) if project_dict: project = Project(**project_dict) - # FIXME(ja): EVIL HACK - db.network_create(context, {'project_id': project.id}) + try: + db.network_allocate(context, project.id) + except: + drv.delete_project(project.id) + raise return project def add_to_project(self, user, project): @@ -574,7 +577,6 @@ class AuthManager(object): def delete_project(self, project, context=None): """Deletes a project""" - # FIXME(ja): EVIL HACK network_ref = db.project_get_network(context, Project.safe_id(project)) try: diff --git a/nova/db/api.py b/nova/db/api.py index b7c2010fe..ad1b78cfb 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -48,16 +48,28 @@ class NoMoreNetworks(exception.Error): ################### -def daemon_get(context, node_name, binary): - return _impl.daemon_get(context, node_name, binary) +def daemon_get(context, daemon_id): + """Get an daemon or raise if it does not exist.""" + return _impl.daemon_get(context, daemon_id) + + +def daemon_get_by_args(context, node_name, binary): + """Get the state of an daemon by node name and binary.""" + return _impl.daemon_get_by_args(context, node_name, binary) def daemon_create(context, values): + """Create a daemon from the values dictionary.""" return _impl.daemon_create(context, values) -def daemon_update(context, values): - return _impl.daemon_update(context, values) +def daemon_update(context, daemon_id, values): + """Set the given properties on an daemon and update it. + + Raises NotFound if daemon does not exist. 
+ + """ + return _impl.daemon_update(context, daemon_id, values) ################### @@ -167,6 +179,11 @@ def instance_update(context, instance_id, values): #################### +def network_allocate(context, project_id): + """Allocate a network for a project.""" + return _impl.network_allocate(context, project_id) + + def network_create(context, values): """Create a network from the values dictionary.""" return _impl.network_create(context, values) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index a3a5ff8de..6a472d1a1 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -16,18 +16,25 @@ # License for the specific language governing permissions and limitations # under the License. +import math + import IPy from nova import db from nova import exception +from nova import flags from nova import models +FLAGS = flags.FLAGS ################### -def daemon_get(context, node_name, binary): - return None +def daemon_get(context, daemon_id): + return models.Daemon.find(daemon_id) + + +def daemon_get_by_args(context, node_name, binary): return models.Daemon.find_by_args(node_name, binary) @@ -37,8 +44,8 @@ def daemon_create(context, values): return daemon_ref -def daemon_update(context, node_name, binary, values): - daemon_ref = daemon_get(context, node_name, binary) +def daemon_update(context, daemon_id, values): + daemon_ref = daemon_get(context, daemon_id) for (key, value) in values.iteritems(): daemon_ref[key] = value daemon_ref.save() @@ -171,6 +178,28 @@ def instance_update(context, instance_id, values): ################### +# NOTE(vish): is there a better place for this logic? 
+def network_allocate(context, project_id): + """Set up the network""" + db.network_ensure_indexes(context, FLAGS.num_networks) + network_ref = db.network_create(context, {'project_id': project_id}) + network_id = network_ref['id'] + private_net = IPy.IP(FLAGS.private_range) + index = db.network_get_index(context, network_id) + vlan = FLAGS.vlan_start + index + start = index * FLAGS.network_size + significant_bits = 32 - int(math.log(FLAGS.network_size, 2)) + cidr = "%s/%s" % (private_net[start], significant_bits) + db.network_set_cidr(context, network_id, cidr) + net = {} + net['kind'] = FLAGS.network_type + net['vlan'] = vlan + net['bridge'] = 'br%s' % vlan + net['vpn_public_ip_str'] = FLAGS.vpn_ip + net['vpn_public_port'] = FLAGS.vpn_start + index + db.network_update(context, network_id, net) + db.network_create_fixed_ips(context, network_id, FLAGS.cnt_vpn_clients) + def network_create(context, values): network_ref = models.Network() @@ -206,7 +235,7 @@ def network_ensure_indexes(context, num_networks): network_index = models.NetworkIndex() network_index.index = i session.add(network_index) - session.commit() + session.commit() def network_destroy(context, network_id): @@ -358,15 +387,3 @@ def volume_update(context, volume_id, values): for (key, value) in values.iteritems(): volume_ref[key] = value volume_ref.save() - - - - - - - - - - - - diff --git a/nova/network/service.py b/nova/network/service.py index bb2e4ae8a..368d99cbd 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -257,21 +257,7 @@ class VlanNetworkService(BaseNetworkService): def _on_set_network_host(self, context, network_id): """Called when this host becomes the host for a project""" - index = db.network_get_index(context, network_id) - private_net = IPy.IP(FLAGS.private_range) - start = index * FLAGS.network_size - significant_bits = 32 - int(math.log(FLAGS.network_size, 2)) - cidr = "%s/%s" % (private_net[start], significant_bits) - db.network_set_cidr(context, network_id, 
cidr) - vlan = FLAGS.vlan_start + index - net = {} - net['kind'] = FLAGS.network_type - net['vlan'] = vlan - net['bridge'] = 'br%s' % vlan - net['vpn_public_ip_str'] = FLAGS.vpn_ip - net['vpn_public_port'] = FLAGS.vpn_start + index - db.network_update(context, network_id, net) - db.network_create_fixed_ips(context, network_id, FLAGS.cnt_vpn_clients) + pass @classmethod diff --git a/nova/service.py b/nova/service.py index 65016d717..cfc3aff6a 100644 --- a/nova/service.py +++ b/nova/service.py @@ -46,12 +46,12 @@ class Service(object, service.Service): @classmethod def create(cls, report_interval=None, bin_name=None, topic=None): """Instantiates class and passes back application object. - + Args: report_interval, defaults to flag bin_name, defaults to basename of executable topic, defaults to basename - "nova-" part - + """ if not report_interval: report_interval = FLAGS.report_interval @@ -94,15 +94,14 @@ class Service(object, service.Service): """Update the state of this daemon in the datastore.""" try: try: - daemon_ref = db.daemon_get(context, node_name, binary) + daemon_ref = db.daemon_get_by_args(context, node_name, binary) except exception.NotFound: daemon_ref = db.daemon_create(context, {'node_name': node_name, 'binary': binary, 'report_count': 0}) - - # TODO(termie): I don't think this is really needed, consider - # removing it. - self._update_daemon(daemon_ref, context) + db.daemon_update(context, + daemon_ref['id'], + {'report_count': daemon_ref['report_count'] + 1}) # TODO(termie): make this pattern be more elegant. 
if getattr(self, "model_disconnected", False): @@ -114,11 +113,3 @@ class Service(object, service.Service): self.model_disconnected = True logging.exception("model server went away") yield - - def _update_daemon(self, daemon_ref, context): - """Set any extra daemon data here""" - # FIXME(termie): the following is in no way atomic - db.daemon_update(context, - daemon_ref['node_name'], - daemon_ref['binary'], - {'report_count': daemon_ref['report_count'] + 1}) diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index 3552a77bb..afa217673 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -179,18 +179,17 @@ class NetworkTestCase(test.TrialTestCase): FLAGS.num_networks) def test_too_many_networks(self): - """Ensure error is raised if we run out of vpn ports""" + """Ensure error is raised if we run out of networks""" projects = [] + # TODO(vish): use data layer for count networks_left = FLAGS.num_networks - models.Network.count() for i in range(networks_left): project = self.manager.create_project('many%s' % i, self.user) - self.service.set_network_host(project.id) projects.append(project) - project = self.manager.create_project('boom' , self.user) self.assertRaises(db.NoMoreNetworks, - self.service.set_network_host, - project.id) - self.manager.delete_project(project) + self.manager.create_project, + 'boom', + self.user) for project in projects: self.manager.delete_project(project) -- cgit From ce658b72aebe3d2caf41d5250c56e40474501014 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 23 Aug 2010 22:06:49 -0700 Subject: moving network code and fixing run_instances --- nova/db/api.py | 25 +++++++++++++++++++----- nova/db/sqlalchemy/api.py | 26 +++++++++++++++++++------ nova/endpoint/cloud.py | 49 +++++++++++++++++++++++++++++------------------ nova/network/service.py | 12 ++++++------ 4 files changed, 76 insertions(+), 36 deletions(-) diff --git a/nova/db/api.py b/nova/db/api.py index 
a3c54df24..b460859c4 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -106,12 +106,12 @@ def floating_ip_deallocate(context, address): #################### -def fixed_ip_allocate_address(context, network_id): +def fixed_ip_allocate(context, network_id): """Allocate free fixed ip and return the address. Raises if one is not available. """ - return _impl.fixed_ip_allocate_address(context, network_id) + return _impl.fixed_ip_allocate(context, network_id) def fixed_ip_get_by_address(context, address): @@ -163,20 +163,30 @@ def instance_get(context, instance_id): def instance_get_all(context): - """Gets all instances.""" + """Get all instances.""" return _impl.instance_get_all(context) +def instance_get_by_name(context, name): + """Get an instance by name.""" + return _impl.instance_get_by_project(context, name) + + def instance_get_by_project(context, project_id): - """Gets all instance belonging to a project.""" + """Get all instance belonging to a project.""" return _impl.instance_get_by_project(context, project_id) def instance_get_by_reservation(context, reservation_id): - """Gets all instance belonging to a reservation.""" + """Get all instance belonging to a reservation.""" return _impl.instance_get_by_reservation(context, reservation_id) +def instance_get_host(context, instance_id): + """Get the host that the instance is running on.""" + return _impl.instance_get_all(context, instance_id) + + def instance_state(context, instance_id, state, description=None): """Set the state of an instance.""" return _impl.instance_state(context, instance_id, state, description) @@ -234,6 +244,11 @@ def network_get_index(context, network_id): return _impl.network_get_index(context, network_id) +def network_get_vpn_ip(context, network_id): + """Gets non-conflicting index for network""" + return _impl.network_get_vpn_ip(context, network_id) + + def network_set_cidr(context, network_id, cidr): """Set the Classless Inner Domain Routing for the network""" return 
_impl.network_set_cidr(context, network_id, cidr) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index e05563c13..73833a9f3 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -91,7 +91,7 @@ def floating_ip_deallocate(context, address): ################### -def fixed_ip_allocate_address(context, network_id): +def fixed_ip_allocate(context, network_id): session = models.NovaBase.get_session() query = session.query(models.FixedIp).filter_by(network_id=network_id) query = query.filter_by(reserved=False).filter_by(allocated=False) @@ -104,7 +104,7 @@ def fixed_ip_allocate_address(context, network_id): fixed_ip_ref['allocated'] = True session.add(fixed_ip_ref) session.commit() - return fixed_ip_ref['ip_str'] + fixed_ip_ref def fixed_ip_get_by_address(context, address): @@ -150,6 +150,7 @@ def fixed_ip_instance_disassociate(context, address): def instance_create(context, values): instance_ref = models.Instance() for (key, value) in values.iteritems(): + print key instance_ref[key] = value instance_ref.save() return instance_ref.id @@ -168,6 +169,11 @@ def instance_get_all(context): return models.Instance.all() +def instance_get_by_name(context, name): + # NOTE(vish): remove the 'i-' + return models.Instance.find(name[2:]) + + def instance_get_by_project(context, project_id): session = models.NovaBase.get_session() query = session.query(models.Instance) @@ -184,6 +190,11 @@ def instance_get_by_reservation(context, reservation_id): return results +def instance_get_host(context, instance_id): + instance_ref = instance_get(context, instance_id) + return instance_ref['node_name'] + + def instance_state(context, instance_id, state, description=None): instance_ref = instance_get(context, instance_id) instance_ref.set_state(state, description) @@ -198,6 +209,7 @@ def instance_update(context, instance_id, values): ################### + # NOTE(vish): is there a better place for this logic? 
def network_allocate(context, project_id): """Set up the network""" @@ -219,6 +231,7 @@ def network_allocate(context, project_id): net['vpn_public_port'] = FLAGS.vpn_start + index db.network_update(context, network_id, net) db.network_create_fixed_ips(context, network_id, FLAGS.cnt_vpn_clients) + return network_ref def network_create(context, values): @@ -274,7 +287,8 @@ def network_get_vpn_ip(context, network_id): fixed_ip = fixed_ip_get_by_address(context, address) if fixed_ip['allocated']: raise db.AddressAlreadyAllocated() - db.fixed_ip_allocate(context, {'allocated': True}) + db.fixed_ip_update(context, fixed_ip['id'], {'allocated': True}) + return fixed_ip def network_get_host(context, network_id): @@ -340,10 +354,10 @@ def project_get_network(context, project_id): if not rv: raise exception.NotFound('No network for project: %s' % project_id) return rv - - + + ################### - + def queue_get_for(context, topic, physical_node_id): return "%s.%s" % (topic, physical_node_id) # FIXME(ja): this should be servername? 
diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index dd489cd95..db79c585e 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -522,16 +522,16 @@ class CloudController(object): @defer.inlineCallbacks def _get_network_topic(self, context): """Retrieves the network host for a project""" - host = network_service.get_host_for_project(context.project.id) + network_ref = db.project_get_network(context, context.project.id) + host = db.network_get_host(context, network_ref['id']) if not host: host = yield rpc.call(FLAGS.network_topic, {"method": "set_network_host", - "args": {"user_id": context.user.id, - "project_id": context.project.id}}) - defer.returnValue('%s.%s' %(FLAGS.network_topic, host)) + "args": {"project_id": context.project.id}}) + defer.returnValue(db.queue_get_for(FLAGS.network_topic, host)) @rbac.allow('projectmanager', 'sysadmin') - #@defer.inlineCallbacks + @defer.inlineCallbacks def run_instances(self, context, **kwargs): # make sure user can access the image # vpn image is private so it doesn't show up on lists @@ -571,11 +571,16 @@ class CloudController(object): for num in range(int(kwargs['max_count'])): inst = {} - inst['mac_address'] = utils.generate_mac() - inst['fixed_ip'] = db.fixed_ip_allocate_address(context, network_ref['id']) inst['image_id'] = image_id inst['kernel_id'] = kernel_id inst['ramdisk_id'] = ramdisk_id + instance_ref = db.instance_create(context, inst) + inst_id = instance_ref['id'] + if db.instance_is_vpn(instance_ref['id']): + fixed_ip = db.fixed_ip_allocate(context, network_ref['id']) + else: + fixed_ip = db.network_get_vpn_ip(context, network_ref['id']) + inst['mac_address'] = utils.generate_mac() inst['user_data'] = kwargs.get('user_data', '') inst['instance_type'] = kwargs.get('instance_type', 'm1.small') inst['reservation_id'] = reservation_id @@ -585,16 +590,23 @@ class CloudController(object): inst['project_id'] = context.project.id # FIXME(ja) inst['launch_index'] = num 
inst['security_group'] = security_group - # inst['hostname'] = inst.id # FIXME(ja): id isn't assigned until create + inst['hostname'] = inst_id # FIXME(ja): id isn't assigned until create + db.instance_update(context, inst_id, inst) + + + # TODO(vish): This probably should be done in the scheduler + # network is setup when host is assigned + network_topic = yield self.get_network_topic() + rpc.call(network_topic, + {"method": "setup_fixed_ip", + "args": {"fixed_ip": fixed_ip['id']}}) - inst_id = db.instance_create(context, inst) rpc.cast(FLAGS.compute_topic, {"method": "run_instance", "args": {"instance_id": inst_id}}) logging.debug("Casting to node for %s/%s's instance %s" % (context.project.name, context.user.name, inst_id)) - # defer.returnValue(self._format_instances(context, reservation_id)) - return self._format_run_instances(context, reservation_id) + defer.returnValue(self._format_instances(context, reservation_id)) @rbac.allow('projectmanager', 'sysadmin') @@ -605,13 +617,12 @@ class CloudController(object): for name in instance_id: logging.debug("Going to try and terminate %s" % name) try: - inst_id = name[2:] # remove the i- - instance_ref = db.instance_get(context, inst_id) + instance_ref = db.instance_get_by_name(context, name) except exception.NotFound: logging.warning("Instance %s was not found during terminate" % name) continue - + # FIXME(ja): where should network deallocate occur? 
# floating_ip = network_model.get_public_ip_for_instance(i) # if floating_ip: @@ -622,7 +633,7 @@ class CloudController(object): # rpc.cast(network_topic, # {"method": "disassociate_floating_ip", # "args": {"floating_ip": floating_ip}}) - # + # # fixed_ip = instance.get('private_dns_name', None) # if fixed_ip: # logging.debug("Deallocating address %s" % fixed_ip) @@ -633,14 +644,14 @@ class CloudController(object): # {"method": "deallocate_fixed_ip", # "args": {"fixed_ip": fixed_ip}}) - if instance_ref['physical_node_id'] is not None: + host = db.instance_get_host(context, instance_ref['id']) + if host is not None: # NOTE(joshua?): It's also internal default - rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, - instance_ref['physical_node_id']), + rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "terminate_instance", "args": {"instance_id": name}}) else: - db.instance_destroy(context, inst_id) + db.instance_destroy(context, instance_ref['id']) # defer.returnValue(True) return True diff --git a/nova/network/service.py b/nova/network/service.py index 368d99cbd..7eed2c10a 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -206,11 +206,11 @@ class VlanNetworkService(BaseNetworkService): # admin command or fixtures db.network_ensure_indexes(None, FLAGS.num_networks) - def allocate_fixed_ip(self, project_id, instance_id, is_vpn=False, - context=None, *args, **kwargs): + def setup_fixed_ip(self, project_id, instance_id, context=None, + *args, **kwargs): """Gets a fixed ip from the pool""" network_ref = db.project_get_network(context, project_id) - if is_vpn: + if db.instance_is_vpn(context, instance_id): address = db.network_get_vpn_ip_address(context, network_ref['id']) logging.debug("Allocating vpn IP %s", address) @@ -225,8 +225,6 @@ class VlanNetworkService(BaseNetworkService): address = parent.allocate_fixed_ip(project_id, instance_id, context) - _driver.ensure_vlan_bridge(network_ref['vlan'], - network_ref['bridge']) 
return address def deallocate_fixed_ip(self, address, context=None): @@ -257,7 +255,9 @@ class VlanNetworkService(BaseNetworkService): def _on_set_network_host(self, context, network_id): """Called when this host becomes the host for a project""" - pass + network_ref = db.network_get(network_id) + _driver.ensure_vlan_bridge(network_ref['vlan'], + network_ref['bridge']) @classmethod -- cgit From ea471ab48c50555a938b9d0d11330f6ee14b9b10 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 23 Aug 2010 22:25:09 -0700 Subject: bunch more fixes --- nova/db/api.py | 5 ++++ nova/db/sqlalchemy/api.py | 5 ++++ nova/endpoint/cloud.py | 2 +- nova/network/service.py | 72 +++++++++++------------------------------------ 4 files changed, 28 insertions(+), 56 deletions(-) diff --git a/nova/db/api.py b/nova/db/api.py index b460859c4..430384b0a 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -187,6 +187,11 @@ def instance_get_host(context, instance_id): return _impl.instance_get_all(context, instance_id) +def instance_is_vpn(context, instance_id): + """True if instance is a vpn.""" + return _impl.instance_is_vpn(context, instance_id) + + def instance_state(context, instance_id, state, description=None): """Set the state of an instance.""" return _impl.instance_state(context, instance_id, state, description) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 73833a9f3..5abd33c33 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -195,6 +195,11 @@ def instance_get_host(context, instance_id): return instance_ref['node_name'] +def instance_is_vpn(context, instance_id): + instance_ref = instance_get(context, instance_id) + return instance_ref['image_id'] == FLAGS.vpn_image_id + + def instance_state(context, instance_id, state, description=None): instance_ref = instance_get(context, instance_id) instance_ref.set_state(state, description) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index db79c585e..1c4c3483e 100644 
--- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -599,7 +599,7 @@ class CloudController(object): network_topic = yield self.get_network_topic() rpc.call(network_topic, {"method": "setup_fixed_ip", - "args": {"fixed_ip": fixed_ip['id']}}) + "args": {"fixed_ip_id": fixed_ip['id']}}) rpc.cast(FLAGS.compute_topic, {"method": "run_instance", diff --git a/nova/network/service.py b/nova/network/service.py index 7eed2c10a..4f8751a6f 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -121,25 +121,13 @@ class BaseNetworkService(service.Service): self._on_set_network_host(context, network_id) return host - def allocate_fixed_ip(self, project_id, instance_id, context=None, - *args, **kwargs): - """Gets fixed ip from the pool""" - network_ref = db.project_get_network(context, project_id) - address = db.fixed_ip_allocate_address(context, network_ref['id']) - db.fixed_ip_instance_associate(context, - address, - instance_id) - return address - - def deallocate_fixed_ip(self, address, context=None): - """Returns a fixed ip to the pool""" - db.fixed_ip_deallocate(context, address) - db.fixed_ip_instance_disassociate(context, address) - + def setup_fixed_ip(self, fixed_ip_id): + """Sets up rules for fixed ip""" + raise NotImplementedError() def _on_set_network_host(self, context, network_id): """Called when this host becomes the host for a project""" - pass + raise NotImplementedError() @classmethod def setup_compute_network(cls, network): @@ -182,6 +170,10 @@ class FlatNetworkService(BaseNetworkService): """Network is created manually""" pass + def setup_fixed_ip(self, fixed_ip_id): + """Currently no setup""" + pass + def _on_set_network_host(self, context, network_id): """Called when this host becomes the host for a project""" # NOTE(vish): should there be two types of network objects @@ -206,47 +198,15 @@ class VlanNetworkService(BaseNetworkService): # admin command or fixtures db.network_ensure_indexes(None, FLAGS.num_networks) - def 
setup_fixed_ip(self, project_id, instance_id, context=None, - *args, **kwargs): + def setup_fixed_ip(self, fixed_ip_id, context=None): """Gets a fixed ip from the pool""" - network_ref = db.project_get_network(context, project_id) - if db.instance_is_vpn(context, instance_id): - address = db.network_get_vpn_ip_address(context, - network_ref['id']) - logging.debug("Allocating vpn IP %s", address) - db.fixed_ip_instance_associate(context, - address, - instance_id) + fixed_ip_ref = db.project_get_fixed_ip(context, fixed_ip_id) + network_ref = db.fixed_ip_get_network(context, fixed_ip_id) + if db.instance_is_vpn(context, fixed_ip_ref['instance_id']): _driver.ensure_vlan_forward(network_ref['vpn_public_ip_str'], network_ref['vpn_public_port'], network_ref['vpn_private_ip_str']) - else: - parent = super(VlanNetworkService, self) - address = parent.allocate_fixed_ip(project_id, - instance_id, - context) - return address - - def deallocate_fixed_ip(self, address, context=None): - """Returns an ip to the pool""" - fixed_ip_ref = db.fixed_ip_get_by_address(context, address) - if fixed_ip_ref['leased']: - logging.debug("Deallocating IP %s", address) - db.fixed_ip_deallocate(context, address) - # NOTE(vish): we keep instance id until release occurs - else: - self.release_fixed_ip(address, context) - - def lease_fixed_ip(self, address, context=None): - """Called by bridge when ip is leased""" - logging.debug("Leasing IP %s", address) - db.fixed_ip_lease(context, address) - - def release_fixed_ip(self, address, context=None): - """Called by bridge when ip is released""" - logging.debug("Releasing IP %s", address) - db.fixed_ip_release(context, address) - db.fixed_ip_instance_disassociate(context, address) + _driver.update_dhcp(network_ref) def restart_nets(self): """Ensure the network for each user is enabled""" @@ -261,6 +221,8 @@ class VlanNetworkService(BaseNetworkService): @classmethod - def setup_compute_network(cls, network): + def setup_compute_network(cls, network_id): 
"""Sets up matching network for compute hosts""" - _driver.ensure_vlan_bridge(network.vlan, network.bridge) + network_ref = db.network_get(network_id) + _driver.ensure_vlan_bridge(network_ref['vlan'], + network_ref['bridge']) -- cgit From a2cb9dee1d041bb60b3e61cb4b94308ff200fe7e Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 23 Aug 2010 22:35:46 -0700 Subject: removed old imports and moved flags --- nova/db/api.py | 14 ++++++++++++++ nova/network/service.py | 15 --------------- 2 files changed, 14 insertions(+), 15 deletions(-) diff --git a/nova/db/api.py b/nova/db/api.py index 9b3169bd6..996592088 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -25,6 +25,20 @@ FLAGS = flags.FLAGS flags.DEFINE_string('db_backend', 'sqlalchemy', 'The backend to use for db') +# TODO(vish): where should these flags go +flags.DEFINE_integer('vlan_start', 100, 'First VLAN for private networks') +flags.DEFINE_integer('num_networks', 1000, 'Number of networks to support') +flags.DEFINE_string('vpn_ip', utils.get_my_ip(), + 'Public IP for the cloudpipe VPN servers') +flags.DEFINE_integer('vpn_start', 1000, 'First Vpn port for private networks') +flags.DEFINE_integer('network_size', 256, + 'Number of addresses in each private subnet') +flags.DEFINE_string('public_range', '4.4.4.0/24', 'Public IP address block') +flags.DEFINE_string('private_range', '10.0.0.0/8', 'Private IP address block') +flags.DEFINE_integer('cnt_vpn_clients', 5, + 'Number of addresses reserved for vpn clients') + + _impl = utils.LazyPluggable(FLAGS['db_backend'], sqlalchemy='nova.db.sqlalchemy.api') diff --git a/nova/network/service.py b/nova/network/service.py index 4f8751a6f..609cd6be3 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -21,9 +21,6 @@ Network Hosts are responsible for allocating ips and setting up network """ import logging -import math - -import IPy from nova import db from nova import exception @@ -53,18 +50,6 @@ flags.DEFINE_string('flat_network_broadcast', 
'192.168.0.255', flags.DEFINE_string('flat_network_dns', '8.8.4.4', 'Dns for simple network') -flags.DEFINE_integer('vlan_start', 100, 'First VLAN for private networks') -flags.DEFINE_integer('num_networks', 1000, 'Number of networks to support') -flags.DEFINE_string('vpn_ip', utils.get_my_ip(), - 'Public IP for the cloudpipe VPN servers') -flags.DEFINE_integer('vpn_start', 1000, 'First Vpn port for private networks') -flags.DEFINE_integer('network_size', 256, - 'Number of addresses in each private subnet') -flags.DEFINE_string('public_range', '4.4.4.0/24', 'Public IP address block') -flags.DEFINE_string('private_range', '10.0.0.0/8', 'Private IP address block') -flags.DEFINE_integer('cnt_vpn_clients', 5, - 'Number of addresses reserved for vpn clients') - class AddressAlreadyAllocated(exception.Error): pass -- cgit From 91892a5e3f51957d858fe34e64758526515a4824 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 23 Aug 2010 22:48:16 -0700 Subject: add back in the needed calls for dhcpbridge --- nova/network/service.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/nova/network/service.py b/nova/network/service.py index 609cd6be3..b7569207a 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -26,7 +26,6 @@ from nova import db from nova import exception from nova import flags from nova import service -from nova import utils from nova.network import linux_net @@ -83,11 +82,6 @@ def setup_compute_network(project_id): srv.setup_compute_network(network) -def get_host_for_project(project_id): - """Get host allocated to project from datastore""" - return db.project_get_network(None, project_id).node_name - - class BaseNetworkService(service.Service): """Implements common network service functionality @@ -193,6 +187,17 @@ class VlanNetworkService(BaseNetworkService): network_ref['vpn_private_ip_str']) _driver.update_dhcp(network_ref) + def lease_fixed_ip(self, address, context=None): + """Called by bridge when 
ip is leased""" + logging.debug("Leasing IP %s", address) + db.fixed_ip_lease(context, address) + + def release_fixed_ip(self, address, context=None): + """Called by bridge when ip is released""" + logging.debug("Releasing IP %s", address) + db.fixed_ip_release(context, address) + db.fixed_ip_instance_disassociate(context, address) + def restart_nets(self): """Ensure the network for each user is enabled""" # FIXME -- cgit From d832003f1743ab0e1c4ef935f3e4f1d02691bc39 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 24 Aug 2010 01:30:48 -0700 Subject: typo in release_ip --- bin/nova-dhcpbridge | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index cd0917390..018293e24 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -70,7 +70,7 @@ def del_lease(_mac, ip_address, _hostname, _interface): else: rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name), {"method": "release_fixed_ip", - "args": {"fixed_ip": ip_address}}) + "args": {"address": ip_address}}) def init_leases(interface): -- cgit From 393eef48ce792206a3e2a678933aa120b535309e Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 24 Aug 2010 01:54:16 -0700 Subject: fix some errors with networking rules --- nova/network/linux_net.py | 18 +++++++++--------- nova/network/service.py | 3 ++- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 3e20ce8e8..1e14b4716 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -88,10 +88,10 @@ def remove_floating_forward(floating_ip, fixed_ip): % (fixed_ip, protocol, port)) -def ensure_vlan_bridge(vlan_num, bridge, network=None): +def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None): """Create a vlan and bridge unless they already exist""" interface = ensure_vlan(vlan_num) - ensure_bridge(bridge, interface, network) + ensure_bridge(bridge, interface, net_attrs) def ensure_vlan(vlan_num): 
interface = "vlan%s" % vlan_num @@ -103,7 +103,7 @@ def ensure_vlan(vlan_num): return interface -def ensure_bridge(bridge, interface, network=None): +def ensure_bridge(bridge, interface, net_attrs=None): if not _device_exists(bridge): logging.debug("Starting Bridge inteface for %s", interface) _execute("sudo brctl addbr %s" % bridge) @@ -111,13 +111,13 @@ def ensure_bridge(bridge, interface, network=None): # _execute("sudo brctl setageing %s 10" % bridge) _execute("sudo brctl stp %s off" % bridge) _execute("sudo brctl addif %s %s" % (bridge, interface)) - if network: + if net_attrs: _execute("sudo ifconfig %s %s broadcast %s netmask %s up" % \ (bridge, - network['gateway'], - network['broadcast'], - network['netmask'])) - _confirm_rule("FORWARD --in-bridge %s -j ACCEPT" % bridge) + net_attrs['gateway'], + net_attrs['broadcast'], + net_attrs['netmask'])) + _confirm_rule("FORWARD --in-interface %s -j ACCEPT" % bridge) else: _execute("sudo ifconfig %s up" % bridge) @@ -188,7 +188,7 @@ def _device_exists(device): def _confirm_rule(cmd): """Delete and re-add iptables rule""" - _execute("sudo iptables --delete %s" % (cmd)) + _execute("sudo iptables --delete %s" % (cmd), check_exit_code=False) _execute("sudo iptables -I %s" % (cmd)) diff --git a/nova/network/service.py b/nova/network/service.py index c501f523b..baaaff521 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -204,7 +204,8 @@ class VlanNetworkService(BaseNetworkService): network_ref = db.network_get(context, network_id) print 'making the bridge' _driver.ensure_vlan_bridge(network_ref['vlan'], - network_ref['bridge']) + network_ref['bridge'], + network_ref) @classmethod -- cgit From d94eec3d2995c97c38006e4d6177740375860f8f Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Tue, 24 Aug 2010 11:19:51 -0400 Subject: Style fixes --- nova/api/rackspace/images.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/nova/api/rackspace/images.py 
b/nova/api/rackspace/images.py index 7d32fa099..36a26688c 100644 --- a/nova/api/rackspace/images.py +++ b/nova/api/rackspace/images.py @@ -17,7 +17,7 @@ from nova import datastore from nova.api.rackspace import base -from nova.api.services.image import ImageService +from nova.api.services import image from webob import exc class Controller(base.Controller): @@ -32,28 +32,28 @@ class Controller(base.Controller): } def __init__(self): - self._svc = ImageService.load() - self._id_xlator = RackspaceApiImageIdTranslator() + self._service = image.ImageService.load() + self._id_translator = RackspaceAPIImageIdTranslator() def _to_rs_id(self, image_id): """ Convert an image id from the format of our ImageService strategy to the Rackspace API format (an int). """ - strategy = self._svc.__class__.__name__ - return self._id_xlator.to_rs_id(strategy, image_id) + strategy = self._service.__class__.__name__ + return self._id_translator.to_rs_id(strategy, image_id) def _from_rs_id(self, rs_image_id): """ Convert an image id from the Rackspace API format (an int) to the format of our ImageService strategy. 
""" - strategy = self._svc.__class__.__name__ - return self._id_xlator.from_rs_id(strategy, rs_image_id) + strategy = self._service.__class__.__name__ + return self._id_translator.from_rs_id(strategy, rs_image_id) def index(self, req): """Return all public images.""" - data = self._svc.index() + data = self._service.index() for img in data: img['id'] = self._to_rs_id(img['id']) return dict(images=data) @@ -61,7 +61,7 @@ class Controller(base.Controller): def show(self, req, id): """Return data about the given image id.""" opaque_id = self._from_rs_id(id) - img = self._svc.show(opaque_id) + img = self._service.show(opaque_id) img['id'] = id return dict(image=img) @@ -80,7 +80,7 @@ class Controller(base.Controller): raise exc.HTTPNotFound() -class RackspaceApiImageIdTranslator(object): +class RackspaceAPIImageIdTranslator(object): """ Converts Rackspace API image ids to and from the id format for a given strategy. -- cgit From 4d1b2539d2d2f39ca53e9383e317af76dbc71905 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Tue, 24 Aug 2010 13:37:18 -0400 Subject: OK, break out ternary operator (good to know that it slowed you down to read it) --- nova/wsgi.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/nova/wsgi.py b/nova/wsgi.py index 096d5843f..bec0a7b1c 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -212,7 +212,10 @@ class Controller(object): del arg_dict['action'] arg_dict['req'] = req result = method(**arg_dict) - return self._serialize(result, req) if type(result) is dict else result + if type(result) is dict: + return self._serialize(result, req) + else: + return result def _serialize(self, data, request): """ -- cgit From 4a28728cac1d94b3ec88f83ac4dbcfad11b08b02 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Tue, 24 Aug 2010 15:23:52 -0400 Subject: getting run/terminate/describe to work --- nova/db/api.py | 25 +++++++- nova/db/sqlalchemy/api.py | 26 ++++++++ nova/endpoint/cloud.py | 156 
+++++++++++++++++++++++----------------------- 3 files changed, 126 insertions(+), 81 deletions(-) diff --git a/nova/db/api.py b/nova/db/api.py index b7c2010fe..107623f71 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -150,6 +150,21 @@ def instance_get(context, instance_id): return _impl.instance_get(context, instance_id) +def instance_get_all(context): + """Gets all instances.""" + return _impl.instance_get_all(context) + + +def instance_get_by_project(context, project_id): + """Gets all instance belonging to a project.""" + return _impl.instance_get_by_project(context, project_id) + + +def instance_get_by_reservation(context, reservation_id): + """Gets all instance belonging to a reservation.""" + return _impl.instance_get_by_reservation(context, reservation_id) + + def instance_state(context, instance_id, state, description=None): """Set the state of an instance.""" return _impl.instance_state(context, instance_id, state, description) @@ -232,6 +247,14 @@ def project_get_network(context, project_id): ################### +def queue_get_for(context, topic, physical_node_id): + """Return a channel to send a message to a node with a topic.""" + return _impl.queue_get_for(context, topic, physical_node_id) + + +################### + + def volume_allocate_shelf_and_blade(context, volume_id): """Atomically allocate a free shelf and blade from the pool.""" return _impl.volume_allocate_shelf_and_blade(context, volume_id) @@ -274,5 +297,3 @@ def volume_update(context, volume_id, values): """ return _impl.volume_update(context, volume_id, values) - - diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index a3a5ff8de..5708d4d5a 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -157,6 +157,26 @@ def instance_get(context, instance_id): return models.Instance.find(instance_id) +def instance_get_all(context): + return models.Instance.all() + + +def instance_get_by_project(context, project_id): + session = models.NovaBase.get_session() 
+ query = session.query(models.Instance) + results = query.filter_by(project_id=project_id).all() + session.commit() + return results + + +def instance_get_by_reservation(context, reservation_id): + session = models.NovaBase.get_session() + query = session.query(models.Instance) + results = query.filter_by(reservation_id=reservation_id).all() + session.commit() + return results + + def instance_state(context, instance_id, state, description=None): instance_ref = instance_get(context, instance_id) instance_ref.set_state(state, description) @@ -291,7 +311,13 @@ def project_get_network(context, project_id): if not rv: raise exception.NotFound('No network for project: %s' % project_id) return rv + + +################### + +def queue_get_for(context, topic, physical_node_id): + return "%s.%s" % (topic, physical_node_id) # FIXME(ja): this should be servername? ################### diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index e64005c2e..dd489cd95 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -29,7 +29,7 @@ import time from twisted.internet import defer -from nova import datastore +from nova import db from nova import exception from nova import flags from nova import models @@ -407,22 +407,22 @@ class CloudController(object): assert len(i) == 1 return i[0] - def _format_instances(self, context, reservation_id = None): + def _format_instances(self, context, reservation_id=None): reservations = {} - if context.user.is_admin(): - instgenerator = models.Instance.all() + if reservation_id: + instances = db.instance_get_by_reservation(context, reservation_id) else: - instgenerator = models.Instance.all() # FIXME - for instance in instgenerator: - res_id = instance.reservation_id - if reservation_id != None and reservation_id != res_id: - continue + if not context.user.is_admin(): + instances = db.instance_get_all(context) + else: + instances = db.instance_get_by_project(context, context.project.id) + for instance in instances: if not 
context.user.is_admin(): if instance['image_id'] == FLAGS.vpn_image_id: continue i = {} - i['instanceId'] = instance.name - i['imageId'] = instance.image_id + i['instanceId'] = instance['name'] + i['imageId'] = instance['image_id'] i['instanceState'] = { 'code': instance.state, 'name': instance.state_description @@ -442,14 +442,14 @@ class CloudController(object): i['instance_type'] = instance.instance_type i['launch_time'] = instance.created_at i['ami_launch_index'] = instance.launch_index - if not reservations.has_key(res_id): + if not reservations.has_key(instance['reservation_id']): r = {} - r['reservation_id'] = res_id + r['reservation_id'] = instance['reservation_id'] r['owner_id'] = instance.project_id r['group_set'] = self._convert_to_set([], 'groups') r['instances_set'] = [] - reservations[res_id] = r - reservations[res_id]['instances_set'].append(i) + reservations[instance['reservation_id']] = r + reservations[instance['reservation_id']]['instances_set'].append(i) return list(reservations.values()) @@ -563,88 +563,86 @@ class CloudController(object): raise exception.ApiError('Key Pair %s not found' % kwargs['key_name']) key_data = key_pair.public_key - # network_topic = yield self._get_network_topic(context) + # TODO: Get the real security group of launch in here security_group = "default" + + network_ref = db.project_get_network(context, context.project.id) + for num in range(int(kwargs['max_count'])): - is_vpn = False - if image_id == FLAGS.vpn_image_id: - is_vpn = True - inst = models.Instance() - #allocate_data = yield rpc.call(network_topic, - # {"method": "allocate_fixed_ip", - # "args": {"user_id": context.user.id, - # "project_id": context.project.id, - # "security_group": security_group, - # "is_vpn": is_vpn, - # "hostname": inst.instance_id}}) - allocate_data = {'mac_address': utils.generate_mac(), - 'fixed_ip': '192.168.0.100'} - inst.image_id = image_id - inst.kernel_id = kernel_id - inst.ramdisk_id = ramdisk_id - inst.user_data = 
kwargs.get('user_data', '') - inst.instance_type = kwargs.get('instance_type', 'm1.small') - inst.reservation_id = reservation_id - inst.key_data = key_data - inst.key_name = kwargs.get('key_name', None) - inst.user_id = context.user.id - inst.project_id = context.project.id - inst.launch_index = num - inst.security_group = security_group - inst.hostname = inst.id - for (key, value) in allocate_data.iteritems(): - setattr(inst, key, value) - inst.save() + inst = {} + inst['mac_address'] = utils.generate_mac() + inst['fixed_ip'] = db.fixed_ip_allocate_address(context, network_ref['id']) + inst['image_id'] = image_id + inst['kernel_id'] = kernel_id + inst['ramdisk_id'] = ramdisk_id + inst['user_data'] = kwargs.get('user_data', '') + inst['instance_type'] = kwargs.get('instance_type', 'm1.small') + inst['reservation_id'] = reservation_id + inst['key_data'] = key_data + inst['key_name'] = kwargs.get('key_name', None) + inst['user_id'] = context.user.id # FIXME(ja) + inst['project_id'] = context.project.id # FIXME(ja) + inst['launch_index'] = num + inst['security_group'] = security_group + # inst['hostname'] = inst.id # FIXME(ja): id isn't assigned until create + + inst_id = db.instance_create(context, inst) rpc.cast(FLAGS.compute_topic, {"method": "run_instance", - "args": {"instance_id": inst.id}}) - logging.debug("Casting to node for %s's instance with IP of %s" % - (context.user.name, inst.fixed_ip)) + "args": {"instance_id": inst_id}}) + logging.debug("Casting to node for %s/%s's instance %s" % + (context.project.name, context.user.name, inst_id)) # defer.returnValue(self._format_instances(context, reservation_id)) return self._format_run_instances(context, reservation_id) + @rbac.allow('projectmanager', 'sysadmin') - @defer.inlineCallbacks + # @defer.inlineCallbacks def terminate_instances(self, context, instance_id, **kwargs): logging.debug("Going to start terminating instances") - network_topic = yield self._get_network_topic(context) - for i in instance_id: - 
logging.debug("Going to try and terminate %s" % i) + # network_topic = yield self._get_network_topic(context) + for name in instance_id: + logging.debug("Going to try and terminate %s" % name) try: - instance = self._get_instance(context, i) + inst_id = name[2:] # remove the i- + instance_ref = db.instance_get(context, inst_id) except exception.NotFound: logging.warning("Instance %s was not found during terminate" - % i) + % name) continue - floating_ip = network_model.get_public_ip_for_instance(i) - if floating_ip: - logging.debug("Disassociating address %s" % floating_ip) - # NOTE(vish): Right now we don't really care if the ip is - # disassociated. We may need to worry about - # checking this later. Perhaps in the scheduler? - rpc.cast(network_topic, - {"method": "disassociate_floating_ip", - "args": {"floating_ip": floating_ip}}) - - fixed_ip = instance.get('private_dns_name', None) - if fixed_ip: - logging.debug("Deallocating address %s" % fixed_ip) - # NOTE(vish): Right now we don't really care if the ip is - # actually removed. We may need to worry about - # checking this later. Perhaps in the scheduler? - rpc.cast(network_topic, - {"method": "deallocate_fixed_ip", - "args": {"fixed_ip": fixed_ip}}) - - if instance.get('node_name', 'unassigned') != 'unassigned': + + # FIXME(ja): where should network deallocate occur? + # floating_ip = network_model.get_public_ip_for_instance(i) + # if floating_ip: + # logging.debug("Disassociating address %s" % floating_ip) + # # NOTE(vish): Right now we don't really care if the ip is + # # disassociated. We may need to worry about + # # checking this later. Perhaps in the scheduler? + # rpc.cast(network_topic, + # {"method": "disassociate_floating_ip", + # "args": {"floating_ip": floating_ip}}) + # + # fixed_ip = instance.get('private_dns_name', None) + # if fixed_ip: + # logging.debug("Deallocating address %s" % fixed_ip) + # # NOTE(vish): Right now we don't really care if the ip is + # # actually removed. 
We may need to worry about + # # checking this later. Perhaps in the scheduler? + # rpc.cast(network_topic, + # {"method": "deallocate_fixed_ip", + # "args": {"fixed_ip": fixed_ip}}) + + if instance_ref['physical_node_id'] is not None: # NOTE(joshua?): It's also internal default - rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), + rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, + instance_ref['physical_node_id']), {"method": "terminate_instance", - "args": {"instance_id": i}}) + "args": {"instance_id": name}}) else: - instance.destroy() - defer.returnValue(True) + db.instance_destroy(context, inst_id) + # defer.returnValue(True) + return True @rbac.allow('projectmanager', 'sysadmin') def reboot_instances(self, context, instance_id, **kwargs): -- cgit From 09bc71460b976f28c7bc6a507006d6c7c12c5824 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Tue, 24 Aug 2010 16:16:41 -0400 Subject: Move imageservice to its own directory --- nova/api/rackspace/images.py | 4 +- nova/api/services/image.py | 90 -------------------------------------------- nova/image/service.py | 90 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 92 insertions(+), 92 deletions(-) delete mode 100644 nova/api/services/image.py create mode 100644 nova/image/service.py diff --git a/nova/api/rackspace/images.py b/nova/api/rackspace/images.py index 36a26688c..b7668a1e1 100644 --- a/nova/api/rackspace/images.py +++ b/nova/api/rackspace/images.py @@ -17,7 +17,7 @@ from nova import datastore from nova.api.rackspace import base -from nova.api.services import image +from nova import image from webob import exc class Controller(base.Controller): @@ -32,7 +32,7 @@ class Controller(base.Controller): } def __init__(self): - self._service = image.ImageService.load() + self._service = image.service.ImageService.load() self._id_translator = RackspaceAPIImageIdTranslator() def _to_rs_id(self, image_id): diff --git a/nova/api/services/image.py b/nova/api/services/image.py 
deleted file mode 100644 index 1a7a258b7..000000000 --- a/nova/api/services/image.py +++ /dev/null @@ -1,90 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import cPickle as pickle -import os.path -import random -import string - -class ImageService(object): - """Provides storage and retrieval of disk image objects.""" - - @staticmethod - def load(): - """Factory method to return image service.""" - #TODO(gundlach): read from config. - class_ = LocalImageService - return class_() - - def index(self): - """ - Return a dict from opaque image id to image data. - """ - - def show(self, id): - """ - Returns a dict containing image data for the given opaque image id. - """ - - -class GlanceImageService(ImageService): - """Provides storage and retrieval of disk image objects within Glance.""" - # TODO(gundlach): once Glance has an API, build this. 
- pass - - -class LocalImageService(ImageService): - """Image service storing images to local disk.""" - - def __init__(self): - self._path = "/tmp/nova/images" - try: - os.makedirs(self._path) - except OSError: # exists - pass - - def _path_to(self, image_id=''): - return os.path.join(self._path, image_id) - - def _ids(self): - """The list of all image ids.""" - return os.listdir(self._path) - - def index(self): - return [ self.show(id) for id in self._ids() ] - - def show(self, id): - return pickle.load(open(self._path_to(id))) - - def create(self, data): - """ - Store the image data and return the new image id. - """ - id = ''.join(random.choice(string.letters) for _ in range(20)) - data['id'] = id - self.update(id, data) - return id - - def update(self, image_id, data): - """Replace the contents of the given image with the new data.""" - pickle.dump(data, open(self._path_to(image_id), 'w')) - - def delete(self, image_id): - """ - Delete the given image. Raises OSError if the image does not exist. - """ - os.unlink(self._path_to(image_id)) diff --git a/nova/image/service.py b/nova/image/service.py new file mode 100644 index 000000000..1a7a258b7 --- /dev/null +++ b/nova/image/service.py @@ -0,0 +1,90 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import cPickle as pickle +import os.path +import random +import string + +class ImageService(object): + """Provides storage and retrieval of disk image objects.""" + + @staticmethod + def load(): + """Factory method to return image service.""" + #TODO(gundlach): read from config. + class_ = LocalImageService + return class_() + + def index(self): + """ + Return a dict from opaque image id to image data. + """ + + def show(self, id): + """ + Returns a dict containing image data for the given opaque image id. + """ + + +class GlanceImageService(ImageService): + """Provides storage and retrieval of disk image objects within Glance.""" + # TODO(gundlach): once Glance has an API, build this. + pass + + +class LocalImageService(ImageService): + """Image service storing images to local disk.""" + + def __init__(self): + self._path = "/tmp/nova/images" + try: + os.makedirs(self._path) + except OSError: # exists + pass + + def _path_to(self, image_id=''): + return os.path.join(self._path, image_id) + + def _ids(self): + """The list of all image ids.""" + return os.listdir(self._path) + + def index(self): + return [ self.show(id) for id in self._ids() ] + + def show(self, id): + return pickle.load(open(self._path_to(id))) + + def create(self, data): + """ + Store the image data and return the new image id. + """ + id = ''.join(random.choice(string.letters) for _ in range(20)) + data['id'] = id + self.update(id, data) + return id + + def update(self, image_id, data): + """Replace the contents of the given image with the new data.""" + pickle.dump(data, open(self._path_to(image_id), 'w')) + + def delete(self, image_id): + """ + Delete the given image. Raises OSError if the image does not exist. 
+ """ + os.unlink(self._path_to(image_id)) -- cgit From 5f832cd5ea9fb858f5e8b09992cbd47d8d16f665 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Tue, 24 Aug 2010 16:17:06 -0400 Subject: Delete unused directory --- nova/api/services/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 nova/api/services/__init__.py diff --git a/nova/api/services/__init__.py b/nova/api/services/__init__.py deleted file mode 100644 index e69de29bb..000000000 -- cgit From 96ae5e2776218adfee2cb22a4c0d7358a498a451 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Tue, 24 Aug 2010 16:24:24 -0400 Subject: pep8 --- nova/api/rackspace/images.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/rackspace/images.py b/nova/api/rackspace/images.py index b7668a1e1..370980fe9 100644 --- a/nova/api/rackspace/images.py +++ b/nova/api/rackspace/images.py @@ -16,8 +16,8 @@ # under the License. from nova import datastore -from nova.api.rackspace import base from nova import image +from nova.api.rackspace import base from webob import exc class Controller(base.Controller): -- cgit From 3b70003d932607ccc13fe4cd9381475035603a70 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Tue, 24 Aug 2010 17:03:03 -0400 Subject: Flavors work --- nova/api/rackspace/flavors.py | 34 +++++++++++++++++++++++- nova/api/rackspace/images.py | 62 ++++--------------------------------------- 2 files changed, 38 insertions(+), 58 deletions(-) diff --git a/nova/api/rackspace/flavors.py b/nova/api/rackspace/flavors.py index 986f11434..8c5ffa438 100644 --- a/nova/api/rackspace/flavors.py +++ b/nova/api/rackspace/flavors.py @@ -15,4 +15,36 @@ # License for the specific language governing permissions and limitations # under the License. 
-class Controller(object): pass +from nova.api.rackspace import base +from nova.api.rackspace import _id_translator +from nova import flavor +from webob import exc + +class Controller(base.Controller): + + _serialization_metadata = { + 'application/xml': { + "attributes": { + "flavor": [ "id", "name", "ram", "disk" ] + } + } + } + + def __init__(self): + self._service = flavor.service.FlavorService.load() + self._id_translator = self._id_translator.RackspaceAPIIdTranslator( + "flavor", self._service.__class__.__name__) + + def index(self, req): + """Return all flavors.""" + items = self._service.index() + for flavor in items: + flavor['id'] = self._id_translator.to_rs_id(flavor['id']) + return dict(flavors=items) + + def show(self, req, id): + """Return data about the given flavor id.""" + opaque_id = self._id_translator.from_rs_id(id) + item = self._service.show(opaque_id) + item['id'] = id + return dict(flavor=item) diff --git a/nova/api/rackspace/images.py b/nova/api/rackspace/images.py index 36a26688c..09f55ea96 100644 --- a/nova/api/rackspace/images.py +++ b/nova/api/rackspace/images.py @@ -15,8 +15,8 @@ # License for the specific language governing permissions and limitations # under the License. -from nova import datastore from nova.api.rackspace import base +from nova.api.rackspace import _id_translator from nova.api.services import image from webob import exc @@ -33,34 +33,19 @@ class Controller(base.Controller): def __init__(self): self._service = image.ImageService.load() - self._id_translator = RackspaceAPIImageIdTranslator() - - def _to_rs_id(self, image_id): - """ - Convert an image id from the format of our ImageService strategy - to the Rackspace API format (an int). - """ - strategy = self._service.__class__.__name__ - return self._id_translator.to_rs_id(strategy, image_id) - - def _from_rs_id(self, rs_image_id): - """ - Convert an image id from the Rackspace API format (an int) to the - format of our ImageService strategy. 
- """ - strategy = self._service.__class__.__name__ - return self._id_translator.from_rs_id(strategy, rs_image_id) + self._id_translator = _id_translator.RackspaceAPIIdTranslator( + "image", self._service.__class__.__name__) def index(self, req): """Return all public images.""" data = self._service.index() for img in data: - img['id'] = self._to_rs_id(img['id']) + img['id'] = self._id_translator.to_rs_id(img['id']) return dict(images=data) def show(self, req, id): """Return data about the given image id.""" - opaque_id = self._from_rs_id(id) + opaque_id = self._id_translator.from_rs_id(id) img = self._service.show(opaque_id) img['id'] = id return dict(image=img) @@ -78,40 +63,3 @@ class Controller(base.Controller): # Users may not modify public images, and that's all that # we support for now. raise exc.HTTPNotFound() - - -class RackspaceAPIImageIdTranslator(object): - """ - Converts Rackspace API image ids to and from the id format for a given - strategy. - """ - - def __init__(self): - self._store = datastore.Redis.instance() - self._key_template = "rsapi.idstrategies.image.%s.%s" - - def to_rs_id(self, strategy_name, opaque_id): - """Convert an id from a strategy-specific one to a Rackspace one.""" - key = self._key_template % (strategy_name, "fwd") - result = self._store.hget(key, str(opaque_id)) - if result: # we have a mapping from opaque to RS for this strategy - return int(result) - else: - # Store the mapping. - nextid = self._store.incr("%s.lastid" % key) - if self._store.hsetnx(key, str(opaque_id), nextid): - # If someone else didn't beat us to it, store the reverse - # mapping as well. - key = self._key_template % (strategy_name, "rev") - self._store.hset(key, nextid, str(opaque_id)) - return nextid - else: - # Someone beat us to it; use their number instead, and - # discard nextid (which is OK -- we don't require that - # every int id be used.) 
- return int(self._store.hget(key, str(opaque_id))) - - def from_rs_id(self, strategy_name, rs_id): - """Convert a Rackspace id to a strategy-specific one.""" - key = self._key_template % (strategy_name, "rev") - return self._store.hget(key, rs_id) -- cgit From f4ff3290f86edfde896248ff5adaaed5f67dd963 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Tue, 24 Aug 2010 18:01:32 -0400 Subject: more cleanup --- nova/db/api.py | 5 +++ nova/db/sqlalchemy/api.py | 4 +++ nova/endpoint/cloud.py | 81 +++++++++++++++-------------------------------- nova/endpoint/images.py | 10 +++++- 4 files changed, 43 insertions(+), 57 deletions(-) diff --git a/nova/db/api.py b/nova/db/api.py index 107623f71..6c6938a21 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -155,6 +155,11 @@ def instance_get_all(context): return _impl.instance_get_all(context) +def instance_get_by_ip(context, ip): + """Gets an instance by fixed ipaddress or raise if it does not exist.""" + return _impl.instance_get_by_ip(context, ip) + + def instance_get_by_project(context, project_id): """Gets all instance belonging to a project.""" return _impl.instance_get_by_project(context, project_id) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 5708d4d5a..c78c358fc 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -161,6 +161,10 @@ def instance_get_all(context): return models.Instance.all() +def instance_get_by_ip(context, ip): + raise Exception("fixme(vish): add logic here!") + + def instance_get_by_project(context, project_id): session = models.NovaBase.get_session() query = session.query(models.Instance) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index dd489cd95..afb62cc69 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -64,13 +64,12 @@ class CloudController(object): sent to the other nodes. 
""" def __init__(self): - self.instdir = model.InstanceDirectory() self.setup() @property def instances(self): """ All instances in the system, as dicts """ - return self.instdir.all + return db.instance_get_all(None) @property def volumes(self): @@ -84,6 +83,8 @@ class CloudController(object): def setup(self): """ Ensure the keychains and folders exist. """ + # FIXME(ja): this should be moved to a nova-manage command, + # if not setup throw exceptions instead of running # Create keys folder, if it doesn't exist if not os.path.exists(FLAGS.keys_path): os.makedirs(FLAGS.keys_path) @@ -92,27 +93,23 @@ class CloudController(object): if not os.path.exists(root_ca_path): start = os.getcwd() os.chdir(FLAGS.ca_path) + # TODO: Do this with M2Crypto instead utils.runthis("Generating root CA: %s", "sh genrootca.sh") os.chdir(start) - # TODO: Do this with M2Crypto instead - - def get_instance_by_ip(self, ip): - return self.instdir.by_ip(ip) def _get_mpi_data(self, project_id): result = {} - for instance in self.instdir.all: - if instance['project_id'] == project_id: - line = '%s slots=%d' % (instance['private_dns_name'], - INSTANCE_TYPES[instance['instance_type']]['vcpus']) - if instance['key_name'] in result: - result[instance['key_name']].append(line) - else: - result[instance['key_name']] = [line] + for instance in db.instance_get_by_project(project_id): + line = '%s slots=%d' % (instance['private_dns_name'], + INSTANCE_TYPES[instance['instance_type']]['vcpus']) + if instance['key_name'] in result: + result[instance['key_name']].append(line) + else: + result[instance['key_name']] = [line] return result def get_metadata(self, ipaddress): - i = self.get_instance_by_ip(ipaddress) + i = db.instance_get_by_ip(ipaddress) if i is None: return None mpi = self._get_mpi_data(i['project_id']) @@ -252,17 +249,11 @@ class CloudController(object): @rbac.allow('projectmanager', 'sysadmin') def get_console_output(self, context, instance_id, **kwargs): # instance_id is passed in as a list 
of instances - instance = self._get_instance(context, instance_id[0]) + instance = db.instance_get(context, instance_id[0]) return rpc.call('%s.%s' % (FLAGS.compute_topic, instance['node_name']), {"method": "get_console_output", "args": {"instance_id": instance_id[0]}}) - def _get_user_id(self, context): - if context and context.user: - return context.user.id - else: - return None - @rbac.allow('projectmanager', 'sysadmin') def describe_volumes(self, context, **kwargs): volumes = [] @@ -301,12 +292,12 @@ class CloudController(object): @defer.inlineCallbacks def create_volume(self, context, size, **kwargs): # TODO(vish): refactor this to create the volume object here and tell service to create it - result = yield rpc.call(FLAGS.volume_topic, {"method": "create_volume", + volume_id = yield rpc.call(FLAGS.volume_topic, {"method": "create_volume", "args": {"size": size, "user_id": context.user.id, "project_id": context.project.id}}) # NOTE(vish): rpc returned value is in the result key in the dictionary - volume = self._get_volume(context, result) + volume = db.volume_get(context, volume_id) defer.returnValue({'volumeSet': [self.format_volume(context, volume)]}) def _get_address(self, context, public_ip): @@ -316,31 +307,9 @@ class CloudController(object): return address raise exception.NotFound("Address at ip %s not found" % public_ip) - def _get_image(self, context, image_id): - """passes in context because - objectstore does its own authorization""" - result = images.list(context, [image_id]) - if not result: - raise exception.NotFound('Image %s could not be found' % image_id) - image = result[0] - return image - - def _get_instance(self, context, instance_id): - for instance in self.instdir.all: - if instance['instance_id'] == instance_id: - if context.user.is_admin() or instance['project_id'] == context.project.id: - return instance - raise exception.NotFound('Instance %s could not be found' % instance_id) - - def _get_volume(self, context, volume_id): - volume = 
service.get_volume(volume_id) - if context.user.is_admin() or volume['project_id'] == context.project.id: - return volume - raise exception.NotFound('Volume %s could not be found' % volume_id) - @rbac.allow('projectmanager', 'sysadmin') def attach_volume(self, context, volume_id, instance_id, device, **kwargs): - volume = self._get_volume(context, volume_id) + volume = db.volume_get(context, volume_id) if volume['status'] == "attached": raise exception.ApiError("Volume is already attached") # TODO(vish): looping through all volumes is slow. We should probably maintain an index @@ -348,7 +317,7 @@ class CloudController(object): if vol['instance_id'] == instance_id and vol['mountpoint'] == device: raise exception.ApiError("Volume %s is already attached to %s" % (vol['volume_id'], vol['mountpoint'])) volume.start_attach(instance_id, device) - instance = self._get_instance(context, instance_id) + instance = db.instance_get(context, instance_id) compute_node = instance['node_name'] rpc.cast('%s.%s' % (FLAGS.compute_topic, compute_node), {"method": "attach_volume", @@ -364,7 +333,7 @@ class CloudController(object): @rbac.allow('projectmanager', 'sysadmin') def detach_volume(self, context, volume_id, **kwargs): - volume = self._get_volume(context, volume_id) + volume = db.volume_get(context, volume_id) instance_id = volume.get('instance_id', None) if not instance_id: raise exception.Error("Volume isn't attached to anything!") @@ -372,7 +341,7 @@ class CloudController(object): raise exception.Error("Volume is already detached") try: volume.start_detach() - instance = self._get_instance(context, instance_id) + instance = db.instance_get(context, instance_id) rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), {"method": "detach_volume", "args": {"instance_id": instance_id, @@ -499,7 +468,7 @@ class CloudController(object): @rbac.allow('netadmin') @defer.inlineCallbacks def associate_address(self, context, instance_id, public_ip, **kwargs): - instance = 
self._get_instance(context, instance_id) + instance = db.instance_get(context, instance_id) address = self._get_address(context, public_ip) network_topic = yield self._get_network_topic(context) rpc.cast(network_topic, @@ -536,7 +505,7 @@ class CloudController(object): # make sure user can access the image # vpn image is private so it doesn't show up on lists if kwargs['image_id'] != FLAGS.vpn_image_id: - image = self._get_image(context, kwargs['image_id']) + image = images.get(context, kwargs['image_id']) # FIXME(ja): if image is cloudpipe, this breaks @@ -550,8 +519,8 @@ class CloudController(object): ramdisk_id = kwargs.get('ramdisk_id', ramdisk_id) # make sure we have access to kernel and ramdisk - self._get_image(context, kernel_id) - self._get_image(context, ramdisk_id) + images.get(context, kernel_id) + images.get(context, ramdisk_id) logging.debug("Going to run instances...") reservation_id = utils.generate_uid('r') @@ -648,7 +617,7 @@ class CloudController(object): def reboot_instances(self, context, instance_id, **kwargs): """instance_id is a list of instance ids""" for i in instance_id: - instance = self._get_instance(context, i) + instance = db.instance_get(context, i) rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), {"method": "reboot_instance", "args": {"instance_id": i}}) @@ -657,7 +626,7 @@ class CloudController(object): @rbac.allow('projectmanager', 'sysadmin') def delete_volume(self, context, volume_id, **kwargs): # TODO: return error if not authorized - volume = self._get_volume(context, volume_id) + volume = db.volume_get(context, volume_id) volume_node = volume['node_name'] rpc.cast('%s.%s' % (FLAGS.volume_topic, volume_node), {"method": "delete_volume", diff --git a/nova/endpoint/images.py b/nova/endpoint/images.py index 2a88d66af..f72c277a0 100644 --- a/nova/endpoint/images.py +++ b/nova/endpoint/images.py @@ -26,6 +26,7 @@ import urllib import boto.s3.connection +from nova import exception from nova import flags from nova 
import utils from nova.auth import manager @@ -55,7 +56,6 @@ def register(context, image_location): return image_id - def list(context, filter_list=[]): """ return a list of all images that a user can see @@ -71,6 +71,14 @@ def list(context, filter_list=[]): return [i for i in result if i['imageId'] in filter_list] return result +def get(context, image_id): + """return a image object if the context has permissions""" + result = list(context, [image_id]) + if not result: + raise exception.NotFound('Image %s could not be found' % image_id) + image = result[0] + return image + def deregister(context, image_id): """ unregister an image """ -- cgit From 304495ea8e7584a19b0e3738cf0069eb30b1ec01 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Tue, 24 Aug 2010 18:30:06 -0400 Subject: more work on getting running instances to work --- nova/endpoint/cloud.py | 49 ++++++++++++++++++++++++++----------------------- 1 file changed, 26 insertions(+), 23 deletions(-) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 826a4cb40..6f9370222 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -504,11 +504,12 @@ class CloudController(object): def run_instances(self, context, **kwargs): # make sure user can access the image # vpn image is private so it doesn't show up on lists - if kwargs['image_id'] != FLAGS.vpn_image_id: + vpn = kwargs['image_id'] == FLAGS.vpn_image_id + + if not vpn: image = images.get(context, kwargs['image_id']) - # FIXME(ja): if image is cloudpipe, this breaks - + # FIXME(ja): if image is vpn, this breaks # get defaults from imagestore image_id = image['imageId'] kernel_id = image.get('kernelId', FLAGS.default_kernel) @@ -523,7 +524,6 @@ class CloudController(object): images.get(context, ramdisk_id) logging.debug("Going to run instances...") - reservation_id = utils.generate_uid('r') launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) key_data = None if kwargs.has_key('key_name'): @@ -537,35 +537,38 @@ class 
CloudController(object): security_group = "default" network_ref = db.project_get_network(context, context.project.id) + + base_options = {} + base_options['image_id'] = image_id + base_options['kernel_id'] = kernel_id + base_options['ramdisk_id'] = ramdisk_id + base_options['reservation_id'] = utils.generate_uid('r') + base_options['key_data'] = key_data + base_options['key_name'] = kwargs.get('key_name', None) + base_options['user_id'] = context.user.id + base_options['project_id'] = context.project.id + base_options['user_data'] = kwargs.get('user_data', '') + base_options['instance_type'] = kwargs.get('instance_type', 'm1.small') + base_options['security_group'] = security_group for num in range(int(kwargs['max_count'])): - inst = {} - inst['image_id'] = image_id - inst['kernel_id'] = kernel_id - inst['ramdisk_id'] = ramdisk_id - instance_ref = db.instance_create(context, inst) - inst_id = instance_ref['id'] - if db.instance_is_vpn(instance_ref['id']): - fixed_ip = db.fixed_ip_allocate(context, network_ref['id']) - else: + inst_id = db.instance_create(context, base_options) + + if vpn: fixed_ip = db.network_get_vpn_ip(context, network_ref['id']) + else: + fixed_ip = db.fixed_ip_allocate(context, network_ref['id']) + + inst = {} inst['mac_address'] = utils.generate_mac() - inst['user_data'] = kwargs.get('user_data', '') - inst['instance_type'] = kwargs.get('instance_type', 'm1.small') - inst['reservation_id'] = reservation_id - inst['key_data'] = key_data - inst['key_name'] = kwargs.get('key_name', None) - inst['user_id'] = context.user.id # FIXME(ja) - inst['project_id'] = context.project.id # FIXME(ja) inst['launch_index'] = num - inst['security_group'] = security_group - inst['hostname'] = inst_id # FIXME(ja): id isn't assigned until create + inst['hostname'] = inst_id db.instance_update(context, inst_id, inst) # TODO(vish): This probably should be done in the scheduler # network is setup when host is assigned - network_topic = yield self.get_network_topic() + 
network_topic = yield self._get_network_topic(context) rpc.call(network_topic, {"method": "setup_fixed_ip", "args": {"fixed_ip": fixed_ip['id']}}) -- cgit From 3760ea4635174c26baeb5ba906621ff1abb2459f Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 24 Aug 2010 18:56:07 -0700 Subject: use vlan for network type since it works --- nova/network/service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/network/service.py b/nova/network/service.py index baaaff521..5590cce99 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -31,7 +31,7 @@ from nova.network import linux_net FLAGS = flags.FLAGS flags.DEFINE_string('network_type', - 'flat', + 'vlan', 'Service Class for Networking') flags.DEFINE_string('flat_network_bridge', 'br100', 'Bridge for simple network instances') -- cgit From 548ae499c29341d58ad18ed5262f965ad0b5b0a9 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 24 Aug 2010 19:15:35 -0700 Subject: fix setup compute network --- nova/compute/service.py | 5 +---- nova/network/service.py | 16 ++++++++-------- 2 files changed, 9 insertions(+), 12 deletions(-) diff --git a/nova/compute/service.py b/nova/compute/service.py index dd16484fe..a44f17a69 100644 --- a/nova/compute/service.py +++ b/nova/compute/service.py @@ -77,10 +77,7 @@ class ComputeService(service.Service): raise exception.Error("Instance has already been created") logging.debug("Starting instance %s..." 
% (instance_id)) - # NOTE(vish): passing network type allows us to express the - # network without making a call to network to find - # out which type of network to setup - network_service.setup_compute_network(instance_ref['project_id']) + network_service.setup_compute_network(context, instance_ref['project_id']) db.instance_update(context, instance_id, {'node_name': FLAGS.node_name}) # TODO(vish) check to make sure the availability zone matches diff --git a/nova/network/service.py b/nova/network/service.py index 5590cce99..2ead3d2c1 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -75,11 +75,11 @@ def type_to_class(network_type): raise exception.NotFound("Couldn't find %s network type" % network_type) -def setup_compute_network(project_id): +def setup_compute_network(context, project_id): """Sets up the network on a compute host""" - network = db.project_get_network(None, project_id) - srv = type_to_class(network.kind) - srv.setup_compute_network(network) + network_ref = db.project_get_network(None, project_id) + srv = type_to_class(network_ref.kind) + srv.setup_compute_network(context, network_ref['id']) class BaseNetworkService(service.Service): @@ -110,7 +110,7 @@ class BaseNetworkService(service.Service): raise NotImplementedError() @classmethod - def setup_compute_network(cls, network): + def setup_compute_network(cls, context, network_id): """Sets up matching network for compute hosts""" raise NotImplementedError() @@ -146,7 +146,7 @@ class FlatNetworkService(BaseNetworkService): """Basic network where no vlans are used""" @classmethod - def setup_compute_network(cls, network): + def setup_compute_network(cls, context, network_id): """Network is created manually""" pass @@ -209,8 +209,8 @@ class VlanNetworkService(BaseNetworkService): @classmethod - def setup_compute_network(cls, network_id): + def setup_compute_network(cls, context, network_id): """Sets up matching network for compute hosts""" - network_ref = db.network_get(network_id) 
+ network_ref = db.network_get(context, network_id) _driver.ensure_vlan_bridge(network_ref['vlan'], network_ref['bridge']) -- cgit From ac48bf5c1b4701640e69747c43ca10cf3442e6ff Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Tue, 24 Aug 2010 22:41:34 -0400 Subject: work towards volumes using db layer --- nova/db/api.py | 11 ++++++++++- nova/db/sqlalchemy/api.py | 12 ++++++++++++ nova/endpoint/cloud.py | 12 +++++++----- 3 files changed, 29 insertions(+), 6 deletions(-) diff --git a/nova/db/api.py b/nova/db/api.py index e4d79d16f..edc3b7bdc 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -195,7 +195,6 @@ def instance_get_by_name(context, name): return _impl.instance_get_by_project(context, name) - def instance_get_by_project(context, project_id): """Get all instance belonging to a project.""" return _impl.instance_get_by_project(context, project_id) @@ -356,6 +355,16 @@ def volume_get(context, volume_id): return _impl.volume_get(context, volume_id) +def volume_get_all(context): + """Get all volumes.""" + return _impl.volume_get_all(context) + + +def volume_get_by_project(context, project_id): + """Get all volumes belonging to a project.""" + return _impl.volume_get_by_project(context, project_id) + + def volume_get_shelf_and_blade(context, volume_id): """Get the shelf and blade allocated to the volume.""" return _impl.volume_get_shelf_and_blade(context, volume_id) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 2c5434b8f..2ce54a1d7 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -441,6 +441,18 @@ def volume_get(context, volume_id): return models.Volume.find(volume_id) +def volume_get_all(context): + return models.Volume.all() + + +def volume_get_by_project(context, project_id): + session = models.NovaBase.get_session() + query = session.query(models.Volume) + results = query.filter_by(project_id=project_id).all() + session.commit() + return results + + def volume_get_shelf_and_blade(context, volume_id): 
volume_ref = volume_get(context, volume_id) export_device = volume_ref.export_device diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 97d978ccd..e261abc7b 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -256,11 +256,13 @@ class CloudController(object): @rbac.allow('projectmanager', 'sysadmin') def describe_volumes(self, context, **kwargs): - volumes = [] - for volume in self.volumes: - if context.user.is_admin() or volume['project_id'] == context.project.id: - v = self.format_volume(context, volume) - volumes.append(v) + if context.user.is_admin(): + volumes = db.volume_get_all(context) + else: + volumes = db.volume_get_by_project(context, context.project.id) + + voluems = [self.format_volume(context, v) for v in volumes] + return defer.succeed({'volumeSet': volumes}) def format_volume(self, context, volume): -- cgit From df7f1cb26261a454e6885d151a0970c93d884163 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Tue, 24 Aug 2010 22:51:48 -0400 Subject: move create volume to work like instances --- nova/endpoint/cloud.py | 35 +++++++++++++++++++++-------------- nova/volume/service.py | 23 +++++++++-------------- 2 files changed, 30 insertions(+), 28 deletions(-) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index e261abc7b..e7e751b56 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -261,24 +261,24 @@ class CloudController(object): else: volumes = db.volume_get_by_project(context, context.project.id) - voluems = [self.format_volume(context, v) for v in volumes] + volumes = [self.format_volume(context, v) for v in volumes] - return defer.succeed({'volumeSet': volumes}) + return {'volumeSet': volumes} def format_volume(self, context, volume): v = {} - v['volumeId'] = volume['volume_id'] + v['volumeId'] = volume['id'] v['status'] = volume['status'] v['size'] = volume['size'] v['availabilityZone'] = volume['availability_zone'] - v['createTime'] = volume['create_time'] + # v['createTime'] = 
volume['create_time'] if context.user.is_admin(): v['status'] = '%s (%s, %s, %s, %s)' % ( - volume.get('status', None), - volume.get('user_id', None), - volume.get('node_name', None), - volume.get('instance_id', ''), - volume.get('mountpoint', '')) + volume['status'], + volume['user_id'], + 'node_name', + volume['instance_id'], + volume['mountpoint']) if volume['attach_status'] == 'attached': v['attachmentSet'] = [{'attachTime': volume['attach_time'], 'deleteOnTermination': volume['delete_on_termination'], @@ -293,11 +293,18 @@ class CloudController(object): @rbac.allow('projectmanager', 'sysadmin') @defer.inlineCallbacks def create_volume(self, context, size, **kwargs): - # TODO(vish): refactor this to create the volume object here and tell service to create it - volume_id = yield rpc.call(FLAGS.volume_topic, {"method": "create_volume", - "args": {"size": size, - "user_id": context.user.id, - "project_id": context.project.id}}) + vol = {} + vol['size'] = size + vol['user_id'] = context.user.id + vol['project_id'] = context.project.id + vol['availability_zone'] = FLAGS.storage_availability_zone + vol['status'] = "creating" + vol['attach_status'] = "detached" + volume_id = db.volume_create(context, vol) + + yield rpc.cast(FLAGS.volume_topic, {"method": "create_volume", + "args": {"volume_id": volume_id}}) + # NOTE(vish): rpc returned value is in the result key in the dictionary volume = db.volume_get(context, volume_id) defer.returnValue({'volumeSet': [self.format_volume(context, volume)]}) diff --git a/nova/volume/service.py b/nova/volume/service.py index 37781252a..0f3fa20f3 100644 --- a/nova/volume/service.py +++ b/nova/volume/service.py @@ -66,25 +66,20 @@ class VolumeService(service.Service): @defer.inlineCallbacks @validate.rangetest(size=(0, 1000)) - def create_volume(self, size, user_id, project_id, context=None): + def create_volume(self, volume_id, context=None): """ Creates an exported volume (fake or real), restarts exports to make it available. 
Volume at this point has size, owner, and zone. """ - logging.debug("Creating volume of size: %s" % (size)) - - vol = {} - vol['node_name'] = FLAGS.node_name - vol['size'] = size - vol['user_id'] = user_id - vol['project_id'] = project_id - vol['availability_zone'] = FLAGS.storage_availability_zone - vol['status'] = "creating" # creating | available | in-use - # attaching | attached | detaching | detached - vol['attach_status'] = "detached" - volume_id = db.volume_create(context, vol) - yield self._exec_create_volume(volume_id, size) + logging.debug("Creating volume %s" % (volume_id)) + + volume_ref = db.volume_get(volume_id) + + # db.volume_update(context, volume_id, {'node_name': FLAGS.node_name}) + + yield self._exec_create_volume(volume_id, volume_ref['size']) + (shelf_id, blade_id) = db.volume_allocate_shelf_and_blade(context, volume_id) yield self._exec_create_export(volume_id, shelf_id, blade_id) -- cgit From 3647e375a34565140e033704c496895761fef1c9 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Tue, 24 Aug 2010 23:09:00 -0400 Subject: small tweaks --- nova/endpoint/cloud.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index e7e751b56..64a705e6d 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -261,11 +261,11 @@ class CloudController(object): else: volumes = db.volume_get_by_project(context, context.project.id) - volumes = [self.format_volume(context, v) for v in volumes] + volumes = [self._format_volume(context, v) for v in volumes] return {'volumeSet': volumes} - def format_volume(self, context, volume): + def _format_volume(self, context, volume): v = {} v['volumeId'] = volume['id'] v['status'] = volume['status'] @@ -305,9 +305,8 @@ class CloudController(object): yield rpc.cast(FLAGS.volume_topic, {"method": "create_volume", "args": {"volume_id": volume_id}}) - # NOTE(vish): rpc returned value is in the result key in the dictionary volume = 
db.volume_get(context, volume_id) - defer.returnValue({'volumeSet': [self.format_volume(context, volume)]}) + defer.returnValue({'volumeSet': [self._format_volume(context, volume)]}) def _get_address(self, context, public_ip): # FIXME(vish) this should move into network.py @@ -343,8 +342,7 @@ class CloudController(object): @rbac.allow('projectmanager', 'sysadmin') def detach_volume(self, context, volume_id, **kwargs): volume = db.volume_get(context, volume_id) - instance_id = volume.get('instance_id', None) - if not instance_id: + if volume['instance_id'] is None: raise exception.Error("Volume isn't attached to anything!") if volume['status'] == "available": raise exception.Error("Volume is already detached") -- cgit From 736e4d1112247553c048798761fc41f26fc27456 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Wed, 25 Aug 2010 12:59:54 -0400 Subject: update volume create code --- nova/volume/service.py | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/nova/volume/service.py b/nova/volume/service.py index 0f3fa20f3..bcaabbd6d 100644 --- a/nova/volume/service.py +++ b/nova/volume/service.py @@ -72,22 +72,33 @@ class VolumeService(service.Service): restarts exports to make it available. Volume at this point has size, owner, and zone. 
""" - logging.debug("Creating volume %s" % (volume_id)) + + logging.info("volume %s: creating" % (volume_id)) volume_ref = db.volume_get(volume_id) # db.volume_update(context, volume_id, {'node_name': FLAGS.node_name}) + logging.debug("volume %s: creating lv of size %sG" % (volume_id, size)) yield self._exec_create_volume(volume_id, volume_ref['size']) + logging.debug("volume %s: allocating shelf & blade" % (volume_id)) (shelf_id, blade_id) = db.volume_allocate_shelf_and_blade(context, volume_id) + + logging.debug("volume %s: exporting shelf %s & blade %s" % (volume_id, + shelf_id, blade_id)) + yield self._exec_create_export(volume_id, shelf_id, blade_id) # TODO(joshua): We need to trigger a fanout message # for aoe-discover on all the nodes + + logging.debug("volume %s: re-exporting all values" % (volume_id)) yield self._exec_ensure_exports() + db.volume_update(context, volume_id, {'status': 'available'}) - logging.debug("restarting exports") + + logging.debug("volume %s: created successfully" % (volume_id)) defer.returnValue(volume_id) @defer.inlineCallbacks @@ -134,8 +145,7 @@ class VolumeService(service.Service): defer.returnValue(None) yield process.simple_execute( "sudo vblade-persist setup %s %s %s /dev/%s/%s" % - (self, - shelf_id, + (shelf_id, blade_id, FLAGS.aoe_eth_dev, FLAGS.volume_group, @@ -147,12 +157,10 @@ class VolumeService(service.Service): if FLAGS.fake_storage: defer.returnValue(None) yield process.simple_execute( - "sudo vblade-persist stop %s %s" % (self, shelf_id, - blade_id), + "sudo vblade-persist stop %s %s" % (shelf_id, blade_id), terminate_on_stderr=False) yield process.simple_execute( - "sudo vblade-persist destroy %s %s" % (self, shelf_id, - blade_id), + "sudo vblade-persist destroy %s %s" % (shelf_id, blade_id), terminate_on_stderr=False) @defer.inlineCallbacks -- cgit From f3796786629d9374ba4813917694419a63dfb197 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Wed, 25 Aug 2010 13:11:30 -0400 Subject: missing context and move 
volume_update to before the export --- nova/volume/service.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/nova/volume/service.py b/nova/volume/service.py index bcaabbd6d..6a14d7177 100644 --- a/nova/volume/service.py +++ b/nova/volume/service.py @@ -65,23 +65,23 @@ class VolumeService(service.Service): self._exec_init_volumes() @defer.inlineCallbacks - @validate.rangetest(size=(0, 1000)) + # @validate.rangetest(size=(0, 1000)) def create_volume(self, volume_id, context=None): """ Creates an exported volume (fake or real), restarts exports to make it available. Volume at this point has size, owner, and zone. """ - logging.info("volume %s: creating" % (volume_id)) - volume_ref = db.volume_get(volume_id) + volume_ref = db.volume_get(context, volume_id) # db.volume_update(context, volume_id, {'node_name': FLAGS.node_name}) + size = volume_ref['size'] logging.debug("volume %s: creating lv of size %sG" % (volume_id, size)) - yield self._exec_create_volume(volume_id, volume_ref['size']) - + yield self._exec_create_volume(volume_id, size) + logging.debug("volume %s: allocating shelf & blade" % (volume_id)) (shelf_id, blade_id) = db.volume_allocate_shelf_and_blade(context, volume_id) @@ -93,11 +93,11 @@ class VolumeService(service.Service): # TODO(joshua): We need to trigger a fanout message # for aoe-discover on all the nodes + db.volume_update(context, volume_id, {'status': 'available'}) + logging.debug("volume %s: re-exporting all values" % (volume_id)) yield self._exec_ensure_exports() - db.volume_update(context, volume_id, {'status': 'available'}) - logging.debug("volume %s: created successfully" % (volume_id)) defer.returnValue(volume_id) -- cgit From 9fa4543e9f6c6c5bb0954954649b7c691e462e3c Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Wed, 25 Aug 2010 13:27:36 -0400 Subject: improve the volume export - sleep & check export --- nova/volume/service.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git 
a/nova/volume/service.py b/nova/volume/service.py index 6a14d7177..7e32f2d8d 100644 --- a/nova/volume/service.py +++ b/nova/volume/service.py @@ -167,11 +167,12 @@ class VolumeService(service.Service): def _exec_ensure_exports(self): if FLAGS.fake_storage: defer.returnValue(None) - # NOTE(vish): these commands sometimes sends output to stderr for warnings + + yield process.simple_execute("sleep 5") # wait for blades to appear yield process.simple_execute("sudo vblade-persist auto all", - terminate_on_stderr=False) + check_exit_code=False) yield process.simple_execute("sudo vblade-persist start all", - terminate_on_stderr=False) + check_exit_code=False) @defer.inlineCallbacks def _exec_init_volumes(self): -- cgit From 674a5dae7c0630aef346e22950706db0caeb244b Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 25 Aug 2010 13:14:49 -0700 Subject: more data layer breakouts, lots of fixes to cloud.py --- nova/compute/service.py | 10 +- nova/db/api.py | 39 ++++++-- nova/db/sqlalchemy/api.py | 54 +++++++--- nova/endpoint/cloud.py | 221 +++++++++++++++++------------------------ nova/models.py | 37 +++++-- nova/tests/network_unittest.py | 2 +- 6 files changed, 197 insertions(+), 166 deletions(-) diff --git a/nova/compute/service.py b/nova/compute/service.py index a44f17a69..877246ef6 100644 --- a/nova/compute/service.py +++ b/nova/compute/service.py @@ -73,7 +73,7 @@ class ComputeService(service.Service): def run_instance(self, instance_id, context=None, **_kwargs): """Launch a new instance with specified options.""" instance_ref = db.instance_get(context, instance_id) - if instance_ref['name'] in self._conn.list_instances(): + if instance_ref['str_id'] in self._conn.list_instances(): raise exception.Error("Instance has already been created") logging.debug("Starting instance %s..." 
% (instance_id)) @@ -87,7 +87,7 @@ class ComputeService(service.Service): yield self._conn.spawn(instance_ref) except: logging.exception("Failed to spawn instance %s" % - instance_ref['name']) + instance_ref['str_id']) db.instance_state(context, instance_id, power_state.SHUTDOWN) self.update_state(instance_id, context) @@ -127,11 +127,11 @@ class ComputeService(service.Service): raise exception.Error( 'trying to reboot a non-running' 'instance: %s (state: %s excepted: %s)' % - (instance_ref['name'], + (instance_ref['str_id'], instance_ref['state'], power_state.RUNNING)) - logging.debug('rebooting instance %s' % instance_ref['name']) + logging.debug('rebooting instance %s' % instance_ref['str_id']) db.instance_state( context, instance_id, power_state.NOSTATE, 'rebooting') yield self._conn.reboot(instance_ref) @@ -147,7 +147,7 @@ class ComputeService(service.Service): if FLAGS.connection_type == 'libvirt': fname = os.path.abspath(os.path.join(FLAGS.instances_path, - instance_ref['name'], + instance_ref['str_id'], 'console.log')) with open(fname, 'r') as f: output = f.read() diff --git a/nova/db/api.py b/nova/db/api.py index edc3b7bdc..9efbcf76b 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -47,6 +47,7 @@ _impl = utils.LazyPluggable(FLAGS['db_backend'], class AddressNotAllocated(exception.Error): pass + class NoMoreAddresses(exception.Error): pass @@ -185,14 +186,9 @@ def instance_get_all(context): return _impl.instance_get_all(context) -def instance_get_by_ip(context, ip): - """Gets an instance by fixed ipaddress or raise if it does not exist.""" - return _impl.instance_get_by_ip(context, ip) - - -def instance_get_by_name(context, name): - """Get an instance by name.""" - return _impl.instance_get_by_project(context, name) +def instance_get_by_address(context, address): + """Gets an instance by fixed ip address or raise if it does not exist.""" + return _impl.instance_get_by_address(context, address) def instance_get_by_project(context, project_id): @@ -205,9 
+201,24 @@ def instance_get_by_reservation(context, reservation_id): return _impl.instance_get_by_reservation(context, reservation_id) +def instance_get_fixed_address(context, instance_id): + """Get the fixed ip address of an instance.""" + return _impl.instance_get_fixed_address(context, instance_id) + + +def instance_get_floating_address(context, instance_id): + """Get the first floating ip address of an instance.""" + return _impl.instance_get_floating_address(context, instance_id) + + +def instance_get_by_str(context, str_id): + """Get an instance by string id.""" + return _impl.instance_get_by_str(context, str_id) + + def instance_get_host(context, instance_id): """Get the host that the instance is running on.""" - return _impl.instance_get_all(context, instance_id) + return _impl.instance_get_host(context, instance_id) def instance_is_vpn(context, instance_id): @@ -365,6 +376,16 @@ def volume_get_by_project(context, project_id): return _impl.volume_get_by_project(context, project_id) +def volume_get_by_str(context, str_id): + """Get a volume by string id.""" + return _impl.volume_get_by_str(context, str_id) + + +def volume_get_host(context, volume_id): + """Get the host that the volume is running on.""" + return _impl.volume_get_host(context, volume_id) + + def volume_get_shelf_and_blade(context, volume_id): """Get the shelf and blade allocated to the volume.""" return _impl.volume_get_shelf_and_blade(context, volume_id) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 2ce54a1d7..047a6c108 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -70,21 +70,21 @@ def floating_ip_allocate_address(context, node_name, project_id): def floating_ip_fixed_ip_associate(context, floating_address, fixed_address): - floating_ip_ref = models.FloatingIp.find_by_ip_str(floating_address) - fixed_ip_ref = models.FixedIp.find_by_ip_str(fixed_address) + floating_ip_ref = models.FloatingIp.find_by_str(floating_address) + fixed_ip_ref = 
models.FixedIp.find_by_str(fixed_address) floating_ip_ref.fixed_ip = fixed_ip_ref floating_ip_ref.save() def floating_ip_disassociate(context, address): - floating_ip_ref = models.FloatingIp.find_by_ip_str(address) + floating_ip_ref = models.FloatingIp.find_by_str(address) fixed_ip_address = floating_ip_ref.fixed_ip['ip_str'] floating_ip_ref['fixed_ip'] = None floating_ip_ref.save() return fixed_ip_address def floating_ip_deallocate(context, address): - floating_ip_ref = models.FloatingIp.find_by_ip_str(address) + floating_ip_ref = models.FloatingIp.find_by_str(address) floating_ip_ref['project_id'] = None floating_ip_ref.save() @@ -108,11 +108,11 @@ def fixed_ip_allocate(context, network_id): def fixed_ip_get_by_address(context, address): - return models.FixedIp.find_by_ip_str(address) + return models.FixedIp.find_by_str(address) def fixed_ip_get_network(context, address): - return models.FixedIp.find_by_ip_str(address).network + return models.FixedIp.find_by_str(address).network def fixed_ip_lease(context, address): @@ -172,13 +172,11 @@ def instance_get_all(context): return models.Instance.all() -def instance_get_by_ip(context, ip): - raise Exception("fixme(vish): add logic here!") - - -def instance_get_by_name(context, name): - # NOTE(vish): remove the 'i-' - return models.Instance.find(name[2:]) +def instance_get_by_address(context, address): + fixed_ip_ref = db.fixed_ip_get_by_address(address) + if not fixed_ip_ref.instance: + raise exception.NotFound("No instance found for address %s" % address) + return fixed_ip_ref.instance def instance_get_by_project(context, project_id): @@ -197,6 +195,27 @@ def instance_get_by_reservation(context, reservation_id): return results +def instance_get_by_str(context, str_id): + return models.Instance.find_by_str(str_id) + + +def instance_get_fixed_address(context, instance_id): + instance_ref = instance_get(context, instance_id) + if not instance_ref.fixed_ip: + return None + return instance_ref.fixed_ip['str_id'] + + +def 
instance_get_floating_address(context, instance_id): + instance_ref = instance_get(context, instance_id) + if not instance_ref.fixed_ip: + return None + if not instance_ref.fixed_ip.floating_ips: + return None + # NOTE(vish): this just returns the first floating ip + return instance_ref.fixed_ip.floating_ips[0]['str_id'] + + def instance_get_host(context, instance_id): instance_ref = instance_get(context, instance_id) return instance_ref['node_name'] @@ -453,6 +472,15 @@ def volume_get_by_project(context, project_id): return results +def volume_get_by_str(context, str_id): + return models.Volume.find_by_str(str_id) + + +def volume_get_host(context, volume_id): + volume_ref = volume_get(context, volume_id) + return volume_ref['node_name'] + + def volume_get_shelf_and_blade(context, volume_id): volume_ref = volume_get(context, volume_id) export_device = volume_ref.export_device diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 64a705e6d..ffe3d3cc7 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -32,16 +32,12 @@ from twisted.internet import defer from nova import db from nova import exception from nova import flags -from nova import models from nova import rpc from nova import utils from nova.auth import rbac from nova.auth import manager -from nova.compute import model from nova.compute.instance_types import INSTANCE_TYPES from nova.endpoint import images -from nova.network import service as network_service -from nova.volume import service FLAGS = flags.FLAGS @@ -66,18 +62,6 @@ class CloudController(object): def __init__(self): self.setup() - @property - def instances(self): - """ All instances in the system, as dicts """ - return db.instance_get_all(None) - - @property - def volumes(self): - """ returns a list of all volumes """ - for volume_id in datastore.Redis.instance().smembers("volumes"): - volume = service.get_volume(volume_id) - yield volume - def __str__(self): return 'CloudController' @@ -100,7 +84,7 @@ class 
CloudController(object): def _get_mpi_data(self, project_id): result = {} for instance in db.instance_get_by_project(project_id): - line = '%s slots=%d' % (instance['private_dns_name'], + line = '%s slots=%d' % (instance.fixed_ip['str_id'], INSTANCE_TYPES[instance['instance_type']]['vcpus']) if instance['key_name'] in result: result[instance['key_name']].append(line) @@ -109,7 +93,7 @@ class CloudController(object): return result def get_metadata(self, ipaddress): - i = db.instance_get_by_ip(ipaddress) + i = db.instance_get_by_address(ipaddress) if i is None: return None mpi = self._get_mpi_data(i['project_id']) @@ -122,12 +106,7 @@ class CloudController(object): } else: keys = '' - - address_record = network_model.FixedIp(i['private_dns_name']) - if address_record: - hostname = address_record['hostname'] - else: - hostname = 'ip-%s' % i['private_dns_name'].replace('.', '-') + hostname = i['hostname'] data = { 'user-data': base64.b64decode(i['user_data']), 'meta-data': { @@ -249,10 +228,11 @@ class CloudController(object): @rbac.allow('projectmanager', 'sysadmin') def get_console_output(self, context, instance_id, **kwargs): # instance_id is passed in as a list of instances - instance = db.instance_get(context, instance_id[0]) - return rpc.call('%s.%s' % (FLAGS.compute_topic, instance['node_name']), - {"method": "get_console_output", - "args": {"instance_id": instance_id[0]}}) + instance_ref = db.instance_get_by_str(context, instance_id[0]) + return rpc.call('%s.%s' % (FLAGS.compute_topic, + instance_ref['node_name']), + {"method": "get_console_output", + "args": {"instance_id": instance_ref['id']}}) @rbac.allow('projectmanager', 'sysadmin') def describe_volumes(self, context, **kwargs): @@ -267,7 +247,7 @@ class CloudController(object): def _format_volume(self, context, volume): v = {} - v['volumeId'] = volume['id'] + v['volumeId'] = volume['str_id'] v['status'] = volume['status'] v['size'] = volume['size'] v['availabilityZone'] = volume['availability_zone'] @@ 
-298,7 +278,7 @@ class CloudController(object): vol['user_id'] = context.user.id vol['project_id'] = context.project.id vol['availability_zone'] = FLAGS.storage_availability_zone - vol['status'] = "creating" + vol['status'] = "creating" vol['attach_status'] = "detached" volume_id = db.volume_create(context, vol) @@ -308,61 +288,54 @@ class CloudController(object): volume = db.volume_get(context, volume_id) defer.returnValue({'volumeSet': [self._format_volume(context, volume)]}) - def _get_address(self, context, public_ip): - # FIXME(vish) this should move into network.py - address = network_model.FloatingIp.lookup(public_ip) - if address and (context.user.is_admin() or address['project_id'] == context.project.id): - return address - raise exception.NotFound("Address at ip %s not found" % public_ip) @rbac.allow('projectmanager', 'sysadmin') def attach_volume(self, context, volume_id, instance_id, device, **kwargs): - volume = db.volume_get(context, volume_id) - if volume['status'] == "attached": + volume_ref = db.volume_get_by_str(context, volume_id) + # TODO(vish): abstract status checking? + if volume_ref['status'] == "attached": raise exception.ApiError("Volume is already attached") - # TODO(vish): looping through all volumes is slow. 
We should probably maintain an index - for vol in self.volumes: - if vol['instance_id'] == instance_id and vol['mountpoint'] == device: - raise exception.ApiError("Volume %s is already attached to %s" % (vol['volume_id'], vol['mountpoint'])) - volume.start_attach(instance_id, device) - instance = db.instance_get(context, instance_id) - compute_node = instance['node_name'] - rpc.cast('%s.%s' % (FLAGS.compute_topic, compute_node), + #volume.start_attach(instance_id, device) + instance_ref = db.instance_get_by_str(context, instance_id) + host = db.instance_get_host(context, instance_ref['id']) + rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "attach_volume", - "args": {"volume_id": volume_id, - "instance_id": instance_id, + "args": {"volume_id": volume_ref['id'], + "instance_id": instance_ref['id'], "mountpoint": device}}) - return defer.succeed({'attachTime': volume['attach_time'], - 'device': volume['mountpoint'], - 'instanceId': instance_id, + return defer.succeed({'attachTime': volume_ref['attach_time'], + 'device': volume_ref['mountpoint'], + 'instanceId': instance_ref['id_str'], 'requestId': context.request_id, - 'status': volume['attach_status'], - 'volumeId': volume_id}) + 'status': volume_ref['attach_status'], + 'volumeId': volume_ref['id']}) @rbac.allow('projectmanager', 'sysadmin') def detach_volume(self, context, volume_id, **kwargs): - volume = db.volume_get(context, volume_id) - if volume['instance_id'] is None: + volume_ref = db.volume_get_by_str(context, volume_id) + instance_ref = db.volume_get_instance(context, volume_ref['id']) + if not instance_ref: raise exception.Error("Volume isn't attached to anything!") - if volume['status'] == "available": + # TODO(vish): abstract status checking? 
+ if volume_ref['status'] == "available": raise exception.Error("Volume is already detached") try: - volume.start_detach() - instance = db.instance_get(context, instance_id) - rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), + #volume.start_detach() + host = db.instance_get_host(context, instance_ref['id']) + rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "detach_volume", - "args": {"instance_id": instance_id, - "volume_id": volume_id}}) + "args": {"instance_id": instance_ref['id'], + "volume_id": volume_ref['id']}}) except exception.NotFound: # If the instance doesn't exist anymore, # then we need to call detach blind - volume.finish_detach() - return defer.succeed({'attachTime': volume['attach_time'], - 'device': volume['mountpoint'], - 'instanceId': instance_id, + db.volume_detached(context) + return defer.succeed({'attachTime': volume_ref['attach_time'], + 'device': volume_ref['mountpoint'], + 'instanceId': instance_ref['id_str'], 'requestId': context.request_id, - 'status': volume['attach_status'], - 'volumeId': volume_id}) + 'status': volume_ref['attach_status'], + 'volumeId': volume_ref['id']}) def _convert_to_set(self, lst, label): if lst == None or lst == []: @@ -397,15 +370,18 @@ class CloudController(object): if instance['image_id'] == FLAGS.vpn_image_id: continue i = {} - i['instanceId'] = instance['name'] + i['instanceId'] = instance['str_id'] i['imageId'] = instance['image_id'] i['instanceState'] = { - 'code': instance.state, - 'name': instance.state_description + 'code': instance['state'], + 'name': instance['state_description'] } - i['public_dns_name'] = None #network_model.get_public_ip_for_instance( - # i['instance_id']) - i['private_dns_name'] = instance.fixed_ip['ip_str'] + floating_addr = db.instance_get_floating_address(context, + instance['id']) + i['public_dns_name'] = floating_addr + fixed_addr = db.instance_get_fixed_address(context, + instance['id']) + i['private_dns_name'] = fixed_addr if not 
i['public_dns_name']: i['public_dns_name'] = i['private_dns_name'] i['dns_name'] = None @@ -435,20 +411,23 @@ class CloudController(object): def format_addresses(self, context): addresses = [] - for address in network_model.FloatingIp.all(): - # TODO(vish): implement a by_project iterator for addresses - if (context.user.is_admin() or - address['project_id'] == context.project.id): - address_rv = { - 'public_ip': address['address'], - 'instance_id': address.get('instance_id', 'free') - } - if context.user.is_admin(): - address_rv['instance_id'] = "%s (%s, %s)" % ( - address['instance_id'], - address['user_id'], - address['project_id'], - ) + if context.user.is_admin(): + iterator = db.floating_ip_get_all(context) + else: + iterator = db.floating_ip_get_by_project(context, + context.project.id) + for floating_ip_ref in iterator: + address = floating_ip_ref['id_str'] + instance_ref = db.instance_get_by_address(address) + address_rv = { + 'public_ip': address, + 'instance_id': instance_ref['id_str'] + } + if context.user.is_admin(): + address_rv['instance_id'] = "%s (%s)" % ( + address_rv['instance_id'], + floating_ip_ref['project_id'], + ) addresses.append(address_rv) return {'addressesSet': addresses} @@ -458,41 +437,42 @@ class CloudController(object): network_topic = yield self._get_network_topic(context) public_ip = yield rpc.call(network_topic, {"method": "allocate_floating_ip", - "args": {"user_id": context.user.id, - "project_id": context.project.id}}) + "args": {"project_id": context.project.id}}) defer.returnValue({'addressSet': [{'publicIp': public_ip}]}) @rbac.allow('netadmin') @defer.inlineCallbacks def release_address(self, context, public_ip, **kwargs): # NOTE(vish): Should we make sure this works? 
+ floating_ip_ref = db.floating_ip_get_by_address(context, public_ip) network_topic = yield self._get_network_topic(context) rpc.cast(network_topic, {"method": "deallocate_floating_ip", - "args": {"floating_ip": public_ip}}) + "args": {"floating_ip": floating_ip_ref['str_id']}}) defer.returnValue({'releaseResponse': ["Address released."]}) @rbac.allow('netadmin') @defer.inlineCallbacks def associate_address(self, context, instance_id, public_ip, **kwargs): - instance = db.instance_get(context, instance_id) - address = self._get_address(context, public_ip) + instance_ref = db.instance_get_by_str(context, instance_id) + fixed_ip_ref = db.fixed_ip_get_by_instance(context, instance_ref['id']) + floating_ip_ref = db.floating_ip_get_by_address(context, public_ip) network_topic = yield self._get_network_topic(context) rpc.cast(network_topic, {"method": "associate_floating_ip", - "args": {"floating_ip": address['address'], - "fixed_ip": instance['private_dns_name'], - "instance_id": instance['instance_id']}}) + "args": {"floating_ip": floating_ip_ref['str_id'], + "fixed_ip": fixed_ip_ref['str_id'], + "instance_id": instance_ref['id']}}) defer.returnValue({'associateResponse': ["Address associated."]}) @rbac.allow('netadmin') @defer.inlineCallbacks def disassociate_address(self, context, public_ip, **kwargs): - address = self._get_address(context, public_ip) + floating_ip_ref = db.floating_ip_get_by_address(context, public_ip) network_topic = yield self._get_network_topic(context) rpc.cast(network_topic, {"method": "disassociate_floating_ip", - "args": {"floating_ip": address['address']}}) + "args": {"floating_ip": floating_ip_ref['str_id']}}) defer.returnValue({'disassociateResponse': ["Address disassociated."]}) @defer.inlineCallbacks @@ -596,13 +576,13 @@ class CloudController(object): def terminate_instances(self, context, instance_id, **kwargs): logging.debug("Going to start terminating instances") # network_topic = yield self._get_network_topic(context) - for name in 
instance_id: - logging.debug("Going to try and terminate %s" % name) + for id_str in instance_id: + logging.debug("Going to try and terminate %s" % id_str) try: - instance_ref = db.instance_get_by_name(context, name) + instance_ref = db.instance_get_by_str(context, id_str) except exception.NotFound: logging.warning("Instance %s was not found during terminate" - % name) + % id_str) continue # FIXME(ja): where should network deallocate occur? @@ -631,7 +611,7 @@ class CloudController(object): # NOTE(joshua?): It's also internal default rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "terminate_instance", - "args": {"instance_id": name}}) + "args": {"instance_id": instance_ref['id']}}) else: db.instance_destroy(context, instance_ref['id']) # defer.returnValue(True) @@ -640,19 +620,20 @@ class CloudController(object): @rbac.allow('projectmanager', 'sysadmin') def reboot_instances(self, context, instance_id, **kwargs): """instance_id is a list of instance ids""" - for i in instance_id: - instance = db.instance_get(context, i) - rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), - {"method": "reboot_instance", - "args": {"instance_id": i}}) + for id_str in instance_id: + instance_ref = db.instance_get_by_str(context, id_str) + host = db.instance_get_host(context, instance_ref['id']) + rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "reboot_instance", + "args": {"instance_id": instance_ref['id']}}) return defer.succeed(True) @rbac.allow('projectmanager', 'sysadmin') def delete_volume(self, context, volume_id, **kwargs): # TODO: return error if not authorized - volume = db.volume_get(context, volume_id) - volume_node = volume['node_name'] - rpc.cast('%s.%s' % (FLAGS.volume_topic, volume_node), + volume_ref = db.volume_get_by_str(context, volume_id) + host = db.volume_get_host(context, volume_ref['id']) + rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "delete_volume", "args": 
{"volume_id": volume_id}}) return defer.succeed(True) @@ -705,23 +686,3 @@ class CloudController(object): raise exception.ApiError('operation_type must be add or remove') result = images.modify(context, image_id, operation_type) return defer.succeed(result) - - def update_state(self, topic, value): - """ accepts status reports from the queue and consolidates them """ - # TODO(jmc): if an instance has disappeared from - # the node, call instance_death - if topic == "instances": - return defer.succeed(True) - aggregate_state = getattr(self, topic) - node_name = value.keys()[0] - items = value[node_name] - - logging.debug("Updating %s state for %s" % (topic, node_name)) - - for item_id in items.keys(): - if (aggregate_state.has_key('pending') and - aggregate_state['pending'].has_key(item_id)): - del aggregate_state['pending'][item_id] - aggregate_state[node_name] = items - - return defer.succeed(True) diff --git a/nova/models.py b/nova/models.py index c7ca9bb74..7ad379814 100644 --- a/nova/models.py +++ b/nova/models.py @@ -40,6 +40,7 @@ flags.DEFINE_string('sql_connection', class NovaBase(object): __table_args__ = {'mysql_engine':'InnoDB'} + __prefix__ = 'none' created_at = Column(DateTime) updated_at = Column(DateTime) @@ -86,6 +87,15 @@ class NovaBase(object): except exc.NoResultFound: raise exception.NotFound("No model for id %s" % obj_id) + @classmethod + def find_by_str(cls, str_id): + id = int(str_id.rpartition('-')[2]) + return cls.find(id) + + @property + def str_id(self): + return "%s-%s" % (self.__prefix__, self.id) + def save(self): session = NovaBase.get_session() session.add(self) @@ -109,6 +119,7 @@ class NovaBase(object): class Image(Base, NovaBase): __tablename__ = 'images' + __prefix__ = 'ami' id = Column(Integer, primary_key=True) user_id = Column(String(255))#, ForeignKey('users.id'), nullable=False) project_id = Column(String(255))#, ForeignKey('projects.id'), nullable=False) @@ -166,6 +177,7 @@ class Daemon(Base, NovaBase): class Instance(Base, 
NovaBase): __tablename__ = 'instances' + __prefix__ = 'i' id = Column(Integer, primary_key=True) user_id = Column(String(255)) #, ForeignKey('users.id'), nullable=False) @@ -182,7 +194,7 @@ class Instance(Base, NovaBase): # TODO(vish): make this opaque somehow @property def name(self): - return "i-%s" % self.id + return self.str_id image_id = Column(Integer, ForeignKey('images.id'), nullable=True) @@ -198,7 +210,7 @@ class Instance(Base, NovaBase): state_description = Column(String(255)) hostname = Column(String(255)) - physical_node_id = Column(Integer) + node_name = Column(String(255)) #, ForeignKey('physical_node.id')) instance_type = Column(Integer) @@ -230,6 +242,7 @@ class Instance(Base, NovaBase): class Volume(Base, NovaBase): __tablename__ = 'volumes' + __prefix__ = 'vol' id = Column(Integer, primary_key=True) user_id = Column(String(255)) #, ForeignKey('users.id'), nullable=False) @@ -267,15 +280,19 @@ class FixedIp(Base, NovaBase): leased = Column(Boolean, default=False) reserved = Column(Boolean, default=False) + @property + def str_id(self): + return self.ip_str + @classmethod - def find_by_ip_str(cls, ip_str): + def find_by_str(cls, str_id): session = NovaBase.get_session() try: - result = session.query(cls).filter_by(ip_str=ip_str).one() + result = session.query(cls).filter_by(ip_str=str_id).one() session.commit() return result except exc.NoResultFound: - raise exception.NotFound("No model for ip str %s" % ip_str) + raise exception.NotFound("No model for ip str %s" % str_id) class FloatingIp(Base, NovaBase): @@ -288,15 +305,19 @@ class FloatingIp(Base, NovaBase): project_id = Column(String(255)) #, ForeignKey('projects.id'), nullable=False) node_name = Column(String(255)) #, ForeignKey('physical_node.id')) + @property + def str_id(self): + return self.ip_str + @classmethod - def find_by_ip_str(cls, ip_str): + def find_by_str(cls, str_id): session = NovaBase.get_session() try: - result = session.query(cls).filter_by(ip_str=ip_str).one() + result = 
session.query(cls).filter_by(ip_str=str_id).one() session.commit() return result except exc.NoResultFound: - raise exception.NotFound("No model for ip str %s" % ip_str) + raise exception.NotFound("No model for ip str %s" % str_id) class Network(Base, NovaBase): diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index afa217673..d8a398aa4 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -81,7 +81,7 @@ class NetworkTestCase(test.TrialTestCase): pubnet = IPy.IP(flags.FLAGS.public_range) ip_str = str(pubnet[0]) try: - floating_ip = models.FloatingIp.find_by_ip_str(ip_str) + floating_ip = models.FloatingIp.find_by_str(ip_str) except exception.NotFound: floating_ip = models.FloatingIp() floating_ip.ip_str = ip_str -- cgit From 0828326898e3bc219c8205e27a3cc942e2790934 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 25 Aug 2010 16:27:01 -0400 Subject: Use compute.instance_types for flavor data instead of a FlavorService --- nova/api/rackspace/flavors.py | 27 +++++++++++++-------------- nova/compute/instance_types.py | 14 +++++++------- 2 files changed, 20 insertions(+), 21 deletions(-) diff --git a/nova/api/rackspace/flavors.py b/nova/api/rackspace/flavors.py index 8c5ffa438..59981f1c5 100644 --- a/nova/api/rackspace/flavors.py +++ b/nova/api/rackspace/flavors.py @@ -16,11 +16,11 @@ # under the License. 
from nova.api.rackspace import base -from nova.api.rackspace import _id_translator -from nova import flavor +from nova.compute import instance_types from webob import exc class Controller(base.Controller): + """Flavor controller for the Rackspace API.""" _serialization_metadata = { 'application/xml': { @@ -30,21 +30,20 @@ class Controller(base.Controller): } } - def __init__(self): - self._service = flavor.service.FlavorService.load() - self._id_translator = self._id_translator.RackspaceAPIIdTranslator( - "flavor", self._service.__class__.__name__) - def index(self, req): """Return all flavors.""" - items = self._service.index() - for flavor in items: - flavor['id'] = self._id_translator.to_rs_id(flavor['id']) + items = [self.show(req, id)['flavor'] for id in self._all_ids()] return dict(flavors=items) def show(self, req, id): """Return data about the given flavor id.""" - opaque_id = self._id_translator.from_rs_id(id) - item = self._service.show(opaque_id) - item['id'] = id - return dict(flavor=item) + for name, val in instance_types.INSTANCE_TYPES.iteritems(): + if val['flavorid'] == int(id): + item = dict(ram=val['memory_mb'], disk=val['local_gb'], + id=val['flavorid'], name=name) + return dict(flavor=item) + raise exc.HTTPNotFound() + + def _all_ids(self): + """Return the list of all flavorids.""" + return [i['flavorid'] for i in instance_types.INSTANCE_TYPES.values()] diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py index 439be3c7d..0102bae54 100644 --- a/nova/compute/instance_types.py +++ b/nova/compute/instance_types.py @@ -21,10 +21,10 @@ The built-in instance properties. 
""" -INSTANCE_TYPES = {} -INSTANCE_TYPES['m1.tiny'] = {'memory_mb': 512, 'vcpus': 1, 'local_gb': 0} -INSTANCE_TYPES['m1.small'] = {'memory_mb': 1024, 'vcpus': 1, 'local_gb': 10} -INSTANCE_TYPES['m1.medium'] = {'memory_mb': 2048, 'vcpus': 2, 'local_gb': 10} -INSTANCE_TYPES['m1.large'] = {'memory_mb': 4096, 'vcpus': 4, 'local_gb': 10} -INSTANCE_TYPES['m1.xlarge'] = {'memory_mb': 8192, 'vcpus': 4, 'local_gb': 10} -INSTANCE_TYPES['c1.medium'] = {'memory_mb': 2048, 'vcpus': 4, 'local_gb': 10} +INSTANCE_TYPES = { + 'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1), + 'm1.small': dict(memory_mb=1024, vcpus=1, local_gb=10, flavorid=2), + 'm1.medium': dict(memory_mb=2048, vcpus=2, local_gb=10, flavorid=3), + 'm1.large': dict(memory_mb=4096, vcpus=4, local_gb=10, flavorid=4), + 'm1.xlarge': dict(memory_mb=8192, vcpus=4, local_gb=10, flavorid=5), + 'c1.medium': dict(memory_mb=2048, vcpus=4, local_gb=10, flavorid=6)} -- cgit From 35c589d18651f576935bf6d742fcfac00f61433b Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 25 Aug 2010 13:33:25 -0700 Subject: move network_type flag so it is accesible in data layer --- nova/db/api.py | 3 +++ nova/network/service.py | 3 --- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/db/api.py b/nova/db/api.py index 9efbcf76b..a30ec2cd5 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -26,6 +26,9 @@ flags.DEFINE_string('db_backend', 'sqlalchemy', 'The backend to use for db') # TODO(vish): where should these flags go +flags.DEFINE_string('network_type', + 'vlan', + 'Service Class for Networking') flags.DEFINE_integer('vlan_start', 100, 'First VLAN for private networks') flags.DEFINE_integer('num_networks', 1000, 'Number of networks to support') flags.DEFINE_string('vpn_ip', utils.get_my_ip(), diff --git a/nova/network/service.py b/nova/network/service.py index 2ead3d2c1..de2c7a16c 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -30,9 +30,6 @@ from nova.network import linux_net 
FLAGS = flags.FLAGS -flags.DEFINE_string('network_type', - 'vlan', - 'Service Class for Networking') flags.DEFINE_string('flat_network_bridge', 'br100', 'Bridge for simple network instances') flags.DEFINE_list('flat_network_ips', -- cgit From cf0b5de1f78fd81ada2bada8c84e26b3238b8596 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 25 Aug 2010 16:46:53 -0400 Subject: Turn imageid translator into general translator for rackspace api ids --- nova/api/rackspace/_id_translator.py | 42 ++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 nova/api/rackspace/_id_translator.py diff --git a/nova/api/rackspace/_id_translator.py b/nova/api/rackspace/_id_translator.py new file mode 100644 index 000000000..aec5fb6a5 --- /dev/null +++ b/nova/api/rackspace/_id_translator.py @@ -0,0 +1,42 @@ +from nova import datastore + +class RackspaceAPIIdTranslator(object): + """ + Converts Rackspace API ids to and from the id format for a given + strategy. + """ + + def __init__(self, id_type, service_name): + """ + Creates a translator for ids of the given type (e.g. 'flavor'), for the + given storage service backend class name (e.g. 'LocalFlavorService'). + """ + + self._store = datastore.Redis.instance() + key_prefix = "rsapi.idtranslator.%s.%s" % (id_type, service_name) + # Forward (strategy format -> RS format) and reverse translation keys + self._fwd_key = "%s.fwd" % key_prefix + self._rev_key = "%s.rev" % key_prefix + + def to_rs_id(self, opaque_id): + """Convert an id from a strategy-specific one to a Rackspace one.""" + result = self._store.hget(self._fwd_key, str(opaque_id)) + if result: # we have a mapping from opaque to RS for this strategy + return int(result) + else: + # Store the mapping. + nextid = self._store.incr("%s.lastid" % self._fwd_key) + if self._store.hsetnx(self._fwd_key, str(opaque_id), nextid): + # If someone else didn't beat us to it, store the reverse + # mapping as well. 
+ self._store.hset(self._rev_key, nextid, str(opaque_id)) + return nextid + else: + # Someone beat us to it; use their number instead, and + # discard nextid (which is OK -- we don't require that + # every int id be used.) + return int(self._store.hget(self._fwd_key, str(opaque_id))) + + def from_rs_id(self, strategy_name, rs_id): + """Convert a Rackspace id to a strategy-specific one.""" + return self._store.hget(self._rev_key, rs_id) -- cgit From c9d3b7c3ae71bbbe6f3077dcee13be41a14a6733 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 25 Aug 2010 17:48:08 -0400 Subject: Support GET //detail --- nova/api/rackspace/__init__.py | 6 ++++-- nova/api/rackspace/flavors.py | 7 ++++++- nova/api/rackspace/images.py | 11 ++++++++--- nova/image/__init__.py | 0 4 files changed, 18 insertions(+), 6 deletions(-) create mode 100644 nova/image/__init__.py diff --git a/nova/api/rackspace/__init__.py b/nova/api/rackspace/__init__.py index 27e78f801..b4d666d63 100644 --- a/nova/api/rackspace/__init__.py +++ b/nova/api/rackspace/__init__.py @@ -74,8 +74,10 @@ class APIRouter(wsgi.Router): def __init__(self): mapper = routes.Mapper() mapper.resource("server", "servers", controller=servers.Controller()) - mapper.resource("image", "images", controller=images.Controller()) - mapper.resource("flavor", "flavors", controller=flavors.Controller()) + mapper.resource("image", "images", controller=images.Controller(), + collection={'detail': 'GET'}) + mapper.resource("flavor", "flavors", controller=flavors.Controller(), + collection={'detail': 'GET'}) mapper.resource("sharedipgroup", "sharedipgroups", controller=sharedipgroups.Controller()) super(APIRouter, self).__init__(mapper) diff --git a/nova/api/rackspace/flavors.py b/nova/api/rackspace/flavors.py index 59981f1c5..60b35c939 100644 --- a/nova/api/rackspace/flavors.py +++ b/nova/api/rackspace/flavors.py @@ -31,7 +31,12 @@ class Controller(base.Controller): } def index(self, req): - """Return all flavors.""" + """Return all flavors 
in brief.""" + return dict(flavors=[dict(id=flavor['id'], name=flavor['name']) + for flavor in self.detail(req)['flavors']]) + + def detail(self, req): + """Return all flavors in detail.""" items = [self.show(req, id)['flavor'] for id in self._all_ids()] return dict(flavors=items) diff --git a/nova/api/rackspace/images.py b/nova/api/rackspace/images.py index 06fb0d38f..2f3e928b9 100644 --- a/nova/api/rackspace/images.py +++ b/nova/api/rackspace/images.py @@ -15,7 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. -from nova import image +import nova.image.service from nova.api.rackspace import base from nova.api.rackspace import _id_translator from webob import exc @@ -32,12 +32,17 @@ class Controller(base.Controller): } def __init__(self): - self._service = image.ImageService.load() + self._service = nova.image.service.ImageService.load() self._id_translator = _id_translator.RackspaceAPIIdTranslator( "image", self._service.__class__.__name__) def index(self, req): - """Return all public images.""" + """Return all public images in brief.""" + return dict(images=[dict(id=img['id'], name=img['name']) + for img in self.detail(req)['images']]) + + def detail(self, req): + """Return all public images in detail.""" data = self._service.index() for img in data: img['id'] = self._id_translator.to_rs_id(img['id']) diff --git a/nova/image/__init__.py b/nova/image/__init__.py new file mode 100644 index 000000000..e69de29bb -- cgit From a6784ba13821dccfb852cff3ca16f7db30bb3c05 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 25 Aug 2010 16:44:10 -0700 Subject: network tests pass again --- bin/nova-dhcpbridge | 4 -- nova/db/api.py | 4 +- nova/flags.py | 19 +++--- nova/models.py | 5 -- nova/network/service.py | 2 - nova/tests/network_unittest.py | 137 +++++++++++++++++++++-------------------- 6 files changed, 83 insertions(+), 88 deletions(-) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index 
018293e24..6747a3a0e 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -46,10 +46,6 @@ def add_lease(_mac, ip_address, _hostname, _interface): """Set the IP that was assigned by the DHCP server.""" if FLAGS.fake_rabbit: logging.debug("leasing ip") - from nova import models - print models.FixedIp.count() - print models.Network.count() - print FLAGS.sql_connection service.VlanNetworkService().lease_fixed_ip(ip_address) else: rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name), diff --git a/nova/db/api.py b/nova/db/api.py index a30ec2cd5..2f759cb44 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -138,7 +138,7 @@ def fixed_ip_get_by_address(context, address): def fixed_ip_get_network(context, address): - """Get a fixed ip by address.""" + """Get a network for a fixed ip by address.""" return _impl.fixed_ip_get_network(context, address) def fixed_ip_lease(context, address): @@ -280,12 +280,12 @@ def network_get_associated_fixed_ips(context, network_id): """Get all network's ips that have been associated.""" return _impl.network_get_associated_fixed_ips(context, network_id) + def network_get_by_bridge(context, bridge): """Get an network or raise if it does not exist.""" return _impl.network_get_by_bridge(context, bridge) - def network_get_host(context, network_id): """Get host assigned to network or raise""" return _impl.network_get_host(context, network_id) diff --git a/nova/flags.py b/nova/flags.py index e3feb252d..d4b2b7c3b 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -22,6 +22,7 @@ where they're used. """ import getopt +import os import socket import sys @@ -34,7 +35,7 @@ class FlagValues(gflags.FlagValues): Unknown flags will be ignored when parsing the command line, but the command line will be kept so that it can be replayed if new flags are defined after the initial parsing. 
- + """ def __init__(self): @@ -50,7 +51,7 @@ class FlagValues(gflags.FlagValues): # leftover args at the end sneaky_unparsed_args = {"value": None} original_argv = list(argv) - + if self.IsGnuGetOpt(): orig_getopt = getattr(getopt, 'gnu_getopt') orig_name = 'gnu_getopt' @@ -74,14 +75,14 @@ class FlagValues(gflags.FlagValues): unparsed_args = sneaky_unparsed_args['value'] if unparsed_args: if self.IsGnuGetOpt(): - args = argv[:1] + unparsed + args = argv[:1] + unparsed_args else: args = argv[:1] + original_argv[-len(unparsed_args):] else: args = argv[:1] finally: setattr(getopt, orig_name, orig_getopt) - + # Store the arguments for later, we'll need them for new flags # added at runtime self.__dict__['__stored_argv'] = original_argv @@ -92,7 +93,7 @@ class FlagValues(gflags.FlagValues): def SetDirty(self, name): """Mark a flag as dirty so that accessing it will case a reparse.""" self.__dict__['__dirty'].append(name) - + def IsDirty(self, name): return name in self.__dict__['__dirty'] @@ -113,12 +114,12 @@ class FlagValues(gflags.FlagValues): for k in self.__dict__['__dirty']: setattr(self, k, getattr(new_flags, k)) self.ClearDirty() - + def __setitem__(self, name, flag): gflags.FlagValues.__setitem__(self, name, flag) if self.WasAlreadyParsed(): self.SetDirty(name) - + def __getitem__(self, name): if self.IsDirty(name): self.ParseNewFlags() @@ -208,3 +209,7 @@ DEFINE_string('node_availability_zone', 'nova', DEFINE_string('node_name', socket.gethostname(), 'name of this node') +DEFINE_string('sql_connection', + 'sqlite:///%s/nova.sqlite' % os.path.abspath("./"), + 'connection string for sql database') + diff --git a/nova/models.py b/nova/models.py index 7ad379814..36d6cf3ad 100644 --- a/nova/models.py +++ b/nova/models.py @@ -19,7 +19,6 @@ """ SQLAlchemy models for nova data """ -import os from sqlalchemy.orm import relationship, backref, validates, exc from sqlalchemy import Table, Column, Integer, String @@ -34,10 +33,6 @@ FLAGS=flags.FLAGS Base = 
declarative_base() -flags.DEFINE_string('sql_connection', - 'sqlite:///%s/nova.sqlite' % os.path.abspath("./"), - 'connection string for sql database') - class NovaBase(object): __table_args__ = {'mysql_engine':'InnoDB'} __prefix__ = 'none' diff --git a/nova/network/service.py b/nova/network/service.py index de2c7a16c..da2953470 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -94,7 +94,6 @@ class BaseNetworkService(service.Service): host = db.network_set_host(context, network_id, FLAGS.node_name) - print 'set host' self._on_set_network_host(context, network_id) return host @@ -199,7 +198,6 @@ class VlanNetworkService(BaseNetworkService): def _on_set_network_host(self, context, network_id): """Called when this host becomes the host for a project""" network_ref = db.network_get(context, network_id) - print 'making the bridge' _driver.ensure_vlan_bridge(network_ref['vlan'], network_ref['bridge'], network_ref) diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index d8a398aa4..c982b18dd 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -75,6 +75,15 @@ class NetworkTestCase(test.TrialTestCase): self.manager.delete_project(project) self.manager.delete_user(self.user) + def _create_address(self, project_num, instance_id=None): + net = db.project_get_network(None, self.projects[project_num].id) + fixed_ip = db.fixed_ip_allocate(None, net['id']) + address = fixed_ip['str_id'] + if instance_id is None: + instance_id = self.instance_id + db.fixed_ip_instance_associate(None, address, instance_id) + return address + def test_public_network_association(self): """Makes sure that we can allocaate a public ip""" # TODO(vish): better way of adding floating ips @@ -87,89 +96,82 @@ class NetworkTestCase(test.TrialTestCase): floating_ip.ip_str = ip_str floating_ip.node_name = FLAGS.node_name floating_ip.save() - eaddress = self.service.allocate_floating_ip(self.projects[0].id) - faddress = 
self.service.allocate_fixed_ip(self.projects[0].id, - self.instance_id) - self.assertEqual(eaddress, str(pubnet[0])) - self.service.associate_floating_ip(eaddress, faddress) + float_addr = self.service.allocate_floating_ip(self.projects[0].id) + fix_addr = self._create_address(0) + self.assertEqual(float_addr, str(pubnet[0])) + self.service.associate_floating_ip(float_addr, fix_addr) # FIXME datamodel abstraction - self.assertEqual(floating_ip.fixed_ip.ip_str, faddress) - self.service.disassociate_floating_ip(eaddress) - self.assertEqual(floating_ip.fixed_ip, None) - self.service.deallocate_floating_ip(eaddress) - self.service.deallocate_fixed_ip(faddress) + address = db.instance_get_floating_address(None, self.instance_id) + self.assertEqual(address, float_addr) + self.service.disassociate_floating_ip(float_addr) + address = db.instance_get_floating_address(None, self.instance_id) + self.assertEqual(address, None) + self.service.deallocate_floating_ip(float_addr) + db.fixed_ip_deallocate(None, fix_addr) def test_allocate_deallocate_fixed_ip(self): """Makes sure that we can allocate and deallocate a fixed ip""" - address = self.service.allocate_fixed_ip(self.projects[0].id, - self.instance_id) - net = db.project_get_network(None, self.projects[0].id) + address = self._create_address(0) self.assertTrue(is_allocated_in_project(address, self.projects[0].id)) - issue_ip(address, net.bridge) - self.service.deallocate_fixed_ip(address) + lease_ip(address) + db.fixed_ip_deallocate(None, address) # Doesn't go away until it's dhcp released self.assertTrue(is_allocated_in_project(address, self.projects[0].id)) - release_ip(address, net.bridge) + release_ip(address) self.assertFalse(is_allocated_in_project(address, self.projects[0].id)) def test_side_effects(self): """Ensures allocating and releasing has no side effects""" - address = self.service.allocate_fixed_ip(self.projects[0].id, - self.instance_id) - address2 = self.service.allocate_fixed_ip(self.projects[1].id, - 
self.instance2_id) - - net = db.project_get_network(None, self.projects[0].id) - net2 = db.project_get_network(None, self.projects[1].id) + address = self._create_address(0) + address2 = self._create_address(1, self.instance2_id) self.assertTrue(is_allocated_in_project(address, self.projects[0].id)) self.assertTrue(is_allocated_in_project(address2, self.projects[1].id)) self.assertFalse(is_allocated_in_project(address, self.projects[1].id)) # Addresses are allocated before they're issued - issue_ip(address, net.bridge) - issue_ip(address2, net2.bridge) + lease_ip(address) + lease_ip(address2) - self.service.deallocate_fixed_ip(address) - release_ip(address, net.bridge) + db.fixed_ip_deallocate(None, address) + release_ip(address) self.assertFalse(is_allocated_in_project(address, self.projects[0].id)) # First address release shouldn't affect the second self.assertTrue(is_allocated_in_project(address2, self.projects[1].id)) - self.service.deallocate_fixed_ip(address2) - release_ip(address2, net2.bridge) + db.fixed_ip_deallocate(None, address2) + release_ip(address2) self.assertFalse(is_allocated_in_project(address2, self.projects[1].id)) def test_subnet_edge(self): """Makes sure that private ips don't overlap""" - first = self.service.allocate_fixed_ip(self.projects[0].id, - self.instance_id) + first = self._create_address(0) + lease_ip(first) for i in range(1, 5): project_id = self.projects[i].id - address = self.service.allocate_fixed_ip(project_id, self.instance_id) - address2 = self.service.allocate_fixed_ip(project_id, self.instance_id) - address3 = self.service.allocate_fixed_ip(project_id, self.instance_id) - net = db.project_get_network(None, project_id) - issue_ip(address, net.bridge) - issue_ip(address2, net.bridge) - issue_ip(address3, net.bridge) + address = self._create_address(i) + address2 = self._create_address(i) + address3 = self._create_address(i) + lease_ip(address) + lease_ip(address2) + lease_ip(address3) 
self.assertFalse(is_allocated_in_project(address, self.projects[0].id)) self.assertFalse(is_allocated_in_project(address2, self.projects[0].id)) self.assertFalse(is_allocated_in_project(address3, self.projects[0].id)) - self.service.deallocate_fixed_ip(address) - self.service.deallocate_fixed_ip(address2) - self.service.deallocate_fixed_ip(address3) - release_ip(address, net.bridge) - release_ip(address2, net.bridge) - release_ip(address3, net.bridge) - net = db.project_get_network(None, self.projects[0].id) - self.service.deallocate_fixed_ip(first) + db.fixed_ip_deallocate(None, address) + db.fixed_ip_deallocate(None, address2) + db.fixed_ip_deallocate(None, address3) + release_ip(address) + release_ip(address2) + release_ip(address3) + release_ip(first) + db.fixed_ip_deallocate(None, first) def test_vpn_ip_and_port_looks_valid(self): """Ensure the vpn ip and port are reasonable""" @@ -196,17 +198,14 @@ class NetworkTestCase(test.TrialTestCase): def test_ips_are_reused(self): """Makes sure that ip addresses that are deallocated get reused""" - address = self.service.allocate_fixed_ip(self.projects[0].id, - self.instance_id) - net = db.project_get_network(None, self.projects[0].id) - issue_ip(address, net.bridge) - self.service.deallocate_fixed_ip(address) - release_ip(address, net.bridge) - - address2 = self.service.allocate_fixed_ip(self.projects[0].id, - self.instance_id) + address = self._create_address(0) + lease_ip(address) + db.fixed_ip_deallocate(None, address) + release_ip(address) + + address2 = self._create_address(0) self.assertEqual(address, address2) - self.service.deallocate_fixed_ip(address2) + db.fixed_ip_deallocate(None, address2) def test_available_ips(self): """Make sure the number of available ips for the network is correct @@ -237,19 +236,19 @@ class NetworkTestCase(test.TrialTestCase): addresses = [] for i in range(num_available_ips): project_id = self.projects[0].id - addresses.append(self.service.allocate_fixed_ip(project_id, - 
self.instance_id)) - issue_ip(addresses[i],network.bridge) + address = self._create_address(0) + addresses.append(address) + lease_ip(address) self.assertEqual(available_ips(network), 0) self.assertRaises(db.NoMoreAddresses, - self.service.allocate_fixed_ip, - self.projects[0].id, - self.instance_id) + db.fixed_ip_allocate, + None, + network['id']) for i in range(len(addresses)): - self.service.deallocate_fixed_ip(addresses[i]) - release_ip(addresses[i],network.bridge) + db.fixed_ip_deallocate(None, addresses[i]) + release_ip(addresses[i]) self.assertEqual(available_ips(network), num_available_ips) @@ -287,20 +286,22 @@ def binpath(script): return os.path.abspath(os.path.join(__file__, "../../../bin", script)) -def issue_ip(private_ip, interface): +def lease_ip(private_ip): """Run add command on dhcpbridge""" + network_ref = db.fixed_ip_get_network(None, private_ip) cmd = "%s add fake %s fake" % (binpath('nova-dhcpbridge'), private_ip) - env = {'DNSMASQ_INTERFACE': interface, + env = {'DNSMASQ_INTERFACE': network_ref['bridge'], 'TESTING': '1', 'FLAGFILE': FLAGS.dhcpbridge_flagfile} (out, err) = utils.execute(cmd, addl_env=env) logging.debug("ISSUE_IP: %s, %s ", out, err) -def release_ip(private_ip, interface): +def release_ip(private_ip): """Run del command on dhcpbridge""" + network_ref = db.fixed_ip_get_network(None, private_ip) cmd = "%s del fake %s fake" % (binpath('nova-dhcpbridge'), private_ip) - env = {'DNSMASQ_INTERFACE': interface, + env = {'DNSMASQ_INTERFACE': network_ref['bridge'], 'TESTING': '1', 'FLAGFILE': FLAGS.dhcpbridge_flagfile} (out, err) = utils.execute(cmd, addl_env=env) -- cgit From 00ecd70fa6ec5a6d4f8444472f7fab20174815b3 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 26 Aug 2010 11:28:05 -0700 Subject: fixed volume unit tests --- nova/db/api.py | 1 + nova/service.py | 1 + nova/tests/fake_flags.py | 2 ++ nova/tests/volume_unittest.py | 63 ++++++++++++++++++++++++------------------- nova/volume/service.py | 6 ++--- 5 files 
changed, 41 insertions(+), 32 deletions(-) diff --git a/nova/db/api.py b/nova/db/api.py index 2f759cb44..7b3ded004 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -19,6 +19,7 @@ from nova import exception from nova import flags from nova import utils +from nova import validate FLAGS = flags.FLAGS diff --git a/nova/service.py b/nova/service.py index dc1f9efb6..9c536c354 100644 --- a/nova/service.py +++ b/nova/service.py @@ -100,6 +100,7 @@ class Service(object, service.Service): daemon_id = db.daemon_create(context, {'node_name': node_name, 'binary': binary, 'report_count': 0}) + daemon_ref = db.daemon_get(context, daemon_id) db.daemon_update(context, daemon_id, {'report_count': daemon_ref['report_count'] + 1}) diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py index 7fc83babc..543641a1b 100644 --- a/nova/tests/fake_flags.py +++ b/nova/tests/fake_flags.py @@ -25,6 +25,8 @@ FLAGS.fake_storage = True FLAGS.fake_rabbit = True FLAGS.fake_network = True FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver' +FLAGS.network_size = 16 +FLAGS.num_networks = 5 FLAGS.verbose = True FLAGS.sql_connection = 'sqlite:///nova.sqlite' #FLAGS.sql_connection = 'mysql://root@localhost/test' diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py index 37ee6c72b..e6b7b07ce 100644 --- a/nova/tests/volume_unittest.py +++ b/nova/tests/volume_unittest.py @@ -55,12 +55,20 @@ class VolumeTestCase(test.TrialTestCase): for device in self.devices: device.delete() + def _create_volume(self, size='0'): + vol = {} + vol['size'] = '0' + vol['user_id'] = 'fake' + vol['project_id'] = 'fake' + vol['availability_zone'] = FLAGS.storage_availability_zone + vol['status'] = "creating" + vol['attach_status'] = "detached" + return db.volume_create(None, vol) + @defer.inlineCallbacks def test_run_create_volume(self): - vol_size = '0' - user_id = 'fake' - project_id = 'fake' - volume_id = yield self.volume.create_volume(vol_size, user_id, project_id) + volume_id = 
self._create_volume() + yield self.volume.create_volume(volume_id) self.assertEqual(volume_id, models.Volume.find(volume_id).id) @@ -69,28 +77,27 @@ class VolumeTestCase(test.TrialTestCase): @defer.inlineCallbacks def test_too_big_volume(self): - vol_size = '1001' - user_id = 'fake' - project_id = 'fake' + # FIXME(vish): validation needs to move into the data layer in + # volume_create + defer.returnValue(True) try: - yield self.volume.create_volume(vol_size, user_id, project_id) + volume_id = self._create_volume('1001') + yield self.volume.create_volume(volume_id) self.fail("Should have thrown TypeError") except TypeError: pass @defer.inlineCallbacks def test_too_many_volumes(self): - vol_size = '1' - user_id = 'fake' - project_id = 'fake' vols = [] for i in xrange(self.total_slots): - vid = yield self.volume.create_volume(vol_size, user_id, project_id) - vols.append(vid) - self.assertFailure(self.volume.create_volume(vol_size, - user_id, - project_id), + volume_id = self._create_volume() + yield self.volume.create_volume(volume_id) + vols.append(volume_id) + volume_id = self._create_volume() + self.assertFailure(self.volume.create_volume(volume_id), db.NoMoreBlades) + db.volume_destroy(None, volume_id) for id in vols: yield self.volume.delete_volume(id) @@ -98,11 +105,9 @@ class VolumeTestCase(test.TrialTestCase): def test_run_attach_detach_volume(self): # Create one volume and one compute to test with instance_id = "storage-test" - vol_size = "5" - user_id = "fake" - project_id = 'fake' mountpoint = "/dev/sdf" - volume_id = yield self.volume.create_volume(vol_size, user_id, project_id) + volume_id = self._create_volume() + yield self.volume.create_volume(volume_id) if FLAGS.fake_tests: db.volume_attached(None, volume_id, instance_id, mountpoint) else: @@ -110,10 +115,10 @@ class VolumeTestCase(test.TrialTestCase): volume_id, mountpoint) vol = db.volume_get(None, volume_id) - self.assertEqual(vol.status, "in-use") - self.assertEqual(vol.attach_status, "attached") 
- self.assertEqual(vol.instance_id, instance_id) - self.assertEqual(vol.mountpoint, mountpoint) + self.assertEqual(vol['status'], "in-use") + self.assertEqual(vol['attach_status'], "attached") + self.assertEqual(vol['instance_id'], instance_id) + self.assertEqual(vol['mountpoint'], mountpoint) self.assertFailure(self.volume.delete_volume(volume_id), exception.Error) if FLAGS.fake_tests: @@ -121,11 +126,12 @@ class VolumeTestCase(test.TrialTestCase): else: rv = yield self.volume.detach_volume(instance_id, volume_id) - self.assertEqual(vol.status, "available") + self.assertEqual(vol['status'], "available") rv = self.volume.delete_volume(volume_id) self.assertRaises(exception.Error, - models.Volume.find, + db.volume_get, + None, volume_id) @defer.inlineCallbacks @@ -137,7 +143,7 @@ class VolumeTestCase(test.TrialTestCase): volume_ids = [] def _check(volume_id): volume_ids.append(volume_id) - vol = models.Volume.find(volume_id) + vol = db.volume_get(None, volume_id) shelf_blade = '%s.%s' % (vol.export_device.shelf_id, vol.export_device.blade_id) self.assert_(shelf_blade not in shelf_blades) @@ -145,7 +151,8 @@ class VolumeTestCase(test.TrialTestCase): logging.debug("got %s" % shelf_blade) deferreds = [] for i in range(self.total_slots): - d = self.volume.create_volume(vol_size, user_id, project_id) + volume_id = self._create_volume() + d = self.volume.create_volume(volume_id) d.addCallback(_check) d.addErrback(self.fail) deferreds.append(d) diff --git a/nova/volume/service.py b/nova/volume/service.py index 7e32f2d8d..fbafd3fb5 100644 --- a/nova/volume/service.py +++ b/nova/volume/service.py @@ -31,7 +31,6 @@ from nova import exception from nova import flags from nova import process from nova import service -from nova import validate FLAGS = flags.FLAGS @@ -65,7 +64,6 @@ class VolumeService(service.Service): self._exec_init_volumes() @defer.inlineCallbacks - # @validate.rangetest(size=(0, 1000)) def create_volume(self, volume_id, context=None): """ Creates an exported 
volume (fake or real), @@ -76,7 +74,7 @@ class VolumeService(service.Service): volume_ref = db.volume_get(context, volume_id) - # db.volume_update(context, volume_id, {'node_name': FLAGS.node_name}) + db.volume_update(context, volume_id, {'node_name': FLAGS.node_name}) size = volume_ref['size'] logging.debug("volume %s: creating lv of size %sG" % (volume_id, size)) @@ -97,7 +95,7 @@ class VolumeService(service.Service): logging.debug("volume %s: re-exporting all values" % (volume_id)) yield self._exec_ensure_exports() - + logging.debug("volume %s: created successfully" % (volume_id)) defer.returnValue(volume_id) -- cgit From e401280bb88672017e621c82e6d3d611887c1002 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 26 Aug 2010 12:56:07 -0700 Subject: fixed service mox test cases --- nova/tests/service_unittest.py | 56 +++++++++++++++++++++++++----------------- 1 file changed, 33 insertions(+), 23 deletions(-) diff --git a/nova/tests/service_unittest.py b/nova/tests/service_unittest.py index 482988465..0b9d60024 100644 --- a/nova/tests/service_unittest.py +++ b/nova/tests/service_unittest.py @@ -80,13 +80,15 @@ class ServiceTestCase(test.BaseTestCase): binary = 'bar' daemon_ref = {'node_name': node_name, 'binary': binary, - 'report_count': 0 - } + 'report_count': 0, + 'id': 1} - service.db.daemon_get(None, node_name, binary).AndReturn(daemon_ref) - service.db.daemon_update(None, node_name, binary, + service.db.daemon_get_by_args(None, + node_name, + binary).AndReturn(daemon_ref) + service.db.daemon_update(None, daemon_ref['id'], mox.ContainsKeyValue('report_count', 1)) - + self.mox.ReplayAll() s = service.Service() rv = yield s.report_state(node_name, binary) @@ -95,17 +97,22 @@ class ServiceTestCase(test.BaseTestCase): def test_report_state_no_daemon(self): node_name = 'foo' binary = 'bar' + daemon_create = {'node_name': node_name, + 'binary': binary, + 'report_count': 0} daemon_ref = {'node_name': node_name, 'binary': binary, - 'report_count': 0 - } - - 
service.db.daemon_get(None, node_name, binary).AndRaise( - exception.NotFound()) - service.db.daemon_create(None, daemon_ref).AndReturn(daemon_ref) - service.db.daemon_update(None, node_name, binary, + 'report_count': 0, + 'id': 1} + + service.db.daemon_get_by_args(None, + node_name, + binary).AndRaise(exception.NotFound()) + service.db.daemon_create(None, daemon_create).AndReturn(daemon_ref['id']) + service.db.daemon_get(None, daemon_ref['id']).AndReturn(daemon_ref) + service.db.daemon_update(None, daemon_ref['id'], mox.ContainsKeyValue('report_count', 1)) - + self.mox.ReplayAll() s = service.Service() rv = yield s.report_state(node_name, binary) @@ -116,12 +123,13 @@ class ServiceTestCase(test.BaseTestCase): binary = 'bar' daemon_ref = {'node_name': node_name, 'binary': binary, - 'report_count': 0 - } + 'report_count': 0, + 'id': 1} + + service.db.daemon_get_by_args(None, + node_name, + binary).AndRaise(Exception()) - service.db.daemon_get(None, node_name, binary).AndRaise( - Exception()) - self.mox.ReplayAll() s = service.Service() rv = yield s.report_state(node_name, binary) @@ -134,13 +142,15 @@ class ServiceTestCase(test.BaseTestCase): binary = 'bar' daemon_ref = {'node_name': node_name, 'binary': binary, - 'report_count': 0 - } + 'report_count': 0, + 'id': 1} - service.db.daemon_get(None, node_name, binary).AndReturn(daemon_ref) - service.db.daemon_update(None, node_name, binary, + service.db.daemon_get_by_args(None, + node_name, + binary).AndReturn(daemon_ref) + service.db.daemon_update(None, daemon_ref['id'], mox.ContainsKeyValue('report_count', 1)) - + self.mox.ReplayAll() s = service.Service() s.model_disconnected = True -- cgit From 974573b738cea3b1125cb8498cb97eb79714db32 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 27 Aug 2010 13:45:05 -0700 Subject: removed the last few references to models.py --- nova/db/api.py | 53 +++++++++++++++++++++++++++++---- nova/db/sqlalchemy/api.py | 66 +++++++++++++++++++++++++++++++++++++----- 
nova/tests/compute_unittest.py | 39 ++++++++++++------------- nova/tests/fake_flags.py | 2 ++ nova/tests/network_unittest.py | 50 +++++++++----------------------- nova/tests/volume_unittest.py | 32 +++++++------------- nova/volume/service.py | 3 -- 7 files changed, 150 insertions(+), 95 deletions(-) diff --git a/nova/db/api.py b/nova/db/api.py index 7b3ded004..536ef1e25 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -26,7 +26,7 @@ FLAGS = flags.FLAGS flags.DEFINE_string('db_backend', 'sqlalchemy', 'The backend to use for db') -# TODO(vish): where should these flags go +# TODO(vish): where should these flags go? flags.DEFINE_string('network_type', 'vlan', 'Service Class for Networking') @@ -41,6 +41,12 @@ flags.DEFINE_string('public_range', '4.4.4.0/24', 'Public IP address block') flags.DEFINE_string('private_range', '10.0.0.0/8', 'Private IP address block') flags.DEFINE_integer('cnt_vpn_clients', 5, 'Number of addresses reserved for vpn clients') +flags.DEFINE_integer('num_shelves', + 100, + 'Number of vblade shelves') +flags.DEFINE_integer('blades_per_shelf', + 16, + 'Number of vblade blades per shelf') @@ -102,11 +108,9 @@ def floating_ip_allocate_address(context, node_name, project_id): return _impl.floating_ip_allocate_address(context, node_name, project_id) -def floating_ip_fixed_ip_associate(context, floating_address, fixed_address): - """Associate an floating ip to a fixed_ip by address.""" - return _impl.floating_ip_fixed_ip_associate(context, - floating_address, - fixed_address) +def floating_ip_create(context, address, host): + """Create a floating ip for a given address on the specified host.""" + return _impl.floating_ip_create(context, address, host) def floating_ip_disassociate(context, address): @@ -122,6 +126,18 @@ def floating_ip_deallocate(context, address): return _impl.floating_ip_deallocate(context, address) +def floating_ip_fixed_ip_associate(context, floating_address, fixed_address): + """Associate an floating ip to a fixed_ip by 
address.""" + return _impl.floating_ip_fixed_ip_associate(context, + floating_address, + fixed_address) + + +def floating_ip_get_by_address(context, address): + """Get a floating ip by address.""" + return _impl.floating_ip_get_by_address(context, address) + + #################### @@ -252,6 +268,26 @@ def network_allocate(context, project_id): return _impl.network_allocate(context, project_id) +def network_count(context): + """Return the number of networks.""" + return _impl.network_count(context) + + +def network_count_allocated_ips(context, network_id): + """Return the number of allocated non-reserved ips in the network.""" + return _impl.network_count_allocated_ips(context, network_id) + + +def network_count_available_ips(context, network_id): + """Return the number of available ips in the network.""" + return _impl.network_count_available_ips(context, network_id) + + +def network_count_reserved_ips(context, network_id): + """Return the number of reserved ips in the network.""" + return _impl.network_count_reserved_ips(context, network_id) + + def network_create(context, values): """Create a network from the values dictionary.""" return _impl.network_create(context, values) @@ -355,6 +391,11 @@ def volume_create(context, values): return _impl.volume_create(context, values) +def volume_ensure_blades(context, num_shelves, blades_per_shelf): + """Ensure shelves and blades have been created in the datastore.""" + return _impl.volume_ensure_blades(context, num_shelves, blades_per_shelf) + + def volume_destroy(context, volume_id): """Destroy the volume or raise if it does not exist.""" return _impl.volume_destroy(context, volume_id) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 047a6c108..55367cec2 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -66,28 +66,40 @@ def floating_ip_allocate_address(context, node_name, project_id): floating_ip_ref['project_id'] = project_id session.add(floating_ip_ref) session.commit() - 
return floating_ip_ref['ip_str'] + return floating_ip_ref['str_id'] + + +def floating_ip_create(context, address, host): + floating_ip_ref = models.FloatingIp() + floating_ip_ref['ip_str'] = address + floating_ip_ref['node_name'] = host + floating_ip_ref.save() + return floating_ip_ref def floating_ip_fixed_ip_associate(context, floating_address, fixed_address): - floating_ip_ref = models.FloatingIp.find_by_str(floating_address) + floating_ip_ref = db.floating_ip_get_by_address(context, floating_address) fixed_ip_ref = models.FixedIp.find_by_str(fixed_address) floating_ip_ref.fixed_ip = fixed_ip_ref floating_ip_ref.save() def floating_ip_disassociate(context, address): - floating_ip_ref = models.FloatingIp.find_by_str(address) - fixed_ip_address = floating_ip_ref.fixed_ip['ip_str'] + floating_ip_ref = db.floating_ip_get_by_address(context, address) + fixed_ip_address = floating_ip_ref.fixed_ip['str_id'] floating_ip_ref['fixed_ip'] = None floating_ip_ref.save() return fixed_ip_address def floating_ip_deallocate(context, address): - floating_ip_ref = models.FloatingIp.find_by_str(address) + floating_ip_ref = db.floating_ip_get_by_address(context, address) floating_ip_ref['project_id'] = None floating_ip_ref.save() +def floating_ip_get_by_address(context, address): + return models.FloatingIp.find_by_str(address) + + ################### @@ -264,6 +276,30 @@ def network_allocate(context, project_id): return network_id +def network_count(context): + return models.Network.count() + +def network_count_allocated_ips(context, network_id): + session = models.NovaBase.get_session() + query = session.query(models.FixedIp).filter_by(network_id=network_id) + query = query.filter_by(allocated=True) + return query.count() + + +def network_count_available_ips(context, network_id): + session = models.NovaBase.get_session() + query = session.query(models.FixedIp).filter_by(network_id=network_id) + query = query.filter_by(allocated=False).filter_by(reserved=False) + return 
query.count() + + +def network_count_reserved_ips(context, network_id): + session = models.NovaBase.get_session() + query = session.query(models.FixedIp).filter_by(network_id=network_id) + query = query.filter_by(reserved=True) + return query.count() + + def network_create(context, values): network_ref = models.Network() for (key, value) in values.iteritems(): @@ -283,7 +319,7 @@ def network_create_fixed_ips(context, network_id, num_vpn_clients): session = models.NovaBase.get_session() for i in range(num_ips): fixed_ip = models.FixedIp() - fixed_ip.ip_str = str(project_net[i]) + fixed_ip['ip_str'] = str(project_net[i]) if i < BOTTOM_RESERVED or num_ips - i < TOP_RESERVED: fixed_ip['reserved'] = True fixed_ip['network'] = network_get(context, network_id) @@ -310,7 +346,7 @@ def network_get(context, network_id): return models.Network.find(network_id) -def network_get_associated_fixed_ips(contex, network_id): +def network_get_associated_fixed_ips(context, network_id): session = models.NovaBase.get_session() query = session.query(models.FixedIp) fixed_ips = query.filter(models.FixedIp.instance_id != None).all() @@ -367,7 +403,6 @@ def network_set_cidr(context, network_id, cidr): def network_set_host(context, network_id, host_id): session = models.NovaBase.get_session() - # FIXME will a second request fail or wait for first to finish? query = session.query(models.Network).filter_by(id=network_id) network = query.with_lockmode("update").first() if not network: @@ -412,6 +447,9 @@ def queue_get_for(context, topic, physical_node_id): def volume_allocate_shelf_and_blade(context, volume_id): + db.volume_ensure_blades(context, + FLAGS.num_shelves, + FLAGS.blades_per_shelf) session = models.NovaBase.get_session() query = session.query(models.ExportDevice).filter_by(volume=None) export_device = query.with_lockmode("update").first() @@ -456,6 +494,18 @@ def volume_detached(context, volume_id): volume_ref.save() +# NOTE(vish): should this code go up a layer? 
+def volume_ensure_blades(context, num_shelves, blades_per_shelf): + if models.ExportDevice.count() >= num_shelves * blades_per_shelf: + return + for shelf_id in xrange(num_shelves): + for blade_id in xrange(blades_per_shelf): + export_device = models.ExportDevice() + export_device.shelf_id = shelf_id + export_device.blade_id = blade_id + export_device.save() + + def volume_get(context, volume_id): return models.Volume.find(volume_id) diff --git a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py index 44cc6ac15..e85973837 100644 --- a/nova/tests/compute_unittest.py +++ b/nova/tests/compute_unittest.py @@ -21,11 +21,11 @@ import time from twisted.internet import defer from xml.etree import ElementTree +from nova import db from nova import exception from nova import flags from nova import test from nova import utils -from nova import models from nova.auth import manager from nova.compute import service @@ -69,47 +69,44 @@ class ComputeConnectionTestCase(test.TrialTestCase): self.manager.delete_user('fake') self.manager.delete_project('fake') - def create_instance(self): - inst = models.Instance(user_id='fake', project_id='fake', image_id='ami-test') - inst.save(); - # TODO(ja): add ami, ari, aki, user_data - # inst['reservation_id'] = 'r-fakeres' - # inst['launch_time'] = '10' - #inst['user_id'] = 'fake' - #inst['project_id'] = 'fake' - #inst['instance_type'] = 'm1.tiny' - #inst['node_name'] = FLAGS.node_name - #inst['mac_address'] = utils.generate_mac() - #inst['ami_launch_index'] = 0 - #inst.save() - return inst.id + def _create_instance(self): + inst = {} + inst['image_id'] = 'ami-test' + inst['reservation_id'] = 'r-fakeres' + inst['launch_time'] = '10' + inst['user_id'] = 'fake' + inst['project_id'] = 'fake' + inst['instance_type'] = 'm1.tiny' + inst['mac_address'] = utils.generate_mac() + inst['ami_launch_index'] = 0 + return db.instance_create(None, inst) @defer.inlineCallbacks def test_run_describe_terminate(self): - instance_id = 
self.create_instance() + instance_id = self._create_instance() yield self.compute.run_instance(instance_id) - instances = models.Instance.all() + instances = db.instance_get_all(None) logging.info("Running instances: %s", instances) self.assertEqual(len(instances), 1) yield self.compute.terminate_instance(instance_id) - instances = models.Instance.all() + instances = db.instance_get_all(None) logging.info("After terminating instances: %s", instances) self.assertEqual(len(instances), 0) @defer.inlineCallbacks def test_reboot(self): - instance_id = self.create_instance() + instance_id = self._create_instance() yield self.compute.run_instance(instance_id) yield self.compute.reboot_instance(instance_id) yield self.compute.terminate_instance(instance_id) @defer.inlineCallbacks def test_console_output(self): - instance_id = self.create_instance() + instance_id = self._create_instance() rv = yield self.compute.run_instance(instance_id) console = yield self.compute.get_console_output(instance_id) @@ -118,7 +115,7 @@ class ComputeConnectionTestCase(test.TrialTestCase): @defer.inlineCallbacks def test_run_instance_existing(self): - instance_id = self.create_instance() + instance_id = self._create_instance() yield self.compute.run_instance(instance_id) self.assertFailure(self.compute.run_instance(instance_id), exception.Error) yield self.compute.terminate_instance(instance_id) diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py index 543641a1b..42a13e4e3 100644 --- a/nova/tests/fake_flags.py +++ b/nova/tests/fake_flags.py @@ -27,6 +27,8 @@ FLAGS.fake_network = True FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver' FLAGS.network_size = 16 FLAGS.num_networks = 5 +FLAGS.num_shelves = 2 +FLAGS.blades_per_shelf = 4 FLAGS.verbose = True FLAGS.sql_connection = 'sqlite:///nova.sqlite' #FLAGS.sql_connection = 'mysql://root@localhost/test' diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index c982b18dd..d487c2e45 100644 --- 
a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -25,7 +25,6 @@ import logging from nova import db from nova import exception from nova import flags -from nova import models from nova import test from nova import utils from nova.auth import manager @@ -90,17 +89,13 @@ class NetworkTestCase(test.TrialTestCase): pubnet = IPy.IP(flags.FLAGS.public_range) ip_str = str(pubnet[0]) try: - floating_ip = models.FloatingIp.find_by_str(ip_str) + db.floating_ip_get_by_address(None, ip_str) except exception.NotFound: - floating_ip = models.FloatingIp() - floating_ip.ip_str = ip_str - floating_ip.node_name = FLAGS.node_name - floating_ip.save() + db.floating_ip_create(None, ip_str, FLAGS.node_name) float_addr = self.service.allocate_floating_ip(self.projects[0].id) fix_addr = self._create_address(0) self.assertEqual(float_addr, str(pubnet[0])) self.service.associate_floating_ip(float_addr, fix_addr) - # FIXME datamodel abstraction address = db.instance_get_floating_address(None, self.instance_id) self.assertEqual(address, float_addr) self.service.disassociate_floating_ip(float_addr) @@ -183,8 +178,7 @@ class NetworkTestCase(test.TrialTestCase): def test_too_many_networks(self): """Ensure error is raised if we run out of networks""" projects = [] - # TODO(vish): use data layer for count - networks_left = FLAGS.num_networks - models.Network.count() + networks_left = FLAGS.num_networks - db.network_count(None) for i in range(networks_left): project = self.manager.create_project('many%s' % i, self.user) projects.append(project) @@ -220,9 +214,9 @@ class NetworkTestCase(test.TrialTestCase): """ network = db.project_get_network(None, self.projects[0].id) net_size = flags.FLAGS.network_size - total_ips = (available_ips(network) + - reserved_ips(network) + - allocated_ips(network)) + total_ips = (db.network_count_available_ips(None, network['id']) + + db.network_count_reserved_ips(None, network['id']) + + db.network_count_allocated_ips(None, network['id'])) 
self.assertEqual(total_ips, net_size) def test_too_many_addresses(self): @@ -230,9 +224,9 @@ class NetworkTestCase(test.TrialTestCase): """ network = db.project_get_network(None, self.projects[0].id) - # Number of availaible ips is len of the available list - num_available_ips = available_ips(network) + num_available_ips = db.network_count_available_ips(None, + network['id']) addresses = [] for i in range(num_available_ips): project_id = self.projects[0].id @@ -240,7 +234,8 @@ class NetworkTestCase(test.TrialTestCase): addresses.append(address) lease_ip(address) - self.assertEqual(available_ips(network), 0) + self.assertEqual(db.network_count_available_ips(None, + network['id']), 0) self.assertRaises(db.NoMoreAddresses, db.fixed_ip_allocate, None, @@ -249,27 +244,10 @@ class NetworkTestCase(test.TrialTestCase): for i in range(len(addresses)): db.fixed_ip_deallocate(None, addresses[i]) release_ip(addresses[i]) - self.assertEqual(available_ips(network), num_available_ips) - - -# FIXME move these to abstraction layer -def available_ips(network): - session = models.NovaBase.get_session() - query = session.query(models.FixedIp).filter_by(network_id=network.id) - query = query.filter_by(allocated=False).filter_by(reserved=False) - return query.count() - -def allocated_ips(network): - session = models.NovaBase.get_session() - query = session.query(models.FixedIp).filter_by(network_id=network.id) - query = query.filter_by(allocated=True) - return query.count() - -def reserved_ips(network): - session = models.NovaBase.get_session() - query = session.query(models.FixedIp).filter_by(network_id=network.id) - query = query.filter_by(reserved=True) - return query.count() + self.assertEqual(db.network_count_available_ips(None, + network['id']), + num_available_ips) + def is_allocated_in_project(address, project_id): """Returns true if address is in specified project""" diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py index e6b7b07ce..a03e0e6e3 100644 --- 
a/nova/tests/volume_unittest.py +++ b/nova/tests/volume_unittest.py @@ -23,7 +23,6 @@ from twisted.internet import defer from nova import exception from nova import db from nova import flags -from nova import models from nova import test from nova.compute import service as compute_service from nova.volume import service as volume_service @@ -40,20 +39,7 @@ class VolumeTestCase(test.TrialTestCase): self.flags(connection_type='fake', fake_storage=True) self.volume = volume_service.VolumeService() - self.total_slots = 10 - # FIXME this should test actual creation method - self.devices = [] - for i in xrange(self.total_slots): - export_device = models.ExportDevice() - export_device.shelf_id = 0 - export_device.blade_id = i - export_device.save() - self.devices.append(export_device) - - def tearDown(self): - super(VolumeTestCase, self).tearDown() - for device in self.devices: - device.delete() + def _create_volume(self, size='0'): vol = {} @@ -69,11 +55,13 @@ class VolumeTestCase(test.TrialTestCase): def test_run_create_volume(self): volume_id = self._create_volume() yield self.volume.create_volume(volume_id) - self.assertEqual(volume_id, - models.Volume.find(volume_id).id) + self.assertEqual(volume_id, db.volume_get(None, volume_id).id) yield self.volume.delete_volume(volume_id) - self.assertRaises(exception.NotFound, models.Volume.find, volume_id) + self.assertRaises(exception.NotFound, + db.volume_get, + None, + volume_id) @defer.inlineCallbacks def test_too_big_volume(self): @@ -90,7 +78,8 @@ class VolumeTestCase(test.TrialTestCase): @defer.inlineCallbacks def test_too_many_volumes(self): vols = [] - for i in xrange(self.total_slots): + total_slots = FLAGS.num_shelves * FLAGS.blades_per_shelf + for i in xrange(total_slots): volume_id = self._create_volume() yield self.volume.create_volume(volume_id) vols.append(volume_id) @@ -150,7 +139,8 @@ class VolumeTestCase(test.TrialTestCase): shelf_blades.append(shelf_blade) logging.debug("got %s" % shelf_blade) deferreds = 
[] - for i in range(self.total_slots): + total_slots = FLAGS.num_shelves * FLAGS.blades_per_shelf + for i in range(total_slots): volume_id = self._create_volume() d = self.volume.create_volume(volume_id) d.addCallback(_check) @@ -158,7 +148,7 @@ class VolumeTestCase(test.TrialTestCase): deferreds.append(d) yield defer.DeferredList(deferreds) for volume_id in volume_ids: - vol = models.Volume.find(volume_id) + vol = db.volume_get(None, volume_id) vol.delete() def test_multi_node(self): diff --git a/nova/volume/service.py b/nova/volume/service.py index fbafd3fb5..7f6747577 100644 --- a/nova/volume/service.py +++ b/nova/volume/service.py @@ -43,9 +43,6 @@ flags.DEFINE_string('aoe_eth_dev', 'eth0', flags.DEFINE_string('aoe_export_dir', '/var/lib/vblade-persist/vblades', 'AoE directory where exports are created') -flags.DEFINE_integer('blades_per_shelf', - 16, - 'Number of AoE blades per shelf') flags.DEFINE_string('storage_availability_zone', 'nova', 'availability zone of this service') -- cgit From ff72e7baff179bb814e3b9df9fc50659a48249f3 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 27 Aug 2010 13:46:27 -0700 Subject: moved models.py --- nova/db/sqlalchemy/api.py | 2 +- nova/db/sqlalchemy/models.py | 368 +++++++++++++++++++++++++++++++++++++++++++ nova/models.py | 368 ------------------------------------------- 3 files changed, 369 insertions(+), 369 deletions(-) create mode 100644 nova/db/sqlalchemy/models.py delete mode 100644 nova/models.py diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 55367cec2..cba85ccb7 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -23,7 +23,7 @@ import IPy from nova import db from nova import exception from nova import flags -from nova import models +from nova.db.sqlalchemy import models FLAGS = flags.FLAGS diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py new file mode 100644 index 000000000..36d6cf3ad --- /dev/null +++ b/nova/db/sqlalchemy/models.py 
@@ -0,0 +1,368 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +SQLAlchemy models for nova data +""" + +from sqlalchemy.orm import relationship, backref, validates, exc +from sqlalchemy import Table, Column, Integer, String +from sqlalchemy import MetaData, ForeignKey, DateTime, Boolean, Text +from sqlalchemy.ext.declarative import declarative_base + +from nova import auth +from nova import exception +from nova import flags + +FLAGS=flags.FLAGS + +Base = declarative_base() + +class NovaBase(object): + __table_args__ = {'mysql_engine':'InnoDB'} + __prefix__ = 'none' + created_at = Column(DateTime) + updated_at = Column(DateTime) + + _session = None + _engine = None + @classmethod + def create_engine(cls): + if NovaBase._engine is not None: + return NovaBase._engine + from sqlalchemy import create_engine + NovaBase._engine = create_engine(FLAGS.sql_connection, echo=False) + Base.metadata.create_all(NovaBase._engine) + return NovaBase._engine + + @classmethod + def get_session(cls): + from sqlalchemy.orm import sessionmaker + if NovaBase._session == None: + NovaBase.create_engine() + NovaBase._session = sessionmaker(bind=NovaBase._engine)() + return NovaBase._session + + @classmethod + def all(cls): + session = NovaBase.get_session() + result = 
session.query(cls).all() + session.commit() + return result + + @classmethod + def count(cls): + session = NovaBase.get_session() + result = session.query(cls).count() + session.commit() + return result + + @classmethod + def find(cls, obj_id): + session = NovaBase.get_session() + try: + result = session.query(cls).filter_by(id=obj_id).one() + session.commit() + return result + except exc.NoResultFound: + raise exception.NotFound("No model for id %s" % obj_id) + + @classmethod + def find_by_str(cls, str_id): + id = int(str_id.rpartition('-')[2]) + return cls.find(id) + + @property + def str_id(self): + return "%s-%s" % (self.__prefix__, self.id) + + def save(self): + session = NovaBase.get_session() + session.add(self) + session.commit() + + def delete(self): + session = NovaBase.get_session() + session.delete(self) + session.commit() + + def refresh(self): + session = NovaBase.get_session() + session.refresh(self) + + def __setitem__(self, key, value): + setattr(self, key, value) + + def __getitem__(self, key): + return getattr(self, key) + + +class Image(Base, NovaBase): + __tablename__ = 'images' + __prefix__ = 'ami' + id = Column(Integer, primary_key=True) + user_id = Column(String(255))#, ForeignKey('users.id'), nullable=False) + project_id = Column(String(255))#, ForeignKey('projects.id'), nullable=False) + + image_type = Column(String(255)) + public = Column(Boolean, default=False) + state = Column(String(255)) + location = Column(String(255)) + arch = Column(String(255)) + default_kernel_id = Column(String(255)) + default_ramdisk_id = Column(String(255)) + + @validates('image_type') + def validate_image_type(self, key, image_type): + assert(image_type in ['machine', 'kernel', 'ramdisk', 'raw']) + + @validates('state') + def validate_state(self, key, state): + assert(state in ['available', 'pending', 'disabled']) + + @validates('default_kernel_id') + def validate_kernel_id(self, key, val): + if val != 'machine': + assert(val is None) + + 
@validates('default_ramdisk_id') + def validate_ramdisk_id(self, key, val): + if val != 'machine': + assert(val is None) + + +class PhysicalNode(Base, NovaBase): + __tablename__ = 'physical_nodes' + id = Column(String(255), primary_key=True) + +class Daemon(Base, NovaBase): + __tablename__ = 'daemons' + id = Column(Integer, primary_key=True) + node_name = Column(String(255)) #, ForeignKey('physical_node.id')) + binary = Column(String(255)) + report_count = Column(Integer, nullable=False, default=0) + + @classmethod + def find_by_args(cls, node_name, binary): + session = NovaBase.get_session() + try: + query = session.query(cls).filter_by(node_name=node_name) + result = query.filter_by(binary=binary).one() + session.commit() + return result + except exc.NoResultFound: + raise exception.NotFound("No model for %s, %s" % (node_name, + binary)) + + +class Instance(Base, NovaBase): + __tablename__ = 'instances' + __prefix__ = 'i' + id = Column(Integer, primary_key=True) + + user_id = Column(String(255)) #, ForeignKey('users.id'), nullable=False) + project_id = Column(String(255)) #, ForeignKey('projects.id')) + + @property + def user(self): + return auth.manager.AuthManager().get_user(self.user_id) + + @property + def project(self): + return auth.manager.AuthManager().get_project(self.project_id) + + # TODO(vish): make this opaque somehow + @property + def name(self): + return self.str_id + + + image_id = Column(Integer, ForeignKey('images.id'), nullable=True) + kernel_id = Column(Integer, ForeignKey('images.id'), nullable=True) + ramdisk_id = Column(Integer, ForeignKey('images.id'), nullable=True) + + launch_index = Column(Integer) + key_name = Column(String(255)) + key_data = Column(Text) + security_group = Column(String(255)) + + state = Column(Integer) + state_description = Column(String(255)) + + hostname = Column(String(255)) + node_name = Column(String(255)) #, ForeignKey('physical_node.id')) + + instance_type = Column(Integer) + + user_data = Column(Text) + + 
reservation_id = Column(String(255)) + mac_address = Column(String(255)) + + def set_state(self, state_code, state_description=None): + from nova.compute import power_state + self.state = state_code + if not state_description: + state_description = power_state.name(state_code) + self.state_description = state_description + self.save() + +# ramdisk = relationship(Ramdisk, backref=backref('instances', order_by=id)) +# kernel = relationship(Kernel, backref=backref('instances', order_by=id)) +# project = relationship(Project, backref=backref('instances', order_by=id)) + +#TODO - see Ewan's email about state improvements + # vmstate_state = running, halted, suspended, paused + # power_state = what we have + # task_state = transitory and may trigger power state transition + + #@validates('state') + #def validate_state(self, key, state): + # assert(state in ['nostate', 'running', 'blocked', 'paused', 'shutdown', 'shutoff', 'crashed']) + +class Volume(Base, NovaBase): + __tablename__ = 'volumes' + __prefix__ = 'vol' + id = Column(Integer, primary_key=True) + + user_id = Column(String(255)) #, ForeignKey('users.id'), nullable=False) + project_id = Column(String(255)) #, ForeignKey('projects.id')) + + node_name = Column(String(255)) #, ForeignKey('physical_node.id')) + size = Column(Integer) + availability_zone = Column(String(255)) # TODO(vish) foreign key? + instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) + mountpoint = Column(String(255)) + attach_time = Column(String(255)) # TODO(vish) datetime + status = Column(String(255)) # TODO(vish) enum? 
+ attach_status = Column(String(255)) # TODO(vish) enum + +class ExportDevice(Base, NovaBase): + __tablename__ = 'export_devices' + id = Column(Integer, primary_key=True) + shelf_id = Column(Integer) + blade_id = Column(Integer) + volume_id = Column(Integer, ForeignKey('volumes.id'), nullable=True) + volume = relationship(Volume, backref=backref('export_device', + uselist=False)) + + +# TODO(vish): can these both come from the same baseclass? +class FixedIp(Base, NovaBase): + __tablename__ = 'fixed_ips' + id = Column(Integer, primary_key=True) + ip_str = Column(String(255), unique=True) + network_id = Column(Integer, ForeignKey('networks.id'), nullable=False) + instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) + instance = relationship(Instance, backref=backref('fixed_ip', + uselist=False)) + allocated = Column(Boolean, default=False) + leased = Column(Boolean, default=False) + reserved = Column(Boolean, default=False) + + @property + def str_id(self): + return self.ip_str + + @classmethod + def find_by_str(cls, str_id): + session = NovaBase.get_session() + try: + result = session.query(cls).filter_by(ip_str=str_id).one() + session.commit() + return result + except exc.NoResultFound: + raise exception.NotFound("No model for ip str %s" % str_id) + + +class FloatingIp(Base, NovaBase): + __tablename__ = 'floating_ips' + id = Column(Integer, primary_key=True) + ip_str = Column(String(255), unique=True) + fixed_ip_id = Column(Integer, ForeignKey('fixed_ips.id'), nullable=True) + fixed_ip = relationship(FixedIp, backref=backref('floating_ips')) + + project_id = Column(String(255)) #, ForeignKey('projects.id'), nullable=False) + node_name = Column(String(255)) #, ForeignKey('physical_node.id')) + + @property + def str_id(self): + return self.ip_str + + @classmethod + def find_by_str(cls, str_id): + session = NovaBase.get_session() + try: + result = session.query(cls).filter_by(ip_str=str_id).one() + session.commit() + return result + except 
exc.NoResultFound: + raise exception.NotFound("No model for ip str %s" % str_id) + + +class Network(Base, NovaBase): + __tablename__ = 'networks' + id = Column(Integer, primary_key=True) + kind = Column(String(255)) + + injected = Column(Boolean, default=False) + cidr = Column(String(255)) + netmask = Column(String(255)) + bridge = Column(String(255)) + gateway = Column(String(255)) + broadcast = Column(String(255)) + dns = Column(String(255)) + + vlan = Column(Integer) + vpn_public_ip_str = Column(String(255)) + vpn_public_port = Column(Integer) + vpn_private_ip_str = Column(String(255)) + dhcp_start = Column(String(255)) + + project_id = Column(String(255)) #, ForeignKey('projects.id'), nullable=False) + node_name = Column(String(255)) #, ForeignKey('physical_node.id')) + + fixed_ips = relationship(FixedIp, + single_parent=True, + backref=backref('network'), + cascade='all, delete, delete-orphan') + + +class NetworkIndex(Base, NovaBase): + __tablename__ = 'network_indexes' + id = Column(Integer, primary_key=True) + index = Column(Integer) + network_id = Column(Integer, ForeignKey('networks.id'), nullable=True) + network = relationship(Network, backref=backref('network_index', + uselist=False)) + + + + +def create_session(engine=None): + return NovaBase.get_session() + +if __name__ == '__main__': + engine = NovaBase.create_engine() + session = NovaBase.create_session(engine) + + instance = Instance(image_id='as', ramdisk_id='AS', user_id='anthony') + user = User(id='anthony') + session.add(instance) + session.commit() + diff --git a/nova/models.py b/nova/models.py deleted file mode 100644 index 36d6cf3ad..000000000 --- a/nova/models.py +++ /dev/null @@ -1,368 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -SQLAlchemy models for nova data -""" - -from sqlalchemy.orm import relationship, backref, validates, exc -from sqlalchemy import Table, Column, Integer, String -from sqlalchemy import MetaData, ForeignKey, DateTime, Boolean, Text -from sqlalchemy.ext.declarative import declarative_base - -from nova import auth -from nova import exception -from nova import flags - -FLAGS=flags.FLAGS - -Base = declarative_base() - -class NovaBase(object): - __table_args__ = {'mysql_engine':'InnoDB'} - __prefix__ = 'none' - created_at = Column(DateTime) - updated_at = Column(DateTime) - - _session = None - _engine = None - @classmethod - def create_engine(cls): - if NovaBase._engine is not None: - return NovaBase._engine - from sqlalchemy import create_engine - NovaBase._engine = create_engine(FLAGS.sql_connection, echo=False) - Base.metadata.create_all(NovaBase._engine) - return NovaBase._engine - - @classmethod - def get_session(cls): - from sqlalchemy.orm import sessionmaker - if NovaBase._session == None: - NovaBase.create_engine() - NovaBase._session = sessionmaker(bind=NovaBase._engine)() - return NovaBase._session - - @classmethod - def all(cls): - session = NovaBase.get_session() - result = session.query(cls).all() - session.commit() - return result - - @classmethod - def count(cls): - session = NovaBase.get_session() - result = session.query(cls).count() - session.commit() - return result - - @classmethod - def find(cls, 
obj_id): - session = NovaBase.get_session() - try: - result = session.query(cls).filter_by(id=obj_id).one() - session.commit() - return result - except exc.NoResultFound: - raise exception.NotFound("No model for id %s" % obj_id) - - @classmethod - def find_by_str(cls, str_id): - id = int(str_id.rpartition('-')[2]) - return cls.find(id) - - @property - def str_id(self): - return "%s-%s" % (self.__prefix__, self.id) - - def save(self): - session = NovaBase.get_session() - session.add(self) - session.commit() - - def delete(self): - session = NovaBase.get_session() - session.delete(self) - session.commit() - - def refresh(self): - session = NovaBase.get_session() - session.refresh(self) - - def __setitem__(self, key, value): - setattr(self, key, value) - - def __getitem__(self, key): - return getattr(self, key) - - -class Image(Base, NovaBase): - __tablename__ = 'images' - __prefix__ = 'ami' - id = Column(Integer, primary_key=True) - user_id = Column(String(255))#, ForeignKey('users.id'), nullable=False) - project_id = Column(String(255))#, ForeignKey('projects.id'), nullable=False) - - image_type = Column(String(255)) - public = Column(Boolean, default=False) - state = Column(String(255)) - location = Column(String(255)) - arch = Column(String(255)) - default_kernel_id = Column(String(255)) - default_ramdisk_id = Column(String(255)) - - @validates('image_type') - def validate_image_type(self, key, image_type): - assert(image_type in ['machine', 'kernel', 'ramdisk', 'raw']) - - @validates('state') - def validate_state(self, key, state): - assert(state in ['available', 'pending', 'disabled']) - - @validates('default_kernel_id') - def validate_kernel_id(self, key, val): - if val != 'machine': - assert(val is None) - - @validates('default_ramdisk_id') - def validate_ramdisk_id(self, key, val): - if val != 'machine': - assert(val is None) - - -class PhysicalNode(Base, NovaBase): - __tablename__ = 'physical_nodes' - id = Column(String(255), primary_key=True) - -class 
Daemon(Base, NovaBase): - __tablename__ = 'daemons' - id = Column(Integer, primary_key=True) - node_name = Column(String(255)) #, ForeignKey('physical_node.id')) - binary = Column(String(255)) - report_count = Column(Integer, nullable=False, default=0) - - @classmethod - def find_by_args(cls, node_name, binary): - session = NovaBase.get_session() - try: - query = session.query(cls).filter_by(node_name=node_name) - result = query.filter_by(binary=binary).one() - session.commit() - return result - except exc.NoResultFound: - raise exception.NotFound("No model for %s, %s" % (node_name, - binary)) - - -class Instance(Base, NovaBase): - __tablename__ = 'instances' - __prefix__ = 'i' - id = Column(Integer, primary_key=True) - - user_id = Column(String(255)) #, ForeignKey('users.id'), nullable=False) - project_id = Column(String(255)) #, ForeignKey('projects.id')) - - @property - def user(self): - return auth.manager.AuthManager().get_user(self.user_id) - - @property - def project(self): - return auth.manager.AuthManager().get_project(self.project_id) - - # TODO(vish): make this opaque somehow - @property - def name(self): - return self.str_id - - - image_id = Column(Integer, ForeignKey('images.id'), nullable=True) - kernel_id = Column(Integer, ForeignKey('images.id'), nullable=True) - ramdisk_id = Column(Integer, ForeignKey('images.id'), nullable=True) - - launch_index = Column(Integer) - key_name = Column(String(255)) - key_data = Column(Text) - security_group = Column(String(255)) - - state = Column(Integer) - state_description = Column(String(255)) - - hostname = Column(String(255)) - node_name = Column(String(255)) #, ForeignKey('physical_node.id')) - - instance_type = Column(Integer) - - user_data = Column(Text) - - reservation_id = Column(String(255)) - mac_address = Column(String(255)) - - def set_state(self, state_code, state_description=None): - from nova.compute import power_state - self.state = state_code - if not state_description: - state_description = 
power_state.name(state_code) - self.state_description = state_description - self.save() - -# ramdisk = relationship(Ramdisk, backref=backref('instances', order_by=id)) -# kernel = relationship(Kernel, backref=backref('instances', order_by=id)) -# project = relationship(Project, backref=backref('instances', order_by=id)) - -#TODO - see Ewan's email about state improvements - # vmstate_state = running, halted, suspended, paused - # power_state = what we have - # task_state = transitory and may trigger power state transition - - #@validates('state') - #def validate_state(self, key, state): - # assert(state in ['nostate', 'running', 'blocked', 'paused', 'shutdown', 'shutoff', 'crashed']) - -class Volume(Base, NovaBase): - __tablename__ = 'volumes' - __prefix__ = 'vol' - id = Column(Integer, primary_key=True) - - user_id = Column(String(255)) #, ForeignKey('users.id'), nullable=False) - project_id = Column(String(255)) #, ForeignKey('projects.id')) - - node_name = Column(String(255)) #, ForeignKey('physical_node.id')) - size = Column(Integer) - availability_zone = Column(String(255)) # TODO(vish) foreign key? - instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) - mountpoint = Column(String(255)) - attach_time = Column(String(255)) # TODO(vish) datetime - status = Column(String(255)) # TODO(vish) enum? - attach_status = Column(String(255)) # TODO(vish) enum - -class ExportDevice(Base, NovaBase): - __tablename__ = 'export_devices' - id = Column(Integer, primary_key=True) - shelf_id = Column(Integer) - blade_id = Column(Integer) - volume_id = Column(Integer, ForeignKey('volumes.id'), nullable=True) - volume = relationship(Volume, backref=backref('export_device', - uselist=False)) - - -# TODO(vish): can these both come from the same baseclass? 
-class FixedIp(Base, NovaBase): - __tablename__ = 'fixed_ips' - id = Column(Integer, primary_key=True) - ip_str = Column(String(255), unique=True) - network_id = Column(Integer, ForeignKey('networks.id'), nullable=False) - instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) - instance = relationship(Instance, backref=backref('fixed_ip', - uselist=False)) - allocated = Column(Boolean, default=False) - leased = Column(Boolean, default=False) - reserved = Column(Boolean, default=False) - - @property - def str_id(self): - return self.ip_str - - @classmethod - def find_by_str(cls, str_id): - session = NovaBase.get_session() - try: - result = session.query(cls).filter_by(ip_str=str_id).one() - session.commit() - return result - except exc.NoResultFound: - raise exception.NotFound("No model for ip str %s" % str_id) - - -class FloatingIp(Base, NovaBase): - __tablename__ = 'floating_ips' - id = Column(Integer, primary_key=True) - ip_str = Column(String(255), unique=True) - fixed_ip_id = Column(Integer, ForeignKey('fixed_ips.id'), nullable=True) - fixed_ip = relationship(FixedIp, backref=backref('floating_ips')) - - project_id = Column(String(255)) #, ForeignKey('projects.id'), nullable=False) - node_name = Column(String(255)) #, ForeignKey('physical_node.id')) - - @property - def str_id(self): - return self.ip_str - - @classmethod - def find_by_str(cls, str_id): - session = NovaBase.get_session() - try: - result = session.query(cls).filter_by(ip_str=str_id).one() - session.commit() - return result - except exc.NoResultFound: - raise exception.NotFound("No model for ip str %s" % str_id) - - -class Network(Base, NovaBase): - __tablename__ = 'networks' - id = Column(Integer, primary_key=True) - kind = Column(String(255)) - - injected = Column(Boolean, default=False) - cidr = Column(String(255)) - netmask = Column(String(255)) - bridge = Column(String(255)) - gateway = Column(String(255)) - broadcast = Column(String(255)) - dns = Column(String(255)) - - 
vlan = Column(Integer) - vpn_public_ip_str = Column(String(255)) - vpn_public_port = Column(Integer) - vpn_private_ip_str = Column(String(255)) - dhcp_start = Column(String(255)) - - project_id = Column(String(255)) #, ForeignKey('projects.id'), nullable=False) - node_name = Column(String(255)) #, ForeignKey('physical_node.id')) - - fixed_ips = relationship(FixedIp, - single_parent=True, - backref=backref('network'), - cascade='all, delete, delete-orphan') - - -class NetworkIndex(Base, NovaBase): - __tablename__ = 'network_indexes' - id = Column(Integer, primary_key=True) - index = Column(Integer) - network_id = Column(Integer, ForeignKey('networks.id'), nullable=True) - network = relationship(Network, backref=backref('network_index', - uselist=False)) - - - - -def create_session(engine=None): - return NovaBase.get_session() - -if __name__ == '__main__': - engine = NovaBase.create_engine() - session = NovaBase.create_session(engine) - - instance = Instance(image_id='as', ramdisk_id='AS', user_id='anthony') - user = User(id='anthony') - session.add(instance) - session.commit() - -- cgit From 8d0f96432b7b07fa608cae433459645880f4a44c Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 27 Aug 2010 23:10:57 -0700 Subject: split volume into service/manager/driver --- nova/db/api.py | 25 ++++---- nova/db/sqlalchemy/api.py | 32 +++++----- nova/endpoint/cloud.py | 10 ++- nova/manager.py | 38 ++++++++++++ nova/service.py | 32 +++++++--- nova/utils.py | 23 ++++--- nova/volume/driver.py | 120 +++++++++++++++++++++++++++++++++++ nova/volume/manager.py | 122 ++++++++++++++++++++++++++++++++++++ nova/volume/service.py | 155 ++-------------------------------------------- 9 files changed, 359 insertions(+), 198 deletions(-) create mode 100644 nova/manager.py create mode 100644 nova/volume/driver.py create mode 100644 nova/volume/manager.py diff --git a/nova/db/api.py b/nova/db/api.py index 536ef1e25..5e04ee998 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -41,13 
+41,6 @@ flags.DEFINE_string('public_range', '4.4.4.0/24', 'Public IP address block') flags.DEFINE_string('private_range', '10.0.0.0/8', 'Private IP address block') flags.DEFINE_integer('cnt_vpn_clients', 5, 'Number of addresses reserved for vpn clients') -flags.DEFINE_integer('num_shelves', - 100, - 'Number of vblade shelves') -flags.DEFINE_integer('blades_per_shelf', - 16, - 'Number of vblade blades per shelf') - _impl = utils.LazyPluggable(FLAGS['db_backend'], @@ -376,6 +369,19 @@ def queue_get_for(context, topic, physical_node_id): ################### +def export_device_count(context): + """Return count of export devices.""" + return _impl.export_device_count(context) + + +def export_device_create(context, values): + """Create an export_device from the values dictionary.""" + return _impl.export_device_create(context, values) + + +################### + + def volume_allocate_shelf_and_blade(context, volume_id): """Atomically allocate a free shelf and blade from the pool.""" return _impl.volume_allocate_shelf_and_blade(context, volume_id) @@ -391,11 +397,6 @@ def volume_create(context, values): return _impl.volume_create(context, values) -def volume_ensure_blades(context, num_shelves, blades_per_shelf): - """Ensure shelves and blades have been created in the datastore.""" - return _impl.volume_ensure_blades(context, num_shelves, blades_per_shelf) - - def volume_destroy(context, volume_id): """Destroy the volume or raise if it does not exist.""" return _impl.volume_destroy(context, volume_id) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index cba85ccb7..1e688495a 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -446,10 +446,22 @@ def queue_get_for(context, topic, physical_node_id): ################### +def export_device_count(context): + return models.ExportDevice.count() + + +def export_device_create(context, values): + export_device_ref = models.ExportDevice() + for (key, value) in values.iteritems(): + 
export_device_ref[key] = value + export_device_ref.save() + return export_device_ref + + +################### + + def volume_allocate_shelf_and_blade(context, volume_id): - db.volume_ensure_blades(context, - FLAGS.num_shelves, - FLAGS.blades_per_shelf) session = models.NovaBase.get_session() query = session.query(models.ExportDevice).filter_by(volume=None) export_device = query.with_lockmode("update").first() @@ -477,7 +489,7 @@ def volume_create(context, values): for (key, value) in values.iteritems(): volume_ref[key] = value volume_ref.save() - return volume_ref.id + return volume_ref def volume_destroy(context, volume_id): @@ -494,18 +506,6 @@ def volume_detached(context, volume_id): volume_ref.save() -# NOTE(vish): should this code go up a layer? -def volume_ensure_blades(context, num_shelves, blades_per_shelf): - if models.ExportDevice.count() >= num_shelves * blades_per_shelf: - return - for shelf_id in xrange(num_shelves): - for blade_id in xrange(blades_per_shelf): - export_device = models.ExportDevice() - export_device.shelf_id = shelf_id - export_device.blade_id = blade_id - export_device.save() - - def volume_get(context, volume_id): return models.Volume.find(volume_id) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index ffe3d3cc7..6d59c8225 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -271,7 +271,6 @@ class CloudController(object): return v @rbac.allow('projectmanager', 'sysadmin') - @defer.inlineCallbacks def create_volume(self, context, size, **kwargs): vol = {} vol['size'] = size @@ -280,13 +279,12 @@ class CloudController(object): vol['availability_zone'] = FLAGS.storage_availability_zone vol['status'] = "creating" vol['attach_status'] = "detached" - volume_id = db.volume_create(context, vol) + volume_ref = db.volume_create(context, vol) - yield rpc.cast(FLAGS.volume_topic, {"method": "create_volume", - "args": {"volume_id": volume_id}}) + rpc.cast(FLAGS.volume_topic, {"method": "create_volume", + "args": 
{"volume_id": volume_ref['id']}}) - volume = db.volume_get(context, volume_id) - defer.returnValue({'volumeSet': [self._format_volume(context, volume)]}) + return {'volumeSet': [self._format_volume(context, volume_ref)]} @rbac.allow('projectmanager', 'sysadmin') diff --git a/nova/manager.py b/nova/manager.py new file mode 100644 index 000000000..4f212a41b --- /dev/null +++ b/nova/manager.py @@ -0,0 +1,38 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Base class for managers of different parts of the system +""" + +from nova import utils +from nova import flags + + +FLAGS = flags.FLAGS +flags.DEFINE_string('db_driver', 'nova.db.api' + 'driver to use for volume creation') + + +class Manager(object): + """DB driver is injected in the init method""" + def __init__(self, db_driver=None): + if not db_driver: + db_driver=FLAGS.db_driver + self.db = utils.import_object(db_driver) + diff --git a/nova/service.py b/nova/service.py index 9c536c354..59da6f04e 100644 --- a/nova/service.py +++ b/nova/service.py @@ -32,6 +32,7 @@ from nova import db from nova import exception from nova import flags from nova import rpc +from nova import utils FLAGS = flags.FLAGS @@ -43,15 +44,29 @@ flags.DEFINE_integer('report_interval', 10, class Service(object, service.Service): """Base class for workers that run on hosts.""" + def __init__(self, manager, *args, **kwargs): + self.manager = manager + super(self, Service).__init__(*args, **kwargs) + + def __getattr__(self, key): + try: + super(Service, self).__getattr__(key) + except AttributeError: + self.manager.__getattr__(key) + @classmethod - def create(cls, report_interval=None, bin_name=None, topic=None): + def create(cls, + report_interval=None, + bin_name=None, + topic=None, + manager=None): """Instantiates class and passes back application object. 
Args: report_interval, defaults to flag bin_name, defaults to basename of executable topic, defaults to basename - "nova-" part - + manager, defaults to FLAGS._manager """ if not report_interval: report_interval = FLAGS.report_interval @@ -61,21 +76,24 @@ class Service(object, service.Service): bin_name = os.path.basename(inspect.stack()[-1][1]) if not topic: topic = bin_name.rpartition("nova-")[2] + if not manager: + manager = FLAGS.get('%s_manager' % topic) + manager_ref = utils.import_object(manager) logging.warn("Starting %s node" % topic) - node_instance = cls() + service_ref = cls(manager_ref) conn = rpc.Connection.instance() consumer_all = rpc.AdapterConsumer( connection=conn, topic='%s' % topic, - proxy=node_instance) + proxy=service_ref) consumer_node = rpc.AdapterConsumer( connection=conn, topic='%s.%s' % (topic, FLAGS.node_name), - proxy=node_instance) + proxy=service_ref) - pulse = task.LoopingCall(node_instance.report_state, + pulse = task.LoopingCall(service_ref.report_state, FLAGS.node_name, bin_name) pulse.start(interval=report_interval, now=False) @@ -86,7 +104,7 @@ class Service(object, service.Service): # This is the parent service that twistd will be looking for when it # parses this file, return it so that we can get it into globals. 
application = service.Application(bin_name) - node_instance.setServiceParent(application) + service_ref.setServiceParent(application) return application @defer.inlineCallbacks diff --git a/nova/utils.py b/nova/utils.py index c4a8f17e9..392fa8c46 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -46,6 +46,13 @@ def import_class(import_str): except (ImportError, ValueError, AttributeError): raise exception.NotFound('Class %s cannot be found' % class_str) +def import_object(import_str): + """Returns an object including a module or module and class""" + cls = import_class(import_str) + try: + return cls() + except TypeError: + return cls def fetchfile(url, target): logging.debug("Fetching %s" % url) @@ -73,7 +80,7 @@ def execute(cmd, process_input=None, addl_env=None, check_exit_code=True): if obj.returncode: logging.debug("Result was %s" % (obj.returncode)) if check_exit_code and obj.returncode <> 0: - raise Exception( "Unexpected exit code: %s. result=%s" + raise Exception( "Unexpected exit code: %s. 
result=%s" % (obj.returncode, result)) return result @@ -105,7 +112,7 @@ def runthis(prompt, cmd, check_exit_code = True): exit_code = subprocess.call(cmd.split(" ")) logging.debug(prompt % (exit_code)) if check_exit_code and exit_code <> 0: - raise Exception( "Unexpected exit code: %s from cmd: %s" + raise Exception( "Unexpected exit code: %s from cmd: %s" % (exit_code, cmd)) @@ -150,21 +157,21 @@ def parse_isotime(timestr): return datetime.datetime.strptime(timestr, TIME_FORMAT) - + class LazyPluggable(object): """A pluggable backend loaded lazily based on some value.""" - + def __init__(self, pivot, **backends): self.__backends = backends self.__pivot = pivot self.__backend = None - + def __get_backend(self): if not self.__backend: backend_name = self.__pivot.value if backend_name not in self.__backends: raise exception.Error('Invalid backend: %s' % backend_name) - + backend = self.__backends[backend_name] if type(backend) == type(tuple()): name = backend[0] @@ -172,11 +179,11 @@ class LazyPluggable(object): else: name = backend fromlist = backend - + self.__backend = __import__(name, None, None, fromlist) logging.error('backend %s', self.__backend) return self.__backend - + def __getattr__(self, key): backend = self.__get_backend() return getattr(backend, key) diff --git a/nova/volume/driver.py b/nova/volume/driver.py new file mode 100644 index 000000000..579472047 --- /dev/null +++ b/nova/volume/driver.py @@ -0,0 +1,120 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Drivers for volumes +""" + +import logging + +from twisted.internet import defer + +from nova import flags +from nova import process +from nova import utils + + +FLAGS = flags.FLAGS +flags.DEFINE_string('storage_dev', '/dev/sdb', + 'Physical device to use for volumes') +flags.DEFINE_string('volume_group', 'nova-volumes', + 'Name for the VG that will contain exported volumes') +flags.DEFINE_string('aoe_eth_dev', 'eth0', + 'Which device to export the volumes on') + + +class FakeAOEDriver(object): + @defer.inlineCallbacks + def create_volume(self, volume_id, size): + logging.debug("Fake AOE: create_volume %s, %s", volume_id, size) + + @defer.inlineCallbacks + def delete_volume(self, volume_id): + logging.debug("Fake AOE: delete_volume %s", volume_id) + + @defer.inlineCallbacks + def create_export(self, volume_id, shelf_id, blade_id): + logging.debug("Fake AOE: create_export %s, %s, %s", + volume_id, shelf_id, blade_id) + + @defer.inlineCallbacks + def remove_export(self, volume_id, shelf_id, blade_id): + logging.debug("Fake AOE: remove_export %s, %s, %s", + volume_id, shelf_id, blade_id) + + @defer.inlineCallbacks + def ensure_exports(self): + logging.debug("Fake AOE: ensure_export") + + +class AOEDriver(object): + def __init__(self, *args, **kwargs): + super(AOEDriver, self).__init__(*args, **kwargs) + # NOTE(vish): no need for thise to be async, but it may be + # best to explicitly do them at some other time + utils.execute("sudo pvcreate %s" % (FLAGS.storage_dev)) + utils.execute("sudo vgcreate %s %s" % (FLAGS.volume_group, + FLAGS.storage_dev)) + 
@defer.inlineCallbacks + def create_volume(self, volume_id, size): + if int(size) == 0: + sizestr = '100M' + else: + sizestr = '%sG' % size + yield process.simple_execute( + "sudo lvcreate -L %s -n %s %s" % (sizestr, + volume_id, + FLAGS.volume_group), + terminate_on_stderr=False) + + @defer.inlineCallbacks + def delete_volume(self, volume_id): + yield process.simple_execute( + "sudo lvremove -f %s/%s" % (FLAGS.volume_group, + volume_id), + terminate_on_stderr=False) + + @defer.inlineCallbacks + def create_export(self, volume_id, shelf_id, blade_id): + yield process.simple_execute( + "sudo vblade-persist setup %s %s %s /dev/%s/%s" % + (shelf_id, + blade_id, + FLAGS.aoe_eth_dev, + FLAGS.volume_group, + volume_id), + terminate_on_stderr=False) + + @defer.inlineCallbacks + def remove_export(self, _volume_id, shelf_id, blade_id): + yield process.simple_execute( + "sudo vblade-persist stop %s %s" % (shelf_id, blade_id), + terminate_on_stderr=False) + yield process.simple_execute( + "sudo vblade-persist destroy %s %s" % (shelf_id, blade_id), + terminate_on_stderr=False) + + @defer.inlineCallbacks + def ensure_exports(self): + # NOTE(ja): wait for blades to appear + yield process.simple_execute("sleep 5") + yield process.simple_execute("sudo vblade-persist auto all", + check_exit_code=False) + yield process.simple_execute("sudo vblade-persist start all", + check_exit_code=False) + diff --git a/nova/volume/manager.py b/nova/volume/manager.py new file mode 100644 index 000000000..c4686a75c --- /dev/null +++ b/nova/volume/manager.py @@ -0,0 +1,122 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Volume manager manages creating, attaching, detaching, and +destroying persistent storage volumes, ala EBS. +""" + +import logging + +from twisted.internet import defer + +from nova import db +from nova import exception +from nova import flags +from nova import manager +from nova import process +from nova import service +from nova import utils +from nova.volume import driver + + +FLAGS = flags.FLAGS +flags.DEFINE_string('storage_availability_zone', + 'nova', + 'availability zone of this service') +flags.DEFINE_boolean('fake_storage', False, + 'Should we make real storage volumes to attach?') +flags.DEFINE_string('volume_driver', 'nova.volume.driver.AOEDriver' + 'Driver to use for volume creation') +flags.DEFINE_integer('num_shelves', + 100, + 'Number of vblade shelves') +flags.DEFINE_integer('blades_per_shelf', + 16, + 'Number of vblade blades per shelf') + + +class AOEManager(manager.Manager): + def __init__(self, volume_driver=None, *args, **kwargs): + if not volume_driver: + # NOTE(vish): support the legacy fake storage flag + if FLAGS.fake_storage: + volume_driver='nova.volume.driver.FakeAOEDriver' + else: + volume_driver=FLAGS.volume_driver + self.driver = utils.import_object(volume_driver) + super(AOEManager, self).__init__(*args, **kwargs) + + def _ensure_blades(self, context): + total_blades = FLAGS.num_shelves, FLAGS.blades_per_shelf + if self.db.export_device_count(context) >= total_blades: + return + for shelf_id in xrange(FLAGS.num_shelves): + for blade_id in xrange(FLAGS.blades_per_shelf): + dev = {'shelf_id': shelf_id, 'blade_id': 
blade_id} + self.db.export_device_create(context, dev) + + @defer.inlineCallbacks + def create_volume(self, volume_id, context=None): + """Creates and exports the volume.""" + logging.info("volume %s: creating" % (volume_id)) + + volume_ref = self.db.volume_get(context, volume_id) + + self.db.volume_update(context, + volume_id, + {'node_name': FLAGS.node_name}) + + size = volume_ref['size'] + logging.debug("volume %s: creating lv of size %sG" % (volume_id, size)) + yield self.driver.create_volume(volume_id, size) + + logging.debug("volume %s: allocating shelf & blade" % (volume_id)) + rval = self.db.volume_allocate_shelf_and_blade(context, volume_id) + (shelf_id, blade_id) = rval + + logging.debug("volume %s: exporting shelf %s & blade %s" % (volume_id, + shelf_id, blade_id)) + + yield self.driver.create_export(volume_id, shelf_id, blade_id) + # TODO(joshua): We need to trigger a fanout message + # for aoe-discover on all the nodes + + self.db.volume_update(context, volume_id, {'status': 'available'}) + + logging.debug("volume %s: re-exporting all values" % (volume_id)) + yield self.driver.ensure_exports() + + logging.debug("volume %s: created successfully" % (volume_id)) + defer.returnValue(volume_id) + + @defer.inlineCallbacks + def delete_volume(self, volume_id, context=None): + logging.debug("Deleting volume with id of: %s" % (volume_id)) + volume_ref = self.db.volume_get(context, volume_id) + if volume_ref['attach_status'] == "attached": + raise exception.Error("Volume is still attached") + if volume_ref['node_name'] != FLAGS.node_name: + raise exception.Error("Volume is not local to this node") + shelf_id, blade_id = self.db.volume_get_shelf_and_blade(context, + volume_id) + yield self.driver.remove_export(volume_id, shelf_id, blade_id) + yield self.driver.delete_volume(volume_id) + self.db.volume_destroy(context, volume_id) + defer.returnValue(True) + diff --git a/nova/volume/service.py b/nova/volume/service.py index 7f6747577..423359007 100644 --- 
a/nova/volume/service.py +++ b/nova/volume/service.py @@ -17,164 +17,21 @@ # under the License. """ -Nova Storage manages creating, attaching, detaching, and -destroying persistent storage volumes, ala EBS. -Currently uses Ata-over-Ethernet. +Volume service allows rpc calls to the volume manager and reports state +to the database. """ -import logging - -from twisted.internet import defer - -from nova import db -from nova import exception from nova import flags -from nova import process from nova import service FLAGS = flags.FLAGS -flags.DEFINE_string('storage_dev', '/dev/sdb', - 'Physical device to use for volumes') -flags.DEFINE_string('volume_group', 'nova-volumes', - 'Name for the VG that will contain exported volumes') -flags.DEFINE_string('aoe_eth_dev', 'eth0', - 'Which device to export the volumes on') -flags.DEFINE_string('aoe_export_dir', - '/var/lib/vblade-persist/vblades', - 'AoE directory where exports are created') -flags.DEFINE_string('storage_availability_zone', - 'nova', - 'availability zone of this service') -flags.DEFINE_boolean('fake_storage', False, - 'Should we make real storage volumes to attach?') +flags.DEFINE_string('volume_manager', 'nova.volume.manager.AOEManager', + 'Manager for volumes') class VolumeService(service.Service): """ - There is one VolumeNode running on each host. - However, each VolumeNode can report on the state of - *all* volumes in the cluster. + Volume Service automatically passes commands on to the Volume Manager """ - def __init__(self): - super(VolumeService, self).__init__() - self._exec_init_volumes() - - @defer.inlineCallbacks - def create_volume(self, volume_id, context=None): - """ - Creates an exported volume (fake or real), - restarts exports to make it available. - Volume at this point has size, owner, and zone. 
- """ - logging.info("volume %s: creating" % (volume_id)) - - volume_ref = db.volume_get(context, volume_id) - - db.volume_update(context, volume_id, {'node_name': FLAGS.node_name}) - - size = volume_ref['size'] - logging.debug("volume %s: creating lv of size %sG" % (volume_id, size)) - yield self._exec_create_volume(volume_id, size) - - logging.debug("volume %s: allocating shelf & blade" % (volume_id)) - (shelf_id, blade_id) = db.volume_allocate_shelf_and_blade(context, - volume_id) - - logging.debug("volume %s: exporting shelf %s & blade %s" % (volume_id, - shelf_id, blade_id)) - - yield self._exec_create_export(volume_id, shelf_id, blade_id) - # TODO(joshua): We need to trigger a fanout message - # for aoe-discover on all the nodes - - db.volume_update(context, volume_id, {'status': 'available'}) - - logging.debug("volume %s: re-exporting all values" % (volume_id)) - yield self._exec_ensure_exports() - - logging.debug("volume %s: created successfully" % (volume_id)) - defer.returnValue(volume_id) - - @defer.inlineCallbacks - def delete_volume(self, volume_id, context=None): - logging.debug("Deleting volume with id of: %s" % (volume_id)) - volume_ref = db.volume_get(context, volume_id) - if volume_ref['attach_status'] == "attached": - raise exception.Error("Volume is still attached") - if volume_ref['node_name'] != FLAGS.node_name: - raise exception.Error("Volume is not local to this node") - shelf_id, blade_id = db.volume_get_shelf_and_blade(context, - volume_id) - yield self._exec_remove_export(volume_id, shelf_id, blade_id) - yield self._exec_delete_volume(volume_id) - db.volume_destroy(context, volume_id) - defer.returnValue(True) - - @defer.inlineCallbacks - def _exec_create_volume(self, volume_id, size): - if FLAGS.fake_storage: - defer.returnValue(None) - if int(size) == 0: - sizestr = '100M' - else: - sizestr = '%sG' % size - yield process.simple_execute( - "sudo lvcreate -L %s -n %s %s" % (sizestr, - volume_id, - FLAGS.volume_group), - 
terminate_on_stderr=False) - - @defer.inlineCallbacks - def _exec_delete_volume(self, volume_id): - if FLAGS.fake_storage: - defer.returnValue(None) - yield process.simple_execute( - "sudo lvremove -f %s/%s" % (FLAGS.volume_group, - volume_id), - terminate_on_stderr=False) - - @defer.inlineCallbacks - def _exec_create_export(self, volume_id, shelf_id, blade_id): - if FLAGS.fake_storage: - defer.returnValue(None) - yield process.simple_execute( - "sudo vblade-persist setup %s %s %s /dev/%s/%s" % - (shelf_id, - blade_id, - FLAGS.aoe_eth_dev, - FLAGS.volume_group, - volume_id), - terminate_on_stderr=False) - - @defer.inlineCallbacks - def _exec_remove_export(self, _volume_id, shelf_id, blade_id): - if FLAGS.fake_storage: - defer.returnValue(None) - yield process.simple_execute( - "sudo vblade-persist stop %s %s" % (shelf_id, blade_id), - terminate_on_stderr=False) - yield process.simple_execute( - "sudo vblade-persist destroy %s %s" % (shelf_id, blade_id), - terminate_on_stderr=False) - - @defer.inlineCallbacks - def _exec_ensure_exports(self): - if FLAGS.fake_storage: - defer.returnValue(None) - - yield process.simple_execute("sleep 5") # wait for blades to appear - yield process.simple_execute("sudo vblade-persist auto all", - check_exit_code=False) - yield process.simple_execute("sudo vblade-persist start all", - check_exit_code=False) - - @defer.inlineCallbacks - def _exec_init_volumes(self): - if FLAGS.fake_storage: - defer.returnValue(None) - yield process.simple_execute( - "sudo pvcreate %s" % (FLAGS.storage_dev)) - yield process.simple_execute( - "sudo vgcreate %s %s" % (FLAGS.volume_group, - FLAGS.storage_dev)) + pass -- cgit From d3f55cffc903af8250993efc58fb67d84510c8c3 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 27 Aug 2010 23:16:31 -0700 Subject: move None context up into cloud --- nova/endpoint/cloud.py | 6 ++++-- nova/volume/manager.py | 8 +++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/nova/endpoint/cloud.py 
b/nova/endpoint/cloud.py index 6d59c8225..cb676aea1 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -282,7 +282,8 @@ class CloudController(object): volume_ref = db.volume_create(context, vol) rpc.cast(FLAGS.volume_topic, {"method": "create_volume", - "args": {"volume_id": volume_ref['id']}}) + "args": {"context": None, + "volume_id": volume_ref['id']}}) return {'volumeSet': [self._format_volume(context, volume_ref)]} @@ -633,7 +634,8 @@ class CloudController(object): host = db.volume_get_host(context, volume_ref['id']) rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "delete_volume", - "args": {"volume_id": volume_id}}) + "args": {"context": None, + "volume_id": volume_id}}) return defer.succeed(True) @rbac.allow('all') diff --git a/nova/volume/manager.py b/nova/volume/manager.py index c4686a75c..0683703a1 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -25,12 +25,9 @@ import logging from twisted.internet import defer -from nova import db from nova import exception from nova import flags from nova import manager -from nova import process -from nova import service from nova import utils from nova.volume import driver @@ -72,7 +69,7 @@ class AOEManager(manager.Manager): self.db.export_device_create(context, dev) @defer.inlineCallbacks - def create_volume(self, volume_id, context=None): + def create_volume(self, context, volume_id): """Creates and exports the volume.""" logging.info("volume %s: creating" % (volume_id)) @@ -87,6 +84,7 @@ class AOEManager(manager.Manager): yield self.driver.create_volume(volume_id, size) logging.debug("volume %s: allocating shelf & blade" % (volume_id)) + self._ensure_blades(context) rval = self.db.volume_allocate_shelf_and_blade(context, volume_id) (shelf_id, blade_id) = rval @@ -106,7 +104,7 @@ class AOEManager(manager.Manager): defer.returnValue(volume_id) @defer.inlineCallbacks - def delete_volume(self, volume_id, context=None): + def delete_volume(self, context, 
volume_id): logging.debug("Deleting volume with id of: %s" % (volume_id)) volume_ref = self.db.volume_get(context, volume_id) if volume_ref['attach_status'] == "attached": -- cgit From 74e5e817905322e609870e60ce55863f35ce7893 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sat, 28 Aug 2010 02:02:07 -0700 Subject: moved network code into business layer --- nova/db/api.py | 46 ++++------- nova/db/sqlalchemy/api.py | 113 +++++---------------------- nova/endpoint/cloud.py | 19 ++--- nova/flags.py | 8 ++ nova/network/service.py | 192 ++-------------------------------------------- nova/volume/service.py | 6 -- 6 files changed, 56 insertions(+), 328 deletions(-) diff --git a/nova/db/api.py b/nova/db/api.py index 5e04ee998..699118b16 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -26,31 +26,12 @@ FLAGS = flags.FLAGS flags.DEFINE_string('db_backend', 'sqlalchemy', 'The backend to use for db') -# TODO(vish): where should these flags go? -flags.DEFINE_string('network_type', - 'vlan', - 'Service Class for Networking') -flags.DEFINE_integer('vlan_start', 100, 'First VLAN for private networks') -flags.DEFINE_integer('num_networks', 1000, 'Number of networks to support') -flags.DEFINE_string('vpn_ip', utils.get_my_ip(), - 'Public IP for the cloudpipe VPN servers') -flags.DEFINE_integer('vpn_start', 1000, 'First Vpn port for private networks') -flags.DEFINE_integer('network_size', 256, - 'Number of addresses in each private subnet') -flags.DEFINE_string('public_range', '4.4.4.0/24', 'Public IP address block') -flags.DEFINE_string('private_range', '10.0.0.0/8', 'Private IP address block') -flags.DEFINE_integer('cnt_vpn_clients', 5, - 'Number of addresses reserved for vpn clients') - _impl = utils.LazyPluggable(FLAGS['db_backend'], sqlalchemy='nova.db.sqlalchemy.api') -class AddressNotAllocated(exception.Error): - pass - - +# TODO(vish): where should these exceptions go? 
class NoMoreAddresses(exception.Error): pass @@ -151,6 +132,7 @@ def fixed_ip_get_network(context, address): """Get a network for a fixed ip by address.""" return _impl.fixed_ip_get_network(context, address) + def fixed_ip_lease(context, address): """Lease a fixed ip by address.""" return _impl.fixed_ip_lease(context, address) @@ -256,11 +238,6 @@ def instance_update(context, instance_id, values): #################### -def network_allocate(context, project_id): - """Allocate a network for a project.""" - return _impl.network_allocate(context, project_id) - - def network_count(context): """Return the number of networks.""" return _impl.network_count(context) @@ -296,11 +273,6 @@ def network_destroy(context, network_id): return _impl.network_destroy(context, network_id) -def network_ensure_indexes(context, num_networks): - """Ensure that network indexes exist, creating them if necessary.""" - return _impl.network_ensure_indexes(context, num_networks) - - def network_get(context, network_id): """Get an network or raise if it does not exist.""" return _impl.network_get(context, network_id) @@ -322,15 +294,25 @@ def network_get_host(context, network_id): def network_get_index(context, network_id): - """Gets non-conflicting index for network""" + """Get non-conflicting index for network""" return _impl.network_get_index(context, network_id) def network_get_vpn_ip(context, network_id): - """Gets non-conflicting index for network""" + """Get non-conflicting index for network""" return _impl.network_get_vpn_ip(context, network_id) +def network_index_count(context): + """Return count of network indexes""" + return _impl.network_index_count(context) + + +def network_index_create(context, values): + """Create a network index from the values dict""" + return _impl.network_index_create(context, values) + + def network_set_cidr(context, network_id, cidr): """Set the Classless Inner Domain Routing for the network""" return _impl.network_set_cidr(context, network_id, cidr) diff 
--git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 1e688495a..b95346861 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -16,10 +16,6 @@ # License for the specific language governing permissions and limitations # under the License. -import math - -import IPy - from nova import db from nova import exception from nova import flags @@ -119,6 +115,14 @@ def fixed_ip_allocate(context, network_id): return fixed_ip_ref +def fixed_ip_create(context, network_id, address): + fixed_ip_ref = models.FixedIp() + fixed_ip_ref.network = db.network_get(context, network_id) + fixed_ip_ref['ip_str'] = address + fixed_ip_ref.save() + return fixed_ip_ref + + def fixed_ip_get_by_address(context, address): return models.FixedIp.find_by_str(address) @@ -127,21 +131,6 @@ def fixed_ip_get_network(context, address): return models.FixedIp.find_by_str(address).network -def fixed_ip_lease(context, address): - fixed_ip_ref = fixed_ip_get_by_address(context, address) - if not fixed_ip_ref['allocated']: - raise db.AddressNotAllocated(address) - fixed_ip_ref['leased'] = True - fixed_ip_ref.save() - - -def fixed_ip_release(context, address): - fixed_ip_ref = fixed_ip_get_by_address(context, address) - fixed_ip_ref['allocated'] = False - fixed_ip_ref['leased'] = False - fixed_ip_ref.save() - - def fixed_ip_deallocate(context, address): fixed_ip_ref = fixed_ip_get_by_address(context, address) fixed_ip_ref['allocated'] = False @@ -253,32 +242,10 @@ def instance_update(context, instance_id, values): ################### -# NOTE(vish): is there a better place for this logic? 
-def network_allocate(context, project_id): - """Set up the network""" - db.network_ensure_indexes(context, FLAGS.num_networks) - network_id = db.network_create(context, {'project_id': project_id}) - private_net = IPy.IP(FLAGS.private_range) - index = db.network_get_index(context, network_id) - vlan = FLAGS.vlan_start + index - start = index * FLAGS.network_size - significant_bits = 32 - int(math.log(FLAGS.network_size, 2)) - cidr = "%s/%s" % (private_net[start], significant_bits) - db.network_set_cidr(context, network_id, cidr) - net = {} - net['kind'] = FLAGS.network_type - net['vlan'] = vlan - net['bridge'] = 'br%s' % vlan - net['vpn_public_ip_str'] = FLAGS.vpn_ip - net['vpn_public_port'] = FLAGS.vpn_start + index - db.network_update(context, network_id, net) - db.network_create_fixed_ips(context, network_id, FLAGS.cnt_vpn_clients) - return network_id - - def network_count(context): return models.Network.count() + def network_count_allocated_ips(context, network_id): session = models.NovaBase.get_session() query = session.query(models.FixedIp).filter_by(network_id=network_id) @@ -305,36 +272,7 @@ def network_create(context, values): for (key, value) in values.iteritems(): network_ref[key] = value network_ref.save() - return network_ref.id - - -def network_create_fixed_ips(context, network_id, num_vpn_clients): - network_ref = network_get(context, network_id) - # NOTE(vish): should these be properties of the network as opposed - # to constants? 
- BOTTOM_RESERVED = 3 - TOP_RESERVED = 1 + num_vpn_clients - project_net = IPy.IP(network_ref['cidr']) - num_ips = len(project_net) - session = models.NovaBase.get_session() - for i in range(num_ips): - fixed_ip = models.FixedIp() - fixed_ip['ip_str'] = str(project_net[i]) - if i < BOTTOM_RESERVED or num_ips - i < TOP_RESERVED: - fixed_ip['reserved'] = True - fixed_ip['network'] = network_get(context, network_id) - session.add(fixed_ip) - session.commit() - - -def network_ensure_indexes(context, num_networks): - if models.NetworkIndex.count() == 0: - session = models.NovaBase.get_session() - for i in range(num_networks): - network_index = models.NetworkIndex() - network_index.index = i - session.add(network_index) - session.commit() + return network_ref def network_destroy(context, network_id): @@ -353,6 +291,7 @@ def network_get_associated_fixed_ips(context, network_id): session.commit() return fixed_ips + def network_get_by_bridge(context, bridge): session = models.NovaBase.get_session() rv = session.query(models.Network).filter_by(bridge=bridge).first() @@ -361,17 +300,6 @@ def network_get_by_bridge(context, bridge): return rv -def network_get_vpn_ip(context, network_id): - # TODO(vish): possible concurrency issue here - network = network_get(context, network_id) - address = network['vpn_private_ip_str'] - fixed_ip = fixed_ip_get_by_address(context, address) - if fixed_ip['allocated']: - raise db.AddressAlreadyAllocated() - db.fixed_ip_update(context, fixed_ip['id'], {'allocated': True}) - return fixed_ip - - def network_get_host(context, network_id): network_ref = network_get(context, network_id) return network_ref['node_name'] @@ -389,16 +317,15 @@ def network_get_index(context, network_id): return network_index['index'] -def network_set_cidr(context, network_id, cidr): - network_ref = network_get(context, network_id) - project_net = IPy.IP(cidr) - network_ref['cidr'] = cidr - # FIXME we can turn these into properties - network_ref['netmask'] = 
str(project_net.netmask()) - network_ref['gateway'] = str(project_net[1]) - network_ref['broadcast'] = str(project_net.broadcast()) - network_ref['vpn_private_ip_str'] = str(project_net[2]) - network_ref['dhcp_start'] = str(project_net[3]) +def network_index_count(context): + return models.NetworkIndex.count() + + +def network_index_create(context, values): + network_index_ref = models.NetworkIndex() + for (key, value) in values.iteritems(): + network_index_ref[key] = value + network_index_ref.save() def network_set_host(context, network_id, host_id): diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index cb676aea1..ceff0f827 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -60,6 +60,7 @@ class CloudController(object): sent to the other nodes. """ def __init__(self): + self.network_manager = utils.load_object(FLAGS.network_manager) self.setup() def __str__(self): @@ -522,7 +523,6 @@ class CloudController(object): # TODO: Get the real security group of launch in here security_group = "default" - network_ref = db.project_get_network(context, context.project.id) reservation_id = utils.generate_uid('r') base_options = {} base_options['image_id'] = image_id @@ -540,30 +540,27 @@ class CloudController(object): for num in range(int(kwargs['max_count'])): inst_id = db.instance_create(context, base_options) - if vpn: - fixed_ip = db.network_get_vpn_ip(context, network_ref['id']) - else: - fixed_ip = db.fixed_ip_allocate(context, network_ref['id']) - print fixed_ip['ip_str'], inst_id - db.fixed_ip_instance_associate(context, fixed_ip['ip_str'], inst_id) - print fixed_ip.instance inst = {} inst['mac_address'] = utils.generate_mac() inst['launch_index'] = num inst['hostname'] = inst_id db.instance_update(context, inst_id, inst) - + address = self.network_manager.allocate_fixed_ip(context, + inst_id, + vpn) # TODO(vish): This probably should be done in the scheduler # network is setup when host is assigned network_topic = yield 
self._get_network_topic(context) rpc.call(network_topic, {"method": "setup_fixed_ip", - "args": {"address": fixed_ip['ip_str']}}) + "args": {"context": None, + "address": address}}) rpc.cast(FLAGS.compute_topic, {"method": "run_instance", - "args": {"instance_id": inst_id}}) + "args": {"context": None, + "instance_id": inst_id}}) logging.debug("Casting to node for %s/%s's instance %s" % (context.project.name, context.user.name, inst_id)) defer.returnValue(self._format_run_instances(context, diff --git a/nova/flags.py b/nova/flags.py index d4b2b7c3b..dfdfe9785 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -213,3 +213,11 @@ DEFINE_string('sql_connection', 'sqlite:///%s/nova.sqlite' % os.path.abspath("./"), 'connection string for sql database') +DEFINE_string('compute_manager', 'nova.compute.manager.ComputeManager', + 'Manager for compute') +DEFINE_string('network_manager', 'nova.network.manager.VlanManager', + 'Manager for network') +DEFINE_string('volume_manager', 'nova.volume.manager.AOEManager', + 'Manager for volume') + + diff --git a/nova/network/service.py b/nova/network/service.py index da2953470..28f017a27 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -17,195 +17,15 @@ # under the License. """ -Network Hosts are responsible for allocating ips and setting up network +Network service allows rpc calls to the network manager and reports state +to the database. 
""" -import logging - -from nova import db -from nova import exception -from nova import flags from nova import service -from nova.network import linux_net - - -FLAGS = flags.FLAGS -flags.DEFINE_string('flat_network_bridge', 'br100', - 'Bridge for simple network instances') -flags.DEFINE_list('flat_network_ips', - ['192.168.0.2', '192.168.0.3', '192.168.0.4'], - 'Available ips for simple network') -flags.DEFINE_string('flat_network_network', '192.168.0.0', - 'Network for simple network') -flags.DEFINE_string('flat_network_netmask', '255.255.255.0', - 'Netmask for simple network') -flags.DEFINE_string('flat_network_gateway', '192.168.0.1', - 'Broadcast for simple network') -flags.DEFINE_string('flat_network_broadcast', '192.168.0.255', - 'Broadcast for simple network') -flags.DEFINE_string('flat_network_dns', '8.8.4.4', - 'Dns for simple network') - - -class AddressAlreadyAllocated(exception.Error): - pass - - -class AddressNotAllocated(exception.Error): - pass - -# TODO(vish): some better type of dependency injection? -_driver = linux_net - -def type_to_class(network_type): - """Convert a network_type string into an actual Python class""" - if not network_type: - logging.warn("Network type couldn't be determined, using %s" % - FLAGS.network_type) - network_type = FLAGS.network_type - if network_type == 'flat': - return FlatNetworkService - elif network_type == 'vlan': - return VlanNetworkService - raise exception.NotFound("Couldn't find %s network type" % network_type) - - -def setup_compute_network(context, project_id): - """Sets up the network on a compute host""" - network_ref = db.project_get_network(None, project_id) - srv = type_to_class(network_ref.kind) - srv.setup_compute_network(context, network_ref['id']) - - -class BaseNetworkService(service.Service): - """Implements common network service functionality - - This class must be subclassed. 
+class NetworkService(service.Service): """ - - def set_network_host(self, project_id, context=None): - """Safely sets the host of the projects network""" - network_ref = db.project_get_network(context, project_id) - # TODO(vish): can we minimize db access by just getting the - # id here instead of the ref? - network_id = network_ref['id'] - host = db.network_set_host(context, - network_id, - FLAGS.node_name) - self._on_set_network_host(context, network_id) - return host - - def setup_fixed_ip(self, address): - """Sets up rules for fixed ip""" - raise NotImplementedError() - - def _on_set_network_host(self, context, network_id): - """Called when this host becomes the host for a project""" - raise NotImplementedError() - - @classmethod - def setup_compute_network(cls, context, network_id): - """Sets up matching network for compute hosts""" - raise NotImplementedError() - - def allocate_floating_ip(self, project_id, context=None): - """Gets an floating ip from the pool""" - # TODO(vish): add floating ips through manage command - return db.floating_ip_allocate_address(context, - FLAGS.node_name, - project_id) - - def associate_floating_ip(self, floating_address, fixed_address, - context=None): - """Associates an floating ip to a fixed ip""" - db.floating_ip_fixed_ip_associate(context, - floating_address, - fixed_address) - _driver.bind_floating_ip(floating_address) - _driver.ensure_floating_forward(floating_address, fixed_address) - - def disassociate_floating_ip(self, floating_address, context=None): - """Disassociates a floating ip""" - fixed_address = db.floating_ip_disassociate(context, - floating_address) - _driver.unbind_floating_ip(floating_address) - _driver.remove_floating_forward(floating_address, fixed_address) - - def deallocate_floating_ip(self, floating_address, context=None): - """Returns an floating ip to the pool""" - db.floating_ip_deallocate(context, floating_address) - - -class FlatNetworkService(BaseNetworkService): - """Basic network where no 
vlans are used""" - - @classmethod - def setup_compute_network(cls, context, network_id): - """Network is created manually""" - pass - - def setup_fixed_ip(self, address): - """Currently no setup""" - pass - - def _on_set_network_host(self, context, network_id): - """Called when this host becomes the host for a project""" - # NOTE(vish): should there be two types of network objects - # in the database? - net = {} - net['injected'] = True - net['kind'] = FLAGS.network_type - net['network_str']=FLAGS.flat_network_network - net['netmask']=FLAGS.flat_network_netmask - net['bridge']=FLAGS.flat_network_bridge - net['gateway']=FLAGS.flat_network_gateway - net['broadcast']=FLAGS.flat_network_broadcast - net['dns']=FLAGS.flat_network_dns - db.network_update(context, network_id, net) - # TODO(vish): add public ips from flags to the datastore - -class VlanNetworkService(BaseNetworkService): - """Vlan network with dhcp""" - - def setup_fixed_ip(self, address, context=None): - """Gets a fixed ip from the pool""" - fixed_ip_ref = db.fixed_ip_get_by_address(context, address) - network_ref = db.fixed_ip_get_network(context, address) - if db.instance_is_vpn(context, fixed_ip_ref['instance_id']): - _driver.ensure_vlan_forward(network_ref['vpn_public_ip_str'], - network_ref['vpn_public_port'], - network_ref['vpn_private_ip_str']) - _driver.update_dhcp(context, network_ref['id']) - - def lease_fixed_ip(self, address, context=None): - """Called by bridge when ip is leased""" - logging.debug("Leasing IP %s", address) - db.fixed_ip_lease(context, address) - - def release_fixed_ip(self, address, context=None): - """Called by bridge when ip is released""" - logging.debug("Releasing IP %s", address) - db.fixed_ip_release(context, address) - db.fixed_ip_instance_disassociate(context, address) - - def restart_nets(self): - """Ensure the network for each user is enabled""" - # FIXME - pass - - def _on_set_network_host(self, context, network_id): - """Called when this host becomes the host for 
a project""" - network_ref = db.network_get(context, network_id) - _driver.ensure_vlan_bridge(network_ref['vlan'], - network_ref['bridge'], - network_ref) - - - @classmethod - def setup_compute_network(cls, context, network_id): - """Sets up matching network for compute hosts""" - network_ref = db.network_get(context, network_id) - _driver.ensure_vlan_bridge(network_ref['vlan'], - network_ref['bridge']) + Network Service automatically passes commands on to the Network Manager + """ + pass diff --git a/nova/volume/service.py b/nova/volume/service.py index 423359007..f1b1d8695 100644 --- a/nova/volume/service.py +++ b/nova/volume/service.py @@ -21,15 +21,9 @@ Volume service allows rpc calls to the volume manager and reports state to the database. """ -from nova import flags from nova import service -FLAGS = flags.FLAGS - -flags.DEFINE_string('volume_manager', 'nova.volume.manager.AOEManager', - 'Manager for volumes') - class VolumeService(service.Service): """ Volume Service automatically passes commands on to the Volume Manager -- cgit From ae6905b9f1ef97206ee3c8722cec3b26fc064f38 Mon Sep 17 00:00:00 2001 From: Devin Carlen Date: Sat, 28 Aug 2010 20:32:48 -0700 Subject: Refactored orm to support atomic actions --- nova/db/sqlalchemy/api.py | 305 ++++++++++++++++++++++++------------------- nova/db/sqlalchemy/models.py | 173 +++++++++++------------- 2 files changed, 249 insertions(+), 229 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index cba85ccb7..5295d1e38 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -17,16 +17,17 @@ # under the License. 
import math - import IPy from nova import db from nova import exception from nova import flags from nova.db.sqlalchemy import models +from nova.db.sqlalchemy import session FLAGS = flags.FLAGS + ################### @@ -55,18 +56,21 @@ def daemon_update(context, daemon_id, values): def floating_ip_allocate_address(context, node_name, project_id): - session = models.NovaBase.get_session() - query = session.query(models.FloatingIp).filter_by(node_name=node_name) - query = query.filter_by(fixed_ip_id=None).with_lockmode("update") - floating_ip_ref = query.first() - # NOTE(vish): if with_lockmode isn't supported, as in sqlite, - # then this has concurrency issues - if not floating_ip_ref: - raise db.NoMoreAddresses() - floating_ip_ref['project_id'] = project_id - session.add(floating_ip_ref) - session.commit() - return floating_ip_ref['str_id'] + with session.managed(auto_commit=False) as session: + floating_ip_ref = session.query(models.FloatingIp) \ + .filter_by(node_name=node_name) \ + .filter_by(fixed_ip_id=None) \ + .filter_by(deleted=False) \ + .with_lockmode('update') \ + .first() + # NOTE(vish): if with_lockmode isn't supported, as in sqlite, + # then this has concurrency issues + if not floating_ip_ref: + raise db.NoMoreAddresses() + floating_ip_ref['project_id'] = project_id + session.add(floating_ip_ref) + session.commit() + return floating_ip_ref['str_id'] def floating_ip_create(context, address, host): @@ -91,11 +95,13 @@ def floating_ip_disassociate(context, address): floating_ip_ref.save() return fixed_ip_address + def floating_ip_deallocate(context, address): floating_ip_ref = db.floating_ip_get_by_address(context, address) floating_ip_ref['project_id'] = None floating_ip_ref.save() + def floating_ip_get_by_address(context, address): return models.FloatingIp.find_by_str(address) @@ -104,19 +110,23 @@ def floating_ip_get_by_address(context, address): def fixed_ip_allocate(context, network_id): - session = models.NovaBase.get_session() - query = 
session.query(models.FixedIp).filter_by(network_id=network_id) - query = query.filter_by(reserved=False).filter_by(allocated=False) - query = query.filter_by(leased=False).with_lockmode("update") - fixed_ip_ref = query.first() - # NOTE(vish): if with_lockmode isn't supported, as in sqlite, - # then this has concurrency issues - if not fixed_ip_ref: - raise db.NoMoreAddresses() - fixed_ip_ref['allocated'] = True - session.add(fixed_ip_ref) - session.commit() - return fixed_ip_ref + with session.open(autocommit=False) as session: + fixed_ip_ref = session.query(models.FixedIp) \ + .filter_by(network_id=network_id) \ + .filter_by(reserved=False) \ + .filter_by(allocated=False) \ + .filter_by(leased=False) \ + .filter_by(deleted=False) \ + .with_lockmode('update') \ + .first() + # NOTE(vish): if with_lockmode isn't supported, as in sqlite, + # then this has concurrency issues + if not fixed_ip_ref: + raise db.NoMoreAddresses() + fixed_ip_ref['allocated'] = True + session.add(fixed_ip_ref) + session.commit() + return fixed_ip_ref def fixed_ip_get_by_address(context, address): @@ -192,19 +202,19 @@ def instance_get_by_address(context, address): def instance_get_by_project(context, project_id): - session = models.NovaBase.get_session() - query = session.query(models.Instance) - results = query.filter_by(project_id=project_id).all() - session.commit() - return results + with session.managed() as session: + return session.query(models.Instance) \ + .filter_by(project_id=project_id) \ + .filter_by(deleted=False) \ + .all() def instance_get_by_reservation(context, reservation_id): - session = models.NovaBase.get_session() - query = session.query(models.Instance) - results = query.filter_by(reservation_id=reservation_id).all() - session.commit() - return results + with session.managed() as session: + return session.query(models.Instance) \ + .filter_by(reservation_id=reservation_id) \ + .filter_by(deleted=False) \ + .all() def instance_get_by_str(context, str_id): @@ -280,24 
+290,31 @@ def network_count(context): return models.Network.count() def network_count_allocated_ips(context, network_id): - session = models.NovaBase.get_session() - query = session.query(models.FixedIp).filter_by(network_id=network_id) - query = query.filter_by(allocated=True) - return query.count() + with session.managed() as session: + return session.query(models.FixedIp) \ + .filter_by(network_id=network_id) \ + .filter_by(allocated=True) \ + .filter_by(deleted=False) \ + .count() def network_count_available_ips(context, network_id): - session = models.NovaBase.get_session() - query = session.query(models.FixedIp).filter_by(network_id=network_id) - query = query.filter_by(allocated=False).filter_by(reserved=False) - return query.count() + with session.managed() as session: + return session.query(models.FixedIp) \ + .filter_by(network_id=network_id) \ + .filter_by(allocated=False) \ + .filter_by(reserved=False) \ + .filter_by(deleted=False) \ + .count() def network_count_reserved_ips(context, network_id): - session = models.NovaBase.get_session() - query = session.query(models.FixedIp).filter_by(network_id=network_id) - query = query.filter_by(reserved=True) - return query.count() + with session.managed() as session: + return session.query(models.FixedIp) \ + .filter_by(network_id=network_id) \ + .filter_by(reserved=True) \ + .filter_by(deleted=False) \ + .count() def network_create(context, values): @@ -309,37 +326,43 @@ def network_create(context, values): def network_create_fixed_ips(context, network_id, num_vpn_clients): - network_ref = network_get(context, network_id) - # NOTE(vish): should these be properties of the network as opposed - # to constants? 
- BOTTOM_RESERVED = 3 - TOP_RESERVED = 1 + num_vpn_clients - project_net = IPy.IP(network_ref['cidr']) - num_ips = len(project_net) - session = models.NovaBase.get_session() - for i in range(num_ips): - fixed_ip = models.FixedIp() - fixed_ip['ip_str'] = str(project_net[i]) - if i < BOTTOM_RESERVED or num_ips - i < TOP_RESERVED: - fixed_ip['reserved'] = True - fixed_ip['network'] = network_get(context, network_id) - session.add(fixed_ip) - session.commit() + with session.managed(auto_commit=False) as session: + network_ref = network_get(context, network_id) + # NOTE(vish): should these be properties of the network as opposed + # to constants? + BOTTOM_RESERVED = 3 + TOP_RESERVED = 1 + num_vpn_clients + project_net = IPy.IP(network_ref['cidr']) + num_ips = len(project_net) + + for i in range(num_ips): + fixed_ip = models.FixedIp() + fixed_ip['ip_str'] = str(project_net[i]) + if i < BOTTOM_RESERVED or num_ips - i < TOP_RESERVED: + fixed_ip['reserved'] = True + fixed_ip['network'] = network_get(context, network_id) + session.add(fixed_ip) + session.commit() def network_ensure_indexes(context, num_networks): - if models.NetworkIndex.count() == 0: - session = models.NovaBase.get_session() - for i in range(num_networks): - network_index = models.NetworkIndex() - network_index.index = i - session.add(network_index) - session.commit() + with session.managed(auto_commit=False) as session: + if models.NetworkIndex.count() == 0: + session = models.NovaBase.get_session() + for i in range(num_networks): + network_index = models.NetworkIndex() + network_index.index = i + session.add(network_index) + session.commit() def network_destroy(context, network_id): - network_ref = network_get(context, network_id) - network_ref.delete() + with session.managed(auto_commit=False) as session: + session.execute('update networks set deleted=1 where id=:id', + {'id': network_id}) + session.execute('update network_indexes set deleted=1 where network_id=:id', + {'id': network_id}) + 
session.commit() def network_get(context, network_id): @@ -347,18 +370,22 @@ def network_get(context, network_id): def network_get_associated_fixed_ips(context, network_id): - session = models.NovaBase.get_session() - query = session.query(models.FixedIp) - fixed_ips = query.filter(models.FixedIp.instance_id != None).all() - session.commit() - return fixed_ips + with session.managed() as session: + return session.query(models.FixedIp) \ + .filter(models.FixedIp.instance_id != None) \ + .filter_by(deleted=False) \ + .all() + def network_get_by_bridge(context, bridge): - session = models.NovaBase.get_session() - rv = session.query(models.Network).filter_by(bridge=bridge).first() - if not rv: - raise exception.NotFound('No network for bridge %s' % bridge) - return rv + with session.managed() as session: + rv = session.query(models.Network) \ + .filter_by(bridge=bridge) \ + .filter_by(deleted=False) \ + .first() + if not rv: + raise exception.NotFound('No network for bridge %s' % bridge) + return rv def network_get_vpn_ip(context, network_id): @@ -378,15 +405,18 @@ def network_get_host(context, network_id): def network_get_index(context, network_id): - session = models.NovaBase.get_session() - query = session.query(models.NetworkIndex).filter_by(network_id=None) - network_index = query.with_lockmode("update").first() - if not network_index: - raise db.NoMoreNetworks() - network_index['network'] = network_get(context, network_id) - session.add(network_index) - session.commit() - return network_index['index'] + with session.managed(auto_commit=False) as session: + network_index = session.query(models.NetworkIndex) \ + .filter_by(network_id=None) \ + .filter_by(deleted=False) \ + .with_lockmode('update') \ + .first() + if not network_index: + raise db.NoMoreNetworks() + network_index['network'] = network_get(context, network_id) + session.add(network_index) + session.commit() + return network_index['index'] def network_set_cidr(context, network_id, cidr): @@ -402,21 
+432,24 @@ def network_set_cidr(context, network_id, cidr): def network_set_host(context, network_id, host_id): - session = models.NovaBase.get_session() - query = session.query(models.Network).filter_by(id=network_id) - network = query.with_lockmode("update").first() - if not network: - raise exception.NotFound("Couldn't find network with %s" % - network_id) - # NOTE(vish): if with_lockmode isn't supported, as in sqlite, - # then this has concurrency issues - if network.node_name: + with session.managed(auto_commit=False) as session: + network = session.query(models.Network) \ + .filter_by(id=network_id) \ + .filter_by(deleted=False) \ + .with_lockmode('update') \ + .first() + if not network: + raise exception.NotFound("Couldn't find network with %s" % + network_id) + # NOTE(vish): if with_lockmode isn't supported, as in sqlite, + # then this has concurrency issues + if network.node_name: + session.commit() + return network['node_name'] + network['node_name'] = host_id + session.add(network) session.commit() return network['node_name'] - network['node_name'] = host_id - session.add(network) - session.commit() - return network['node_name'] def network_update(context, network_id, values): @@ -430,11 +463,14 @@ def network_update(context, network_id, values): def project_get_network(context, project_id): - session = models.create_session() - rv = session.query(models.Network).filter_by(project_id=project_id).first() - if not rv: - raise exception.NotFound('No network for project: %s' % project_id) - return rv + with session.managed() as session: + rv = session.query(models.Network) \ + .filter_by(project_id=project_id) \ + .filter_by(deleted=False) \ + .first() + if not rv: + raise exception.NotFound('No network for project: %s' % project_id) + return rv ################### @@ -447,20 +483,24 @@ def queue_get_for(context, topic, physical_node_id): def volume_allocate_shelf_and_blade(context, volume_id): - db.volume_ensure_blades(context, - FLAGS.num_shelves, - 
FLAGS.blades_per_shelf) - session = models.NovaBase.get_session() - query = session.query(models.ExportDevice).filter_by(volume=None) - export_device = query.with_lockmode("update").first() - # NOTE(vish): if with_lockmode isn't supported, as in sqlite, - # then this has concurrency issues - if not export_device: - raise db.NoMoreBlades() - export_device.volume_id = volume_id - session.add(export_device) - session.commit() - return (export_device.shelf_id, export_device.blade_id) + with session.managed(auto_commit=False) as session: + db.volume_ensure_blades(context, + session, + FLAGS.num_shelves, + FLAGS.blades_per_shelf) + export_device = session.query(models.ExportDevice) \ + .filter_by(volume=None) \ + .filter_by(deleted=False) \ + .with_lockmode('update') \ + .first() + # NOTE(vish): if with_lockmode isn't supported, as in sqlite, + # then this has concurrency issues + if not export_device: + raise db.NoMoreBlades() + export_device.volume_id = volume_id + session.add(export_device) + session.commit() + return (export_device.shelf_id, export_device.blade_id) def volume_attached(context, volume_id, instance_id, mountpoint): @@ -495,15 +535,16 @@ def volume_detached(context, volume_id): # NOTE(vish): should this code go up a layer? 
-def volume_ensure_blades(context, num_shelves, blades_per_shelf): - if models.ExportDevice.count() >= num_shelves * blades_per_shelf: +def volume_ensure_blades(context, session, num_shelves, blades_per_shelf): + count = models.ExportDevice.count(session=session) + if count >= num_shelves * blades_per_shelf: return for shelf_id in xrange(num_shelves): for blade_id in xrange(blades_per_shelf): export_device = models.ExportDevice() export_device.shelf_id = shelf_id export_device.blade_id = blade_id - export_device.save() + export_device.save(session=session) def volume_get(context, volume_id): @@ -515,11 +556,11 @@ def volume_get_all(context): def volume_get_by_project(context, project_id): - session = models.NovaBase.get_session() - query = session.query(models.Volume) - results = query.filter_by(project_id=project_id).all() - session.commit() - return results + with session.managed() as session: + return session.query(models.Volume) \ + .filter_by(project_id=project_id) \ + .filter_by(deleted=False) \ + .all() def volume_get_by_str(context, str_id): diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 36d6cf3ad..c3529f29c 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -25,6 +25,7 @@ from sqlalchemy import Table, Column, Integer, String from sqlalchemy import MetaData, ForeignKey, DateTime, Boolean, Text from sqlalchemy.ext.declarative import declarative_base +from nova.db.sqlalchemy import session from nova import auth from nova import exception from nova import flags @@ -38,72 +39,61 @@ class NovaBase(object): __prefix__ = 'none' created_at = Column(DateTime) updated_at = Column(DateTime) + deleted = Column(Boolean, default=False) - _session = None - _engine = None @classmethod - def create_engine(cls): - if NovaBase._engine is not None: - return NovaBase._engine - from sqlalchemy import create_engine - NovaBase._engine = create_engine(FLAGS.sql_connection, echo=False) - 
Base.metadata.create_all(NovaBase._engine) - return NovaBase._engine + def all(cls, session=None): + if session: + return session.query(cls) \ + .filter_by(deleted=False) \ + .all() + else: + with session.managed() as session: + return cls.all(session=session) @classmethod - def get_session(cls): - from sqlalchemy.orm import sessionmaker - if NovaBase._session == None: - NovaBase.create_engine() - NovaBase._session = sessionmaker(bind=NovaBase._engine)() - return NovaBase._session + def count(cls, session=None): + if session: + return session.query(cls) \ + .filter_by(deleted=False) \ + .count() + else: + with session.managed() as session: + return cls.count(session=session) @classmethod - def all(cls): - session = NovaBase.get_session() - result = session.query(cls).all() - session.commit() - return result + def find(cls, obj_id, session=None): + if session: + try: + return session.query(cls) \ + .filter_by(id=obj_id) \ + .filter_by(deleted=False) \ + .one() + except exc.NoResultFound: + raise exception.NotFound("No model for id %s" % obj_id) + else: + with session.managed() as session: + return cls.find(obj_id, session=session) @classmethod - def count(cls): - session = NovaBase.get_session() - result = session.query(cls).count() - session.commit() - return result - - @classmethod - def find(cls, obj_id): - session = NovaBase.get_session() - try: - result = session.query(cls).filter_by(id=obj_id).one() - session.commit() - return result - except exc.NoResultFound: - raise exception.NotFound("No model for id %s" % obj_id) - - @classmethod - def find_by_str(cls, str_id): + def find_by_str(cls, str_id, session=None): id = int(str_id.rpartition('-')[2]) - return cls.find(id) + return cls.find(id, session=session) @property def str_id(self): return "%s-%s" % (self.__prefix__, self.id) - def save(self): - session = NovaBase.get_session() - session.add(self) - session.commit() + def save(self, session=None): + if session: + session.add(self) + else: + with 
session.managed() as s: + self.save(session=s) - def delete(self): - session = NovaBase.get_session() - session.delete(self) - session.commit() - - def refresh(self): - session = NovaBase.get_session() - session.refresh(self) + def delete(self, session=None): + self.deleted = True + self.save(session=session) def __setitem__(self, key, value): setattr(self, key, value) @@ -118,7 +108,6 @@ class Image(Base, NovaBase): id = Column(Integer, primary_key=True) user_id = Column(String(255))#, ForeignKey('users.id'), nullable=False) project_id = Column(String(255))#, ForeignKey('projects.id'), nullable=False) - image_type = Column(String(255)) public = Column(Boolean, default=False) state = Column(String(255)) @@ -158,13 +147,13 @@ class Daemon(Base, NovaBase): report_count = Column(Integer, nullable=False, default=0) @classmethod - def find_by_args(cls, node_name, binary): - session = NovaBase.get_session() + def find_by_args(cls, session, node_name, binary): try: - query = session.query(cls).filter_by(node_name=node_name) - result = query.filter_by(binary=binary).one() - session.commit() - return result + return session.query(cls) \ + .filter_by(node_name=node_name) \ + .filter_by(binary=binary) \ + .filter_by(deleted=False) \ + .one() except exc.NoResultFound: raise exception.NotFound("No model for %s, %s" % (node_name, binary)) @@ -173,25 +162,10 @@ class Daemon(Base, NovaBase): class Instance(Base, NovaBase): __tablename__ = 'instances' __prefix__ = 'i' - id = Column(Integer, primary_key=True) + id = Column(Integer, primary_key=True) user_id = Column(String(255)) #, ForeignKey('users.id'), nullable=False) project_id = Column(String(255)) #, ForeignKey('projects.id')) - - @property - def user(self): - return auth.manager.AuthManager().get_user(self.user_id) - - @property - def project(self): - return auth.manager.AuthManager().get_project(self.project_id) - - # TODO(vish): make this opaque somehow - @property - def name(self): - return self.str_id - - image_id = 
Column(Integer, ForeignKey('images.id'), nullable=True) kernel_id = Column(Integer, ForeignKey('images.id'), nullable=True) ramdisk_id = Column(Integer, ForeignKey('images.id'), nullable=True) @@ -214,13 +188,26 @@ class Instance(Base, NovaBase): reservation_id = Column(String(255)) mac_address = Column(String(255)) - def set_state(self, state_code, state_description=None): + @property + def user(self): + return auth.manager.AuthManager().get_user(self.user_id) + + @property + def project(self): + return auth.manager.AuthManager().get_project(self.project_id) + + # TODO(vish): make this opaque somehow + @property + def name(self): + return self.str_id + + def set_state(self, session, state_code, state_description=None): from nova.compute import power_state self.state = state_code if not state_description: state_description = power_state.name(state_code) self.state_description = state_description - self.save() + self.save(session) # ramdisk = relationship(Ramdisk, backref=backref('instances', order_by=id)) # kernel = relationship(Kernel, backref=backref('instances', order_by=id)) @@ -280,12 +267,12 @@ class FixedIp(Base, NovaBase): return self.ip_str @classmethod - def find_by_str(cls, str_id): - session = NovaBase.get_session() + def find_by_str(cls, session, str_id): try: - result = session.query(cls).filter_by(ip_str=str_id).one() - session.commit() - return result + return session.query(cls) \ + .filter_by(ip_str=str_id) \ + .filter_by(deleted=False) \ + .one() except exc.NoResultFound: raise exception.NotFound("No model for ip str %s" % str_id) @@ -305,12 +292,12 @@ class FloatingIp(Base, NovaBase): return self.ip_str @classmethod - def find_by_str(cls, str_id): - session = NovaBase.get_session() + def find_by_str(cls, session, str_id): try: - result = session.query(cls).filter_by(ip_str=str_id).one() - session.commit() - return result + return session.query(cls) \ + .filter_by(ip_str=str_id) \ + .filter_by(deleted=False) \ + .one() except exc.NoResultFound: 
raise exception.NotFound("No model for ip str %s" % str_id) @@ -352,17 +339,9 @@ class NetworkIndex(Base, NovaBase): uselist=False)) - - -def create_session(engine=None): - return NovaBase.get_session() - if __name__ == '__main__': - engine = NovaBase.create_engine() - session = NovaBase.create_session(engine) - instance = Instance(image_id='as', ramdisk_id='AS', user_id='anthony') user = User(id='anthony') - session.add(instance) - session.commit() - + + with session.managed() as session: + session.add(instance) -- cgit From 5425a3252f6e91d842a891fbd93ee51f490bddce Mon Sep 17 00:00:00 2001 From: Devin Carlen Date: Sat, 28 Aug 2010 23:06:40 -0700 Subject: Making tests pass --- nova/db/sqlalchemy/__init__.py | 3 +++ nova/db/sqlalchemy/api.py | 58 ++++++++++++++++++++++-------------------- nova/db/sqlalchemy/models.py | 31 ++++++++++++++-------- nova/tests/network_unittest.py | 1 + 4 files changed, 55 insertions(+), 38 deletions(-) diff --git a/nova/db/sqlalchemy/__init__.py b/nova/db/sqlalchemy/__init__.py index e69de29bb..e94f99486 100644 --- a/nova/db/sqlalchemy/__init__.py +++ b/nova/db/sqlalchemy/__init__.py @@ -0,0 +1,3 @@ +from models import register_models + +register_models() \ No newline at end of file diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 5295d1e38..0b6316221 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -23,7 +23,7 @@ from nova import db from nova import exception from nova import flags from nova.db.sqlalchemy import models -from nova.db.sqlalchemy import session +from nova.db.sqlalchemy.session import managed_session FLAGS = flags.FLAGS @@ -56,7 +56,7 @@ def daemon_update(context, daemon_id, values): def floating_ip_allocate_address(context, node_name, project_id): - with session.managed(auto_commit=False) as session: + with managed_session(autocommit=False) as session: floating_ip_ref = session.query(models.FloatingIp) \ .filter_by(node_name=node_name) \ .filter_by(fixed_ip_id=None) \ @@ 
-202,7 +202,7 @@ def instance_get_by_address(context, address): def instance_get_by_project(context, project_id): - with session.managed() as session: + with managed_session() as session: return session.query(models.Instance) \ .filter_by(project_id=project_id) \ .filter_by(deleted=False) \ @@ -210,7 +210,7 @@ def instance_get_by_project(context, project_id): def instance_get_by_reservation(context, reservation_id): - with session.managed() as session: + with managed_session() as session: return session.query(models.Instance) \ .filter_by(reservation_id=reservation_id) \ .filter_by(deleted=False) \ @@ -290,7 +290,7 @@ def network_count(context): return models.Network.count() def network_count_allocated_ips(context, network_id): - with session.managed() as session: + with managed_session() as session: return session.query(models.FixedIp) \ .filter_by(network_id=network_id) \ .filter_by(allocated=True) \ @@ -299,7 +299,7 @@ def network_count_allocated_ips(context, network_id): def network_count_available_ips(context, network_id): - with session.managed() as session: + with managed_session() as session: return session.query(models.FixedIp) \ .filter_by(network_id=network_id) \ .filter_by(allocated=False) \ @@ -309,7 +309,7 @@ def network_count_available_ips(context, network_id): def network_count_reserved_ips(context, network_id): - with session.managed() as session: + with managed_session() as session: return session.query(models.FixedIp) \ .filter_by(network_id=network_id) \ .filter_by(reserved=True) \ @@ -326,8 +326,8 @@ def network_create(context, values): def network_create_fixed_ips(context, network_id, num_vpn_clients): - with session.managed(auto_commit=False) as session: - network_ref = network_get(context, network_id) + with managed_session(autocommit=False) as session: + network_ref = network_get(context, network_id, session=session) # NOTE(vish): should these be properties of the network as opposed # to constants? 
BOTTOM_RESERVED = 3 @@ -340,15 +340,16 @@ def network_create_fixed_ips(context, network_id, num_vpn_clients): fixed_ip['ip_str'] = str(project_net[i]) if i < BOTTOM_RESERVED or num_ips - i < TOP_RESERVED: fixed_ip['reserved'] = True - fixed_ip['network'] = network_get(context, network_id) + fixed_ip['network'] = network_get(context, + network_id, + session=session) session.add(fixed_ip) session.commit() def network_ensure_indexes(context, num_networks): - with session.managed(auto_commit=False) as session: + with managed_session(autocommit=False) as session: if models.NetworkIndex.count() == 0: - session = models.NovaBase.get_session() for i in range(num_networks): network_index = models.NetworkIndex() network_index.index = i @@ -357,7 +358,7 @@ def network_ensure_indexes(context, num_networks): def network_destroy(context, network_id): - with session.managed(auto_commit=False) as session: + with managed_session(autocommit=False) as session: session.execute('update networks set deleted=1 where id=:id', {'id': network_id}) session.execute('update network_indexes set deleted=1 where network_id=:id', @@ -365,12 +366,12 @@ def network_destroy(context, network_id): session.commit() -def network_get(context, network_id): - return models.Network.find(network_id) +def network_get(context, network_id, session=None): + return models.Network.find(network_id, session=session) def network_get_associated_fixed_ips(context, network_id): - with session.managed() as session: + with managed_session() as session: return session.query(models.FixedIp) \ .filter(models.FixedIp.instance_id != None) \ .filter_by(deleted=False) \ @@ -378,7 +379,7 @@ def network_get_associated_fixed_ips(context, network_id): def network_get_by_bridge(context, bridge): - with session.managed() as session: + with managed_session() as session: rv = session.query(models.Network) \ .filter_by(bridge=bridge) \ .filter_by(deleted=False) \ @@ -405,7 +406,7 @@ def network_get_host(context, network_id): def 
network_get_index(context, network_id): - with session.managed(auto_commit=False) as session: + with managed_session(autocommit=False) as session: network_index = session.query(models.NetworkIndex) \ .filter_by(network_id=None) \ .filter_by(deleted=False) \ @@ -413,7 +414,7 @@ def network_get_index(context, network_id): .first() if not network_index: raise db.NoMoreNetworks() - network_index['network'] = network_get(context, network_id) + network_index['network'] = network_get(context, network_id, session=session) session.add(network_index) session.commit() return network_index['index'] @@ -429,10 +430,11 @@ def network_set_cidr(context, network_id, cidr): network_ref['broadcast'] = str(project_net.broadcast()) network_ref['vpn_private_ip_str'] = str(project_net[2]) network_ref['dhcp_start'] = str(project_net[3]) + network_ref.save() def network_set_host(context, network_id, host_id): - with session.managed(auto_commit=False) as session: + with managed_session(autocommit=False) as session: network = session.query(models.Network) \ .filter_by(id=network_id) \ .filter_by(deleted=False) \ @@ -463,7 +465,7 @@ def network_update(context, network_id, values): def project_get_network(context, project_id): - with session.managed() as session: + with managed_session() as session: rv = session.query(models.Network) \ .filter_by(project_id=project_id) \ .filter_by(deleted=False) \ @@ -483,11 +485,11 @@ def queue_get_for(context, topic, physical_node_id): def volume_allocate_shelf_and_blade(context, volume_id): - with session.managed(auto_commit=False) as session: - db.volume_ensure_blades(context, - session, - FLAGS.num_shelves, - FLAGS.blades_per_shelf) + with managed_session(autocommit=False) as session: + volume_ensure_blades(context, + FLAGS.num_shelves, + FLAGS.blades_per_shelf, + session=session) export_device = session.query(models.ExportDevice) \ .filter_by(volume=None) \ .filter_by(deleted=False) \ @@ -535,7 +537,7 @@ def volume_detached(context, volume_id): # 
NOTE(vish): should this code go up a layer? -def volume_ensure_blades(context, session, num_shelves, blades_per_shelf): +def volume_ensure_blades(context, num_shelves, blades_per_shelf, session=None): count = models.ExportDevice.count(session=session) if count >= num_shelves * blades_per_shelf: return @@ -556,7 +558,7 @@ def volume_get_all(context): def volume_get_by_project(context, project_id): - with session.managed() as session: + with managed_session() as session: return session.query(models.Volume) \ .filter_by(project_id=project_id) \ .filter_by(deleted=False) \ diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index c3529f29c..040fa50cc 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -25,7 +25,7 @@ from sqlalchemy import Table, Column, Integer, String from sqlalchemy import MetaData, ForeignKey, DateTime, Boolean, Text from sqlalchemy.ext.declarative import declarative_base -from nova.db.sqlalchemy import session +from nova.db.sqlalchemy.session import managed_session from nova import auth from nova import exception from nova import flags @@ -36,6 +36,7 @@ Base = declarative_base() class NovaBase(object): __table_args__ = {'mysql_engine':'InnoDB'} + __table_initialized__ = False __prefix__ = 'none' created_at = Column(DateTime) updated_at = Column(DateTime) @@ -48,7 +49,7 @@ class NovaBase(object): .filter_by(deleted=False) \ .all() else: - with session.managed() as session: + with managed_session() as session: return cls.all(session=session) @classmethod @@ -58,8 +59,8 @@ class NovaBase(object): .filter_by(deleted=False) \ .count() else: - with session.managed() as session: - return cls.count(session=session) + with managed_session() as s: + return cls.count(session=s) @classmethod def find(cls, obj_id, session=None): @@ -72,7 +73,7 @@ class NovaBase(object): except exc.NoResultFound: raise exception.NotFound("No model for id %s" % obj_id) else: - with session.managed() as session: + with 
managed_session() as session: return cls.find(obj_id, session=session) @classmethod @@ -87,8 +88,9 @@ class NovaBase(object): def save(self, session=None): if session: session.add(self) + session.flush() else: - with session.managed() as s: + with managed_session() as s: self.save(session=s) def delete(self, session=None): @@ -253,7 +255,7 @@ class ExportDevice(Base, NovaBase): class FixedIp(Base, NovaBase): __tablename__ = 'fixed_ips' id = Column(Integer, primary_key=True) - ip_str = Column(String(255), unique=True) + ip_str = Column(String(255)) network_id = Column(Integer, ForeignKey('networks.id'), nullable=False) instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) instance = relationship(Instance, backref=backref('fixed_ip', @@ -280,7 +282,7 @@ class FixedIp(Base, NovaBase): class FloatingIp(Base, NovaBase): __tablename__ = 'floating_ips' id = Column(Integer, primary_key=True) - ip_str = Column(String(255), unique=True) + ip_str = Column(String(255)) fixed_ip_id = Column(Integer, ForeignKey('fixed_ips.id'), nullable=True) fixed_ip = relationship(FixedIp, backref=backref('floating_ips')) @@ -336,12 +338,21 @@ class NetworkIndex(Base, NovaBase): index = Column(Integer) network_id = Column(Integer, ForeignKey('networks.id'), nullable=True) network = relationship(Network, backref=backref('network_index', - uselist=False)) + uselist=False)) + +def register_models(): + from sqlalchemy import create_engine + + models = (Image, PhysicalNode, Daemon, Instance, Volume, ExportDevice, + FixedIp, FloatingIp, Network, NetworkIndex) + engine = create_engine(FLAGS.sql_connection, echo=False) + for model in models: + model.metadata.create_all(engine) if __name__ == '__main__': instance = Instance(image_id='as', ramdisk_id='AS', user_id='anthony') user = User(id='anthony') - with session.managed() as session: + with managed_session() as session: session.add(instance) diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index 
d487c2e45..e0de04be7 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -105,6 +105,7 @@ class NetworkTestCase(test.TrialTestCase): db.fixed_ip_deallocate(None, fix_addr) def test_allocate_deallocate_fixed_ip(self): + import pdb; pdb.set_trace() """Makes sure that we can allocate and deallocate a fixed ip""" address = self._create_address(0) self.assertTrue(is_allocated_in_project(address, self.projects[0].id)) -- cgit From 6012ea583426bf76979448e4262a24a6b8fb2f5d Mon Sep 17 00:00:00 2001 From: Devin Carlen Date: Sat, 28 Aug 2010 23:20:06 -0700 Subject: Making tests pass --- nova/db/sqlalchemy/models.py | 34 ++++++++++++++++++---------------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 040fa50cc..4fbe2cc5e 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -164,10 +164,25 @@ class Daemon(Base, NovaBase): class Instance(Base, NovaBase): __tablename__ = 'instances' __prefix__ = 'i' - id = Column(Integer, primary_key=True) + user_id = Column(String(255)) #, ForeignKey('users.id'), nullable=False) project_id = Column(String(255)) #, ForeignKey('projects.id')) + + @property + def user(self): + return auth.manager.AuthManager().get_user(self.user_id) + + @property + def project(self): + return auth.manager.AuthManager().get_project(self.project_id) + + # TODO(vish): make this opaque somehow + @property + def name(self): + return self.str_id + + image_id = Column(Integer, ForeignKey('images.id'), nullable=True) kernel_id = Column(Integer, ForeignKey('images.id'), nullable=True) ramdisk_id = Column(Integer, ForeignKey('images.id'), nullable=True) @@ -190,26 +205,13 @@ class Instance(Base, NovaBase): reservation_id = Column(String(255)) mac_address = Column(String(255)) - @property - def user(self): - return auth.manager.AuthManager().get_user(self.user_id) - - @property - def project(self): - return 
auth.manager.AuthManager().get_project(self.project_id) - - # TODO(vish): make this opaque somehow - @property - def name(self): - return self.str_id - - def set_state(self, session, state_code, state_description=None): + def set_state(self, state_code, state_description=None): from nova.compute import power_state self.state = state_code if not state_description: state_description = power_state.name(state_code) self.state_description = state_description - self.save(session) + self.save() # ramdisk = relationship(Ramdisk, backref=backref('instances', order_by=id)) # kernel = relationship(Kernel, backref=backref('instances', order_by=id)) -- cgit From fab0bbaca8d6cf34f131c4426463bf5c76a0477f Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sun, 29 Aug 2010 18:53:47 -0700 Subject: tests pass --- bin/nova-dhcpbridge | 21 ++--- nova/auth/manager.py | 4 +- nova/compute/service.py | 174 +---------------------------------------- nova/db/api.py | 30 +++---- nova/db/sqlalchemy/api.py | 7 ++ nova/endpoint/cloud.py | 2 +- nova/manager.py | 2 +- nova/service.py | 10 +-- nova/tests/cloud_unittest.py | 4 +- nova/tests/compute_unittest.py | 3 +- nova/tests/fake_flags.py | 10 ++- nova/tests/network_unittest.py | 14 ++-- nova/tests/service_unittest.py | 19 +++-- nova/tests/volume_unittest.py | 39 ++++----- nova/volume/driver.py | 5 -- nova/volume/manager.py | 4 +- 16 files changed, 103 insertions(+), 245 deletions(-) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index 6747a3a0e..52ec2d497 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -34,23 +34,23 @@ from nova import db from nova import flags from nova import rpc from nova import utils -from nova.network import linux_net -from nova.network import service from nova import datastore # for redis_db flag from nova.auth import manager # for auth flags +from nova.network import manager # for network flags FLAGS = flags.FLAGS - def add_lease(_mac, ip_address, _hostname, _interface): """Set the IP that was 
assigned by the DHCP server.""" if FLAGS.fake_rabbit: logging.debug("leasing ip") - service.VlanNetworkService().lease_fixed_ip(ip_address) + network_manager = utils.import_object(FLAGS.network_manager) + network_manager.lease_fixed_ip(None, ip_address) else: rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name), {"method": "lease_fixed_ip", - "args": {"address": ip_address}}) + "args": {"context": None, + "address": ip_address}}) def old_lease(_mac, _ip_address, _hostname, _interface): @@ -62,20 +62,24 @@ def del_lease(_mac, ip_address, _hostname, _interface): """Called when a lease expires.""" if FLAGS.fake_rabbit: logging.debug("releasing ip") - service.VlanNetworkService().release_fixed_ip(ip_address) + network_manager = utils.import_object(FLAGS.network_manager) + network_manager.release_fixed_ip(None, ip_address) else: rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name), {"method": "release_fixed_ip", - "args": {"address": ip_address}}) + "args": {"context": None, + "address": ip_address}}) def init_leases(interface): """Get the list of hosts for an interface.""" network_ref = db.network_get_by_bridge(None, interface) - return linux_net.get_dhcp_hosts(None, network_ref['id']) + network_manager = utils.import_object(FLAGS.network_manager) + return network_manager.driver.get_dhcp_hosts(None, network_ref['id']) def main(): + global network_manager """Parse environment and arguments and call the approproate action.""" flagfile = os.environ.get('FLAGFILE', FLAGS.dhcpbridge_flagfile) utils.default_flagfile(flagfile) @@ -93,7 +97,6 @@ def main(): '..', '_trial_temp', 'nova.sqlite')) - print path FLAGS.sql_connection = 'sqlite:///%s' % path #FLAGS.sql_connection = 'mysql://root@localhost/test' action = argv[1] diff --git a/nova/auth/manager.py b/nova/auth/manager.py index a072a143b..62ec3f4e4 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -252,6 +252,7 @@ class AuthManager(object): __init__ is run every time AuthManager() is called, so we 
only reset the driver if it is not set or a new driver is specified. """ + self.network_manager = utils.import_object(FLAGS.network_manager) if driver or not getattr(self, 'driver', None): self.driver = utils.import_class(driver or FLAGS.auth_driver) @@ -525,7 +526,8 @@ class AuthManager(object): if project_dict: project = Project(**project_dict) try: - db.network_allocate(context, project.id) + self.network_manager.allocate_network(context, + project.id) except: drv.delete_project(project.id) raise diff --git a/nova/compute/service.py b/nova/compute/service.py index 877246ef6..9bf498d03 100644 --- a/nova/compute/service.py +++ b/nova/compute/service.py @@ -17,182 +17,16 @@ # under the License. """ -Compute Service: - - Runs on each compute host, managing the - hypervisor using the virt module. - +Compute service allows rpc calls to the compute manager and reports state +to the database. """ -import base64 -import logging -import os - -from twisted.internet import defer - -from nova import db -from nova import exception -from nova import flags -from nova import process from nova import service -from nova import utils -from nova.compute import power_state -from nova.network import service as network_service -from nova.virt import connection as virt_connection - - -FLAGS = flags.FLAGS -flags.DEFINE_string('instances_path', utils.abspath('../instances'), - 'where instances are stored on disk') class ComputeService(service.Service): """ - Manages the running instances. 
+ Compute Service automatically passes commands on to the Compute Manager """ - def __init__(self): - """Load configuration options and connect to the hypervisor.""" - super(ComputeService, self).__init__() - self._instances = {} - self._conn = virt_connection.get_connection() - # TODO(joshua): This needs to ensure system state, specifically - # modprobe aoe - - def noop(self): - """Simple test of an AMQP message call.""" - return defer.succeed('PONG') - - def update_state(self, instance_id, context): - # FIXME(ja): include other fields from state? - instance_ref = db.instance_get(context, instance_id) - state = self._conn.get_info(instance_ref.name)['state'] - db.instance_state(context, instance_id, state) - - @defer.inlineCallbacks - @exception.wrap_exception - def run_instance(self, instance_id, context=None, **_kwargs): - """Launch a new instance with specified options.""" - instance_ref = db.instance_get(context, instance_id) - if instance_ref['str_id'] in self._conn.list_instances(): - raise exception.Error("Instance has already been created") - logging.debug("Starting instance %s..." 
% (instance_id)) - - network_service.setup_compute_network(context, instance_ref['project_id']) - db.instance_update(context, instance_id, {'node_name': FLAGS.node_name}) - - # TODO(vish) check to make sure the availability zone matches - db.instance_state(context, instance_id, power_state.NOSTATE, 'spawning') - - try: - yield self._conn.spawn(instance_ref) - except: - logging.exception("Failed to spawn instance %s" % - instance_ref['str_id']) - db.instance_state(context, instance_id, power_state.SHUTDOWN) - - self.update_state(instance_id, context) - - @defer.inlineCallbacks - @exception.wrap_exception - def terminate_instance(self, instance_id, context=None): - """Terminate an instance on this machine.""" - logging.debug("Got told to terminate instance %s" % instance_id) - instance_ref = db.instance_get(context, instance_id) - - if instance_ref['state'] == power_state.SHUTOFF: - # self.datamodel.destroy() FIXME: RE-ADD? - raise exception.Error('trying to destroy already destroyed' - ' instance: %s' % instance_id) - - db.instance_state( - context, instance_id, power_state.NOSTATE, 'shutting_down') - yield self._conn.destroy(instance_ref) - - # FIXME(ja): should we keep it in a terminated state for a bit? - db.instance_destroy(context, instance_id) - - @defer.inlineCallbacks - @exception.wrap_exception - def reboot_instance(self, instance_id, context=None): - """Reboot an instance on this server. - - KVM doesn't support reboot, so we terminate and restart. - - """ - self.update_state(instance_id, context) - instance_ref = db.instance_get(context, instance_id) - - # FIXME(ja): this is only checking the model state - not state on disk? 
- if instance_ref['state'] != power_state.RUNNING: - raise exception.Error( - 'trying to reboot a non-running' - 'instance: %s (state: %s excepted: %s)' % - (instance_ref['str_id'], - instance_ref['state'], - power_state.RUNNING)) - - logging.debug('rebooting instance %s' % instance_ref['str_id']) - db.instance_state( - context, instance_id, power_state.NOSTATE, 'rebooting') - yield self._conn.reboot(instance_ref) - self.update_state(instance_id, context) - - @exception.wrap_exception - def get_console_output(self, instance_id, context=None): - """Send the console output for an instance.""" - # FIXME: Abstract this for Xen - - logging.debug("Getting console output for %s" % (instance_id)) - instance_ref = db.instance_get(context, instance_id) - - if FLAGS.connection_type == 'libvirt': - fname = os.path.abspath(os.path.join(FLAGS.instances_path, - instance_ref['str_id'], - 'console.log')) - with open(fname, 'r') as f: - output = f.read() - else: - output = 'FAKE CONSOLE OUTPUT' - - # TODO(termie): this stuff belongs in the API layer, no need to - # munge the data we send to ourselves - output = {"InstanceId" : instance_id, - "Timestamp" : "2", - "output" : base64.b64encode(output)} - return output - - @defer.inlineCallbacks - @exception.wrap_exception - def attach_volume(self, instance_id=None, volume_id=None, mountpoint=None, - context=None): - """Attach a volume to an instance.""" - # TODO(termie): check that instance_id exists - volume_ref = db.volume_get(context, volume_id) - yield self._init_aoe() - yield process.simple_execute( - "sudo virsh attach-disk %s /dev/etherd/%s %s" % - (instance_id, - volume_ref['aoe_device'], - mountpoint.rpartition('/dev/')[2])) - db.volume_attached(context, volume_id) - defer.returnValue(True) - - @defer.inlineCallbacks - @exception.wrap_exception - def detach_volume(self, instance_id, volume_id, context=None): - """Detach a volume from an instance.""" - # despite the documentation, virsh detach-disk just wants the device - # name 
without the leading /dev/ - # TODO(termie): check that instance_id exists - volume_ref = db.volume_get(context, volume_id) - target = volume_ref['mountpoint'].rpartition('/dev/')[2] - yield process.simple_execute( - "sudo virsh detach-disk %s %s " % (instance_id, target)) - db.volume_detached(context, volume_id) - defer.returnValue(True) + pass - @defer.inlineCallbacks - def _init_aoe(self): - # TODO(vish): these shell calls should move into a different layer. - yield process.simple_execute("sudo aoe-discover") - yield process.simple_execute("sudo aoe-stat") diff --git a/nova/db/api.py b/nova/db/api.py index 699118b16..80583de99 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -123,6 +123,16 @@ def fixed_ip_allocate(context, network_id): return _impl.fixed_ip_allocate(context, network_id) +def fixed_ip_create(context, network_id, address): + """Create a fixed ip from the values dictionary.""" + return _impl.fixed_ip_create(context, network_id, address) + + +def fixed_ip_deallocate(context, address): + """Deallocate a fixed ip by address.""" + return _impl.fixed_ip_deallocate(context, address) + + def fixed_ip_get_by_address(context, address): """Get a fixed ip by address.""" return _impl.fixed_ip_get_by_address(context, address) @@ -133,21 +143,6 @@ def fixed_ip_get_network(context, address): return _impl.fixed_ip_get_network(context, address) -def fixed_ip_lease(context, address): - """Lease a fixed ip by address.""" - return _impl.fixed_ip_lease(context, address) - - -def fixed_ip_release(context, address): - """Un-Lease a fixed ip by address.""" - return _impl.fixed_ip_release(context, address) - - -def fixed_ip_deallocate(context, address): - """Deallocate a fixed ip by address.""" - return _impl.fixed_ip_deallocate(context, address) - - def fixed_ip_instance_associate(context, address, instance_id): """Associate a fixed ip to an instance by address.""" return _impl.fixed_ip_instance_associate(context, address, instance_id) @@ -158,6 +153,11 @@ def 
fixed_ip_instance_disassociate(context, address): return _impl.fixed_ip_instance_disassociate(context, address) +def fixed_ip_update(context, address, values): + """Create a fixed ip from the values dictionary.""" + return _impl.fixed_ip_update(context, address, values) + + #################### diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index b95346861..12455530d 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -149,6 +149,13 @@ def fixed_ip_instance_disassociate(context, address): fixed_ip_ref.save() +def fixed_ip_update(context, address, values): + fixed_ip_ref = fixed_ip_get_by_address(context, address) + for (key, value) in values.iteritems(): + fixed_ip_ref[key] = value + fixed_ip_ref.save() + + ################### diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index ceff0f827..8ba10a5bb 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -60,7 +60,7 @@ class CloudController(object): sent to the other nodes. 
""" def __init__(self): - self.network_manager = utils.load_object(FLAGS.network_manager) + self.network_manager = utils.import_object(FLAGS.network_manager) self.setup() def __str__(self): diff --git a/nova/manager.py b/nova/manager.py index 4f212a41b..20b58bd13 100644 --- a/nova/manager.py +++ b/nova/manager.py @@ -25,7 +25,7 @@ from nova import flags FLAGS = flags.FLAGS -flags.DEFINE_string('db_driver', 'nova.db.api' +flags.DEFINE_string('db_driver', 'nova.db.api', 'driver to use for volume creation') diff --git a/nova/service.py b/nova/service.py index 59da6f04e..b20e24348 100644 --- a/nova/service.py +++ b/nova/service.py @@ -46,9 +46,10 @@ class Service(object, service.Service): def __init__(self, manager, *args, **kwargs): self.manager = manager - super(self, Service).__init__(*args, **kwargs) + super(Service, self).__init__(*args, **kwargs) def __getattr__(self, key): + print 'getattr' try: super(Service, self).__getattr__(key) except AttributeError: @@ -65,7 +66,7 @@ class Service(object, service.Service): Args: report_interval, defaults to flag bin_name, defaults to basename of executable - topic, defaults to basename - "nova-" part + topic, defaults to bin_name - "nova-" part manager, defaults to FLAGS._manager """ if not report_interval: @@ -77,17 +78,15 @@ class Service(object, service.Service): if not topic: topic = bin_name.rpartition("nova-")[2] if not manager: - manager = FLAGS.get('%s_manager' % topic) + manager = FLAGS.get('%s_manager' % topic, None) manager_ref = utils.import_object(manager) logging.warn("Starting %s node" % topic) service_ref = cls(manager_ref) - conn = rpc.Connection.instance() consumer_all = rpc.AdapterConsumer( connection=conn, topic='%s' % topic, proxy=service_ref) - consumer_node = rpc.AdapterConsumer( connection=conn, topic='%s.%s' % (topic, FLAGS.node_name), @@ -110,6 +109,7 @@ class Service(object, service.Service): @defer.inlineCallbacks def report_state(self, node_name, binary, context=None): """Update the state of 
this daemon in the datastore.""" + print 'report_state' try: try: daemon_ref = db.daemon_get_by_args(context, node_name, binary) diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py index 3501771cc..df2246aae 100644 --- a/nova/tests/cloud_unittest.py +++ b/nova/tests/cloud_unittest.py @@ -27,8 +27,8 @@ from xml.etree import ElementTree from nova import flags from nova import rpc from nova import test +from nova import utils from nova.auth import manager -from nova.compute import service from nova.endpoint import api from nova.endpoint import cloud @@ -53,7 +53,7 @@ class CloudTestCase(test.BaseTestCase): self.injected.append(self.cloud_consumer.attach_to_tornado(self.ioloop)) # set up a service - self.compute = service.ComputeService() + self.compute = utils.import_class(FLAGS.compute_manager) self.compute_consumer = rpc.AdapterConsumer(connection=self.conn, topic=FLAGS.compute_topic, proxy=self.compute) diff --git a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py index e85973837..28e51f387 100644 --- a/nova/tests/compute_unittest.py +++ b/nova/tests/compute_unittest.py @@ -27,7 +27,6 @@ from nova import flags from nova import test from nova import utils from nova.auth import manager -from nova.compute import service FLAGS = flags.FLAGS @@ -60,7 +59,7 @@ class ComputeConnectionTestCase(test.TrialTestCase): super(ComputeConnectionTestCase, self).setUp() self.flags(connection_type='fake', fake_storage=True) - self.compute = service.ComputeService() + self.compute = utils.import_object(FLAGS.compute_manager) self.manager = manager.AuthManager() user = self.manager.create_user('fake', 'fake', 'fake') project = self.manager.create_project('fake', 'fake', 'fake') diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py index 42a13e4e3..3114912ba 100644 --- a/nova/tests/fake_flags.py +++ b/nova/tests/fake_flags.py @@ -20,13 +20,19 @@ from nova import flags FLAGS = flags.FLAGS -FLAGS.connection_type = 'fake' 
+flags.DECLARE('fake_storage', 'nova.volume.manager') FLAGS.fake_storage = True +FLAGS.connection_type = 'fake' FLAGS.fake_rabbit = True -FLAGS.fake_network = True FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver' +flags.DECLARE('network_size', 'nova.network.manager') +flags.DECLARE('num_networks', 'nova.network.manager') +flags.DECLARE('fake_network', 'nova.network.manager') FLAGS.network_size = 16 FLAGS.num_networks = 5 +FLAGS.fake_network = True +flags.DECLARE('num_shelves', 'nova.volume.manager') +flags.DECLARE('blades_per_shelf', 'nova.volume.manager') FLAGS.num_shelves = 2 FLAGS.blades_per_shelf = 4 FLAGS.verbose = True diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index d487c2e45..e3fe01fa2 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -49,14 +49,15 @@ class NetworkTestCase(test.TrialTestCase): self.manager = manager.AuthManager() self.user = self.manager.create_user('netuser', 'netuser', 'netuser') self.projects = [] - self.service = service.VlanNetworkService() + self.network = utils.import_object(FLAGS.network_manager) + self.context = None for i in range(5): name = 'project%s' % i self.projects.append(self.manager.create_project(name, 'netuser', name)) # create the necessary network data for the project - self.service.set_network_host(self.projects[i].id) + self.network.set_network_host(self.context, self.projects[i].id) instance_id = db.instance_create(None, {'mac_address': utils.generate_mac()}) self.instance_id = instance_id @@ -92,16 +93,17 @@ class NetworkTestCase(test.TrialTestCase): db.floating_ip_get_by_address(None, ip_str) except exception.NotFound: db.floating_ip_create(None, ip_str, FLAGS.node_name) - float_addr = self.service.allocate_floating_ip(self.projects[0].id) + float_addr = self.network.allocate_floating_ip(self.context, + self.projects[0].id) fix_addr = self._create_address(0) self.assertEqual(float_addr, str(pubnet[0])) - 
self.service.associate_floating_ip(float_addr, fix_addr) + self.network.associate_floating_ip(self.context, float_addr, fix_addr) address = db.instance_get_floating_address(None, self.instance_id) self.assertEqual(address, float_addr) - self.service.disassociate_floating_ip(float_addr) + self.network.disassociate_floating_ip(self.context, float_addr) address = db.instance_get_floating_address(None, self.instance_id) self.assertEqual(address, None) - self.service.deallocate_floating_ip(float_addr) + self.network.deallocate_floating_ip(self.context, float_addr) db.fixed_ip_deallocate(None, fix_addr) def test_allocate_deallocate_fixed_ip(self): diff --git a/nova/tests/service_unittest.py b/nova/tests/service_unittest.py index 0b9d60024..e13fe62d1 100644 --- a/nova/tests/service_unittest.py +++ b/nova/tests/service_unittest.py @@ -30,10 +30,16 @@ from nova import flags from nova import rpc from nova import test from nova import service - +from nova import manager FLAGS = flags.FLAGS +flags.DEFINE_string("fake_manager", "nova.tests.service_unittest.FakeManager", + "Manager for testing") + +class FakeManager(manager.Manager): + """Fake manager for tests""" + pass class ServiceTestCase(test.BaseTestCase): """Test cases for rpc""" @@ -46,12 +52,12 @@ class ServiceTestCase(test.BaseTestCase): self.mox.StubOutWithMock( service.task, 'LoopingCall', use_mock_anything=True) rpc.AdapterConsumer(connection=mox.IgnoreArg(), - topic='run_tests.py', + topic='fake', proxy=mox.IsA(service.Service) ).AndReturn(rpc.AdapterConsumer) rpc.AdapterConsumer(connection=mox.IgnoreArg(), - topic='run_tests.py.%s' % FLAGS.node_name, + topic='fake.%s' % FLAGS.node_name, proxy=mox.IsA(service.Service) ).AndReturn(rpc.AdapterConsumer) @@ -67,7 +73,7 @@ class ServiceTestCase(test.BaseTestCase): rpc.AdapterConsumer.attach_to_twisted() self.mox.ReplayAll() - app = service.Service.create() + app = service.Service.create(bin_name='nova-fake') self.assert_(app) # We're testing sort of weird behavior in 
how report_state decides @@ -82,7 +88,7 @@ class ServiceTestCase(test.BaseTestCase): 'binary': binary, 'report_count': 0, 'id': 1} - + service.db.__getattr__('report_state') service.db.daemon_get_by_args(None, node_name, binary).AndReturn(daemon_ref) @@ -105,6 +111,7 @@ class ServiceTestCase(test.BaseTestCase): 'report_count': 0, 'id': 1} + service.db.__getattr__('report_state') service.db.daemon_get_by_args(None, node_name, binary).AndRaise(exception.NotFound()) @@ -126,6 +133,7 @@ class ServiceTestCase(test.BaseTestCase): 'report_count': 0, 'id': 1} + service.db.__getattr__('report_state') service.db.daemon_get_by_args(None, node_name, binary).AndRaise(Exception()) @@ -145,6 +153,7 @@ class ServiceTestCase(test.BaseTestCase): 'report_count': 0, 'id': 1} + service.db.__getattr__('report_state') service.db.daemon_get_by_args(None, node_name, binary).AndReturn(daemon_ref) diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py index a03e0e6e3..4504276e2 100644 --- a/nova/tests/volume_unittest.py +++ b/nova/tests/volume_unittest.py @@ -24,8 +24,7 @@ from nova import exception from nova import db from nova import flags from nova import test -from nova.compute import service as compute_service -from nova.volume import service as volume_service +from nova import utils FLAGS = flags.FLAGS @@ -35,10 +34,11 @@ class VolumeTestCase(test.TrialTestCase): def setUp(self): logging.getLogger().setLevel(logging.DEBUG) super(VolumeTestCase, self).setUp() - self.compute = compute_service.ComputeService() + self.compute = utils.import_object(FLAGS.compute_manager) self.flags(connection_type='fake', fake_storage=True) - self.volume = volume_service.VolumeService() + self.volume = utils.import_object(FLAGS.volume_manager) + self.context = None def _create_volume(self, size='0'): @@ -49,15 +49,15 @@ class VolumeTestCase(test.TrialTestCase): vol['availability_zone'] = FLAGS.storage_availability_zone vol['status'] = "creating" vol['attach_status'] = "detached" - return 
db.volume_create(None, vol) + return db.volume_create(None, vol)['id'] @defer.inlineCallbacks def test_run_create_volume(self): volume_id = self._create_volume() - yield self.volume.create_volume(volume_id) + yield self.volume.create_volume(self.context, volume_id) self.assertEqual(volume_id, db.volume_get(None, volume_id).id) - yield self.volume.delete_volume(volume_id) + yield self.volume.delete_volume(self.context, volume_id) self.assertRaises(exception.NotFound, db.volume_get, None, @@ -70,7 +70,7 @@ class VolumeTestCase(test.TrialTestCase): defer.returnValue(True) try: volume_id = self._create_volume('1001') - yield self.volume.create_volume(volume_id) + yield self.volume.create_volume(self.context, volume_id) self.fail("Should have thrown TypeError") except TypeError: pass @@ -81,14 +81,15 @@ class VolumeTestCase(test.TrialTestCase): total_slots = FLAGS.num_shelves * FLAGS.blades_per_shelf for i in xrange(total_slots): volume_id = self._create_volume() - yield self.volume.create_volume(volume_id) + yield self.volume.create_volume(self.context, volume_id) vols.append(volume_id) volume_id = self._create_volume() - self.assertFailure(self.volume.create_volume(volume_id), + self.assertFailure(self.volume.create_volume(self.context, + volume_id), db.NoMoreBlades) db.volume_destroy(None, volume_id) - for id in vols: - yield self.volume.delete_volume(id) + for volume_id in vols: + yield self.volume.delete_volume(self.context, volume_id) @defer.inlineCallbacks def test_run_attach_detach_volume(self): @@ -96,7 +97,7 @@ class VolumeTestCase(test.TrialTestCase): instance_id = "storage-test" mountpoint = "/dev/sdf" volume_id = self._create_volume() - yield self.volume.create_volume(volume_id) + yield self.volume.create_volume(self.context, volume_id) if FLAGS.fake_tests: db.volume_attached(None, volume_id, instance_id, mountpoint) else: @@ -109,15 +110,16 @@ class VolumeTestCase(test.TrialTestCase): self.assertEqual(vol['instance_id'], instance_id) 
self.assertEqual(vol['mountpoint'], mountpoint) - self.assertFailure(self.volume.delete_volume(volume_id), exception.Error) + self.assertFailure(self.volume.delete_volume(self.context, volume_id), + exception.Error) if FLAGS.fake_tests: db.volume_detached(None, volume_id) else: - rv = yield self.volume.detach_volume(instance_id, + rv = yield self.compute.detach_volume(instance_id, volume_id) self.assertEqual(vol['status'], "available") - rv = self.volume.delete_volume(volume_id) + rv = self.volume.delete_volume(self.context, volume_id) self.assertRaises(exception.Error, db.volume_get, None, @@ -142,14 +144,13 @@ class VolumeTestCase(test.TrialTestCase): total_slots = FLAGS.num_shelves * FLAGS.blades_per_shelf for i in range(total_slots): volume_id = self._create_volume() - d = self.volume.create_volume(volume_id) + d = self.volume.create_volume(self.context, volume_id) d.addCallback(_check) d.addErrback(self.fail) deferreds.append(d) yield defer.DeferredList(deferreds) for volume_id in volume_ids: - vol = db.volume_get(None, volume_id) - vol.delete() + self.volume.delete_volume(self.context, volume_id) def test_multi_node(self): # TODO(termie): Figure out how to test with two nodes, diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 579472047..e0468b877 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -39,25 +39,20 @@ flags.DEFINE_string('aoe_eth_dev', 'eth0', class FakeAOEDriver(object): - @defer.inlineCallbacks def create_volume(self, volume_id, size): logging.debug("Fake AOE: create_volume %s, %s", volume_id, size) - @defer.inlineCallbacks def delete_volume(self, volume_id): logging.debug("Fake AOE: delete_volume %s", volume_id) - @defer.inlineCallbacks def create_export(self, volume_id, shelf_id, blade_id): logging.debug("Fake AOE: create_export %s, %s, %s", volume_id, shelf_id, blade_id) - @defer.inlineCallbacks def remove_export(self, volume_id, shelf_id, blade_id): logging.debug("Fake AOE: remove_export %s, %s, %s", volume_id, 
shelf_id, blade_id) - @defer.inlineCallbacks def ensure_exports(self): logging.debug("Fake AOE: ensure_export") diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 0683703a1..7d8e1aca0 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -38,7 +38,7 @@ flags.DEFINE_string('storage_availability_zone', 'availability zone of this service') flags.DEFINE_boolean('fake_storage', False, 'Should we make real storage volumes to attach?') -flags.DEFINE_string('volume_driver', 'nova.volume.driver.AOEDriver' +flags.DEFINE_string('volume_driver', 'nova.volume.driver.AOEDriver', 'Driver to use for volume creation') flags.DEFINE_integer('num_shelves', 100, @@ -60,7 +60,7 @@ class AOEManager(manager.Manager): super(AOEManager, self).__init__(*args, **kwargs) def _ensure_blades(self, context): - total_blades = FLAGS.num_shelves, FLAGS.blades_per_shelf + total_blades = FLAGS.num_shelves * FLAGS.blades_per_shelf if self.db.export_device_count(context) >= total_blades: return for shelf_id in xrange(FLAGS.num_shelves): -- cgit From 7639fe7cb6220f0393e6ee5ec43cd6b9ac35e5a9 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sun, 29 Aug 2010 22:41:43 -0700 Subject: remove creation of volume groups on boot --- nova/volume/driver.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index e0468b877..648ae1a06 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -26,12 +26,9 @@ from twisted.internet import defer from nova import flags from nova import process -from nova import utils FLAGS = flags.FLAGS -flags.DEFINE_string('storage_dev', '/dev/sdb', - 'Physical device to use for volumes') flags.DEFINE_string('volume_group', 'nova-volumes', 'Name for the VG that will contain exported volumes') flags.DEFINE_string('aoe_eth_dev', 'eth0', @@ -60,13 +57,14 @@ class FakeAOEDriver(object): class AOEDriver(object): def __init__(self, *args, **kwargs): super(AOEDriver, 
self).__init__(*args, **kwargs) - # NOTE(vish): no need for thise to be async, but it may be - # best to explicitly do them at some other time - utils.execute("sudo pvcreate %s" % (FLAGS.storage_dev)) - utils.execute("sudo vgcreate %s %s" % (FLAGS.volume_group, - FLAGS.storage_dev)) + + @defer.inlineCallbacks + def _ensure_vg(self): + yield process.simple_execute("vgs | grep %s" % FLAGS.volume_group) + @defer.inlineCallbacks def create_volume(self, volume_id, size): + self._ensure_vg() if int(size) == 0: sizestr = '100M' else: -- cgit From 9c98cfb47175ca9ace5c0bd731085896303e3e7b Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 30 Aug 2010 00:55:19 -0700 Subject: instance runs --- bin/nova-compute | 2 +- bin/nova-network | 3 +- bin/nova-volume | 2 +- nova/db/sqlalchemy/api.py | 2 +- nova/db/sqlalchemy/models.py | 1 - nova/endpoint/cloud.py | 84 +++++++++++++++++++++++++------------------- nova/service.py | 7 ++-- nova/utils.py | 8 +++-- 8 files changed, 59 insertions(+), 50 deletions(-) diff --git a/bin/nova-compute b/bin/nova-compute index ed9a55565..cf9de9bbf 100755 --- a/bin/nova-compute +++ b/bin/nova-compute @@ -29,4 +29,4 @@ if __name__ == '__main__': twistd.serve(__file__) if __name__ == '__builtin__': - application = service.ComputeService.create() # pylint: disable-msg=C0103 + application = service.ComputeService.create() # pylint: disable=C0103 diff --git a/bin/nova-network b/bin/nova-network index 5753aafbe..6434b6ec3 100755 --- a/bin/nova-network +++ b/bin/nova-network @@ -33,5 +33,4 @@ if __name__ == '__main__': twistd.serve(__file__) if __name__ == '__builtin__': - # pylint: disable-msg=C0103 - application = service.type_to_class(FLAGS.network_type).create() + application = service.NetworkService.create() # pylint: disable-msg=C0103 diff --git a/bin/nova-volume b/bin/nova-volume index 8ef006ebc..25b5871a3 100755 --- a/bin/nova-volume +++ b/bin/nova-volume @@ -29,4 +29,4 @@ if __name__ == '__main__': twistd.serve(__file__) if __name__ == 
'__builtin__': - application = service.VolumeService.create() # pylint: disable-msg=C0103 + application = service.VolumeService.create() # pylint: disable-msg=C0103 diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 12455530d..8b4300241 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -112,7 +112,7 @@ def fixed_ip_allocate(context, network_id): fixed_ip_ref['allocated'] = True session.add(fixed_ip_ref) session.commit() - return fixed_ip_ref + return fixed_ip_ref['str_id'] def fixed_ip_create(context, network_id, address): diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 36d6cf3ad..19ab15091 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -318,7 +318,6 @@ class FloatingIp(Base, NovaBase): class Network(Base, NovaBase): __tablename__ = 'networks' id = Column(Integer, primary_key=True) - kind = Column(String(255)) injected = Column(Boolean, default=False) cidr = Column(String(255)) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 8ba10a5bb..0f3ecb3b0 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -233,7 +233,8 @@ class CloudController(object): return rpc.call('%s.%s' % (FLAGS.compute_topic, instance_ref['node_name']), {"method": "get_console_output", - "args": {"instance_id": instance_ref['id']}}) + "args": {"context": None, + "instance_id": instance_ref['id']}}) @rbac.allow('projectmanager', 'sysadmin') def describe_volumes(self, context, **kwargs): @@ -300,9 +301,10 @@ class CloudController(object): host = db.instance_get_host(context, instance_ref['id']) rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "attach_volume", - "args": {"volume_id": volume_ref['id'], - "instance_id": instance_ref['id'], - "mountpoint": device}}) + "args": {"context": None, + "volume_id": volume_ref['id'], + "instance_id": instance_ref['id'], + "mountpoint": device}}) return defer.succeed({'attachTime': 
volume_ref['attach_time'], 'device': volume_ref['mountpoint'], 'instanceId': instance_ref['id_str'], @@ -324,8 +326,9 @@ class CloudController(object): host = db.instance_get_host(context, instance_ref['id']) rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "detach_volume", - "args": {"instance_id": instance_ref['id'], - "volume_id": volume_ref['id']}}) + "args": {"context": None, + "instance_id": instance_ref['id'], + "volume_id": volume_ref['id']}}) except exception.NotFound: # If the instance doesn't exist anymore, # then we need to call detach blind @@ -437,7 +440,8 @@ class CloudController(object): network_topic = yield self._get_network_topic(context) public_ip = yield rpc.call(network_topic, {"method": "allocate_floating_ip", - "args": {"project_id": context.project.id}}) + "args": {"context": None, + "project_id": context.project.id}}) defer.returnValue({'addressSet': [{'publicIp': public_ip}]}) @rbac.allow('netadmin') @@ -448,7 +452,8 @@ class CloudController(object): network_topic = yield self._get_network_topic(context) rpc.cast(network_topic, {"method": "deallocate_floating_ip", - "args": {"floating_ip": floating_ip_ref['str_id']}}) + "args": {"context": None, + "floating_ip": floating_ip_ref['str_id']}}) defer.returnValue({'releaseResponse': ["Address released."]}) @rbac.allow('netadmin') @@ -460,7 +465,8 @@ class CloudController(object): network_topic = yield self._get_network_topic(context) rpc.cast(network_topic, {"method": "associate_floating_ip", - "args": {"floating_ip": floating_ip_ref['str_id'], + "args": {"context": None, + "floating_ip": floating_ip_ref['str_id'], "fixed_ip": fixed_ip_ref['str_id'], "instance_id": instance_ref['id']}}) defer.returnValue({'associateResponse': ["Address associated."]}) @@ -472,7 +478,8 @@ class CloudController(object): network_topic = yield self._get_network_topic(context) rpc.cast(network_topic, {"method": "disassociate_floating_ip", - "args": {"floating_ip": 
floating_ip_ref['str_id']}}) + "args": {"context": None, + "floating_ip": floating_ip_ref['str_id']}}) defer.returnValue({'disassociateResponse': ["Address disassociated."]}) @defer.inlineCallbacks @@ -483,7 +490,8 @@ class CloudController(object): if not host: host = yield rpc.call(FLAGS.network_topic, {"method": "set_network_host", - "args": {"project_id": context.project.id}}) + "args": {"context": None, + "project_id": context.project.id}}) defer.returnValue(db.queue_get_for(context, FLAGS.network_topic, host)) @rbac.allow('projectmanager', 'sysadmin') @@ -568,7 +576,7 @@ class CloudController(object): @rbac.allow('projectmanager', 'sysadmin') - # @defer.inlineCallbacks + @defer.inlineCallbacks def terminate_instances(self, context, instance_id, **kwargs): logging.debug("Going to start terminating instances") # network_topic = yield self._get_network_topic(context) @@ -582,36 +590,37 @@ class CloudController(object): continue # FIXME(ja): where should network deallocate occur? - # floating_ip = network_model.get_public_ip_for_instance(i) - # if floating_ip: - # logging.debug("Disassociating address %s" % floating_ip) - # # NOTE(vish): Right now we don't really care if the ip is - # # disassociated. We may need to worry about - # # checking this later. Perhaps in the scheduler? - # rpc.cast(network_topic, - # {"method": "disassociate_floating_ip", - # "args": {"floating_ip": floating_ip}}) - # - # fixed_ip = instance.get('private_dns_name', None) - # if fixed_ip: - # logging.debug("Deallocating address %s" % fixed_ip) - # # NOTE(vish): Right now we don't really care if the ip is - # # actually removed. We may need to worry about - # # checking this later. Perhaps in the scheduler? 
- # rpc.cast(network_topic, - # {"method": "deallocate_fixed_ip", - # "args": {"fixed_ip": fixed_ip}}) + address = db.instance_get_floating_address(context, + instance_ref['id']) + if address: + logging.debug("Disassociating address %s" % address) + # NOTE(vish): Right now we don't really care if the ip is + # disassociated. We may need to worry about + # checking this later. Perhaps in the scheduler? + network_topic = yield self._get_network_topic(context) + rpc.cast(network_topic, + {"method": "disassociate_floating_ip", + "args": {"context": None, + "address": address}}) + + address = db.instance_get_fixed_address(context, + instance_ref['id']) + if address: + logging.debug("Deallocating address %s" % address) + # NOTE(vish): Currently, nothing needs to be done on the + # network node until release. If this changes, + # we will need to cast here. + db.fixed_ip_deallocate(context, address) host = db.instance_get_host(context, instance_ref['id']) - if host is not None: - # NOTE(joshua?): It's also internal default + if host: rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "terminate_instance", - "args": {"instance_id": instance_ref['id']}}) + "args": {"context": None, + "instance_id": instance_ref['id']}}) else: db.instance_destroy(context, instance_ref['id']) - # defer.returnValue(True) - return True + defer.returnValue(True) @rbac.allow('projectmanager', 'sysadmin') def reboot_instances(self, context, instance_id, **kwargs): @@ -621,7 +630,8 @@ class CloudController(object): host = db.instance_get_host(context, instance_ref['id']) rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "reboot_instance", - "args": {"instance_id": instance_ref['id']}}) + "args": {"context": None, + "instance_id": instance_ref['id']}}) return defer.succeed(True) @rbac.allow('projectmanager', 'sysadmin') diff --git a/nova/service.py b/nova/service.py index b20e24348..94d91f60a 100644 --- a/nova/service.py +++ b/nova/service.py @@ -46,14 
+46,14 @@ class Service(object, service.Service): def __init__(self, manager, *args, **kwargs): self.manager = manager + self.model_disconnected = False super(Service, self).__init__(*args, **kwargs) def __getattr__(self, key): - print 'getattr' try: - super(Service, self).__getattr__(key) + return super(Service, self).__getattr__(key) except AttributeError: - self.manager.__getattr__(key) + return getattr(self.manager, key) @classmethod def create(cls, @@ -109,7 +109,6 @@ class Service(object, service.Service): @defer.inlineCallbacks def report_state(self, node_name, binary, context=None): """Update the state of this daemon in the datastore.""" - print 'report_state' try: try: daemon_ref = db.daemon_get_by_args(context, node_name, binary) diff --git a/nova/utils.py b/nova/utils.py index 392fa8c46..12896c488 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -48,11 +48,13 @@ def import_class(import_str): def import_object(import_str): """Returns an object including a module or module and class""" - cls = import_class(import_str) try: + __import__(import_str) + return sys.modules[import_str] + except ImportError: + cls = import_class(import_str) + print cls return cls() - except TypeError: - return cls def fetchfile(url, target): logging.debug("Fetching %s" % url) -- cgit From f2776632fb94bd55a428bfb9272472e4bd2517bb Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Mon, 30 Aug 2010 14:50:50 +0200 Subject: Detect if libvirt connection has been broken and reestablish it. 
--- nova/virt/libvirt_conn.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 524646ee5..1ff8175d0 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -84,10 +84,22 @@ class LibvirtConnection(object): @property def _conn(self): - if not self._wrapped_conn: + if not self._wrapped_conn or not self._test_connection(): + logging.debug('Connecting to libvirt: %s' % self.libvirt_uri) self._wrapped_conn = self._connect(self.libvirt_uri, self.read_only) return self._wrapped_conn + def _test_connection(self): + try: + self._wrapped_conn.getVersion() + return True + except libvirt.libvirtError as e: + if e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and \ + e.get_error_domain() == libvirt.VIR_FROM_REMOTE: + logging.debug('Connection to libvirt broke') + return False + raise + def get_uri_and_template(self): if FLAGS.libvirt_type == 'uml': uri = FLAGS.libvirt_uri or 'uml:///system' -- cgit From 40899259205561b43791f1540ec3f9100a4869d1 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 30 Aug 2010 09:03:43 -0700 Subject: ip addresses work now --- bin/nova-dhcpbridge | 4 ++-- nova/db/api.py | 4 ++-- nova/db/sqlalchemy/api.py | 10 +++++++++- nova/db/sqlalchemy/models.py | 15 +++++++++++++-- nova/network/linux_net.py | 2 -- nova/utils.py | 2 +- 6 files changed, 27 insertions(+), 10 deletions(-) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index 52ec2d497..a794db271 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -36,6 +36,7 @@ from nova import rpc from nova import utils from nova import datastore # for redis_db flag from nova.auth import manager # for auth flags +from nova.network import linux_net from nova.network import manager # for network flags FLAGS = flags.FLAGS @@ -74,8 +75,7 @@ def del_lease(_mac, ip_address, _hostname, _interface): def init_leases(interface): """Get the list of hosts for an interface.""" network_ref = 
db.network_get_by_bridge(None, interface) - network_manager = utils.import_object(FLAGS.network_manager) - return network_manager.driver.get_dhcp_hosts(None, network_ref['id']) + return linux_net.get_dhcp_hosts(None, network_ref['id']) def main(): diff --git a/nova/db/api.py b/nova/db/api.py index 80583de99..91d7b8415 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -123,9 +123,9 @@ def fixed_ip_allocate(context, network_id): return _impl.fixed_ip_allocate(context, network_id) -def fixed_ip_create(context, network_id, address): +def fixed_ip_create(context, network_id, address, reserved=False): """Create a fixed ip from the values dictionary.""" - return _impl.fixed_ip_create(context, network_id, address) + return _impl.fixed_ip_create(context, network_id, address, reserved) def fixed_ip_deallocate(context, address): diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 8b4300241..d7a107ba8 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -58,6 +58,7 @@ def floating_ip_allocate_address(context, node_name, project_id): # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if not floating_ip_ref: + session.rollback() raise db.NoMoreAddresses() floating_ip_ref['project_id'] = project_id session.add(floating_ip_ref) @@ -108,6 +109,7 @@ def fixed_ip_allocate(context, network_id): # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if not fixed_ip_ref: + session.rollback() raise db.NoMoreAddresses() fixed_ip_ref['allocated'] = True session.add(fixed_ip_ref) @@ -115,10 +117,11 @@ def fixed_ip_allocate(context, network_id): return fixed_ip_ref['str_id'] -def fixed_ip_create(context, network_id, address): +def fixed_ip_create(context, network_id, address, reserved=False): fixed_ip_ref = models.FixedIp() fixed_ip_ref.network = db.network_get(context, network_id) fixed_ip_ref['ip_str'] = address + fixed_ip_ref['reserved'] = reserved 
fixed_ip_ref.save() return fixed_ip_ref @@ -303,7 +306,9 @@ def network_get_by_bridge(context, bridge): session = models.NovaBase.get_session() rv = session.query(models.Network).filter_by(bridge=bridge).first() if not rv: + session.rollback() raise exception.NotFound('No network for bridge %s' % bridge) + session.commit() return rv @@ -317,6 +322,7 @@ def network_get_index(context, network_id): query = session.query(models.NetworkIndex).filter_by(network_id=None) network_index = query.with_lockmode("update").first() if not network_index: + session.rollback() raise db.NoMoreNetworks() network_index['network'] = network_get(context, network_id) session.add(network_index) @@ -340,6 +346,7 @@ def network_set_host(context, network_id, host_id): query = session.query(models.Network).filter_by(id=network_id) network = query.with_lockmode("update").first() if not network: + session.rollback() raise exception.NotFound("Couldn't find network with %s" % network_id) # NOTE(vish): if with_lockmode isn't supported, as in sqlite, @@ -402,6 +409,7 @@ def volume_allocate_shelf_and_blade(context, volume_id): # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if not export_device: + session.rollback() raise db.NoMoreBlades() export_device.volume_id = volume_id session.add(export_device) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 19ab15091..2f0ce5d83 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -20,6 +20,8 @@ SQLAlchemy models for nova data """ +import logging + from sqlalchemy.orm import relationship, backref, validates, exc from sqlalchemy import Table, Column, Integer, String from sqlalchemy import MetaData, ForeignKey, DateTime, Boolean, Text @@ -80,6 +82,7 @@ class NovaBase(object): session.commit() return result except exc.NoResultFound: + session.rollback() raise exception.NotFound("No model for id %s" % obj_id) @classmethod @@ -94,12 +97,20 @@ class 
NovaBase(object): def save(self): session = NovaBase.get_session() session.add(self) - session.commit() + try: + session.commit() + except exc.OperationalError: + logging.exception("Error trying to save %s", self) + session.rollback() def delete(self): session = NovaBase.get_session() session.delete(self) - session.commit() + try: + session.commit() + except exc.OperationalError: + logging.exception("Error trying to delete %s", self) + session.rollback() def refresh(self): session = NovaBase.get_session() diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 1e14b4716..a7b81533b 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -125,7 +125,6 @@ def ensure_bridge(bridge, interface, net_attrs=None): def get_dhcp_hosts(context, network_id): hosts = [] for fixed_ip in db.network_get_associated_fixed_ips(context, network_id): - print fixed_ip['ip_str'] hosts.append(_host_dhcp(fixed_ip)) return '\n'.join(hosts) @@ -161,7 +160,6 @@ def update_dhcp(context, network_id): env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile, 'DNSMASQ_INTERFACE': network_ref['bridge']} command = _dnsmasq_cmd(network_ref) - print command _execute(command, addl_env=env) def _host_dhcp(fixed_ip): diff --git a/nova/utils.py b/nova/utils.py index 12896c488..705df718e 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -53,7 +53,6 @@ def import_object(import_str): return sys.modules[import_str] except ImportError: cls = import_class(import_str) - print cls return cls() def fetchfile(url, target): @@ -136,6 +135,7 @@ def last_octet(address): def get_my_ip(): ''' returns the actual ip of the local machine. 
''' + return '127.0.0.1' if getattr(FLAGS, 'fake_tests', None): return '127.0.0.1' try: -- cgit From db59c270cd4a3a3f32e73c2ab4bf8f8e1226dd66 Mon Sep 17 00:00:00 2001 From: Devin Carlen Date: Mon, 30 Aug 2010 10:51:54 -0700 Subject: Making tests pass --- nova/db/sqlalchemy/api.py | 11 ++++++++--- nova/db/sqlalchemy/models.py | 10 +++++----- nova/tests/network_unittest.py | 1 - 3 files changed, 13 insertions(+), 9 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 0b6316221..3166d35cc 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -349,7 +349,8 @@ def network_create_fixed_ips(context, network_id, num_vpn_clients): def network_ensure_indexes(context, num_networks): with managed_session(autocommit=False) as session: - if models.NetworkIndex.count() == 0: + count = models.NetworkIndex.count(session=session) + if count == 0: for i in range(num_networks): network_index = models.NetworkIndex() network_index.index = i @@ -523,8 +524,12 @@ def volume_create(context, values): def volume_destroy(context, volume_id): - volume_ref = volume_get(context, volume_id) - volume_ref.delete() + with managed_session(autocommit=False) as session: + session.execute('update volumes set deleted=1 where id=:id', + {'id': volume_id}) + session.execute('update export_devices set deleted=1 where network_id=:id', + {'id': volume_id}) + session.commit() def volume_detached(context, volume_id): diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 4fbe2cc5e..10f909d95 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -49,8 +49,8 @@ class NovaBase(object): .filter_by(deleted=False) \ .all() else: - with managed_session() as session: - return cls.all(session=session) + with managed_session() as s: + return cls.all(session=s) @classmethod def count(cls, session=None): @@ -73,8 +73,8 @@ class NovaBase(object): except exc.NoResultFound: raise exception.NotFound("No model for id %s" % 
obj_id) else: - with managed_session() as session: - return cls.find(obj_id, session=session) + with managed_session() as s: + return cls.find(obj_id, session=s) @classmethod def find_by_str(cls, str_id, session=None): @@ -206,6 +206,7 @@ class Instance(Base, NovaBase): mac_address = Column(String(255)) def set_state(self, state_code, state_description=None): + # TODO(devcamcar): Move this out of models and into api from nova.compute import power_state self.state = state_code if not state_description: @@ -345,7 +346,6 @@ class NetworkIndex(Base, NovaBase): def register_models(): from sqlalchemy import create_engine - models = (Image, PhysicalNode, Daemon, Instance, Volume, ExportDevice, FixedIp, FloatingIp, Network, NetworkIndex) engine = create_engine(FLAGS.sql_connection, echo=False) diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index e0de04be7..d487c2e45 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -105,7 +105,6 @@ class NetworkTestCase(test.TrialTestCase): db.fixed_ip_deallocate(None, fix_addr) def test_allocate_deallocate_fixed_ip(self): - import pdb; pdb.set_trace() """Makes sure that we can allocate and deallocate a fixed ip""" address = self._create_address(0) self.assertTrue(is_allocated_in_project(address, self.projects[0].id)) -- cgit From 7756a1d269946f72e76bae7a8015c3d72063b2c6 Mon Sep 17 00:00:00 2001 From: Devin Carlen Date: Mon, 30 Aug 2010 12:49:31 -0700 Subject: Added session.py --- nova/db/sqlalchemy/session.py | 53 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 nova/db/sqlalchemy/session.py diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py new file mode 100644 index 000000000..06e2ca8cd --- /dev/null +++ b/nova/db/sqlalchemy/session.py @@ -0,0 +1,53 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National 
Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import create_engine +from sqlalchemy.orm import create_session +from sqlalchemy.ext.declarative import declarative_base + +from nova import flags + +FLAGS = flags.FLAGS + +def managed_session(autocommit=True): + return SessionExecutionManager(autocommit=autocommit) + + +class SessionExecutionManager: + _engine = None + _session = None + + def __init__(self, autocommit): + cls = SessionExecutionManager + if not cls._engine: + cls._engine = create_engine(FLAGS.sql_connection, echo=False) + self._session = create_session(bind=cls._engine, + autocommit=autocommit) + + + def __enter__(self): + return self._session + + def __exit__(self, type, value, traceback): + import pdb + if type or value or traceback: + pdb.set_trace() + # TODO(devcamcar): Rollback on exception. + # TODO(devcamcar): Log exceptions. 
+ if self._session: + self._session.close() \ No newline at end of file -- cgit From 4cdb0cdc6ef069287cba8a687001deee8ed23280 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 30 Aug 2010 13:06:07 -0700 Subject: rollback on exit --- nova/db/sqlalchemy/session.py | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py index 06e2ca8cd..2b088170b 100644 --- a/nova/db/sqlalchemy/session.py +++ b/nova/db/sqlalchemy/session.py @@ -16,9 +16,10 @@ # License for the specific language governing permissions and limitations # under the License. +import logging + from sqlalchemy import create_engine from sqlalchemy.orm import create_session -from sqlalchemy.ext.declarative import declarative_base from nova import flags @@ -31,23 +32,21 @@ def managed_session(autocommit=True): class SessionExecutionManager: _engine = None _session = None - + def __init__(self, autocommit): cls = SessionExecutionManager if not cls._engine: cls._engine = create_engine(FLAGS.sql_connection, echo=False) self._session = create_session(bind=cls._engine, autocommit=autocommit) - - + + def __enter__(self): return self._session def __exit__(self, type, value, traceback): - import pdb - if type or value or traceback: - pdb.set_trace() - # TODO(devcamcar): Rollback on exception. - # TODO(devcamcar): Log exceptions. 
+ if type: + logging.exception("Error in database transaction") + self._session.rollback() if self._session: - self._session.close() \ No newline at end of file + self._session.close() -- cgit From de5b1ce17a44e824f1f29ead19dac45db4e0086c Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 30 Aug 2010 15:11:46 -0700 Subject: all tests pass again --- nova/db/api.py | 19 ++++--- nova/db/sqlalchemy/api.py | 121 ++++++++++++++++++++++++++--------------- nova/db/sqlalchemy/models.py | 40 ++++++++------ nova/db/sqlalchemy/session.py | 9 ++- nova/endpoint/cloud.py | 4 +- nova/tests/compute_unittest.py | 30 +++++----- nova/tests/network_unittest.py | 7 +-- nova/tests/volume_unittest.py | 7 ++- 8 files changed, 144 insertions(+), 93 deletions(-) diff --git a/nova/db/api.py b/nova/db/api.py index 91d7b8415..9b8c48934 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -108,10 +108,15 @@ def floating_ip_fixed_ip_associate(context, floating_address, fixed_address): def floating_ip_get_by_address(context, address): - """Get a floating ip by address.""" + """Get a floating ip by address or raise if it doesn't exist.""" return _impl.floating_ip_get_by_address(context, address) +def floating_ip_get_instance(context, address): + """Get an instance for a floating ip by address.""" + return _impl.floating_ip_get_instance(context, address) + + #################### @@ -134,10 +139,15 @@ def fixed_ip_deallocate(context, address): def fixed_ip_get_by_address(context, address): - """Get a fixed ip by address.""" + """Get a fixed ip by address or raise if it does not exist.""" return _impl.fixed_ip_get_by_address(context, address) +def fixed_ip_get_instance(context, address): + """Get an instance for a fixed ip by address.""" + return _impl.fixed_ip_get_instance(context, address) + + def fixed_ip_get_network(context, address): """Get a network for a fixed ip by address.""" return _impl.fixed_ip_get_network(context, address) @@ -181,11 +191,6 @@ def instance_get_all(context): 
return _impl.instance_get_all(context) -def instance_get_by_address(context, address): - """Gets an instance by fixed ip address or raise if it does not exist.""" - return _impl.instance_get_by_address(context, address) - - def instance_get_by_project(context, project_id): """Get all instance belonging to a project.""" return _impl.instance_get_by_project(context, project_id) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index cef77cc50..a4b0ba545 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -79,30 +79,50 @@ def floating_ip_create(context, address, host): def floating_ip_fixed_ip_associate(context, floating_address, fixed_address): - floating_ip_ref = db.floating_ip_get_by_address(context, floating_address) - fixed_ip_ref = models.FixedIp.find_by_str(fixed_address) - floating_ip_ref.fixed_ip = fixed_ip_ref - floating_ip_ref.save() + with managed_session(autocommit=False) as session: + floating_ip_ref = models.FloatingIp.find_by_str(floating_address, + session=session) + fixed_ip_ref = models.FixedIp.find_by_str(fixed_address, + session=session) + floating_ip_ref.fixed_ip = fixed_ip_ref + floating_ip_ref.save(session=session) + session.commit() def floating_ip_disassociate(context, address): - floating_ip_ref = db.floating_ip_get_by_address(context, address) - fixed_ip_address = floating_ip_ref.fixed_ip['str_id'] - floating_ip_ref['fixed_ip'] = None - floating_ip_ref.save() - return fixed_ip_address + with managed_session(autocommit=False) as session: + floating_ip_ref = models.FloatingIp.find_by_str(address, + session=session) + fixed_ip_ref = floating_ip_ref.fixed_ip + if fixed_ip_ref: + fixed_ip_address = fixed_ip_ref['str_id'] + else: + fixed_ip_address = None + floating_ip_ref.fixed_ip = None + floating_ip_ref.save(session=session) + session.commit() + return fixed_ip_address def floating_ip_deallocate(context, address): - floating_ip_ref = db.floating_ip_get_by_address(context, address) - 
floating_ip_ref['project_id'] = None - floating_ip_ref.save() + with managed_session(autocommit=False) as session: + floating_ip_ref = models.FloatingIp.find_by_str(address, + session=session) + floating_ip_ref['project_id'] = None + floating_ip_ref.save(session=session) def floating_ip_get_by_address(context, address): return models.FloatingIp.find_by_str(address) +def floating_ip_get_instance(context, address): + with managed_session() as session: + floating_ip_ref = models.FloatingIp.find_by_str(address, + session=session) + return floating_ip_ref.fixed_ip.instance + + ################### @@ -139,8 +159,14 @@ def fixed_ip_get_by_address(context, address): return models.FixedIp.find_by_str(address) +def fixed_ip_get_instance(context, address): + with managed_session() as session: + return models.FixedIp.find_by_str(address, session=session).instance + + def fixed_ip_get_network(context, address): - return models.FixedIp.find_by_str(address).network + with managed_session() as session: + return models.FixedIp.find_by_str(address, session=session).network def fixed_ip_deallocate(context, address): @@ -150,15 +176,20 @@ def fixed_ip_deallocate(context, address): def fixed_ip_instance_associate(context, address, instance_id): - fixed_ip_ref = fixed_ip_get_by_address(context, address) - fixed_ip_ref.instance = instance_get(context, instance_id) - fixed_ip_ref.save() + with managed_session(autocommit=False) as session: + fixed_ip_ref = models.FixedIp.find_by_str(address, session=session) + instance_ref = models.Instance.find(instance_id, session=session) + fixed_ip_ref.instance = instance_ref + fixed_ip_ref.save(session=session) + session.commit() def fixed_ip_instance_disassociate(context, address): - fixed_ip_ref = fixed_ip_get_by_address(context, address) - fixed_ip_ref.instance = None - fixed_ip_ref.save() + with managed_session(autocommit=False) as session: + fixed_ip_ref = models.FixedIp.find_by_str(address, session=session) + fixed_ip_ref.instance = None + 
fixed_ip_ref.save(session=session) + session.commit() def fixed_ip_update(context, address, values): @@ -192,13 +223,6 @@ def instance_get_all(context): return models.Instance.all() -def instance_get_by_address(context, address): - fixed_ip_ref = db.fixed_ip_get_by_address(address) - if not fixed_ip_ref.instance: - raise exception.NotFound("No instance found for address %s" % address) - return fixed_ip_ref.instance - - def instance_get_by_project(context, project_id): with managed_session() as session: return session.query(models.Instance) \ @@ -220,20 +244,22 @@ def instance_get_by_str(context, str_id): def instance_get_fixed_address(context, instance_id): - instance_ref = instance_get(context, instance_id) - if not instance_ref.fixed_ip: - return None - return instance_ref.fixed_ip['str_id'] + with managed_session() as session: + instance_ref = models.Instance.find(instance_id, session=session) + if not instance_ref.fixed_ip: + return None + return instance_ref.fixed_ip['str_id'] def instance_get_floating_address(context, instance_id): - instance_ref = instance_get(context, instance_id) - if not instance_ref.fixed_ip: - return None - if not instance_ref.fixed_ip.floating_ips: - return None - # NOTE(vish): this just returns the first floating ip - return instance_ref.fixed_ip.floating_ips[0]['str_id'] + with managed_session() as session: + instance_ref = models.Instance.find(instance_id, session=session) + if not instance_ref.fixed_ip: + return None + if not instance_ref.fixed_ip.floating_ips: + return None + # NOTE(vish): this just returns the first floating ip + return instance_ref.fixed_ip.floating_ips[0]['str_id'] def instance_get_host(context, instance_id): @@ -307,6 +333,13 @@ def network_destroy(context, network_id): # TODO(vish): do we have to use sql here? 
session.execute('update networks set deleted=1 where id=:id', {'id': network_id}) + session.execute('update fixed_ips set deleted=1 where network_id=:id', + {'id': network_id}) + session.execute('update floating_ips set deleted=1 ' + 'where fixed_ip_id in ' + '(select id from fixed_ips ' + 'where network_id=:id)', + {'id': network_id}) session.execute('update network_indexes set network_id=NULL where network_id=:id', {'id': network_id}) session.commit() @@ -472,7 +505,7 @@ def volume_destroy(context, volume_id): # TODO(vish): do we have to use sql here? session.execute('update volumes set deleted=1 where id=:id', {'id': volume_id}) - session.execute('update export_devices set volume_id=NULL where network_id=:id', + session.execute('update export_devices set volume_id=NULL where volume_id=:id', {'id': volume_id}) session.commit() @@ -512,11 +545,13 @@ def volume_get_host(context, volume_id): def volume_get_shelf_and_blade(context, volume_id): - volume_ref = volume_get(context, volume_id) - export_device = volume_ref.export_device - if not export_device: - raise exception.NotFound() - return (export_device.shelf_id, export_device.blade_id) + with managed_session() as session: + export_device = session.query(models.ExportDevice) \ + .filter_by(volume_id=volume_id) \ + .first() + if not export_device: + raise exception.NotFound() + return (export_device.shelf_id, export_device.blade_id) def volume_update(context, volume_id, values): diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index b7031eec0..b6077a583 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -274,14 +274,18 @@ class FixedIp(Base, NovaBase): return self.ip_str @classmethod - def find_by_str(cls, session, str_id): - try: - return session.query(cls) \ - .filter_by(ip_str=str_id) \ - .filter_by(deleted=False) \ - .one() - except exc.NoResultFound: - raise exception.NotFound("No model for ip str %s" % str_id) + def find_by_str(cls, str_id, session=None): + 
if session: + try: + return session.query(cls) \ + .filter_by(ip_str=str_id) \ + .filter_by(deleted=False) \ + .one() + except exc.NoResultFound: + raise exception.NotFound("No model for ip_str %s" % str_id) + else: + with managed_session() as s: + return cls.find_by_str(str_id, session=s) class FloatingIp(Base, NovaBase): @@ -299,14 +303,18 @@ class FloatingIp(Base, NovaBase): return self.ip_str @classmethod - def find_by_str(cls, session, str_id): - try: - return session.query(cls) \ - .filter_by(ip_str=str_id) \ - .filter_by(deleted=False) \ - .one() - except exc.NoResultFound: - raise exception.NotFound("No model for ip str %s" % str_id) + def find_by_str(cls, str_id, session=None): + if session: + try: + return session.query(cls) \ + .filter_by(ip_str=str_id) \ + .filter_by(deleted=False) \ + .one() + except exc.NoResultFound: + raise exception.NotFound("No model for ip_str %s" % str_id) + else: + with managed_session() as s: + return cls.find_by_str(str_id, session=s) class Network(Base, NovaBase): diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py index 2b088170b..99270433a 100644 --- a/nova/db/sqlalchemy/session.py +++ b/nova/db/sqlalchemy/session.py @@ -44,9 +44,8 @@ class SessionExecutionManager: def __enter__(self): return self._session - def __exit__(self, type, value, traceback): - if type: - logging.exception("Error in database transaction") + def __exit__(self, exc_type, exc_value, traceback): + if exc_type: + logging.exception("Rolling back due to failed transaction") self._session.rollback() - if self._session: - self._session.close() + self._session.close() diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 0f3ecb3b0..4f7f1c605 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -94,7 +94,7 @@ class CloudController(object): return result def get_metadata(self, ipaddress): - i = db.instance_get_by_address(ipaddress) + i = db.fixed_ip_get_instance(ipaddress) if i is None: return None mpi = 
self._get_mpi_data(i['project_id']) @@ -421,7 +421,7 @@ class CloudController(object): context.project.id) for floating_ip_ref in iterator: address = floating_ip_ref['id_str'] - instance_ref = db.instance_get_by_address(address) + instance_ref = db.floating_ip_get_instance(address) address_rv = { 'public_ip': address, 'instance_id': instance_ref['id_str'] diff --git a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py index 28e51f387..a8d644c84 100644 --- a/nova/tests/compute_unittest.py +++ b/nova/tests/compute_unittest.py @@ -40,7 +40,7 @@ class InstanceXmlTestCase(test.TrialTestCase): # instance_id = 'foo' # first_node = node.Node() - # inst = yield first_node.run_instance(instance_id) + # inst = yield first_node.run_instance(self.context, instance_id) # # # force the state so that we can verify that it changes # inst._s['state'] = node.Instance.NOSTATE @@ -50,7 +50,7 @@ class InstanceXmlTestCase(test.TrialTestCase): # second_node = node.Node() # new_inst = node.Instance.fromXml(second_node._conn, pool=second_node._pool, xml=xml) # self.assertEqual(new_inst.state, node.Instance.RUNNING) - # rv = yield first_node.terminate_instance(instance_id) + # rv = yield first_node.terminate_instance(self.context, instance_id) class ComputeConnectionTestCase(test.TrialTestCase): @@ -63,6 +63,7 @@ class ComputeConnectionTestCase(test.TrialTestCase): self.manager = manager.AuthManager() user = self.manager.create_user('fake', 'fake', 'fake') project = self.manager.create_project('fake', 'fake', 'fake') + self.context = None def tearDown(self): self.manager.delete_user('fake') @@ -84,13 +85,13 @@ class ComputeConnectionTestCase(test.TrialTestCase): def test_run_describe_terminate(self): instance_id = self._create_instance() - yield self.compute.run_instance(instance_id) + yield self.compute.run_instance(self.context, instance_id) instances = db.instance_get_all(None) logging.info("Running instances: %s", instances) self.assertEqual(len(instances), 1) - yield 
self.compute.terminate_instance(instance_id) + yield self.compute.terminate_instance(self.context, instance_id) instances = db.instance_get_all(None) logging.info("After terminating instances: %s", instances) @@ -99,22 +100,25 @@ class ComputeConnectionTestCase(test.TrialTestCase): @defer.inlineCallbacks def test_reboot(self): instance_id = self._create_instance() - yield self.compute.run_instance(instance_id) - yield self.compute.reboot_instance(instance_id) - yield self.compute.terminate_instance(instance_id) + yield self.compute.run_instance(self.context, instance_id) + yield self.compute.reboot_instance(self.context, instance_id) + yield self.compute.terminate_instance(self.context, instance_id) @defer.inlineCallbacks def test_console_output(self): instance_id = self._create_instance() - rv = yield self.compute.run_instance(instance_id) + rv = yield self.compute.run_instance(self.context, instance_id) - console = yield self.compute.get_console_output(instance_id) + console = yield self.compute.get_console_output(self.context, + instance_id) self.assert_(console) - rv = yield self.compute.terminate_instance(instance_id) + rv = yield self.compute.terminate_instance(self.context, instance_id) @defer.inlineCallbacks def test_run_instance_existing(self): instance_id = self._create_instance() - yield self.compute.run_instance(instance_id) - self.assertFailure(self.compute.run_instance(instance_id), exception.Error) - yield self.compute.terminate_instance(instance_id) + yield self.compute.run_instance(self.context, instance_id) + self.assertFailure(self.compute.run_instance(self.context, + instance_id), + exception.Error) + yield self.compute.terminate_instance(self.context, instance_id) diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index e3fe01fa2..b479f2fa4 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -253,12 +253,11 @@ class NetworkTestCase(test.TrialTestCase): def 
is_allocated_in_project(address, project_id): """Returns true if address is in specified project""" - fixed_ip = db.fixed_ip_get_by_address(None, address) project_net = db.project_get_network(None, project_id) + network = db.fixed_ip_get_network(None, address) + instance = db.fixed_ip_get_instance(None, address) # instance exists until release - logging.debug('fixed_ip.instance: %s', fixed_ip.instance) - logging.debug('project_net: %s', project_net) - return fixed_ip.instance is not None and fixed_ip.network == project_net + return instance is not None and network['id'] == project_net['id'] def binpath(script): diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py index 4504276e2..6573e9876 100644 --- a/nova/tests/volume_unittest.py +++ b/nova/tests/volume_unittest.py @@ -117,6 +117,7 @@ class VolumeTestCase(test.TrialTestCase): else: rv = yield self.compute.detach_volume(instance_id, volume_id) + vol = db.volume_get(None, volume_id) self.assertEqual(vol['status'], "available") rv = self.volume.delete_volume(self.context, volume_id) @@ -134,9 +135,9 @@ class VolumeTestCase(test.TrialTestCase): volume_ids = [] def _check(volume_id): volume_ids.append(volume_id) - vol = db.volume_get(None, volume_id) - shelf_blade = '%s.%s' % (vol.export_device.shelf_id, - vol.export_device.blade_id) + (shelf_id, blade_id) = db.volume_get_shelf_and_blade(None, + volume_id) + shelf_blade = '%s.%s' % (shelf_id, blade_id) self.assert_(shelf_blade not in shelf_blades) shelf_blades.append(shelf_blade) logging.debug("got %s" % shelf_blade) -- cgit From 6c50b37c0b60219837f940d044542f4032a4436b Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 30 Aug 2010 15:15:00 -0700 Subject: undo change to get_my_ip --- nova/utils.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/nova/utils.py b/nova/utils.py index 705df718e..907c174cd 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -133,9 +133,7 @@ def last_octet(address): def get_my_ip(): - ''' 
returns the actual ip of the local machine. - ''' - return '127.0.0.1' + """Returns the actual ip of the local machine.""" if getattr(FLAGS, 'fake_tests', None): return '127.0.0.1' try: -- cgit From 78b5f67153d6ef843d884ba7e94125101ab5f653 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 30 Aug 2010 15:48:45 -0700 Subject: fix daemon get --- nova/db/sqlalchemy/models.py | 48 ++++++++++++++++++++++++-------------------- 1 file changed, 26 insertions(+), 22 deletions(-) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index b6077a583..b2ca54973 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -51,8 +51,8 @@ class NovaBase(object): .filter_by(deleted=False) \ .all() else: - with managed_session() as s: - return cls.all(session=s) + with managed_session() as sess: + return cls.all(session=sess) @classmethod def count(cls, session=None): @@ -61,8 +61,8 @@ class NovaBase(object): .filter_by(deleted=False) \ .count() else: - with managed_session() as s: - return cls.count(session=s) + with managed_session() as sess: + return cls.count(session=sess) @classmethod def find(cls, obj_id, session=None): @@ -75,8 +75,8 @@ class NovaBase(object): except exc.NoResultFound: raise exception.NotFound("No model for id %s" % obj_id) else: - with managed_session() as s: - return cls.find(obj_id, session=s) + with managed_session() as sess: + return cls.find(obj_id, session=sess) @classmethod def find_by_str(cls, str_id, session=None): @@ -92,8 +92,8 @@ class NovaBase(object): session.add(self) session.flush() else: - with managed_session() as s: - self.save(session=s) + with managed_session() as sess: + self.save(session=sess) def delete(self, session=None): self.deleted = True @@ -151,16 +151,20 @@ class Daemon(Base, NovaBase): report_count = Column(Integer, nullable=False, default=0) @classmethod - def find_by_args(cls, session, node_name, binary): - try: - return session.query(cls) \ - .filter_by(node_name=node_name) 
\ - .filter_by(binary=binary) \ - .filter_by(deleted=False) \ - .one() - except exc.NoResultFound: - raise exception.NotFound("No model for %s, %s" % (node_name, - binary)) + def find_by_args(cls, node_name, binary, session=None): + if session: + try: + return session.query(cls) \ + .filter_by(node_name=node_name) \ + .filter_by(binary=binary) \ + .filter_by(deleted=False) \ + .one() + except exc.NoResultFound: + raise exception.NotFound("No model for %s, %s" % (node_name, + binary)) + else: + with managed_session() as sess: + return cls.find_by_args(node_name, binary, session=sess) class Instance(Base, NovaBase): @@ -284,8 +288,8 @@ class FixedIp(Base, NovaBase): except exc.NoResultFound: raise exception.NotFound("No model for ip_str %s" % str_id) else: - with managed_session() as s: - return cls.find_by_str(str_id, session=s) + with managed_session() as sess: + return cls.find_by_str(str_id, session=sess) class FloatingIp(Base, NovaBase): @@ -313,8 +317,8 @@ class FloatingIp(Base, NovaBase): except exc.NoResultFound: raise exception.NotFound("No model for ip_str %s" % str_id) else: - with managed_session() as s: - return cls.find_by_str(str_id, session=s) + with managed_session() as sess: + return cls.find_by_str(str_id, session=sess) class Network(Base, NovaBase): -- cgit From b9aa0dae0a5a64a244f1bff95ad8af22cf87f7f6 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 30 Aug 2010 16:08:17 -0700 Subject: run and terminate work --- nova/db/sqlalchemy/api.py | 2 +- nova/network/linux_net.py | 13 +++++++------ 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index a4b0ba545..f40f2a476 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -143,7 +143,7 @@ def fixed_ip_allocate(context, network_id): fixed_ip_ref['allocated'] = True session.add(fixed_ip_ref) session.commit() - return fixed_ip_ref + return fixed_ip_ref['str_id'] def fixed_ip_create(context, network_id, 
address, reserved=False): diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index a7b81533b..3bdceac8f 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -125,7 +125,7 @@ def ensure_bridge(bridge, interface, net_attrs=None): def get_dhcp_hosts(context, network_id): hosts = [] for fixed_ip in db.network_get_associated_fixed_ips(context, network_id): - hosts.append(_host_dhcp(fixed_ip)) + hosts.append(_host_dhcp(fixed_ip['str_id'])) return '\n'.join(hosts) @@ -162,11 +162,12 @@ def update_dhcp(context, network_id): command = _dnsmasq_cmd(network_ref) _execute(command, addl_env=env) -def _host_dhcp(fixed_ip): - """Return a host string for a fixed ip""" - return "%s,%s.novalocal,%s" % (fixed_ip.instance['mac_address'], - fixed_ip.instance['hostname'], - fixed_ip['ip_str']) +def _host_dhcp(address): + """Return a host string for an address""" + instance_ref = db.fixed_ip_get_instance(None, address) + return "%s,%s.novalocal,%s" % (instance_ref['mac_address'], + instance_ref['hostname'], + address) def _execute(cmd, *args, **kwargs): -- cgit From d1c7d29726bf2469dd7f05d7f460edbb613c4bb2 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 30 Aug 2010 17:19:04 -0700 Subject: add sqlalchemy to pip requires --- tools/pip-requires | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/pip-requires b/tools/pip-requires index 13e8e5f45..dd69708ce 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -1,3 +1,4 @@ +SQLAlchemy==0.6.3 pep8==0.5.0 pylint==0.19 IPy==0.70 -- cgit From a64149a8b148858414409a88f968408f9606891f Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 30 Aug 2010 17:53:59 -0700 Subject: pep8 cleanup --- nova/compute/model.py | 309 ----------------------------------------- nova/compute/service.py | 1 - nova/db/sqlalchemy/__init__.py | 2 +- nova/db/sqlalchemy/api.py | 17 ++- nova/db/sqlalchemy/models.py | 52 +++---- nova/db/sqlalchemy/session.py | 2 +- nova/network/linux_net.py | 6 + 
nova/volume/driver.py | 1 - nova/volume/manager.py | 6 +- 9 files changed, 46 insertions(+), 350 deletions(-) delete mode 100644 nova/compute/model.py diff --git a/nova/compute/model.py b/nova/compute/model.py deleted file mode 100644 index baa41c3e0..000000000 --- a/nova/compute/model.py +++ /dev/null @@ -1,309 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Datastore Model objects for Compute Instances, with -InstanceDirectory manager. - -# Create a new instance? 
->>> InstDir = InstanceDirectory() ->>> inst = InstDir.new() ->>> inst.destroy() -True ->>> inst = InstDir['i-123'] ->>> inst['ip'] = "192.168.0.3" ->>> inst['project_id'] = "projectA" ->>> inst.save() -True - ->>> InstDir['i-123'] - ->>> InstDir.all.next() - - ->>> inst.destroy() -True -""" - -import datetime -import uuid - -from nova import datastore -from nova import exception -from nova import flags -from nova import utils - - -FLAGS = flags.FLAGS - - -# TODO(todd): Implement this at the class level for Instance -class InstanceDirectory(object): - """an api for interacting with the global state of instances""" - - def get(self, instance_id): - """returns an instance object for a given id""" - return Instance(instance_id) - - def __getitem__(self, item): - return self.get(item) - - def by_project(self, project): - """returns a list of instance objects for a project""" - for instance_id in datastore.Redis.instance().smembers('project:%s:instances' % project): - yield Instance(instance_id) - - def by_node(self, node): - """returns a list of instances for a node""" - for instance_id in datastore.Redis.instance().smembers('node:%s:instances' % node): - yield Instance(instance_id) - - def by_ip(self, ip): - """returns an instance object that is using the IP""" - # NOTE(vish): The ip association should be just a single value, but - # to maintain consistency it is using the standard - # association and the ugly method for retrieving - # the first item in the set below. 
- result = datastore.Redis.instance().smembers('ip:%s:instances' % ip) - if not result: - return None - return Instance(list(result)[0]) - - def by_volume(self, volume_id): - """returns the instance a volume is attached to""" - pass - - def exists(self, instance_id): - return datastore.Redis.instance().sismember('instances', instance_id) - - @property - def all(self): - """returns a list of all instances""" - for instance_id in datastore.Redis.instance().smembers('instances'): - yield Instance(instance_id) - - def new(self): - """returns an empty Instance object, with ID""" - instance_id = utils.generate_uid('i') - return self.get(instance_id) - - -class Instance(): - """Wrapper around stored properties of an instance""" - - def __init__(self, instance_id): - """loads an instance from the datastore if exists""" - # set instance data before super call since it uses default_state - self.instance_id = instance_id - super(Instance, self).__init__() - - def default_state(self): - return {'state': 0, - 'state_description': 'pending', - 'instance_id': self.instance_id, - 'node_name': 'unassigned', - 'project_id': 'unassigned', - 'user_id': 'unassigned', - 'private_dns_name': 'unassigned'} - - @property - def identifier(self): - return self.instance_id - - @property - def project(self): - if self.state.get('project_id', None): - return self.state['project_id'] - return self.state.get('owner_id', 'unassigned') - - @property - def volumes(self): - """returns a list of attached volumes""" - pass - - @property - def reservation(self): - """Returns a reservation object""" - pass - - def save(self): - """Call into superclass to save object, then save associations""" - # NOTE(todd): doesn't track migration between projects/nodes, - # it just adds the first one - is_new = self.is_new_record() - node_set = (self.state['node_name'] != 'unassigned' and - self.initial_state.get('node_name', 'unassigned') - == 'unassigned') - success = super(Instance, self).save() - if success and 
is_new: - self.associate_with("project", self.project) - self.associate_with("ip", self.state['private_dns_name']) - if success and node_set: - self.associate_with("node", self.state['node_name']) - return True - - def destroy(self): - """Destroy associations, then destroy the object""" - self.unassociate_with("project", self.project) - self.unassociate_with("node", self.state['node_name']) - self.unassociate_with("ip", self.state['private_dns_name']) - return super(Instance, self).destroy() - -class Host(): - """A Host is the machine where a Daemon is running.""" - - def __init__(self, hostname): - """loads an instance from the datastore if exists""" - # set instance data before super call since it uses default_state - self.hostname = hostname - super(Host, self).__init__() - - def default_state(self): - return {"hostname": self.hostname} - - @property - def identifier(self): - return self.hostname - - -class Daemon(): - """A Daemon is a job (compute, api, network, ...) that runs on a host.""" - - def __init__(self, host_or_combined, binpath=None): - """loads an instance from the datastore if exists""" - # set instance data before super call since it uses default_state - # since loading from datastore expects a combined key that - # is equivilent to identifier, we need to expect that, while - # maintaining meaningful semantics (2 arguments) when creating - # from within other code like the bin/nova-* scripts - if binpath: - self.hostname = host_or_combined - self.binary = binpath - else: - self.hostname, self.binary = host_or_combined.split(":") - super(Daemon, self).__init__() - - def default_state(self): - return {"hostname": self.hostname, - "binary": self.binary, - "updated_at": utils.isotime() - } - - @property - def identifier(self): - return "%s:%s" % (self.hostname, self.binary) - - def save(self): - """Call into superclass to save object, then save associations""" - # NOTE(todd): this makes no attempt to destroy itsself, - # so after termination a record 
w/ old timestmap remains - success = super(Daemon, self).save() - if success: - self.associate_with("host", self.hostname) - return True - - def destroy(self): - """Destroy associations, then destroy the object""" - self.unassociate_with("host", self.hostname) - return super(Daemon, self).destroy() - - def heartbeat(self): - self['updated_at'] = utils.isotime() - return self.save() - - @classmethod - def by_host(cls, hostname): - for x in cls.associated_to("host", hostname): - yield x - - -class SessionToken(): - """This is a short-lived auth token that is passed through web requests""" - - def __init__(self, session_token): - self.token = session_token - self.default_ttl = FLAGS.auth_token_ttl - super(SessionToken, self).__init__() - - @property - def identifier(self): - return self.token - - def default_state(self): - now = datetime.datetime.utcnow() - diff = datetime.timedelta(seconds=self.default_ttl) - expires = now + diff - return {'user': None, 'session_type': None, 'token': self.token, - 'expiry': expires.strftime(utils.TIME_FORMAT)} - - def save(self): - """Call into superclass to save object, then save associations""" - if not self['user']: - raise exception.Invalid("SessionToken requires a User association") - success = super(SessionToken, self).save() - if success: - self.associate_with("user", self['user']) - return True - - @classmethod - def lookup(cls, key): - token = super(SessionToken, cls).lookup(key) - if token: - expires_at = utils.parse_isotime(token['expiry']) - if datetime.datetime.utcnow() >= expires_at: - token.destroy() - return None - return token - - @classmethod - def generate(cls, userid, session_type=None): - """make a new token for the given user""" - token = str(uuid.uuid4()) - while cls.lookup(token): - token = str(uuid.uuid4()) - instance = cls(token) - instance['user'] = userid - instance['session_type'] = session_type - instance.save() - return instance - - def update_expiry(self, **kwargs): - """updates the expirty attribute, 
but doesn't save""" - if not kwargs: - kwargs['seconds'] = self.default_ttl - time = datetime.datetime.utcnow() - diff = datetime.timedelta(**kwargs) - expires = time + diff - self['expiry'] = expires.strftime(utils.TIME_FORMAT) - - def is_expired(self): - now = datetime.datetime.utcnow() - expires = utils.parse_isotime(self['expiry']) - return expires <= now - - def ttl(self): - """number of seconds remaining before expiration""" - now = datetime.datetime.utcnow() - expires = utils.parse_isotime(self['expiry']) - delta = expires - now - return (delta.seconds + (delta.days * 24 * 3600)) - - -if __name__ == "__main__": - import doctest - doctest.testmod() diff --git a/nova/compute/service.py b/nova/compute/service.py index 9bf498d03..4df7e7171 100644 --- a/nova/compute/service.py +++ b/nova/compute/service.py @@ -29,4 +29,3 @@ class ComputeService(service.Service): Compute Service automatically passes commands on to the Compute Manager """ pass - diff --git a/nova/db/sqlalchemy/__init__.py b/nova/db/sqlalchemy/__init__.py index e94f99486..444f50a9b 100644 --- a/nova/db/sqlalchemy/__init__.py +++ b/nova/db/sqlalchemy/__init__.py @@ -1,3 +1,3 @@ from models import register_models -register_models() \ No newline at end of file +register_models() diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index f40f2a476..e366e989f 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -340,13 +340,14 @@ def network_destroy(context, network_id): '(select id from fixed_ips ' 'where network_id=:id)', {'id': network_id}) - session.execute('update network_indexes set network_id=NULL where network_id=:id', + session.execute('update network_indexes set network_id=NULL ' + 'where network_id=:id', {'id': network_id}) session.commit() -def network_get(context, network_id, session=None): - return models.Network.find(network_id, session=session) +def network_get(context, network_id): + return models.Network.find(network_id) def 
network_get_associated_fixed_ips(context, network_id): @@ -357,7 +358,6 @@ def network_get_associated_fixed_ips(context, network_id): .all() - def network_get_by_bridge(context, bridge): with managed_session() as session: rv = session.query(models.Network) \ @@ -383,7 +383,8 @@ def network_get_index(context, network_id): .first() if not network_index: raise db.NoMoreNetworks() - network_index['network'] = network_get(context, network_id, session=session) + network_index['network'] = models.Network.find(network_id, + session=session) session.add(network_index) session.commit() return network_index['index'] @@ -446,7 +447,8 @@ def project_get_network(context, project_id): def queue_get_for(context, topic, physical_node_id): - return "%s.%s" % (topic, physical_node_id) # FIXME(ja): this should be servername? + # FIXME(ja): this should be servername? + return "%s.%s" % (topic, physical_node_id) ################### @@ -505,7 +507,8 @@ def volume_destroy(context, volume_id): # TODO(vish): do we have to use sql here? 
session.execute('update volumes set deleted=1 where id=:id', {'id': volume_id}) - session.execute('update export_devices set volume_id=NULL where volume_id=:id', + session.execute('update export_devices set volume_id=NULL ' + 'where volume_id=:id', {'id': volume_id}) session.commit() diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index b2ca54973..53aa1f469 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -20,8 +20,6 @@ SQLAlchemy models for nova data """ -import logging - from sqlalchemy.orm import relationship, backref, validates, exc from sqlalchemy import Table, Column, Integer, String from sqlalchemy import MetaData, ForeignKey, DateTime, Boolean, Text @@ -32,12 +30,14 @@ from nova import auth from nova import exception from nova import flags -FLAGS=flags.FLAGS +FLAGS = flags.FLAGS + Base = declarative_base() + class NovaBase(object): - __table_args__ = {'mysql_engine':'InnoDB'} + __table_args__ = {'mysql_engine': 'InnoDB'} __table_initialized__ = False __prefix__ = 'none' created_at = Column(DateTime) @@ -110,8 +110,8 @@ class Image(Base, NovaBase): __tablename__ = 'images' __prefix__ = 'ami' id = Column(Integer, primary_key=True) - user_id = Column(String(255))#, ForeignKey('users.id'), nullable=False) - project_id = Column(String(255))#, ForeignKey('projects.id'), nullable=False) + user_id = Column(String(255)) + project_id = Column(String(255)) image_type = Column(String(255)) public = Column(Boolean, default=False) state = Column(String(255)) @@ -143,10 +143,11 @@ class PhysicalNode(Base, NovaBase): __tablename__ = 'physical_nodes' id = Column(String(255), primary_key=True) + class Daemon(Base, NovaBase): __tablename__ = 'daemons' id = Column(Integer, primary_key=True) - node_name = Column(String(255)) #, ForeignKey('physical_node.id')) + node_name = Column(String(255)) # , ForeignKey('physical_node.id')) binary = Column(String(255)) report_count = Column(Integer, nullable=False, default=0) @@ -172,8 
+173,8 @@ class Instance(Base, NovaBase): __prefix__ = 'i' id = Column(Integer, primary_key=True) - user_id = Column(String(255)) #, ForeignKey('users.id'), nullable=False) - project_id = Column(String(255)) #, ForeignKey('projects.id')) + user_id = Column(String(255)) + project_id = Column(String(255)) @property def user(self): @@ -183,12 +184,10 @@ class Instance(Base, NovaBase): def project(self): return auth.manager.AuthManager().get_project(self.project_id) - # TODO(vish): make this opaque somehow @property def name(self): return self.str_id - image_id = Column(Integer, ForeignKey('images.id'), nullable=True) kernel_id = Column(Integer, ForeignKey('images.id'), nullable=True) ramdisk_id = Column(Integer, ForeignKey('images.id'), nullable=True) @@ -202,7 +201,7 @@ class Instance(Base, NovaBase): state_description = Column(String(255)) hostname = Column(String(255)) - node_name = Column(String(255)) #, ForeignKey('physical_node.id')) + node_name = Column(String(255)) # , ForeignKey('physical_node.id')) instance_type = Column(Integer) @@ -219,11 +218,9 @@ class Instance(Base, NovaBase): state_description = power_state.name(state_code) self.state_description = state_description self.save() - # ramdisk = relationship(Ramdisk, backref=backref('instances', order_by=id)) # kernel = relationship(Kernel, backref=backref('instances', order_by=id)) # project = relationship(Project, backref=backref('instances', order_by=id)) - #TODO - see Ewan's email about state improvements # vmstate_state = running, halted, suspended, paused # power_state = what we have @@ -231,24 +228,27 @@ class Instance(Base, NovaBase): #@validates('state') #def validate_state(self, key, state): - # assert(state in ['nostate', 'running', 'blocked', 'paused', 'shutdown', 'shutoff', 'crashed']) + # assert(state in ['nostate', 'running', 'blocked', 'paused', + # 'shutdown', 'shutoff', 'crashed']) + class Volume(Base, NovaBase): __tablename__ = 'volumes' __prefix__ = 'vol' id = Column(Integer, 
primary_key=True) - user_id = Column(String(255)) #, ForeignKey('users.id'), nullable=False) - project_id = Column(String(255)) #, ForeignKey('projects.id')) + user_id = Column(String(255)) + project_id = Column(String(255)) - node_name = Column(String(255)) #, ForeignKey('physical_node.id')) + node_name = Column(String(255)) # , ForeignKey('physical_node.id')) size = Column(Integer) - availability_zone = Column(String(255)) # TODO(vish) foreign key? + availability_zone = Column(String(255)) # TODO(vish): foreign key? instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) mountpoint = Column(String(255)) - attach_time = Column(String(255)) # TODO(vish) datetime - status = Column(String(255)) # TODO(vish) enum? - attach_status = Column(String(255)) # TODO(vish) enum + attach_time = Column(String(255)) # TODO(vish): datetime + status = Column(String(255)) # TODO(vish): enum? + attach_status = Column(String(255)) # TODO(vish): enum + class ExportDevice(Base, NovaBase): __tablename__ = 'export_devices' @@ -299,8 +299,8 @@ class FloatingIp(Base, NovaBase): fixed_ip_id = Column(Integer, ForeignKey('fixed_ips.id'), nullable=True) fixed_ip = relationship(FixedIp, backref=backref('floating_ips')) - project_id = Column(String(255)) #, ForeignKey('projects.id'), nullable=False) - node_name = Column(String(255)) #, ForeignKey('physical_node.id')) + project_id = Column(String(255)) + node_name = Column(String(255)) # , ForeignKey('physical_node.id')) @property def str_id(self): @@ -339,8 +339,8 @@ class Network(Base, NovaBase): vpn_private_ip_str = Column(String(255)) dhcp_start = Column(String(255)) - project_id = Column(String(255)) #, ForeignKey('projects.id'), nullable=False) - node_name = Column(String(255)) #, ForeignKey('physical_node.id')) + project_id = Column(String(255)) + node_name = Column(String(255)) # , ForeignKey('physical_node.id')) fixed_ips = relationship(FixedIp, single_parent=True, diff --git a/nova/db/sqlalchemy/session.py 
b/nova/db/sqlalchemy/session.py index 99270433a..201948328 100644 --- a/nova/db/sqlalchemy/session.py +++ b/nova/db/sqlalchemy/session.py @@ -25,6 +25,7 @@ from nova import flags FLAGS = flags.FLAGS + def managed_session(autocommit=True): return SessionExecutionManager(autocommit=autocommit) @@ -40,7 +41,6 @@ class SessionExecutionManager: self._session = create_session(bind=cls._engine, autocommit=autocommit) - def __enter__(self): return self._session diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 3bdceac8f..6114e4ffe 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -40,6 +40,7 @@ flags.DEFINE_string('public_interface', 'vlan1', flags.DEFINE_string('bridge_dev', 'eth0', 'network device for bridges') + def bind_floating_ip(floating_ip): """Bind ip to public interface""" _execute("sudo ip addr add %s dev %s" % (floating_ip, @@ -59,8 +60,10 @@ def ensure_vlan_forward(public_ip, port, private_ip): "PREROUTING -t nat -d %s -p udp --dport %s -j DNAT --to %s:1194" % (public_ip, port, private_ip)) + DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)] + def ensure_floating_forward(floating_ip, fixed_ip): """Ensure floating ip forwarding rule""" _confirm_rule("PREROUTING -t nat -d %s -j DNAT --to %s" @@ -75,6 +78,7 @@ def ensure_floating_forward(floating_ip, fixed_ip): "FORWARD -d %s -p %s --dport %s -j ACCEPT" % (fixed_ip, protocol, port)) + def remove_floating_forward(floating_ip, fixed_ip): """Remove forwarding for floating ip""" _remove_rule("PREROUTING -t nat -d %s -j DNAT --to %s" @@ -93,6 +97,7 @@ def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None): interface = ensure_vlan(vlan_num) ensure_bridge(bridge, interface, net_attrs) + def ensure_vlan(vlan_num): interface = "vlan%s" % vlan_num if not _device_exists(interface): @@ -162,6 +167,7 @@ def update_dhcp(context, network_id): command = _dnsmasq_cmd(network_ref) _execute(command, addl_env=env) + def _host_dhcp(address): """Return a host 
string for an address""" instance_ref = db.fixed_ip_get_instance(None, address) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 648ae1a06..990bfe958 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -110,4 +110,3 @@ class AOEDriver(object): check_exit_code=False) yield process.simple_execute("sudo vblade-persist start all", check_exit_code=False) - diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 7d8e1aca0..c57c920c9 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -29,7 +29,6 @@ from nova import exception from nova import flags from nova import manager from nova import utils -from nova.volume import driver FLAGS = flags.FLAGS @@ -53,9 +52,9 @@ class AOEManager(manager.Manager): if not volume_driver: # NOTE(vish): support the legacy fake storage flag if FLAGS.fake_storage: - volume_driver='nova.volume.driver.FakeAOEDriver' + volume_driver = 'nova.volume.driver.FakeAOEDriver' else: - volume_driver=FLAGS.volume_driver + volume_driver = FLAGS.volume_driver self.driver = utils.import_object(volume_driver) super(AOEManager, self).__init__(*args, **kwargs) @@ -117,4 +116,3 @@ class AOEManager(manager.Manager): yield self.driver.delete_volume(volume_id) self.db.volume_destroy(context, volume_id) defer.returnValue(True) - -- cgit From b4c5c97160a6b71d37b7655c6b4039baf4ff0969 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 30 Aug 2010 17:58:02 -0700 Subject: more pep8 --- bin/nova-dhcpbridge | 7 ++++--- bin/nova-manage | 4 ++-- bin/nova-objectstore | 2 +- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index a794db271..c416d07a7 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -34,13 +34,14 @@ from nova import db from nova import flags from nova import rpc from nova import utils -from nova import datastore # for redis_db flag -from nova.auth import manager # for auth flags +from nova import datastore # for redis_db flag 
+from nova.auth import manager # for auth flags from nova.network import linux_net -from nova.network import manager # for network flags +from nova.network import manager # for network flags FLAGS = flags.FLAGS + def add_lease(_mac, ip_address, _hostname, _interface): """Set the IP that was assigned by the DHCP server.""" if FLAGS.fake_rabbit: diff --git a/bin/nova-manage b/bin/nova-manage index 145294d3d..7f20531dc 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -56,8 +56,8 @@ class VpnCommands(object): vpn = self._vpn_for(project.id) if vpn: command = "ping -c1 -w1 %s > /dev/null; echo $?" - out, _err = utils.execute( command % vpn['private_dns_name'], - check_exit_code=False) + out, _err = utils.execute(command % vpn['private_dns_name'], + check_exit_code=False) if out.strip() == '0': net = 'up' else: diff --git a/bin/nova-objectstore b/bin/nova-objectstore index afcf13e24..7cb718b6f 100755 --- a/bin/nova-objectstore +++ b/bin/nova-objectstore @@ -35,4 +35,4 @@ if __name__ == '__main__': if __name__ == '__builtin__': utils.default_flagfile() - application = handler.get_application() # pylint: disable-msg=C0103 + application = handler.get_application() # pylint: disable-msg=C0103 -- cgit From 73c7bbce87e72b5223f11c194ff41d2da1df5c86 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 30 Aug 2010 18:10:52 -0700 Subject: more pep8 --- nova/tests/compute_unittest.py | 3 ++- nova/tests/network_unittest.py | 6 ++---- nova/tests/service_unittest.py | 27 ++++++++++++--------------- nova/tests/volume_unittest.py | 3 +-- 4 files changed, 17 insertions(+), 22 deletions(-) diff --git a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py index a8d644c84..0166dc4be 100644 --- a/nova/tests/compute_unittest.py +++ b/nova/tests/compute_unittest.py @@ -48,7 +48,8 @@ class InstanceXmlTestCase(test.TrialTestCase): # self.assert_(ElementTree.parse(StringIO.StringIO(xml))) # # second_node = node.Node() - # new_inst = node.Instance.fromXml(second_node._conn, 
pool=second_node._pool, xml=xml) + # new_inst = node.Instance.fromXml(second_node._conn, + # pool=second_node._pool, xml=xml) # self.assertEqual(new_inst.state, node.Instance.RUNNING) # rv = yield first_node.terminate_instance(self.context, instance_id) diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index b284e4e51..15ec8dbf4 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -140,7 +140,8 @@ class NetworkTestCase(test.TrialTestCase): db.fixed_ip_deallocate(None, address2) release_ip(address2) - self.assertFalse(is_allocated_in_project(address2, self.projects[1].id)) + self.assertFalse(is_allocated_in_project(address2, + self.projects[1].id)) def test_subnet_edge(self): """Makes sure that private ips don't overlap""" @@ -190,7 +191,6 @@ class NetworkTestCase(test.TrialTestCase): for project in projects: self.manager.delete_project(project) - def test_ips_are_reused(self): """Makes sure that ip addresses that are deallocated get reused""" address = self._create_address(0) @@ -224,8 +224,6 @@ class NetworkTestCase(test.TrialTestCase): """Test for a NoMoreAddresses exception when all fixed ips are used. 
""" network = db.project_get_network(None, self.projects[0].id) - - num_available_ips = db.network_count_available_ips(None, network['id']) addresses = [] diff --git a/nova/tests/service_unittest.py b/nova/tests/service_unittest.py index e13fe62d1..902f9bab1 100644 --- a/nova/tests/service_unittest.py +++ b/nova/tests/service_unittest.py @@ -20,10 +20,7 @@ Unit Tests for remote procedure calls using queue """ -import logging - import mox -from twisted.internet import defer from nova import exception from nova import flags @@ -33,33 +30,37 @@ from nova import service from nova import manager FLAGS = flags.FLAGS - flags.DEFINE_string("fake_manager", "nova.tests.service_unittest.FakeManager", "Manager for testing") + class FakeManager(manager.Manager): """Fake manager for tests""" pass + class ServiceTestCase(test.BaseTestCase): """Test cases for rpc""" + def setUp(self): # pylint: disable=C0103 super(ServiceTestCase, self).setUp() self.mox.StubOutWithMock(service, 'db') def test_create(self): - self.mox.StubOutWithMock(rpc, 'AdapterConsumer', use_mock_anything=True) + self.mox.StubOutWithMock(rpc, + 'AdapterConsumer', + use_mock_anything=True) self.mox.StubOutWithMock( service.task, 'LoopingCall', use_mock_anything=True) rpc.AdapterConsumer(connection=mox.IgnoreArg(), topic='fake', - proxy=mox.IsA(service.Service) - ).AndReturn(rpc.AdapterConsumer) + proxy=mox.IsA(service.Service)).AndReturn( + rpc.AdapterConsumer) rpc.AdapterConsumer(connection=mox.IgnoreArg(), topic='fake.%s' % FLAGS.node_name, - proxy=mox.IsA(service.Service) - ).AndReturn(rpc.AdapterConsumer) + proxy=mox.IsA(service.Service)).AndReturn( + rpc.AdapterConsumer) # Stub out looping call a bit needlessly since we don't have an easy # way to cancel it (yet) when the tests finishes @@ -80,7 +81,6 @@ class ServiceTestCase(test.BaseTestCase): # whether it is disconnected, it looks for a variable on itself called # 'model_disconnected' and report_state doesn't really do much so this # these are mostly just 
for coverage - def test_report_state(self): node_name = 'foo' binary = 'bar' @@ -99,7 +99,6 @@ class ServiceTestCase(test.BaseTestCase): s = service.Service() rv = yield s.report_state(node_name, binary) - def test_report_state_no_daemon(self): node_name = 'foo' binary = 'bar' @@ -115,7 +114,8 @@ class ServiceTestCase(test.BaseTestCase): service.db.daemon_get_by_args(None, node_name, binary).AndRaise(exception.NotFound()) - service.db.daemon_create(None, daemon_create).AndReturn(daemon_ref['id']) + service.db.daemon_create(None, + daemon_create).AndReturn(daemon_ref['id']) service.db.daemon_get(None, daemon_ref['id']).AndReturn(daemon_ref) service.db.daemon_update(None, daemon_ref['id'], mox.ContainsKeyValue('report_count', 1)) @@ -124,7 +124,6 @@ class ServiceTestCase(test.BaseTestCase): s = service.Service() rv = yield s.report_state(node_name, binary) - def test_report_state_newly_disconnected(self): node_name = 'foo' binary = 'bar' @@ -144,7 +143,6 @@ class ServiceTestCase(test.BaseTestCase): self.assert_(s.model_disconnected) - def test_report_state_newly_connected(self): node_name = 'foo' binary = 'bar' @@ -166,4 +164,3 @@ class ServiceTestCase(test.BaseTestCase): rv = yield s.report_state(node_name, binary) self.assert_(not s.model_disconnected) - diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py index 6573e9876..f42d0ac8d 100644 --- a/nova/tests/volume_unittest.py +++ b/nova/tests/volume_unittest.py @@ -26,7 +26,6 @@ from nova import flags from nova import test from nova import utils - FLAGS = flags.FLAGS @@ -40,7 +39,6 @@ class VolumeTestCase(test.TrialTestCase): self.volume = utils.import_object(FLAGS.volume_manager) self.context = None - def _create_volume(self, size='0'): vol = {} vol['size'] = '0' @@ -133,6 +131,7 @@ class VolumeTestCase(test.TrialTestCase): project_id = 'fake' shelf_blades = [] volume_ids = [] + def _check(volume_id): volume_ids.append(volume_id) (shelf_id, blade_id) = db.volume_get_shelf_and_blade(None, -- 
cgit From 4374bef0536846afe9be1156b340b34e6d4c8d2d Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 30 Aug 2010 20:42:31 -0700 Subject: more cleanup and pylint fixes --- nova/auth/manager.py | 2 +- nova/db/api.py | 4 +- nova/db/sqlalchemy/api.py | 33 +++++++++------- nova/db/sqlalchemy/models.py | 86 ++++++++++++++++++++---------------------- nova/network/linux_net.py | 5 ++- nova/tests/network_unittest.py | 1 - nova/virt/libvirt_conn.py | 41 +++++++++++--------- nova/volume/driver.py | 76 +++++++++++++++++++++++-------------- nova/volume/manager.py | 21 ++++++----- 9 files changed, 149 insertions(+), 120 deletions(-) diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 62ec3f4e4..d5fbec7c5 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -574,7 +574,7 @@ class AuthManager(object): if not network_ref['vpn_public_port']: raise exception.NotFound('project network data has not been set') - return (network_ref['vpn_public_ip_str'], + return (network_ref['vpn_public_address'], network_ref['vpn_public_port']) def delete_project(self, project, context=None): diff --git a/nova/db/api.py b/nova/db/api.py index 9b8c48934..d95d1ce6e 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -128,9 +128,9 @@ def fixed_ip_allocate(context, network_id): return _impl.fixed_ip_allocate(context, network_id) -def fixed_ip_create(context, network_id, address, reserved=False): +def fixed_ip_create(context, values): """Create a fixed ip from the values dictionary.""" - return _impl.fixed_ip_create(context, network_id, address, reserved) + return _impl.fixed_ip_create(context, values) def fixed_ip_deallocate(context, address): diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index e366e989f..b00ad19ff 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -21,6 +21,7 @@ from nova import exception from nova import flags from nova.db.sqlalchemy import models from nova.db.sqlalchemy.session import managed_session +from 
sqlalchemy import or_ FLAGS = flags.FLAGS @@ -37,7 +38,9 @@ def daemon_get_by_args(context, node_name, binary): def daemon_create(context, values): - daemon_ref = models.Daemon(**values) + daemon_ref = models.Daemon() + for (key, value) in values.iteritems(): + daemon_ref[key] = value daemon_ref.save() return daemon_ref.id @@ -67,12 +70,12 @@ def floating_ip_allocate_address(context, node_name, project_id): floating_ip_ref['project_id'] = project_id session.add(floating_ip_ref) session.commit() - return floating_ip_ref['str_id'] + return floating_ip_ref['address'] def floating_ip_create(context, address, host): floating_ip_ref = models.FloatingIp() - floating_ip_ref['ip_str'] = address + floating_ip_ref['address'] = address floating_ip_ref['node_name'] = host floating_ip_ref.save() return floating_ip_ref @@ -95,7 +98,7 @@ def floating_ip_disassociate(context, address): session=session) fixed_ip_ref = floating_ip_ref.fixed_ip if fixed_ip_ref: - fixed_ip_address = fixed_ip_ref['str_id'] + fixed_ip_address = fixed_ip_ref['address'] else: fixed_ip_address = None floating_ip_ref.fixed_ip = None @@ -128,8 +131,10 @@ def floating_ip_get_instance(context, address): def fixed_ip_allocate(context, network_id): with managed_session(autocommit=False) as session: + network_or_none = or_(models.FixedIp.network_id==network_id, + models.FixedIp.network_id==None) fixed_ip_ref = session.query(models.FixedIp) \ - .filter_by(network_id=network_id) \ + .filter(network_or_none) \ .filter_by(reserved=False) \ .filter_by(allocated=False) \ .filter_by(leased=False) \ @@ -140,19 +145,20 @@ def fixed_ip_allocate(context, network_id): # then this has concurrency issues if not fixed_ip_ref: raise db.NoMoreAddresses() + if not fixed_ip_ref.network: + fixed_ip_ref.network = models.Network.find(network_id) fixed_ip_ref['allocated'] = True session.add(fixed_ip_ref) session.commit() - return fixed_ip_ref['str_id'] + return fixed_ip_ref['address'] -def fixed_ip_create(context, network_id, address, 
reserved=False): +def fixed_ip_create(context, values): fixed_ip_ref = models.FixedIp() - fixed_ip_ref.network = db.network_get(context, network_id) - fixed_ip_ref['ip_str'] = address - fixed_ip_ref['reserved'] = reserved + for (key, value) in values.iteritems(): + fixed_ip_ref[key] = value fixed_ip_ref.save() - return fixed_ip_ref + return fixed_ip_ref['address'] def fixed_ip_get_by_address(context, address): @@ -248,7 +254,7 @@ def instance_get_fixed_address(context, instance_id): instance_ref = models.Instance.find(instance_id, session=session) if not instance_ref.fixed_ip: return None - return instance_ref.fixed_ip['str_id'] + return instance_ref.fixed_ip['address'] def instance_get_floating_address(context, instance_id): @@ -259,7 +265,7 @@ def instance_get_floating_address(context, instance_id): if not instance_ref.fixed_ip.floating_ips: return None # NOTE(vish): this just returns the first floating ip - return instance_ref.fixed_ip.floating_ips[0]['str_id'] + return instance_ref.fixed_ip.floating_ips[0]['address'] def instance_get_host(context, instance_id): @@ -325,7 +331,6 @@ def network_create(context, values): network_ref[key] = value network_ref.save() return network_ref - return network_ref.id def network_destroy(context, network_id): diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 53aa1f469..b9ed34bb1 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -260,12 +260,44 @@ class ExportDevice(Base, NovaBase): uselist=False)) +class Network(Base, NovaBase): + __tablename__ = 'networks' + id = Column(Integer, primary_key=True) + + injected = Column(Boolean, default=False) + cidr = Column(String(255)) + netmask = Column(String(255)) + bridge = Column(String(255)) + gateway = Column(String(255)) + broadcast = Column(String(255)) + dns = Column(String(255)) + + vlan = Column(Integer) + vpn_public_address = Column(String(255)) + vpn_public_port = Column(Integer) + vpn_private_address = 
Column(String(255)) + dhcp_start = Column(String(255)) + + project_id = Column(String(255)) + node_name = Column(String(255)) # , ForeignKey('physical_node.id')) + + +class NetworkIndex(Base, NovaBase): + __tablename__ = 'network_indexes' + id = Column(Integer, primary_key=True) + index = Column(Integer) + network_id = Column(Integer, ForeignKey('networks.id'), nullable=True) + network = relationship(Network, backref=backref('network_index', + uselist=False)) + + # TODO(vish): can these both come from the same baseclass? class FixedIp(Base, NovaBase): __tablename__ = 'fixed_ips' id = Column(Integer, primary_key=True) - ip_str = Column(String(255)) - network_id = Column(Integer, ForeignKey('networks.id'), nullable=False) + address = Column(String(255)) + network_id = Column(Integer, ForeignKey('networks.id'), nullable=True) + network = relationship(Network, backref=backref('fixed_ips')) instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) instance = relationship(Instance, backref=backref('fixed_ip', uselist=False)) @@ -275,18 +307,18 @@ class FixedIp(Base, NovaBase): @property def str_id(self): - return self.ip_str + return self.address @classmethod def find_by_str(cls, str_id, session=None): if session: try: return session.query(cls) \ - .filter_by(ip_str=str_id) \ + .filter_by(address=str_id) \ .filter_by(deleted=False) \ .one() except exc.NoResultFound: - raise exception.NotFound("No model for ip_str %s" % str_id) + raise exception.NotFound("No model for address %s" % str_id) else: with managed_session() as sess: return cls.find_by_str(str_id, session=sess) @@ -295,7 +327,7 @@ class FixedIp(Base, NovaBase): class FloatingIp(Base, NovaBase): __tablename__ = 'floating_ips' id = Column(Integer, primary_key=True) - ip_str = Column(String(255)) + address = Column(String(255)) fixed_ip_id = Column(Integer, ForeignKey('fixed_ips.id'), nullable=True) fixed_ip = relationship(FixedIp, backref=backref('floating_ips')) @@ -304,59 +336,23 @@ class 
FloatingIp(Base, NovaBase): @property def str_id(self): - return self.ip_str + return self.address @classmethod def find_by_str(cls, str_id, session=None): if session: try: return session.query(cls) \ - .filter_by(ip_str=str_id) \ + .filter_by(address=str_id) \ .filter_by(deleted=False) \ .one() except exc.NoResultFound: - raise exception.NotFound("No model for ip_str %s" % str_id) + raise exception.NotFound("No model for address %s" % str_id) else: with managed_session() as sess: return cls.find_by_str(str_id, session=sess) -class Network(Base, NovaBase): - __tablename__ = 'networks' - id = Column(Integer, primary_key=True) - - injected = Column(Boolean, default=False) - cidr = Column(String(255)) - netmask = Column(String(255)) - bridge = Column(String(255)) - gateway = Column(String(255)) - broadcast = Column(String(255)) - dns = Column(String(255)) - - vlan = Column(Integer) - vpn_public_ip_str = Column(String(255)) - vpn_public_port = Column(Integer) - vpn_private_ip_str = Column(String(255)) - dhcp_start = Column(String(255)) - - project_id = Column(String(255)) - node_name = Column(String(255)) # , ForeignKey('physical_node.id')) - - fixed_ips = relationship(FixedIp, - single_parent=True, - backref=backref('network'), - cascade='all, delete, delete-orphan') - - -class NetworkIndex(Base, NovaBase): - __tablename__ = 'network_indexes' - id = Column(Integer, primary_key=True) - index = Column(Integer) - network_id = Column(Integer, ForeignKey('networks.id'), nullable=True) - network = relationship(Network, backref=backref('network_index', - uselist=False)) - - def register_models(): from sqlalchemy import create_engine models = (Image, PhysicalNode, Daemon, Instance, Volume, ExportDevice, diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 6114e4ffe..1506e85ad 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -99,6 +99,7 @@ def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None): def ensure_vlan(vlan_num): + 
"""Create a vlan unless it already exists""" interface = "vlan%s" % vlan_num if not _device_exists(interface): logging.debug("Starting VLAN inteface %s", interface) @@ -109,6 +110,7 @@ def ensure_vlan(vlan_num): def ensure_bridge(bridge, interface, net_attrs=None): + """Create a bridge unless it already exists""" if not _device_exists(bridge): logging.debug("Starting Bridge inteface for %s", interface) _execute("sudo brctl addbr %s" % bridge) @@ -128,6 +130,7 @@ def ensure_bridge(bridge, interface, net_attrs=None): def get_dhcp_hosts(context, network_id): + """Get a string containing a network's hosts config in dnsmasq format""" hosts = [] for fixed_ip in db.network_get_associated_fixed_ips(context, network_id): hosts.append(_host_dhcp(fixed_ip['str_id'])) @@ -158,7 +161,7 @@ def update_dhcp(context, network_id): try: os.kill(pid, signal.SIGHUP) return - except Exception as exc: # pylint: disable=W0703 + except Exception as exc: # pylint: disable-msg=W0703 logging.debug("Hupping dnsmasq threw %s", exc) # FLAGFILE and DNSMASQ_INTERFACE in env diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index 15ec8dbf4..fccfc23fb 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -28,7 +28,6 @@ from nova import flags from nova import test from nova import utils from nova.auth import manager -from nova.network import service FLAGS = flags.FLAGS diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 823eb1e0b..b353fc44b 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -126,7 +126,7 @@ class LibvirtConnection(object): def destroy(self, instance): try: - virt_dom = self._conn.lookupByName(instance.name) + virt_dom = self._conn.lookupByName(instance['name']) virt_dom.destroy() except Exception as _err: pass @@ -140,7 +140,7 @@ class LibvirtConnection(object): timer = task.LoopingCall(f=None) def _wait_for_shutdown(): try: - instance.set_state(self.get_info(instance.name)['state']) 
+ instance.set_state(self.get_info(instance['name'])['state']) if instance.state == power_state.SHUTDOWN: timer.stop() d.callback(None) @@ -153,7 +153,7 @@ class LibvirtConnection(object): return d def _cleanup(self, instance): - target = os.path.join(FLAGS.instances_path, instance.name) + target = os.path.join(FLAGS.instances_path, instance['name']) logging.info("Deleting instance files at %s", target) if os.path.exists(target): shutil.rmtree(target) @@ -162,20 +162,20 @@ class LibvirtConnection(object): @exception.wrap_exception def reboot(self, instance): xml = self.to_xml(instance) - yield self._conn.lookupByName(instance.name).destroy() + yield self._conn.lookupByName(instance['name']).destroy() yield self._conn.createXML(xml, 0) d = defer.Deferred() timer = task.LoopingCall(f=None) def _wait_for_reboot(): try: - instance.set_state(self.get_info(instance.name)['state']) + instance.set_state(self.get_info(instance['name'])['state']) if instance.state == power_state.RUNNING: - logging.debug('rebooted instance %s' % instance.name) + logging.debug('rebooted instance %s' % instance['name']) timer.stop() d.callback(None) except Exception, exn: - logging.error('_wait_for_reboot failed: %s' % exn) + logging.error('_wait_for_reboot failed: %s', exn) instance.set_state(power_state.SHUTDOWN) timer.stop() d.callback(None) @@ -198,13 +198,14 @@ class LibvirtConnection(object): timer = task.LoopingCall(f=None) def _wait_for_boot(): try: - instance.set_state(self.get_info(instance.name)['state']) + instance.set_state(self.get_info(instance['name'])['state']) if instance.state == power_state.RUNNING: - logging.debug('booted instance %s' % instance.name) + logging.debug('booted instance %s', instance['name']) timer.stop() local_d.callback(None) except: - logging.exception('Failed to boot instance %s' % instance.name) + logging.exception('Failed to boot instance %s', + instance['name']) instance.set_state(power_state.SHUTDOWN) timer.stop() local_d.callback(None) @@ -215,7 
+216,9 @@ class LibvirtConnection(object): @defer.inlineCallbacks def _create_image(self, inst, libvirt_xml): # syntactic nicety - basepath = lambda x='': os.path.join(FLAGS.instances_path, inst.name, x) + basepath = lambda fname='': os.path.join(FLAGS.instances_path, + inst['name'], + fname) # ensure directories exist and are writable yield process.simple_execute('mkdir -p %s' % basepath()) @@ -224,7 +227,7 @@ class LibvirtConnection(object): # TODO(termie): these are blocking calls, it would be great # if they weren't. - logging.info('Creating image for: %s', inst.name) + logging.info('Creating image for: %s', inst['name']) f = open(basepath('libvirt.xml'), 'w') f.write(libvirt_xml) f.close() @@ -245,10 +248,11 @@ class LibvirtConnection(object): key = inst.key_data net = None - network_ref = db.project_get_network(None, project.id) # FIXME + network_ref = db.project_get_network(None, project.id) if network_ref['injected']: + address = db.instance_get_fixed_address(None, inst['id']) with open(FLAGS.injected_network_template) as f: - net = f.read() % {'address': inst.fixed_ip['ip_str'], # FIXME + net = f.read() % {'address': address, 'network': network_ref['network'], 'netmask': network_ref['netmask'], 'gateway': network_ref['gateway'], @@ -269,12 +273,13 @@ class LibvirtConnection(object): def to_xml(self, instance): # TODO(termie): cache? 
logging.debug("Starting the toXML method") - network = db.project_get_network(None, instance['project_id']) # FIXME + network = db.project_get_network(None, instance['project_id']) # FIXME(vish): stick this in db - instance_type = instance_types.INSTANCE_TYPES[instance.instance_type] + instance_type = instance_types.INSTANCE_TYPES[instance['instance_type']] xml_info = {'type': FLAGS.libvirt_type, - 'name': instance.name, - 'basepath': os.path.join(FLAGS.instances_path, instance.name), + 'name': instance['name'], + 'basepath': os.path.join(FLAGS.instances_path, + instance['name']), 'memory_kb': instance_type['memory_mb'] * 1024, 'vcpus': instance_type['vcpus'], 'bridge_name': network['bridge'], diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 990bfe958..e82449b27 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -35,36 +35,16 @@ flags.DEFINE_string('aoe_eth_dev', 'eth0', 'Which device to export the volumes on') -class FakeAOEDriver(object): - def create_volume(self, volume_id, size): - logging.debug("Fake AOE: create_volume %s, %s", volume_id, size) - - def delete_volume(self, volume_id): - logging.debug("Fake AOE: delete_volume %s", volume_id) - - def create_export(self, volume_id, shelf_id, blade_id): - logging.debug("Fake AOE: create_export %s, %s, %s", - volume_id, shelf_id, blade_id) - - def remove_export(self, volume_id, shelf_id, blade_id): - logging.debug("Fake AOE: remove_export %s, %s, %s", - volume_id, shelf_id, blade_id) - - def ensure_exports(self): - logging.debug("Fake AOE: ensure_export") - class AOEDriver(object): - def __init__(self, *args, **kwargs): - super(AOEDriver, self).__init__(*args, **kwargs) + """Executes commands relating to AOE volumes""" @defer.inlineCallbacks - def _ensure_vg(self): + @staticmethod + def create_volume(volume_id, size): + """Creates a logical volume""" + # NOTE(vish): makes sure that the volume group exists yield process.simple_execute("vgs | grep %s" % FLAGS.volume_group) - - 
@defer.inlineCallbacks - def create_volume(self, volume_id, size): - self._ensure_vg() if int(size) == 0: sizestr = '100M' else: @@ -76,14 +56,18 @@ class AOEDriver(object): terminate_on_stderr=False) @defer.inlineCallbacks - def delete_volume(self, volume_id): + @staticmethod + def delete_volume(volume_id): + """Deletes a logical volume""" yield process.simple_execute( "sudo lvremove -f %s/%s" % (FLAGS.volume_group, volume_id), terminate_on_stderr=False) @defer.inlineCallbacks - def create_export(self, volume_id, shelf_id, blade_id): + @staticmethod + def create_export(volume_id, shelf_id, blade_id): + """Creates an export for a logical volume""" yield process.simple_execute( "sudo vblade-persist setup %s %s %s /dev/%s/%s" % (shelf_id, @@ -94,7 +78,9 @@ class AOEDriver(object): terminate_on_stderr=False) @defer.inlineCallbacks - def remove_export(self, _volume_id, shelf_id, blade_id): + @staticmethod + def remove_export(_volume_id, shelf_id, blade_id): + """Removes an export for a logical volume""" yield process.simple_execute( "sudo vblade-persist stop %s %s" % (shelf_id, blade_id), terminate_on_stderr=False) @@ -103,10 +89,42 @@ class AOEDriver(object): terminate_on_stderr=False) @defer.inlineCallbacks - def ensure_exports(self): + @staticmethod + def ensure_exports(): + """Runs all existing exports""" # NOTE(ja): wait for blades to appear yield process.simple_execute("sleep 5") yield process.simple_execute("sudo vblade-persist auto all", check_exit_code=False) yield process.simple_execute("sudo vblade-persist start all", check_exit_code=False) + + +class FakeAOEDriver(AOEDriver): + """Logs calls instead of executing""" + @staticmethod + def create_volume(volume_id, size): + """Creates a logical volume""" + logging.debug("Fake AOE: create_volume %s, %s", volume_id, size) + + @staticmethod + def delete_volume(volume_id): + """Deletes a logical volume""" + logging.debug("Fake AOE: delete_volume %s", volume_id) + + @staticmethod + def create_export(volume_id, 
shelf_id, blade_id): + """Creates an export for a logical volume""" + logging.debug("Fake AOE: create_export %s, %s, %s", + volume_id, shelf_id, blade_id) + + @staticmethod + def remove_export(volume_id, shelf_id, blade_id): + """Removes an export for a logical volume""" + logging.debug("Fake AOE: remove_export %s, %s, %s", + volume_id, shelf_id, blade_id) + + @staticmethod + def ensure_exports(): + """Runs all existing exports""" + logging.debug("Fake AOE: ensure_export") diff --git a/nova/volume/manager.py b/nova/volume/manager.py index c57c920c9..ad5aa22a2 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -48,6 +48,7 @@ flags.DEFINE_integer('blades_per_shelf', class AOEManager(manager.Manager): + """Manages Ata-Over_Ethernet volumes""" def __init__(self, volume_driver=None, *args, **kwargs): if not volume_driver: # NOTE(vish): support the legacy fake storage flag @@ -59,6 +60,7 @@ class AOEManager(manager.Manager): super(AOEManager, self).__init__(*args, **kwargs) def _ensure_blades(self, context): + """Ensure that blades have been created in datastore""" total_blades = FLAGS.num_shelves * FLAGS.blades_per_shelf if self.db.export_device_count(context) >= total_blades: return @@ -69,8 +71,8 @@ class AOEManager(manager.Manager): @defer.inlineCallbacks def create_volume(self, context, volume_id): - """Creates and exports the volume.""" - logging.info("volume %s: creating" % (volume_id)) + """Creates and exports the volume""" + logging.info("volume %s: creating", volume_id) volume_ref = self.db.volume_get(context, volume_id) @@ -79,15 +81,15 @@ class AOEManager(manager.Manager): {'node_name': FLAGS.node_name}) size = volume_ref['size'] - logging.debug("volume %s: creating lv of size %sG" % (volume_id, size)) + logging.debug("volume %s: creating lv of size %sG", volume_id, size) yield self.driver.create_volume(volume_id, size) - logging.debug("volume %s: allocating shelf & blade" % (volume_id)) + logging.debug("volume %s: allocating shelf & blade", 
volume_id) self._ensure_blades(context) rval = self.db.volume_allocate_shelf_and_blade(context, volume_id) (shelf_id, blade_id) = rval - logging.debug("volume %s: exporting shelf %s & blade %s" % (volume_id, + logging.debug("volume %s: exporting shelf %s & blade %s", (volume_id, shelf_id, blade_id)) yield self.driver.create_export(volume_id, shelf_id, blade_id) @@ -96,15 +98,16 @@ class AOEManager(manager.Manager): self.db.volume_update(context, volume_id, {'status': 'available'}) - logging.debug("volume %s: re-exporting all values" % (volume_id)) + logging.debug("volume %s: re-exporting all values", volume_id) yield self.driver.ensure_exports() - logging.debug("volume %s: created successfully" % (volume_id)) + logging.debug("volume %s: created successfully", volume_id) defer.returnValue(volume_id) @defer.inlineCallbacks def delete_volume(self, context, volume_id): - logging.debug("Deleting volume with id of: %s" % (volume_id)) + """Deletes and unexports volume""" + logging.debug("Deleting volume with id of: %s", volume_id) volume_ref = self.db.volume_get(context, volume_id) if volume_ref['attach_status'] == "attached": raise exception.Error("Volume is still attached") @@ -113,6 +116,6 @@ class AOEManager(manager.Manager): shelf_id, blade_id = self.db.volume_get_shelf_and_blade(context, volume_id) yield self.driver.remove_export(volume_id, shelf_id, blade_id) - yield self.driver.delete_volume(volume_id) + yield self.driver.delete_volumevolume_id self.db.volume_destroy(context, volume_id) defer.returnValue(True) -- cgit From ed4bcbb5fee2f7c6f27236ad196138ff7150af18 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 30 Aug 2010 21:21:11 -0700 Subject: volume cleanup --- nova/volume/driver.py | 79 +++++++++++++++++--------------------------------- nova/volume/manager.py | 2 +- 2 files changed, 28 insertions(+), 53 deletions(-) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index e82449b27..f5c1330a3 100644 --- a/nova/volume/driver.py +++ 
b/nova/volume/driver.py @@ -38,93 +38,68 @@ flags.DEFINE_string('aoe_eth_dev', 'eth0', class AOEDriver(object): """Executes commands relating to AOE volumes""" + def __init__(self, execute=process.simple_execute, *args, **kwargs): + self._execute = execute @defer.inlineCallbacks - @staticmethod - def create_volume(volume_id, size): + def create_volume(self, volume_id, size): """Creates a logical volume""" # NOTE(vish): makes sure that the volume group exists - yield process.simple_execute("vgs | grep %s" % FLAGS.volume_group) + yield self._execute("vgs | grep %s" % FLAGS.volume_group) if int(size) == 0: sizestr = '100M' else: sizestr = '%sG' % size - yield process.simple_execute( + yield self._execute( "sudo lvcreate -L %s -n %s %s" % (sizestr, volume_id, - FLAGS.volume_group), - terminate_on_stderr=False) + FLAGS.volume_group)) @defer.inlineCallbacks - @staticmethod - def delete_volume(volume_id): + def delete_volume(self, volume_id): """Deletes a logical volume""" - yield process.simple_execute( + yield self._execute( "sudo lvremove -f %s/%s" % (FLAGS.volume_group, - volume_id), - terminate_on_stderr=False) + volume_id)) @defer.inlineCallbacks - @staticmethod - def create_export(volume_id, shelf_id, blade_id): + def create_export(self, volume_id, shelf_id, blade_id): """Creates an export for a logical volume""" - yield process.simple_execute( + yield self._execute( "sudo vblade-persist setup %s %s %s /dev/%s/%s" % (shelf_id, blade_id, FLAGS.aoe_eth_dev, FLAGS.volume_group, - volume_id), - terminate_on_stderr=False) + volume_id)) @defer.inlineCallbacks - @staticmethod - def remove_export(_volume_id, shelf_id, blade_id): + def remove_export(self, _volume_id, shelf_id, blade_id): """Removes an export for a logical volume""" - yield process.simple_execute( - "sudo vblade-persist stop %s %s" % (shelf_id, blade_id), - terminate_on_stderr=False) - yield process.simple_execute( - "sudo vblade-persist destroy %s %s" % (shelf_id, blade_id), - terminate_on_stderr=False) + 
yield self._execute( + "sudo vblade-persist stop %s %s" % (shelf_id, blade_id)) + yield self._execute( + "sudo vblade-persist destroy %s %s" % (shelf_id, blade_id)) @defer.inlineCallbacks - @staticmethod - def ensure_exports(): + def ensure_exports(self): """Runs all existing exports""" # NOTE(ja): wait for blades to appear - yield process.simple_execute("sleep 5") - yield process.simple_execute("sudo vblade-persist auto all", + yield self._execute("sleep 5") + yield self._execute("sudo vblade-persist auto all", check_exit_code=False) - yield process.simple_execute("sudo vblade-persist start all", + yield self._execute("sudo vblade-persist start all", check_exit_code=False) + class FakeAOEDriver(AOEDriver): """Logs calls instead of executing""" - @staticmethod - def create_volume(volume_id, size): - """Creates a logical volume""" - logging.debug("Fake AOE: create_volume %s, %s", volume_id, size) + def __init__(self, *args, **kwargs): + super(FakeAOEDriver, self).__init__(self.fake_execute) @staticmethod - def delete_volume(volume_id): - """Deletes a logical volume""" - logging.debug("Fake AOE: delete_volume %s", volume_id) + def fake_execute(cmd, *_args, **_kwargs): + """Execute that simply logs the command""" + logging.debug("FAKE AOE: %s", cmd) - @staticmethod - def create_export(volume_id, shelf_id, blade_id): - """Creates an export for a logical volume""" - logging.debug("Fake AOE: create_export %s, %s, %s", - volume_id, shelf_id, blade_id) - - @staticmethod - def remove_export(volume_id, shelf_id, blade_id): - """Removes an export for a logical volume""" - logging.debug("Fake AOE: remove_export %s, %s, %s", - volume_id, shelf_id, blade_id) - - @staticmethod - def ensure_exports(): - """Runs all existing exports""" - logging.debug("Fake AOE: ensure_export") diff --git a/nova/volume/manager.py b/nova/volume/manager.py index ad5aa22a2..94d2f7d70 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -116,6 +116,6 @@ class AOEManager(manager.Manager): 
shelf_id, blade_id = self.db.volume_get_shelf_and_blade(context, volume_id) yield self.driver.remove_export(volume_id, shelf_id, blade_id) - yield self.driver.delete_volumevolume_id + yield self.driver.delete_volume(volume_id) self.db.volume_destroy(context, volume_id) defer.returnValue(True) -- cgit From e5b93d09d7095316921cd457887a8b4d8808c3c5 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 30 Aug 2010 21:21:31 -0700 Subject: add missing manager classes --- nova/compute/manager.py | 202 +++++++++++++++++++++++++++++ nova/network/manager.py | 328 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 530 insertions(+) create mode 100644 nova/compute/manager.py create mode 100644 nova/network/manager.py diff --git a/nova/compute/manager.py b/nova/compute/manager.py new file mode 100644 index 000000000..59f56730b --- /dev/null +++ b/nova/compute/manager.py @@ -0,0 +1,202 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Compute Manager: + + Handles all code relating to instances + +""" + +import base64 +import logging +import os + +from twisted.internet import defer + +from nova import db +from nova import exception +from nova import flags +from nova import process +from nova import manager +from nova import utils +from nova.compute import power_state + + +FLAGS = flags.FLAGS +flags.DEFINE_string('instances_path', utils.abspath('../instances'), + 'where instances are stored on disk') +flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection', + 'Driver to use for volume creation') + + +class ComputeManager(manager.Manager): + """ + Manages the running instances. + """ + def __init__(self, compute_driver=None, *args, **kwargs): + """Load configuration options and connect to the hypervisor.""" + # TODO(vish): sync driver creation logic with the rest of the system + if not compute_driver: + compute_driver = FLAGS.compute_driver + self.driver = utils.import_object(compute_driver) + self.network_manager = utils.import_object(FLAGS.network_manager) + super(ComputeManager, self).__init__(*args, **kwargs) + # TODO(joshua): This needs to ensure system state, specifically + # modprobe aoe + + def _update_state(self, context, instance_id): + """Update the state of an instance from the driver info""" + # FIXME(ja): include other fields from state? 
+ instance_ref = db.instance_get(context, instance_id) + state = self.driver.get_info(instance_ref.name)['state'] + db.instance_state(context, instance_id, state) + + @defer.inlineCallbacks + @exception.wrap_exception + def run_instance(self, context, instance_id, **_kwargs): + """Launch a new instance with specified options.""" + instance_ref = db.instance_get(context, instance_id) + if instance_ref['str_id'] in self.driver.list_instances(): + raise exception.Error("Instance has already been created") + logging.debug("Starting instance %s...", instance_id) + project_id = instance_ref['project_id'] + self.network_manager.setup_compute_network(context, project_id) + db.instance_update(context, + instance_id, + {'node_name': FLAGS.node_name}) + + # TODO(vish) check to make sure the availability zone matches + db.instance_state(context, + instance_id, + power_state.NOSTATE, + 'spawning') + + try: + yield self.driver.spawn(instance_ref) + except: # pylint: disable-msg=W0702 + logging.exception("Failed to spawn instance %s", + instance_ref['name']) + db.instance_state(context, instance_id, power_state.SHUTDOWN) + + self._update_state(context, instance_id) + + @defer.inlineCallbacks + @exception.wrap_exception + def terminate_instance(self, context, instance_id): + """Terminate an instance on this machine.""" + logging.debug("Got told to terminate instance %s", instance_id) + instance_ref = db.instance_get(context, instance_id) + + if instance_ref['state'] == power_state.SHUTOFF: + # self.datamodel.destroy() FIXME: RE-ADD? + raise exception.Error('trying to destroy already destroyed' + ' instance: %s' % instance_id) + + db.instance_state( + context, instance_id, power_state.NOSTATE, 'shutting_down') + yield self.driver.destroy(instance_ref) + + # FIXME(ja): should we keep it in a terminated state for a bit? 
+ db.instance_destroy(context, instance_id) + + @defer.inlineCallbacks + @exception.wrap_exception + def reboot_instance(self, context, instance_id): + """Reboot an instance on this server. + + KVM doesn't support reboot, so we terminate and restart. + + """ + self._update_state(context, instance_id) + instance_ref = db.instance_get(context, instance_id) + + # FIXME(ja): this is only checking the model state - not state on disk? + if instance_ref['state'] != power_state.RUNNING: + raise exception.Error( + 'trying to reboot a non-running' + 'instance: %s (state: %s excepted: %s)' % + (instance_ref['str_id'], + instance_ref['state'], + power_state.RUNNING)) + + logging.debug('rebooting instance %s', instance_ref['name']) + db.instance_state( + context, instance_id, power_state.NOSTATE, 'rebooting') + yield self.driver.reboot(instance_ref) + self._update_state(context, instance_id) + + @exception.wrap_exception + def get_console_output(self, context, instance_id): + """Send the console output for an instance.""" + # FIXME: Abstract this for Xen + + logging.debug("Getting console output for %s", (instance_id)) + instance_ref = db.instance_get(context, instance_id) + + if FLAGS.connection_type == 'libvirt': + fname = os.path.abspath(os.path.join(FLAGS.instances_path, + instance_ref['str_id'], + 'console.log')) + with open(fname, 'r') as f: + output = f.read() + else: + output = 'FAKE CONSOLE OUTPUT' + + # TODO(termie): this stuff belongs in the API layer, no need to + # munge the data we send to ourselves + output = {"InstanceId": instance_id, + "Timestamp": "2", + "output": base64.b64encode(output)} + return output + + @defer.inlineCallbacks + @exception.wrap_exception + def attach_volume(self, context, instance_id, volume_id, mountpoint): + """Attach a volume to an instance.""" + # TODO(termie): check that instance_id exists + volume_ref = db.volume_get(context, volume_id) + yield self._init_aoe() + yield process.simple_execute( + "sudo virsh attach-disk %s 
/dev/etherd/%s %s" % + (instance_id, + volume_ref['aoe_device'], + mountpoint.rpartition('/dev/')[2])) + db.volume_attached(context, volume_id, instance_id, mountpoint) + defer.returnValue(True) + + @defer.inlineCallbacks + @exception.wrap_exception + def detach_volume(self, context, instance_id, volume_id): + """Detach a volume from an instance.""" + # despite the documentation, virsh detach-disk just wants the device + # name without the leading /dev/ + # TODO(termie): check that instance_id exists + volume_ref = db.volume_get(context, volume_id) + target = volume_ref['mountpoint'].rpartition('/dev/')[2] + yield process.simple_execute( + "sudo virsh detach-disk %s %s " % (instance_id, target)) + db.volume_detached(context, volume_id) + defer.returnValue(True) + + @defer.inlineCallbacks + def _init_aoe(self): + """Discover aoe exported devices""" + # TODO(vish): these shell calls should move into a different layer. + yield process.simple_execute("sudo aoe-discover") + yield process.simple_execute("sudo aoe-stat") diff --git a/nova/network/manager.py b/nova/network/manager.py new file mode 100644 index 000000000..9eeb4923d --- /dev/null +++ b/nova/network/manager.py @@ -0,0 +1,328 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Network Hosts are responsible for allocating ips and setting up network +""" + +import logging +import math + +import IPy + +from nova import db +from nova import exception +from nova import flags +from nova import manager +from nova import utils + + +FLAGS = flags.FLAGS +flags.DEFINE_string('flat_network_bridge', 'br100', + 'Bridge for simple network instances') +flags.DEFINE_list('flat_network_ips', + ['192.168.0.2', '192.168.0.3', '192.168.0.4'], + 'Available ips for simple network') +flags.DEFINE_string('flat_network_network', '192.168.0.0', + 'Network for simple network') +flags.DEFINE_string('flat_network_netmask', '255.255.255.0', + 'Netmask for simple network') +flags.DEFINE_string('flat_network_gateway', '192.168.0.1', + 'Broadcast for simple network') +flags.DEFINE_string('flat_network_broadcast', '192.168.0.255', + 'Broadcast for simple network') +flags.DEFINE_string('flat_network_dns', '8.8.4.4', + 'Dns for simple network') +flags.DEFINE_integer('vlan_start', 100, 'First VLAN for private networks') +flags.DEFINE_integer('num_networks', 1000, 'Number of networks to support') +flags.DEFINE_string('vpn_ip', utils.get_my_ip(), + 'Public IP for the cloudpipe VPN servers') +flags.DEFINE_integer('vpn_start', 1000, 'First Vpn port for private networks') +flags.DEFINE_integer('network_size', 256, + 'Number of addresses in each private subnet') +flags.DEFINE_string('public_range', '4.4.4.0/24', 'Public IP address block') +flags.DEFINE_string('private_range', '10.0.0.0/8', 'Private IP address block') +flags.DEFINE_integer('cnt_vpn_clients', 5, + 'Number of addresses reserved for vpn clients') +flags.DEFINE_string('network_driver', 'nova.network.linux_net', + 'Driver to use for network creation') + + +class AddressAlreadyAllocated(exception.Error): + """Address was already allocated""" + pass + + +class AddressNotAllocated(exception.Error): + """Address has not been allocated""" + pass + + +class NetworkManager(manager.Manager): + """Implements common 
network manager functionality + + This class must be subclassed. + """ + def __init__(self, network_driver=None, *args, **kwargs): + if not network_driver: + network_driver = FLAGS.network_driver + self.driver = utils.import_object(network_driver) + super(NetworkManager, self).__init__(*args, **kwargs) + + def set_network_host(self, context, project_id): + """Safely sets the host of the projects network""" + logging.debug("setting network host") + network_ref = self.db.project_get_network(context, project_id) + # TODO(vish): can we minimize db access by just getting the + # id here instead of the ref? + network_id = network_ref['id'] + host = self.db.network_set_host(context, + network_id, + FLAGS.node_name) + self._on_set_network_host(context, network_id) + return host + + def allocate_fixed_ip(self, context, instance_id, *args, **kwargs): + """Gets a fixed ip from the pool""" + raise NotImplementedError() + + def setup_fixed_ip(self, context, address): + """Sets up rules for fixed ip""" + raise NotImplementedError() + + def _on_set_network_host(self, context, network_id): + """Called when this host becomes the host for a project""" + raise NotImplementedError() + + def setup_compute_network(self, context, project_id): + """Sets up matching network for compute hosts""" + raise NotImplementedError() + + def allocate_floating_ip(self, context, project_id): + """Gets an floating ip from the pool""" + # TODO(vish): add floating ips through manage command + return self.db.floating_ip_allocate_address(context, + FLAGS.node_name, + project_id) + + def associate_floating_ip(self, context, floating_address, fixed_address): + """Associates an floating ip to a fixed ip""" + self.db.floating_ip_fixed_ip_associate(context, + floating_address, + fixed_address) + self.driver.bind_floating_ip(floating_address) + self.driver.ensure_floating_forward(floating_address, fixed_address) + + def disassociate_floating_ip(self, context, floating_address): + """Disassociates a floating 
ip""" + fixed_address = self.db.floating_ip_disassociate(context, + floating_address) + self.driver.unbind_floating_ip(floating_address) + self.driver.remove_floating_forward(floating_address, fixed_address) + + def deallocate_floating_ip(self, context, floating_address): + """Returns an floating ip to the pool""" + self.db.floating_ip_deallocate(context, floating_address) + + @property + def _bottom_reserved_ips(self): # pylint: disable-msg=R0201 + """Number of reserved ips at the bottom of the range""" + return 2 # network, gateway + + @property + def _top_reserved_ips(self): # pylint: disable-msg=R0201 + """Number of reserved ips at the top of the range""" + return 1 # broadcast + + def _create_fixed_ips(self, context, network_id): + """Create all fixed ips for network""" + network_ref = self.db.network_get(context, network_id) + # NOTE(vish): should these be properties of the network as opposed + # to properties of the manager class? + bottom_reserved = self._bottom_reserved_ips + top_reserved = self._top_reserved_ips + project_net = IPy.IP(network_ref['cidr']) + num_ips = len(project_net) + for index in range(num_ips): + address = str(project_net[index]) + if index < bottom_reserved or num_ips - index < top_reserved: + reserved = True + else: + reserved = False + self.db.fixed_ip_create(context, {'network_id': network_id, + 'address': address, + 'reserved': reserved}) + + +class FlatManager(NetworkManager): + """Basic network where no vlans are used""" + + def allocate_fixed_ip(self, context, instance_id, *args, **kwargs): + """Gets a fixed ip from the pool""" + network_ref = self.db.project_get_network(context, context.project.id) + address = self.db.fixed_ip_allocate(context, network_ref['id']) + self.db.fixed_ip_instance_associate(context, address, instance_id) + return address + + def setup_compute_network(self, context, project_id): + """Network is created manually""" + pass + + def setup_fixed_ip(self, context, address): + """Currently no setup""" + pass 
+ + def _on_set_network_host(self, context, network_id): + """Called when this host becomes the host for a project""" + # NOTE(vish): should there be two types of network objects + # in the datastore? + net = {} + net['injected'] = True + net['network_str'] = FLAGS.flat_network_network + net['netmask'] = FLAGS.flat_network_netmask + net['bridge'] = FLAGS.flat_network_bridge + net['gateway'] = FLAGS.flat_network_gateway + net['broadcast'] = FLAGS.flat_network_broadcast + net['dns'] = FLAGS.flat_network_dns + self.db.network_update(context, network_id, net) + # NOTE(vish): Rignt now we are putting all of the fixed ips in + # one large pool, but ultimately it may be better to + # have each network manager have its own network that + # it is responsible for and its own pool of ips. + for address in FLAGS.flat_network_ips: + self.db.fixed_ip_create(context, {'address': address}) + + +class VlanManager(NetworkManager): + """Vlan network with dhcp""" + def allocate_fixed_ip(self, context, instance_id, *args, **kwargs): + """Gets a fixed ip from the pool""" + network_ref = self.db.project_get_network(context, context.project.id) + if kwargs.get('vpn', None): + address = self._allocate_vpn_ip(context, network_ref['id']) + else: + address = self.db.fixed_ip_allocate(context, + network_ref['id']) + self.db.fixed_ip_instance_associate(context, address, instance_id) + return address + + def setup_fixed_ip(self, context, address): + """Sets forwarding rules and dhcp for fixed ip""" + fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address) + network_ref = self.db.fixed_ip_get_network(context, address) + if self.db.instance_is_vpn(context, fixed_ip_ref['instance_id']): + self.driver.ensure_vlan_forward(network_ref['vpn_public_address'], + network_ref['vpn_public_port'], + network_ref['vpn_private_address']) + self.driver.update_dhcp(context, network_ref['id']) + + def lease_fixed_ip(self, context, address): + """Called by dhcp-bridge when ip is leased""" + 
logging.debug("Leasing IP %s", address) + fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address) + if not fixed_ip_ref['allocated']: + raise AddressNotAllocated(address) + self.db.fixed_ip_update(context, + fixed_ip_ref['str_id'], + {'leased': True}) + + def release_fixed_ip(self, context, address): + """Called by dhcp-bridge when ip is released""" + logging.debug("Releasing IP %s", address) + self.db.fixed_ip_update(context, address, {'allocated': False, + 'leased': False}) + self.db.fixed_ip_instance_disassociate(context, address) + + def allocate_network(self, context, project_id): + """Set up the network""" + self._ensure_indexes(context) + network_ref = db.network_create(context, {'project_id': project_id}) + network_id = network_ref['id'] + private_net = IPy.IP(FLAGS.private_range) + index = db.network_get_index(context, network_id) + vlan = FLAGS.vlan_start + index + start = index * FLAGS.network_size + significant_bits = 32 - int(math.log(FLAGS.network_size, 2)) + cidr = "%s/%s" % (private_net[start], significant_bits) + project_net = IPy.IP(cidr) + net = {} + net['cidr'] = cidr + # NOTE(vish): we could turn these into properties + net['netmask'] = str(project_net.netmask()) + net['gateway'] = str(project_net[1]) + net['broadcast'] = str(project_net.broadcast()) + net['vpn_private_address'] = str(project_net[2]) + net['dhcp_start'] = str(project_net[3]) + net['vlan'] = vlan + net['bridge'] = 'br%s' % vlan + net['vpn_public_address'] = FLAGS.vpn_ip + net['vpn_public_port'] = FLAGS.vpn_start + index + db.network_update(context, network_id, net) + self._create_fixed_ips(context, network_id) + return network_id + + def setup_compute_network(self, context, project_id): + """Sets up matching network for compute hosts""" + network_ref = self.db.project_get_network(context, project_id) + self.driver.ensure_vlan_bridge(network_ref['vlan'], + network_ref['bridge']) + + def restart_nets(self): + """Ensure the network for each user is enabled""" + # 
TODO(vish): Implement this + pass + + @staticmethod + def _allocate_vpn_ip(context, network_id): + """Allocate vpn ip for network""" + # TODO(vish): There is a possible concurrency issue here. + network_ref = db.network_get(context, network_id) + address = network_ref['vpn_private_address'] + fixed_ip_ref = db.fixed_ip_get_by_address(context, address) + # TODO(vish): Should this be fixed_ip_is_allocated? + if fixed_ip_ref['allocated']: + raise AddressAlreadyAllocated() + db.fixed_ip_update(context, fixed_ip_ref['id'], {'allocated': True}) + return fixed_ip_ref['str_id'] + + def _ensure_indexes(self, context): + """Ensure the indexes for the network exist + + This could use a manage command instead of keying off of a flag""" + if not self.db.network_index_count(context): + for index in range(FLAGS.num_networks): + self.db.network_index_create(context, {'index': index}) + + def _on_set_network_host(self, context, network_id): + """Called when this host becomes the host for a project""" + network_ref = self.db.network_get(context, network_id) + self.driver.ensure_vlan_bridge(network_ref['vlan'], + network_ref['bridge'], + network_ref) + + @property + def _bottom_reserved_ips(self): + """Number of reserved ips at the bottom of the range""" + return super(VlanManager, self)._bottom_reserved_ips + 1 # vpn server + + @property + def _top_reserved_ips(self): + """Number of reserved ips at the top of the range""" + parent_reserved = super(VlanManager, self)._top_reserved_ips + return parent_reserved + FLAGS.cnt_vpn_clients + -- cgit From e738c3995a319decbc0b8e10bf74ade794b8daa4 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 30 Aug 2010 22:13:22 -0700 Subject: pylint cleanup of tests --- nova/tests/compute_unittest.py | 61 ++++++++++++++++-------------------------- nova/tests/network_unittest.py | 3 +-- nova/tests/volume_unittest.py | 45 ++++++++++++++++++------------- 3 files changed, 50 insertions(+), 59 deletions(-) diff --git 
a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py index 0166dc4be..867b572f3 100644 --- a/nova/tests/compute_unittest.py +++ b/nova/tests/compute_unittest.py @@ -15,11 +15,12 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. - +""" +Tests For Compute +""" import logging -import time + from twisted.internet import defer -from xml.etree import ElementTree from nova import db from nova import exception @@ -32,58 +33,39 @@ from nova.auth import manager FLAGS = flags.FLAGS -class InstanceXmlTestCase(test.TrialTestCase): - # @defer.inlineCallbacks - def test_serialization(self): - # TODO: Reimplement this, it doesn't make sense in redis-land - return - - # instance_id = 'foo' - # first_node = node.Node() - # inst = yield first_node.run_instance(self.context, instance_id) - # - # # force the state so that we can verify that it changes - # inst._s['state'] = node.Instance.NOSTATE - # xml = inst.toXml() - # self.assert_(ElementTree.parse(StringIO.StringIO(xml))) - # - # second_node = node.Node() - # new_inst = node.Instance.fromXml(second_node._conn, - # pool=second_node._pool, xml=xml) - # self.assertEqual(new_inst.state, node.Instance.RUNNING) - # rv = yield first_node.terminate_instance(self.context, instance_id) - - -class ComputeConnectionTestCase(test.TrialTestCase): - def setUp(self): +class ComputeTestCase(test.TrialTestCase): + """Test case for compute""" + def setUp(self): # pylint: disable-msg=C0103 logging.getLogger().setLevel(logging.DEBUG) - super(ComputeConnectionTestCase, self).setUp() + super(ComputeTestCase, self).setUp() self.flags(connection_type='fake', fake_storage=True) self.compute = utils.import_object(FLAGS.compute_manager) self.manager = manager.AuthManager() - user = self.manager.create_user('fake', 'fake', 'fake') - project = self.manager.create_project('fake', 'fake', 'fake') + self.user = 
self.manager.create_user('fake', 'fake', 'fake') + self.project = self.manager.create_project('fake', 'fake', 'fake') self.context = None - def tearDown(self): - self.manager.delete_user('fake') - self.manager.delete_project('fake') + def tearDown(self): # pylint: disable-msg=C0103 + self.manager.delete_user(self.user) + self.manager.delete_project(self.project) def _create_instance(self): + """Create a test instance""" inst = {} inst['image_id'] = 'ami-test' inst['reservation_id'] = 'r-fakeres' inst['launch_time'] = '10' - inst['user_id'] = 'fake' - inst['project_id'] = 'fake' + inst['user_id'] = self.user.id + inst['project_id'] = self.project.id inst['instance_type'] = 'm1.tiny' inst['mac_address'] = utils.generate_mac() inst['ami_launch_index'] = 0 return db.instance_create(None, inst) @defer.inlineCallbacks - def test_run_describe_terminate(self): + def test_run_terminate(self): + """Make sure it is possible to run and terminate instance""" instance_id = self._create_instance() yield self.compute.run_instance(self.context, instance_id) @@ -100,6 +82,7 @@ class ComputeConnectionTestCase(test.TrialTestCase): @defer.inlineCallbacks def test_reboot(self): + """Ensure instance can be rebooted""" instance_id = self._create_instance() yield self.compute.run_instance(self.context, instance_id) yield self.compute.reboot_instance(self.context, instance_id) @@ -107,16 +90,18 @@ class ComputeConnectionTestCase(test.TrialTestCase): @defer.inlineCallbacks def test_console_output(self): + """Make sure we can get console output from instance""" instance_id = self._create_instance() - rv = yield self.compute.run_instance(self.context, instance_id) + yield self.compute.run_instance(self.context, instance_id) console = yield self.compute.get_console_output(self.context, instance_id) self.assert_(console) - rv = yield self.compute.terminate_instance(self.context, instance_id) + yield self.compute.terminate_instance(self.context, instance_id) @defer.inlineCallbacks def 
test_run_instance_existing(self): + """Ensure failure when running an instance that already exists""" instance_id = self._create_instance() yield self.compute.run_instance(self.context, instance_id) self.assertFailure(self.compute.run_instance(self.context, diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index fccfc23fb..7cd20dfcd 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -75,6 +75,7 @@ class NetworkTestCase(test.TrialTestCase): self.manager.delete_user(self.user) def _create_address(self, project_num, instance_id=None): + """Create an address in given project num""" net = db.project_get_network(None, self.projects[project_num].id) address = db.fixed_ip_allocate(None, net['id']) if instance_id is None: @@ -147,7 +148,6 @@ class NetworkTestCase(test.TrialTestCase): first = self._create_address(0) lease_ip(first) for i in range(1, 5): - project_id = self.projects[i].id address = self._create_address(i) address2 = self._create_address(i) address3 = self._create_address(i) @@ -227,7 +227,6 @@ class NetworkTestCase(test.TrialTestCase): network['id']) addresses = [] for i in range(num_available_ips): - project_id = self.projects[0].id address = self._create_address(0) addresses.append(address) lease_ip(address) diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py index f42d0ac8d..0df0c20d6 100644 --- a/nova/tests/volume_unittest.py +++ b/nova/tests/volume_unittest.py @@ -15,7 +15,9 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
- +""" +Tests for Volume Code +""" import logging from twisted.internet import defer @@ -30,7 +32,8 @@ FLAGS = flags.FLAGS class VolumeTestCase(test.TrialTestCase): - def setUp(self): + """Test Case for volumes""" + def setUp(self): # pylint: disable-msg=C0103 logging.getLogger().setLevel(logging.DEBUG) super(VolumeTestCase, self).setUp() self.compute = utils.import_object(FLAGS.compute_manager) @@ -39,9 +42,11 @@ class VolumeTestCase(test.TrialTestCase): self.volume = utils.import_object(FLAGS.volume_manager) self.context = None - def _create_volume(self, size='0'): + @staticmethod + def _create_volume(size='0'): + """Create a volume object""" vol = {} - vol['size'] = '0' + vol['size'] = size vol['user_id'] = 'fake' vol['project_id'] = 'fake' vol['availability_zone'] = FLAGS.storage_availability_zone @@ -50,7 +55,8 @@ class VolumeTestCase(test.TrialTestCase): return db.volume_create(None, vol)['id'] @defer.inlineCallbacks - def test_run_create_volume(self): + def test_create_delete_volume(self): + """Test volume can be created and deleted""" volume_id = self._create_volume() yield self.volume.create_volume(self.context, volume_id) self.assertEqual(volume_id, db.volume_get(None, volume_id).id) @@ -63,6 +69,7 @@ class VolumeTestCase(test.TrialTestCase): @defer.inlineCallbacks def test_too_big_volume(self): + """Ensure failure if a too large of a volume is requested""" # FIXME(vish): validation needs to move into the data layer in # volume_create defer.returnValue(True) @@ -75,9 +82,10 @@ class VolumeTestCase(test.TrialTestCase): @defer.inlineCallbacks def test_too_many_volumes(self): + """Ensure that NoMoreBlades is raised when we run out of volumes""" vols = [] total_slots = FLAGS.num_shelves * FLAGS.blades_per_shelf - for i in xrange(total_slots): + for _index in xrange(total_slots): volume_id = self._create_volume() yield self.volume.create_volume(self.context, volume_id) vols.append(volume_id) @@ -91,7 +99,7 @@ class VolumeTestCase(test.TrialTestCase): 
@defer.inlineCallbacks def test_run_attach_detach_volume(self): - # Create one volume and one compute to test with + """Make sure volume can be attached and detached from instance""" instance_id = "storage-test" mountpoint = "/dev/sdf" volume_id = self._create_volume() @@ -99,9 +107,9 @@ class VolumeTestCase(test.TrialTestCase): if FLAGS.fake_tests: db.volume_attached(None, volume_id, instance_id, mountpoint) else: - rv = yield self.compute.attach_volume(instance_id, - volume_id, - mountpoint) + yield self.compute.attach_volume(instance_id, + volume_id, + mountpoint) vol = db.volume_get(None, volume_id) self.assertEqual(vol['status'], "in-use") self.assertEqual(vol['attach_status'], "attached") @@ -113,12 +121,12 @@ class VolumeTestCase(test.TrialTestCase): if FLAGS.fake_tests: db.volume_detached(None, volume_id) else: - rv = yield self.compute.detach_volume(instance_id, - volume_id) + yield self.compute.detach_volume(instance_id, + volume_id) vol = db.volume_get(None, volume_id) self.assertEqual(vol['status'], "available") - rv = self.volume.delete_volume(self.context, volume_id) + yield self.volume.delete_volume(self.context, volume_id) self.assertRaises(exception.Error, db.volume_get, None, @@ -126,23 +134,22 @@ class VolumeTestCase(test.TrialTestCase): @defer.inlineCallbacks def test_concurrent_volumes_get_different_blades(self): - vol_size = "5" - user_id = "fake" - project_id = 'fake' - shelf_blades = [] + """Ensure multiple concurrent volumes get different blades""" volume_ids = [] + shelf_blades = [] def _check(volume_id): + """Make sure blades aren't duplicated""" volume_ids.append(volume_id) (shelf_id, blade_id) = db.volume_get_shelf_and_blade(None, volume_id) shelf_blade = '%s.%s' % (shelf_id, blade_id) self.assert_(shelf_blade not in shelf_blades) shelf_blades.append(shelf_blade) - logging.debug("got %s" % shelf_blade) + logging.debug("Blade %s allocated", shelf_blade) deferreds = [] total_slots = FLAGS.num_shelves * FLAGS.blades_per_shelf - for i in 
range(total_slots): + for _index in xrange(total_slots): volume_id = self._create_volume() d = self.volume.create_volume(self.context, volume_id) d.addCallback(_check) -- cgit From 8e3ab2119289cf082830aea39409a44cdff54e12 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 30 Aug 2010 22:21:47 -0700 Subject: a little more cleanup in compute --- nova/compute/manager.py | 37 +++++++++++++++++-------------------- 1 file changed, 17 insertions(+), 20 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 59f56730b..7723edd53 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -17,10 +17,7 @@ # under the License. """ -Compute Manager: - - Handles all code relating to instances - +Handles all code relating to instances (guest vms) """ import base64 @@ -57,8 +54,6 @@ class ComputeManager(manager.Manager): self.driver = utils.import_object(compute_driver) self.network_manager = utils.import_object(FLAGS.network_manager) super(ComputeManager, self).__init__(*args, **kwargs) - # TODO(joshua): This needs to ensure system state, specifically - # modprobe aoe def _update_state(self, context, instance_id): """Update the state of an instance from the driver info""" @@ -103,30 +98,28 @@ class ComputeManager(manager.Manager): logging.debug("Got told to terminate instance %s", instance_id) instance_ref = db.instance_get(context, instance_id) + # TODO(vish): move this logic to layer? if instance_ref['state'] == power_state.SHUTOFF: - # self.datamodel.destroy() FIXME: RE-ADD? + db.instance_destroy(context, instance_id) raise exception.Error('trying to destroy already destroyed' ' instance: %s' % instance_id) - db.instance_state( - context, instance_id, power_state.NOSTATE, 'shutting_down') + db.instance_state(context, + instance_id, + power_state.NOSTATE, + 'shutting_down') yield self.driver.destroy(instance_ref) - # FIXME(ja): should we keep it in a terminated state for a bit? 
+ # TODO(ja): should we keep it in a terminated state for a bit? db.instance_destroy(context, instance_id) @defer.inlineCallbacks @exception.wrap_exception def reboot_instance(self, context, instance_id): - """Reboot an instance on this server. - - KVM doesn't support reboot, so we terminate and restart. - - """ + """Reboot an instance on this server.""" self._update_state(context, instance_id) instance_ref = db.instance_get(context, instance_id) - # FIXME(ja): this is only checking the model state - not state on disk? if instance_ref['state'] != power_state.RUNNING: raise exception.Error( 'trying to reboot a non-running' @@ -136,15 +129,17 @@ class ComputeManager(manager.Manager): power_state.RUNNING)) logging.debug('rebooting instance %s', instance_ref['name']) - db.instance_state( - context, instance_id, power_state.NOSTATE, 'rebooting') + db.instance_state(context, + instance_id, + power_state.NOSTATE, + 'rebooting') yield self.driver.reboot(instance_ref) self._update_state(context, instance_id) @exception.wrap_exception def get_console_output(self, context, instance_id): """Send the console output for an instance.""" - # FIXME: Abstract this for Xen + # TODO(vish): Move this into the driver layer logging.debug("Getting console output for %s", (instance_id)) instance_ref = db.instance_get(context, instance_id) @@ -172,6 +167,7 @@ class ComputeManager(manager.Manager): # TODO(termie): check that instance_id exists volume_ref = db.volume_get(context, volume_id) yield self._init_aoe() + # TODO(vish): Move this into the driver layer yield process.simple_execute( "sudo virsh attach-disk %s /dev/etherd/%s %s" % (instance_id, @@ -189,6 +185,7 @@ class ComputeManager(manager.Manager): # TODO(termie): check that instance_id exists volume_ref = db.volume_get(context, volume_id) target = volume_ref['mountpoint'].rpartition('/dev/')[2] + # TODO(vish): Move this into the driver layer yield process.simple_execute( "sudo virsh detach-disk %s %s " % (instance_id, target)) 
db.volume_detached(context, volume_id) @@ -197,6 +194,6 @@ class ComputeManager(manager.Manager): @defer.inlineCallbacks def _init_aoe(self): """Discover aoe exported devices""" - # TODO(vish): these shell calls should move into a different layer. + # TODO(vish): these shell calls should move into volume manager. yield process.simple_execute("sudo aoe-discover") yield process.simple_execute("sudo aoe-stat") -- cgit From 85c81f39e02fde5c09b12978d482151dc93a4b9e Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Tue, 31 Aug 2010 09:54:31 +0200 Subject: Fix up setup.py to match nova-rsapi -> nova-api-new rename. --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 0fd286f7d..25252e8f4 100644 --- a/setup.py +++ b/setup.py @@ -52,5 +52,5 @@ setup(name='nova', 'bin/nova-manage', 'bin/nova-network', 'bin/nova-objectstore', - 'bin/nova-rsapi', + 'bin/nova-api-new', 'bin/nova-volume']) -- cgit From 871c49adc3c824b9b1e095b0d7135c1fdab486c1 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Tue, 31 Aug 2010 10:15:20 +0200 Subject: Call getInfo() instead of getVersion() on the libvirt connection object. virConnectGetVersion was not exposed properly in the python bindings until quite recently, so this makes us rather more backwards compatible. 
--- nova/virt/libvirt_conn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 1ff8175d0..c29bdb466 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -91,7 +91,7 @@ class LibvirtConnection(object): def _test_connection(self): try: - self._wrapped_conn.getVersion() + self._wrapped_conn.getInfo() return True except libvirt.libvirtError as e: if e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and \ -- cgit From 2c16344cfea8461e96425a2c375b4dabd21f03c5 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 31 Aug 2010 16:48:41 -0700 Subject: rename node_name to host --- nova/compute/manager.py | 2 +- nova/db/__init__.py | 20 +++++++++ nova/db/api.py | 15 ++++--- nova/db/sqlalchemy/__init__.py | 25 +++++++++++- nova/db/sqlalchemy/api.py | 28 ++++++------- nova/db/sqlalchemy/models.py | 92 +++++++++++++++++++++++++++--------------- nova/endpoint/cloud.py | 6 +-- nova/flags.py | 2 +- nova/network/manager.py | 4 +- nova/service.py | 10 ++--- nova/tests/model_unittest.py | 6 +-- nova/tests/network_unittest.py | 2 +- nova/tests/service_unittest.py | 36 ++++++++--------- nova/volume/manager.py | 4 +- 14 files changed, 163 insertions(+), 89 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 7723edd53..c15c9e1f5 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -74,7 +74,7 @@ class ComputeManager(manager.Manager): self.network_manager.setup_compute_network(context, project_id) db.instance_update(context, instance_id, - {'node_name': FLAGS.node_name}) + {'host': FLAGS.host}) # TODO(vish) check to make sure the availability zone matches db.instance_state(context, diff --git a/nova/db/__init__.py b/nova/db/__init__.py index 2d893cb36..054b7ac94 100644 --- a/nova/db/__init__.py +++ b/nova/db/__init__.py @@ -1,3 +1,23 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 
2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +DB abstraction for Nova +""" from nova.db.api import * diff --git a/nova/db/api.py b/nova/db/api.py index d95d1ce6e..8c0649df2 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -15,11 +15,13 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +""" +Defines interface for DB access +""" from nova import exception from nova import flags from nova import utils -from nova import validate FLAGS = flags.FLAGS @@ -33,14 +35,17 @@ _impl = utils.LazyPluggable(FLAGS['db_backend'], # TODO(vish): where should these exceptions go? 
class NoMoreAddresses(exception.Error): + """No more available addresses""" pass class NoMoreBlades(exception.Error): + """No more available blades""" pass class NoMoreNetworks(exception.Error): + """No more available networks""" pass @@ -52,9 +57,9 @@ def daemon_get(context, daemon_id): return _impl.daemon_get(context, daemon_id) -def daemon_get_by_args(context, node_name, binary): +def daemon_get_by_args(context, host, binary): """Get the state of an daemon by node name and binary.""" - return _impl.daemon_get_by_args(context, node_name, binary) + return _impl.daemon_get_by_args(context, host, binary) def daemon_create(context, values): @@ -74,12 +79,12 @@ def daemon_update(context, daemon_id, values): ################### -def floating_ip_allocate_address(context, node_name, project_id): +def floating_ip_allocate_address(context, host, project_id): """Allocate free floating ip and return the address. Raises if one is not available. """ - return _impl.floating_ip_allocate_address(context, node_name, project_id) + return _impl.floating_ip_allocate_address(context, host, project_id) def floating_ip_create(context, address, host): diff --git a/nova/db/sqlalchemy/__init__.py b/nova/db/sqlalchemy/__init__.py index 444f50a9b..3288ebd20 100644 --- a/nova/db/sqlalchemy/__init__.py +++ b/nova/db/sqlalchemy/__init__.py @@ -1,3 +1,24 @@ -from models import register_models +# vim: tabstop=4 shiftwidth=4 softtabstop=4 -register_models() +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +SQLAlchemy database backend +""" +from nova.db.sqlalchemy import models + +models.register_models() diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index b00ad19ff..f6be037b3 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -33,8 +33,8 @@ def daemon_get(context, daemon_id): return models.Daemon.find(daemon_id) -def daemon_get_by_args(context, node_name, binary): - return models.Daemon.find_by_args(node_name, binary) +def daemon_get_by_args(context, host, binary): + return models.Daemon.find_by_args(host, binary) def daemon_create(context, values): @@ -55,10 +55,10 @@ def daemon_update(context, daemon_id, values): ################### -def floating_ip_allocate_address(context, node_name, project_id): +def floating_ip_allocate_address(context, host, project_id): with managed_session(autocommit=False) as session: floating_ip_ref = session.query(models.FloatingIp) \ - .filter_by(node_name=node_name) \ + .filter_by(host=host) \ .filter_by(fixed_ip_id=None) \ .filter_by(deleted=False) \ .with_lockmode('update') \ @@ -76,7 +76,7 @@ def floating_ip_allocate_address(context, node_name, project_id): def floating_ip_create(context, address, host): floating_ip_ref = models.FloatingIp() floating_ip_ref['address'] = address - floating_ip_ref['node_name'] = host + floating_ip_ref['host'] = host floating_ip_ref.save() return floating_ip_ref @@ -131,8 +131,8 @@ def floating_ip_get_instance(context, address): def fixed_ip_allocate(context, network_id): with managed_session(autocommit=False) as session: - network_or_none = 
or_(models.FixedIp.network_id==network_id, - models.FixedIp.network_id==None) + network_or_none = or_(models.FixedIp.network_id == network_id, + models.FixedIp.network_id == None) fixed_ip_ref = session.query(models.FixedIp) \ .filter(network_or_none) \ .filter_by(reserved=False) \ @@ -270,7 +270,7 @@ def instance_get_floating_address(context, instance_id): def instance_get_host(context, instance_id): instance_ref = instance_get(context, instance_id) - return instance_ref['node_name'] + return instance_ref['host'] def instance_is_vpn(context, instance_id): @@ -376,7 +376,7 @@ def network_get_by_bridge(context, bridge): def network_get_host(context, network_id): network_ref = network_get(context, network_id) - return network_ref['node_name'] + return network_ref['host'] def network_get_index(context, network_id): @@ -418,13 +418,13 @@ def network_set_host(context, network_id, host_id): network_id) # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues - if network.node_name: + if network.host: session.commit() - return network['node_name'] - network['node_name'] = host_id + return network['host'] + network['host'] = host_id session.add(network) session.commit() - return network['node_name'] + return network['host'] def network_update(context, network_id, values): @@ -549,7 +549,7 @@ def volume_get_by_str(context, str_id): def volume_get_host(context, volume_id): volume_ref = volume_get(context, volume_id) - return volume_ref['node_name'] + return volume_ref['host'] def volume_get_shelf_and_blade(context, volume_id): diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index b9ed34bb1..9e15614f7 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -20,23 +20,25 @@ SQLAlchemy models for nova data """ +# TODO(vish): clean up these imports from sqlalchemy.orm import relationship, backref, validates, exc -from sqlalchemy import Table, Column, Integer, String -from sqlalchemy import 
MetaData, ForeignKey, DateTime, Boolean, Text +from sqlalchemy import Column, Integer, String +from sqlalchemy import ForeignKey, DateTime, Boolean, Text from sqlalchemy.ext.declarative import declarative_base from nova.db.sqlalchemy.session import managed_session + from nova import auth from nova import exception from nova import flags FLAGS = flags.FLAGS - -Base = declarative_base() +BASE = declarative_base() class NovaBase(object): + """Base class for Nova Models""" __table_args__ = {'mysql_engine': 'InnoDB'} __table_initialized__ = False __prefix__ = 'none' @@ -46,6 +48,7 @@ class NovaBase(object): @classmethod def all(cls, session=None): + """Get all objects of this type""" if session: return session.query(cls) \ .filter_by(deleted=False) \ @@ -56,6 +59,7 @@ class NovaBase(object): @classmethod def count(cls, session=None): + """Count objects of this type""" if session: return session.query(cls) \ .filter_by(deleted=False) \ @@ -66,6 +70,7 @@ class NovaBase(object): @classmethod def find(cls, obj_id, session=None): + """Find object by id""" if session: try: return session.query(cls) \ @@ -80,14 +85,17 @@ class NovaBase(object): @classmethod def find_by_str(cls, str_id, session=None): - id = int(str_id.rpartition('-')[2]) - return cls.find(id, session=session) + """Find object by str_id""" + int_id = int(str_id.rpartition('-')[2]) + return cls.find(int_id, session=session) @property def str_id(self): + """Get string id of object (generally prefix + '-' + id)""" return "%s-%s" % (self.__prefix__, self.id) def save(self, session=None): + """Save this object""" if session: session.add(self) session.flush() @@ -96,6 +104,7 @@ class NovaBase(object): self.save(session=sess) def delete(self, session=None): + """Delete this object""" self.deleted = True self.save(session=session) @@ -106,7 +115,8 @@ class NovaBase(object): return getattr(self, key) -class Image(Base, NovaBase): +class Image(BASE, NovaBase): + """Represents an image in the datastore""" __tablename__ = 
'images' __prefix__ = 'ami' id = Column(Integer, primary_key=True) @@ -139,36 +149,39 @@ class Image(Base, NovaBase): assert(val is None) -class PhysicalNode(Base, NovaBase): - __tablename__ = 'physical_nodes' +class Host(BASE, NovaBase): + """Represents a host where services are running""" + __tablename__ = 'hosts' id = Column(String(255), primary_key=True) -class Daemon(Base, NovaBase): +class Daemon(BASE, NovaBase): + """Represents a running service on a host""" __tablename__ = 'daemons' id = Column(Integer, primary_key=True) - node_name = Column(String(255)) # , ForeignKey('physical_node.id')) + host = Column(String(255), ForeignKey('hosts.id')) binary = Column(String(255)) report_count = Column(Integer, nullable=False, default=0) @classmethod - def find_by_args(cls, node_name, binary, session=None): + def find_by_args(cls, host, binary, session=None): if session: try: return session.query(cls) \ - .filter_by(node_name=node_name) \ + .filter_by(host=host) \ .filter_by(binary=binary) \ .filter_by(deleted=False) \ .one() except exc.NoResultFound: - raise exception.NotFound("No model for %s, %s" % (node_name, + raise exception.NotFound("No model for %s, %s" % (host, binary)) else: with managed_session() as sess: - return cls.find_by_args(node_name, binary, session=sess) + return cls.find_by_args(host, binary, session=sess) -class Instance(Base, NovaBase): +class Instance(BASE, NovaBase): + """Represents a guest vm""" __tablename__ = 'instances' __prefix__ = 'i' id = Column(Integer, primary_key=True) @@ -191,6 +204,9 @@ class Instance(Base, NovaBase): image_id = Column(Integer, ForeignKey('images.id'), nullable=True) kernel_id = Column(Integer, ForeignKey('images.id'), nullable=True) ramdisk_id = Column(Integer, ForeignKey('images.id'), nullable=True) +# ramdisk = relationship(Ramdisk, backref=backref('instances', order_by=id)) +# kernel = relationship(Kernel, backref=backref('instances', order_by=id)) +# project = relationship(Project, backref=backref('instances', 
order_by=id)) launch_index = Column(Integer) key_name = Column(String(255)) @@ -201,7 +217,7 @@ class Instance(Base, NovaBase): state_description = Column(String(255)) hostname = Column(String(255)) - node_name = Column(String(255)) # , ForeignKey('physical_node.id')) + host = Column(String(255), ForeignKey('hosts.id')) instance_type = Column(Integer) @@ -211,17 +227,17 @@ class Instance(Base, NovaBase): mac_address = Column(String(255)) def set_state(self, state_code, state_description=None): - # TODO(devcamcar): Move this out of models and into api + """Set the code and description of an instance""" + # TODO(devcamcar): Move this out of models and into driver from nova.compute import power_state self.state = state_code if not state_description: state_description = power_state.name(state_code) self.state_description = state_description self.save() -# ramdisk = relationship(Ramdisk, backref=backref('instances', order_by=id)) -# kernel = relationship(Kernel, backref=backref('instances', order_by=id)) -# project = relationship(Project, backref=backref('instances', order_by=id)) -#TODO - see Ewan's email about state improvements + + # TODO(vish): see Ewan's email about state improvements, probably + # should be in a driver base class or some such # vmstate_state = running, halted, suspended, paused # power_state = what we have # task_state = transitory and may trigger power state transition @@ -232,7 +248,8 @@ class Instance(Base, NovaBase): # 'shutdown', 'shutoff', 'crashed']) -class Volume(Base, NovaBase): +class Volume(BASE, NovaBase): + """Represents a block storage device that can be attached to a vm""" __tablename__ = 'volumes' __prefix__ = 'vol' id = Column(Integer, primary_key=True) @@ -240,7 +257,7 @@ class Volume(Base, NovaBase): user_id = Column(String(255)) project_id = Column(String(255)) - node_name = Column(String(255)) # , ForeignKey('physical_node.id')) + host = Column(String(255), ForeignKey('hosts.id')) size = Column(Integer) availability_zone = 
Column(String(255)) # TODO(vish): foreign key? instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) @@ -250,7 +267,8 @@ class Volume(Base, NovaBase): attach_status = Column(String(255)) # TODO(vish): enum -class ExportDevice(Base, NovaBase): +class ExportDevice(BASE, NovaBase): + """Represates a shelf and blade that a volume can be exported on""" __tablename__ = 'export_devices' id = Column(Integer, primary_key=True) shelf_id = Column(Integer) @@ -260,7 +278,8 @@ class ExportDevice(Base, NovaBase): uselist=False)) -class Network(Base, NovaBase): +class Network(BASE, NovaBase): + """Represents a network""" __tablename__ = 'networks' id = Column(Integer, primary_key=True) @@ -279,10 +298,16 @@ class Network(Base, NovaBase): dhcp_start = Column(String(255)) project_id = Column(String(255)) - node_name = Column(String(255)) # , ForeignKey('physical_node.id')) + host = Column(String(255), ForeignKey('hosts.id')) + +class NetworkIndex(BASE, NovaBase): + """Represents a unique offset for a network -class NetworkIndex(Base, NovaBase): + Currently vlan number, vpn port, and fixed ip ranges are keyed off of + this index. These may ultimately need to be converted to separate + pools. + """ __tablename__ = 'network_indexes' id = Column(Integer, primary_key=True) index = Column(Integer) @@ -292,7 +317,8 @@ class NetworkIndex(Base, NovaBase): # TODO(vish): can these both come from the same baseclass? 
-class FixedIp(Base, NovaBase): +class FixedIp(BASE, NovaBase): + """Represents a fixed ip for an instance""" __tablename__ = 'fixed_ips' id = Column(Integer, primary_key=True) address = Column(String(255)) @@ -324,7 +350,8 @@ class FixedIp(Base, NovaBase): return cls.find_by_str(str_id, session=sess) -class FloatingIp(Base, NovaBase): +class FloatingIp(BASE, NovaBase): + """Represents a floating ip that dynamically forwards to a fixed ip""" __tablename__ = 'floating_ips' id = Column(Integer, primary_key=True) address = Column(String(255)) @@ -332,7 +359,7 @@ class FloatingIp(Base, NovaBase): fixed_ip = relationship(FixedIp, backref=backref('floating_ips')) project_id = Column(String(255)) - node_name = Column(String(255)) # , ForeignKey('physical_node.id')) + host = Column(String(255), ForeignKey('hosts.id')) @property def str_id(self): @@ -354,8 +381,9 @@ class FloatingIp(Base, NovaBase): def register_models(): + """Register Models and create metadata""" from sqlalchemy import create_engine - models = (Image, PhysicalNode, Daemon, Instance, Volume, ExportDevice, + models = (Image, Host, Daemon, Instance, Volume, ExportDevice, FixedIp, FloatingIp, Network, NetworkIndex) engine = create_engine(FLAGS.sql_connection, echo=False) for model in models: diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 8e459c935..c34eb5da9 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -230,7 +230,7 @@ class CloudController(object): # instance_id is passed in as a list of instances instance_ref = db.instance_get_by_str(context, instance_id[0]) return rpc.call('%s.%s' % (FLAGS.compute_topic, - instance_ref['node_name']), + instance_ref['host']), {"method": "get_console_output", "args": {"context": None, "instance_id": instance_ref['id']}}) @@ -257,7 +257,7 @@ class CloudController(object): v['status'] = '%s (%s, %s, %s, %s)' % ( volume['status'], volume['user_id'], - 'node_name', + 'host', volume['instance_id'], volume['mountpoint']) if 
volume['attach_status'] == 'attached': @@ -391,7 +391,7 @@ class CloudController(object): if context.user.is_admin(): i['key_name'] = '%s (%s, %s)' % (i['key_name'], instance.project_id, - 'node_name') # FIXME + 'host') # FIXME i['product_codes_set'] = self._convert_to_set([], 'product_codes') i['instance_type'] = instance.instance_type i['launch_time'] = instance.created_at diff --git a/nova/flags.py b/nova/flags.py index a99179837..ebbfe3ff8 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -206,7 +206,7 @@ DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger') # UNUSED DEFINE_string('node_availability_zone', 'nova', 'availability zone of this node') -DEFINE_string('node_name', socket.gethostname(), +DEFINE_string('host', socket.gethostname(), 'name of this node') DEFINE_string('sql_connection', diff --git a/nova/network/manager.py b/nova/network/manager.py index 9eeb4923d..dbb8e66da 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -93,7 +93,7 @@ class NetworkManager(manager.Manager): network_id = network_ref['id'] host = self.db.network_set_host(context, network_id, - FLAGS.node_name) + FLAGS.host) self._on_set_network_host(context, network_id) return host @@ -117,7 +117,7 @@ class NetworkManager(manager.Manager): """Gets an floating ip from the pool""" # TODO(vish): add floating ips through manage command return self.db.floating_ip_allocate_address(context, - FLAGS.node_name, + FLAGS.host, project_id) def associate_floating_ip(self, context, floating_address, fixed_address): diff --git a/nova/service.py b/nova/service.py index 94d91f60a..d39a5cf10 100644 --- a/nova/service.py +++ b/nova/service.py @@ -89,11 +89,11 @@ class Service(object, service.Service): proxy=service_ref) consumer_node = rpc.AdapterConsumer( connection=conn, - topic='%s.%s' % (topic, FLAGS.node_name), + topic='%s.%s' % (topic, FLAGS.host), proxy=service_ref) pulse = task.LoopingCall(service_ref.report_state, - FLAGS.node_name, + FLAGS.host, bin_name) 
pulse.start(interval=report_interval, now=False) @@ -107,14 +107,14 @@ class Service(object, service.Service): return application @defer.inlineCallbacks - def report_state(self, node_name, binary, context=None): + def report_state(self, host, binary, context=None): """Update the state of this daemon in the datastore.""" try: try: - daemon_ref = db.daemon_get_by_args(context, node_name, binary) + daemon_ref = db.daemon_get_by_args(context, host, binary) daemon_id = daemon_ref['id'] except exception.NotFound: - daemon_id = db.daemon_create(context, {'node_name': node_name, + daemon_id = db.daemon_create(context, {'host': host, 'binary': binary, 'report_count': 0}) daemon_ref = db.daemon_get(context, daemon_id) diff --git a/nova/tests/model_unittest.py b/nova/tests/model_unittest.py index dc2441c24..130516c66 100644 --- a/nova/tests/model_unittest.py +++ b/nova/tests/model_unittest.py @@ -108,14 +108,14 @@ class ModelTestCase(test.TrialTestCase): self.assertEqual(x.identifier, 'i-test') def test_instance_associates_node(self): - """create, then check that it is listed for the node_name""" + """create, then check that it is listed for the host""" instance = self.create_instance() found = False - for x in model.InstanceDirectory().by_node(FLAGS.node_name): + for x in model.InstanceDirectory().by_node(FLAGS.host): if x.identifier == 'i-test': found = True self.assertFalse(found) - instance['node_name'] = 'test_node' + instance['host'] = 'test_node' instance.save() for x in model.InstanceDirectory().by_node('test_node'): if x.identifier == 'i-test': diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index 7cd20dfcd..f3124c1ba 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -91,7 +91,7 @@ class NetworkTestCase(test.TrialTestCase): try: db.floating_ip_get_by_address(None, ip_str) except exception.NotFound: - db.floating_ip_create(None, ip_str, FLAGS.node_name) + db.floating_ip_create(None, ip_str, FLAGS.host) 
float_addr = self.network.allocate_floating_ip(self.context, self.projects[0].id) fix_addr = self._create_address(0) diff --git a/nova/tests/service_unittest.py b/nova/tests/service_unittest.py index 902f9bab1..318abe645 100644 --- a/nova/tests/service_unittest.py +++ b/nova/tests/service_unittest.py @@ -58,7 +58,7 @@ class ServiceTestCase(test.BaseTestCase): rpc.AdapterConsumer) rpc.AdapterConsumer(connection=mox.IgnoreArg(), - topic='fake.%s' % FLAGS.node_name, + topic='fake.%s' % FLAGS.host, proxy=mox.IsA(service.Service)).AndReturn( rpc.AdapterConsumer) @@ -82,37 +82,37 @@ class ServiceTestCase(test.BaseTestCase): # 'model_disconnected' and report_state doesn't really do much so this # these are mostly just for coverage def test_report_state(self): - node_name = 'foo' + host = 'foo' binary = 'bar' - daemon_ref = {'node_name': node_name, + daemon_ref = {'host': host, 'binary': binary, 'report_count': 0, 'id': 1} service.db.__getattr__('report_state') service.db.daemon_get_by_args(None, - node_name, + host, binary).AndReturn(daemon_ref) service.db.daemon_update(None, daemon_ref['id'], mox.ContainsKeyValue('report_count', 1)) self.mox.ReplayAll() s = service.Service() - rv = yield s.report_state(node_name, binary) + rv = yield s.report_state(host, binary) def test_report_state_no_daemon(self): - node_name = 'foo' + host = 'foo' binary = 'bar' - daemon_create = {'node_name': node_name, + daemon_create = {'host': host, 'binary': binary, 'report_count': 0} - daemon_ref = {'node_name': node_name, + daemon_ref = {'host': host, 'binary': binary, 'report_count': 0, 'id': 1} service.db.__getattr__('report_state') service.db.daemon_get_by_args(None, - node_name, + host, binary).AndRaise(exception.NotFound()) service.db.daemon_create(None, daemon_create).AndReturn(daemon_ref['id']) @@ -122,38 +122,38 @@ class ServiceTestCase(test.BaseTestCase): self.mox.ReplayAll() s = service.Service() - rv = yield s.report_state(node_name, binary) + rv = yield s.report_state(host, binary) 
def test_report_state_newly_disconnected(self): - node_name = 'foo' + host = 'foo' binary = 'bar' - daemon_ref = {'node_name': node_name, + daemon_ref = {'host': host, 'binary': binary, 'report_count': 0, 'id': 1} service.db.__getattr__('report_state') service.db.daemon_get_by_args(None, - node_name, + host, binary).AndRaise(Exception()) self.mox.ReplayAll() s = service.Service() - rv = yield s.report_state(node_name, binary) + rv = yield s.report_state(host, binary) self.assert_(s.model_disconnected) def test_report_state_newly_connected(self): - node_name = 'foo' + host = 'foo' binary = 'bar' - daemon_ref = {'node_name': node_name, + daemon_ref = {'host': host, 'binary': binary, 'report_count': 0, 'id': 1} service.db.__getattr__('report_state') service.db.daemon_get_by_args(None, - node_name, + host, binary).AndReturn(daemon_ref) service.db.daemon_update(None, daemon_ref['id'], mox.ContainsKeyValue('report_count', 1)) @@ -161,6 +161,6 @@ class ServiceTestCase(test.BaseTestCase): self.mox.ReplayAll() s = service.Service() s.model_disconnected = True - rv = yield s.report_state(node_name, binary) + rv = yield s.report_state(host, binary) self.assert_(not s.model_disconnected) diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 94d2f7d70..e5f4805a1 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -78,7 +78,7 @@ class AOEManager(manager.Manager): self.db.volume_update(context, volume_id, - {'node_name': FLAGS.node_name}) + {'host': FLAGS.host}) size = volume_ref['size'] logging.debug("volume %s: creating lv of size %sG", volume_id, size) @@ -111,7 +111,7 @@ class AOEManager(manager.Manager): volume_ref = self.db.volume_get(context, volume_id) if volume_ref['attach_status'] == "attached": raise exception.Error("Volume is still attached") - if volume_ref['node_name'] != FLAGS.node_name: + if volume_ref['host'] != FLAGS.host: raise exception.Error("Volume is not local to this node") shelf_id, blade_id = 
self.db.volume_get_shelf_and_blade(context, volume_id) -- cgit From 16f4faf4039ecab8119a31d77eb197a1928639ec Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 31 Aug 2010 17:00:48 -0700 Subject: pylint cleanup of db classes --- nova/db/api.py | 137 +++++++++++++++++------------------ nova/db/sqlalchemy/api.py | 164 ++++++++++++++++++++++-------------------- nova/db/sqlalchemy/session.py | 12 ++-- 3 files changed, 163 insertions(+), 150 deletions(-) diff --git a/nova/db/api.py b/nova/db/api.py index 8c0649df2..6cb49b7e4 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -29,7 +29,7 @@ flags.DEFINE_string('db_backend', 'sqlalchemy', 'The backend to use for db') -_impl = utils.LazyPluggable(FLAGS['db_backend'], +IMPL = utils.LazyPluggable(FLAGS['db_backend'], sqlalchemy='nova.db.sqlalchemy.api') @@ -54,17 +54,17 @@ class NoMoreNetworks(exception.Error): def daemon_get(context, daemon_id): """Get an daemon or raise if it does not exist.""" - return _impl.daemon_get(context, daemon_id) + return IMPL.daemon_get(context, daemon_id) def daemon_get_by_args(context, host, binary): """Get the state of an daemon by node name and binary.""" - return _impl.daemon_get_by_args(context, host, binary) + return IMPL.daemon_get_by_args(context, host, binary) def daemon_create(context, values): """Create a daemon from the values dictionary.""" - return _impl.daemon_create(context, values) + return IMPL.daemon_create(context, values) def daemon_update(context, daemon_id, values): @@ -73,7 +73,7 @@ def daemon_update(context, daemon_id, values): Raises NotFound if daemon does not exist. """ - return _impl.daemon_update(context, daemon_id, values) + return IMPL.daemon_update(context, daemon_id, values) ################### @@ -84,12 +84,12 @@ def floating_ip_allocate_address(context, host, project_id): Raises if one is not available. 
""" - return _impl.floating_ip_allocate_address(context, host, project_id) + return IMPL.floating_ip_allocate_address(context, host, project_id) def floating_ip_create(context, address, host): """Create a floating ip for a given address on the specified host.""" - return _impl.floating_ip_create(context, address, host) + return IMPL.floating_ip_create(context, address, host) def floating_ip_disassociate(context, address): @@ -97,29 +97,29 @@ def floating_ip_disassociate(context, address): Returns the address of the existing fixed ip. """ - return _impl.floating_ip_disassociate(context, address) + return IMPL.floating_ip_disassociate(context, address) def floating_ip_deallocate(context, address): """Deallocate an floating ip by address""" - return _impl.floating_ip_deallocate(context, address) + return IMPL.floating_ip_deallocate(context, address) def floating_ip_fixed_ip_associate(context, floating_address, fixed_address): """Associate an floating ip to a fixed_ip by address.""" - return _impl.floating_ip_fixed_ip_associate(context, + return IMPL.floating_ip_fixed_ip_associate(context, floating_address, fixed_address) def floating_ip_get_by_address(context, address): """Get a floating ip by address or raise if it doesn't exist.""" - return _impl.floating_ip_get_by_address(context, address) + return IMPL.floating_ip_get_by_address(context, address) def floating_ip_get_instance(context, address): """Get an instance for a floating ip by address.""" - return _impl.floating_ip_get_instance(context, address) + return IMPL.floating_ip_get_instance(context, address) #################### @@ -130,47 +130,47 @@ def fixed_ip_allocate(context, network_id): Raises if one is not available. 
""" - return _impl.fixed_ip_allocate(context, network_id) + return IMPL.fixed_ip_allocate(context, network_id) def fixed_ip_create(context, values): """Create a fixed ip from the values dictionary.""" - return _impl.fixed_ip_create(context, values) + return IMPL.fixed_ip_create(context, values) def fixed_ip_deallocate(context, address): """Deallocate a fixed ip by address.""" - return _impl.fixed_ip_deallocate(context, address) + return IMPL.fixed_ip_deallocate(context, address) def fixed_ip_get_by_address(context, address): """Get a fixed ip by address or raise if it does not exist.""" - return _impl.fixed_ip_get_by_address(context, address) + return IMPL.fixed_ip_get_by_address(context, address) def fixed_ip_get_instance(context, address): """Get an instance for a fixed ip by address.""" - return _impl.fixed_ip_get_instance(context, address) + return IMPL.fixed_ip_get_instance(context, address) def fixed_ip_get_network(context, address): """Get a network for a fixed ip by address.""" - return _impl.fixed_ip_get_network(context, address) + return IMPL.fixed_ip_get_network(context, address) def fixed_ip_instance_associate(context, address, instance_id): """Associate a fixed ip to an instance by address.""" - return _impl.fixed_ip_instance_associate(context, address, instance_id) + return IMPL.fixed_ip_instance_associate(context, address, instance_id) def fixed_ip_instance_disassociate(context, address): """Disassociate a fixed ip from an instance by address.""" - return _impl.fixed_ip_instance_disassociate(context, address) + return IMPL.fixed_ip_instance_disassociate(context, address) def fixed_ip_update(context, address, values): """Create a fixed ip from the values dictionary.""" - return _impl.fixed_ip_update(context, address, values) + return IMPL.fixed_ip_update(context, address, values) #################### @@ -178,62 +178,62 @@ def fixed_ip_update(context, address, values): def instance_create(context, values): """Create an instance from the values 
dictionary.""" - return _impl.instance_create(context, values) + return IMPL.instance_create(context, values) def instance_destroy(context, instance_id): """Destroy the instance or raise if it does not exist.""" - return _impl.instance_destroy(context, instance_id) + return IMPL.instance_destroy(context, instance_id) def instance_get(context, instance_id): """Get an instance or raise if it does not exist.""" - return _impl.instance_get(context, instance_id) + return IMPL.instance_get(context, instance_id) def instance_get_all(context): """Get all instances.""" - return _impl.instance_get_all(context) + return IMPL.instance_get_all(context) def instance_get_by_project(context, project_id): """Get all instance belonging to a project.""" - return _impl.instance_get_by_project(context, project_id) + return IMPL.instance_get_by_project(context, project_id) def instance_get_by_reservation(context, reservation_id): """Get all instance belonging to a reservation.""" - return _impl.instance_get_by_reservation(context, reservation_id) + return IMPL.instance_get_by_reservation(context, reservation_id) def instance_get_fixed_address(context, instance_id): """Get the fixed ip address of an instance.""" - return _impl.instance_get_fixed_address(context, instance_id) + return IMPL.instance_get_fixed_address(context, instance_id) def instance_get_floating_address(context, instance_id): """Get the first floating ip address of an instance.""" - return _impl.instance_get_floating_address(context, instance_id) + return IMPL.instance_get_floating_address(context, instance_id) def instance_get_by_str(context, str_id): """Get an instance by string id.""" - return _impl.instance_get_by_str(context, str_id) + return IMPL.instance_get_by_str(context, str_id) def instance_get_host(context, instance_id): """Get the host that the instance is running on.""" - return _impl.instance_get_host(context, instance_id) + return IMPL.instance_get_host(context, instance_id) def instance_is_vpn(context, 
instance_id): """True if instance is a vpn.""" - return _impl.instance_is_vpn(context, instance_id) + return IMPL.instance_is_vpn(context, instance_id) def instance_state(context, instance_id, state, description=None): """Set the state of an instance.""" - return _impl.instance_state(context, instance_id, state, description) + return IMPL.instance_state(context, instance_id, state, description) def instance_update(context, instance_id, values): @@ -242,7 +242,7 @@ def instance_update(context, instance_id, values): Raises NotFound if instance does not exist. """ - return _impl.instance_update(context, instance_id, values) + return IMPL.instance_update(context, instance_id, values) #################### @@ -250,87 +250,88 @@ def instance_update(context, instance_id, values): def network_count(context): """Return the number of networks.""" - return _impl.network_count(context) + return IMPL.network_count(context) def network_count_allocated_ips(context, network_id): """Return the number of allocated non-reserved ips in the network.""" - return _impl.network_count_allocated_ips(context, network_id) + return IMPL.network_count_allocated_ips(context, network_id) def network_count_available_ips(context, network_id): """Return the number of available ips in the network.""" - return _impl.network_count_available_ips(context, network_id) + return IMPL.network_count_available_ips(context, network_id) def network_count_reserved_ips(context, network_id): """Return the number of reserved ips in the network.""" - return _impl.network_count_reserved_ips(context, network_id) + return IMPL.network_count_reserved_ips(context, network_id) def network_create(context, values): """Create a network from the values dictionary.""" - return _impl.network_create(context, values) + return IMPL.network_create(context, values) def network_create_fixed_ips(context, network_id, num_vpn_clients): """Create the ips for the network, reserving sepecified ips.""" - return 
_impl.network_create_fixed_ips(context, network_id, num_vpn_clients) + return IMPL.network_create_fixed_ips(context, network_id, num_vpn_clients) def network_destroy(context, network_id): """Destroy the network or raise if it does not exist.""" - return _impl.network_destroy(context, network_id) + return IMPL.network_destroy(context, network_id) def network_get(context, network_id): """Get an network or raise if it does not exist.""" - return _impl.network_get(context, network_id) + return IMPL.network_get(context, network_id) +# pylint: disable-msg=C0103 def network_get_associated_fixed_ips(context, network_id): """Get all network's ips that have been associated.""" - return _impl.network_get_associated_fixed_ips(context, network_id) + return IMPL.network_get_associated_fixed_ips(context, network_id) def network_get_by_bridge(context, bridge): """Get an network or raise if it does not exist.""" - return _impl.network_get_by_bridge(context, bridge) + return IMPL.network_get_by_bridge(context, bridge) def network_get_host(context, network_id): """Get host assigned to network or raise""" - return _impl.network_get_host(context, network_id) + return IMPL.network_get_host(context, network_id) def network_get_index(context, network_id): """Get non-conflicting index for network""" - return _impl.network_get_index(context, network_id) + return IMPL.network_get_index(context, network_id) def network_get_vpn_ip(context, network_id): """Get non-conflicting index for network""" - return _impl.network_get_vpn_ip(context, network_id) + return IMPL.network_get_vpn_ip(context, network_id) def network_index_count(context): """Return count of network indexes""" - return _impl.network_index_count(context) + return IMPL.network_index_count(context) def network_index_create(context, values): """Create a network index from the values dict""" - return _impl.network_index_create(context, values) + return IMPL.network_index_create(context, values) def network_set_cidr(context, network_id, 
cidr): """Set the Classless Inner Domain Routing for the network""" - return _impl.network_set_cidr(context, network_id, cidr) + return IMPL.network_set_cidr(context, network_id, cidr) def network_set_host(context, network_id, host_id): """Safely set the host for network""" - return _impl.network_set_host(context, network_id, host_id) + return IMPL.network_set_host(context, network_id, host_id) def network_update(context, network_id, values): @@ -339,7 +340,7 @@ def network_update(context, network_id, values): Raises NotFound if network does not exist. """ - return _impl.network_update(context, network_id, values) + return IMPL.network_update(context, network_id, values) ################### @@ -347,7 +348,7 @@ def network_update(context, network_id, values): def project_get_network(context, project_id): """Return the network associated with the project.""" - return _impl.project_get_network(context, project_id) + return IMPL.project_get_network(context, project_id) ################### @@ -355,7 +356,7 @@ def project_get_network(context, project_id): def queue_get_for(context, topic, physical_node_id): """Return a channel to send a message to a node with a topic.""" - return _impl.queue_get_for(context, topic, physical_node_id) + return IMPL.queue_get_for(context, topic, physical_node_id) ################### @@ -363,12 +364,12 @@ def queue_get_for(context, topic, physical_node_id): def export_device_count(context): """Return count of export devices.""" - return _impl.export_device_count(context) + return IMPL.export_device_count(context) def export_device_create(context, values): """Create an export_device from the values dictionary.""" - return _impl.export_device_create(context, values) + return IMPL.export_device_create(context, values) ################### @@ -376,57 +377,57 @@ def export_device_create(context, values): def volume_allocate_shelf_and_blade(context, volume_id): """Atomically allocate a free shelf and blade from the pool.""" - return 
_impl.volume_allocate_shelf_and_blade(context, volume_id) + return IMPL.volume_allocate_shelf_and_blade(context, volume_id) def volume_attached(context, volume_id, instance_id, mountpoint): """Ensure that a volume is set as attached.""" - return _impl.volume_attached(context, volume_id, instance_id, mountpoint) + return IMPL.volume_attached(context, volume_id, instance_id, mountpoint) def volume_create(context, values): """Create a volume from the values dictionary.""" - return _impl.volume_create(context, values) + return IMPL.volume_create(context, values) def volume_destroy(context, volume_id): """Destroy the volume or raise if it does not exist.""" - return _impl.volume_destroy(context, volume_id) + return IMPL.volume_destroy(context, volume_id) def volume_detached(context, volume_id): """Ensure that a volume is set as detached.""" - return _impl.volume_detached(context, volume_id) + return IMPL.volume_detached(context, volume_id) def volume_get(context, volume_id): """Get a volume or raise if it does not exist.""" - return _impl.volume_get(context, volume_id) + return IMPL.volume_get(context, volume_id) def volume_get_all(context): """Get all volumes.""" - return _impl.volume_get_all(context) + return IMPL.volume_get_all(context) def volume_get_by_project(context, project_id): """Get all volumes belonging to a project.""" - return _impl.volume_get_by_project(context, project_id) + return IMPL.volume_get_by_project(context, project_id) def volume_get_by_str(context, str_id): """Get a volume by string id.""" - return _impl.volume_get_by_str(context, str_id) + return IMPL.volume_get_by_str(context, str_id) def volume_get_host(context, volume_id): """Get the host that the volume is running on.""" - return _impl.volume_get_host(context, volume_id) + return IMPL.volume_get_host(context, volume_id) def volume_get_shelf_and_blade(context, volume_id): """Get the shelf and blade allocated to the volume.""" - return _impl.volume_get_shelf_and_blade(context, volume_id) + 
return IMPL.volume_get_shelf_and_blade(context, volume_id) def volume_update(context, volume_id, values): @@ -435,4 +436,4 @@ def volume_update(context, volume_id, values): Raises NotFound if volume does not exist. """ - return _impl.volume_update(context, volume_id, values) + return IMPL.volume_update(context, volume_id, values) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index f6be037b3..5d98ee5bf 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -15,6 +15,9 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +""" +Implementation of SQLAlchemy backend +""" from nova import db from nova import exception @@ -25,19 +28,22 @@ from sqlalchemy import or_ FLAGS = flags.FLAGS +# NOTE(vish): disabling docstring pylint because the docstrings are +# in the interface definition +# pylint: disable-msg=C0111 ################### -def daemon_get(context, daemon_id): +def daemon_get(_context, daemon_id): return models.Daemon.find(daemon_id) -def daemon_get_by_args(context, host, binary): +def daemon_get_by_args(_context, host, binary): return models.Daemon.find_by_args(host, binary) -def daemon_create(context, values): +def daemon_create(_context, values): daemon_ref = models.Daemon() for (key, value) in values.iteritems(): daemon_ref[key] = value @@ -45,8 +51,8 @@ def daemon_create(context, values): return daemon_ref.id -def daemon_update(context, daemon_id, values): - daemon_ref = daemon_get(context, daemon_id) +def daemon_update(_context, daemon_id, values): + daemon_ref = daemon_get(_context, daemon_id) for (key, value) in values.iteritems(): daemon_ref[key] = value daemon_ref.save() @@ -55,7 +61,7 @@ def daemon_update(context, daemon_id, values): ################### -def floating_ip_allocate_address(context, host, project_id): +def floating_ip_allocate_address(_context, host, project_id): with 
managed_session(autocommit=False) as session: floating_ip_ref = session.query(models.FloatingIp) \ .filter_by(host=host) \ @@ -73,7 +79,7 @@ def floating_ip_allocate_address(context, host, project_id): return floating_ip_ref['address'] -def floating_ip_create(context, address, host): +def floating_ip_create(_context, address, host): floating_ip_ref = models.FloatingIp() floating_ip_ref['address'] = address floating_ip_ref['host'] = host @@ -81,7 +87,7 @@ def floating_ip_create(context, address, host): return floating_ip_ref -def floating_ip_fixed_ip_associate(context, floating_address, fixed_address): +def floating_ip_fixed_ip_associate(_context, floating_address, fixed_address): with managed_session(autocommit=False) as session: floating_ip_ref = models.FloatingIp.find_by_str(floating_address, session=session) @@ -92,7 +98,7 @@ def floating_ip_fixed_ip_associate(context, floating_address, fixed_address): session.commit() -def floating_ip_disassociate(context, address): +def floating_ip_disassociate(_context, address): with managed_session(autocommit=False) as session: floating_ip_ref = models.FloatingIp.find_by_str(address, session=session) @@ -107,7 +113,7 @@ def floating_ip_disassociate(context, address): return fixed_ip_address -def floating_ip_deallocate(context, address): +def floating_ip_deallocate(_context, address): with managed_session(autocommit=False) as session: floating_ip_ref = models.FloatingIp.find_by_str(address, session=session) @@ -115,11 +121,11 @@ def floating_ip_deallocate(context, address): floating_ip_ref.save(session=session) -def floating_ip_get_by_address(context, address): +def floating_ip_get_by_address(_context, address): return models.FloatingIp.find_by_str(address) -def floating_ip_get_instance(context, address): +def floating_ip_get_instance(_context, address): with managed_session() as session: floating_ip_ref = models.FloatingIp.find_by_str(address, session=session) @@ -129,7 +135,7 @@ def floating_ip_get_instance(context, 
address): ################### -def fixed_ip_allocate(context, network_id): +def fixed_ip_allocate(_context, network_id): with managed_session(autocommit=False) as session: network_or_none = or_(models.FixedIp.network_id == network_id, models.FixedIp.network_id == None) @@ -153,7 +159,7 @@ def fixed_ip_allocate(context, network_id): return fixed_ip_ref['address'] -def fixed_ip_create(context, values): +def fixed_ip_create(_context, values): fixed_ip_ref = models.FixedIp() for (key, value) in values.iteritems(): fixed_ip_ref[key] = value @@ -161,27 +167,27 @@ def fixed_ip_create(context, values): return fixed_ip_ref['address'] -def fixed_ip_get_by_address(context, address): +def fixed_ip_get_by_address(_context, address): return models.FixedIp.find_by_str(address) -def fixed_ip_get_instance(context, address): +def fixed_ip_get_instance(_context, address): with managed_session() as session: return models.FixedIp.find_by_str(address, session=session).instance -def fixed_ip_get_network(context, address): +def fixed_ip_get_network(_context, address): with managed_session() as session: return models.FixedIp.find_by_str(address, session=session).network -def fixed_ip_deallocate(context, address): - fixed_ip_ref = fixed_ip_get_by_address(context, address) +def fixed_ip_deallocate(_context, address): + fixed_ip_ref = fixed_ip_get_by_address(_context, address) fixed_ip_ref['allocated'] = False fixed_ip_ref.save() -def fixed_ip_instance_associate(context, address, instance_id): +def fixed_ip_instance_associate(_context, address, instance_id): with managed_session(autocommit=False) as session: fixed_ip_ref = models.FixedIp.find_by_str(address, session=session) instance_ref = models.Instance.find(instance_id, session=session) @@ -190,7 +196,7 @@ def fixed_ip_instance_associate(context, address, instance_id): session.commit() -def fixed_ip_instance_disassociate(context, address): +def fixed_ip_instance_disassociate(_context, address): with managed_session(autocommit=False) as 
session: fixed_ip_ref = models.FixedIp.find_by_str(address, session=session) fixed_ip_ref.instance = None @@ -198,8 +204,8 @@ def fixed_ip_instance_disassociate(context, address): session.commit() -def fixed_ip_update(context, address, values): - fixed_ip_ref = fixed_ip_get_by_address(context, address) +def fixed_ip_update(_context, address, values): + fixed_ip_ref = fixed_ip_get_by_address(_context, address) for (key, value) in values.iteritems(): fixed_ip_ref[key] = value fixed_ip_ref.save() @@ -208,7 +214,7 @@ def fixed_ip_update(context, address, values): ################### -def instance_create(context, values): +def instance_create(_context, values): instance_ref = models.Instance() for (key, value) in values.iteritems(): instance_ref[key] = value @@ -216,20 +222,20 @@ def instance_create(context, values): return instance_ref.id -def instance_destroy(context, instance_id): - instance_ref = instance_get(context, instance_id) +def instance_destroy(_context, instance_id): + instance_ref = instance_get(_context, instance_id) instance_ref.delete() -def instance_get(context, instance_id): +def instance_get(_context, instance_id): return models.Instance.find(instance_id) -def instance_get_all(context): +def instance_get_all(_context): return models.Instance.all() -def instance_get_by_project(context, project_id): +def instance_get_by_project(_context, project_id): with managed_session() as session: return session.query(models.Instance) \ .filter_by(project_id=project_id) \ @@ -237,7 +243,7 @@ def instance_get_by_project(context, project_id): .all() -def instance_get_by_reservation(context, reservation_id): +def instance_get_by_reservation(_context, reservation_id): with managed_session() as session: return session.query(models.Instance) \ .filter_by(reservation_id=reservation_id) \ @@ -245,11 +251,11 @@ def instance_get_by_reservation(context, reservation_id): .all() -def instance_get_by_str(context, str_id): +def instance_get_by_str(_context, str_id): return 
models.Instance.find_by_str(str_id) -def instance_get_fixed_address(context, instance_id): +def instance_get_fixed_address(_context, instance_id): with managed_session() as session: instance_ref = models.Instance.find(instance_id, session=session) if not instance_ref.fixed_ip: @@ -257,7 +263,7 @@ def instance_get_fixed_address(context, instance_id): return instance_ref.fixed_ip['address'] -def instance_get_floating_address(context, instance_id): +def instance_get_floating_address(_context, instance_id): with managed_session() as session: instance_ref = models.Instance.find(instance_id, session=session) if not instance_ref.fixed_ip: @@ -268,23 +274,23 @@ def instance_get_floating_address(context, instance_id): return instance_ref.fixed_ip.floating_ips[0]['address'] -def instance_get_host(context, instance_id): - instance_ref = instance_get(context, instance_id) +def instance_get_host(_context, instance_id): + instance_ref = instance_get(_context, instance_id) return instance_ref['host'] -def instance_is_vpn(context, instance_id): - instance_ref = instance_get(context, instance_id) +def instance_is_vpn(_context, instance_id): + instance_ref = instance_get(_context, instance_id) return instance_ref['image_id'] == FLAGS.vpn_image_id -def instance_state(context, instance_id, state, description=None): - instance_ref = instance_get(context, instance_id) +def instance_state(_context, instance_id, state, description=None): + instance_ref = instance_get(_context, instance_id) instance_ref.set_state(state, description) -def instance_update(context, instance_id, values): - instance_ref = instance_get(context, instance_id) +def instance_update(_context, instance_id, values): + instance_ref = instance_get(_context, instance_id) for (key, value) in values.iteritems(): instance_ref[key] = value instance_ref.save() @@ -293,11 +299,11 @@ def instance_update(context, instance_id, values): ################### -def network_count(context): +def network_count(_context): return 
models.Network.count() -def network_count_allocated_ips(context, network_id): +def network_count_allocated_ips(_context, network_id): with managed_session() as session: return session.query(models.FixedIp) \ .filter_by(network_id=network_id) \ @@ -306,7 +312,7 @@ def network_count_allocated_ips(context, network_id): .count() -def network_count_available_ips(context, network_id): +def network_count_available_ips(_context, network_id): with managed_session() as session: return session.query(models.FixedIp) \ .filter_by(network_id=network_id) \ @@ -316,7 +322,7 @@ def network_count_available_ips(context, network_id): .count() -def network_count_reserved_ips(context, network_id): +def network_count_reserved_ips(_context, network_id): with managed_session() as session: return session.query(models.FixedIp) \ .filter_by(network_id=network_id) \ @@ -325,7 +331,7 @@ def network_count_reserved_ips(context, network_id): .count() -def network_create(context, values): +def network_create(_context, values): network_ref = models.Network() for (key, value) in values.iteritems(): network_ref[key] = value @@ -333,7 +339,7 @@ def network_create(context, values): return network_ref -def network_destroy(context, network_id): +def network_destroy(_context, network_id): with managed_session(autocommit=False) as session: # TODO(vish): do we have to use sql here? 
session.execute('update networks set deleted=1 where id=:id', @@ -351,19 +357,21 @@ def network_destroy(context, network_id): session.commit() -def network_get(context, network_id): +def network_get(_context, network_id): return models.Network.find(network_id) -def network_get_associated_fixed_ips(context, network_id): +# pylint: disable-msg=C0103 +def network_get_associated_fixed_ips(_context, network_id): with managed_session() as session: return session.query(models.FixedIp) \ + .filter_by(network_id=network_id) \ .filter(models.FixedIp.instance_id != None) \ .filter_by(deleted=False) \ .all() -def network_get_by_bridge(context, bridge): +def network_get_by_bridge(_context, bridge): with managed_session() as session: rv = session.query(models.Network) \ .filter_by(bridge=bridge) \ @@ -374,12 +382,12 @@ def network_get_by_bridge(context, bridge): return rv -def network_get_host(context, network_id): - network_ref = network_get(context, network_id) +def network_get_host(_context, network_id): + network_ref = network_get(_context, network_id) return network_ref['host'] -def network_get_index(context, network_id): +def network_get_index(_context, network_id): with managed_session(autocommit=False) as session: network_index = session.query(models.NetworkIndex) \ .filter_by(network_id=None) \ @@ -395,18 +403,18 @@ def network_get_index(context, network_id): return network_index['index'] -def network_index_count(context): +def network_index_count(_context): return models.NetworkIndex.count() -def network_index_create(context, values): +def network_index_create(_context, values): network_index_ref = models.NetworkIndex() for (key, value) in values.iteritems(): network_index_ref[key] = value network_index_ref.save() -def network_set_host(context, network_id, host_id): +def network_set_host(_context, network_id, host_id): with managed_session(autocommit=False) as session: network = session.query(models.Network) \ .filter_by(id=network_id) \ @@ -427,8 +435,8 @@ def 
network_set_host(context, network_id, host_id): return network['host'] -def network_update(context, network_id, values): - network_ref = network_get(context, network_id) +def network_update(_context, network_id, values): + network_ref = network_get(_context, network_id) for (key, value) in values.iteritems(): network_ref[key] = value network_ref.save() @@ -437,7 +445,7 @@ def network_update(context, network_id, values): ################### -def project_get_network(context, project_id): +def project_get_network(_context, project_id): with managed_session() as session: rv = session.query(models.Network) \ .filter_by(project_id=project_id) \ @@ -451,18 +459,18 @@ def project_get_network(context, project_id): ################### -def queue_get_for(context, topic, physical_node_id): +def queue_get_for(_context, topic, physical_node_id): # FIXME(ja): this should be servername? return "%s.%s" % (topic, physical_node_id) ################### -def export_device_count(context): +def export_device_count(_context): return models.ExportDevice.count() -def export_device_create(context, values): +def export_device_create(_context, values): export_device_ref = models.ExportDevice() for (key, value) in values.iteritems(): export_device_ref[key] = value @@ -473,7 +481,7 @@ def export_device_create(context, values): ################### -def volume_allocate_shelf_and_blade(context, volume_id): +def volume_allocate_shelf_and_blade(_context, volume_id): with managed_session(autocommit=False) as session: export_device = session.query(models.ExportDevice) \ .filter_by(volume=None) \ @@ -490,8 +498,8 @@ def volume_allocate_shelf_and_blade(context, volume_id): return (export_device.shelf_id, export_device.blade_id) -def volume_attached(context, volume_id, instance_id, mountpoint): - volume_ref = volume_get(context, volume_id) +def volume_attached(_context, volume_id, instance_id, mountpoint): + volume_ref = volume_get(_context, volume_id) volume_ref.instance_id = instance_id 
volume_ref['status'] = 'in-use' volume_ref['mountpoint'] = mountpoint @@ -499,7 +507,7 @@ def volume_attached(context, volume_id, instance_id, mountpoint): volume_ref.save() -def volume_create(context, values): +def volume_create(_context, values): volume_ref = models.Volume() for (key, value) in values.iteritems(): volume_ref[key] = value @@ -507,7 +515,7 @@ def volume_create(context, values): return volume_ref -def volume_destroy(context, volume_id): +def volume_destroy(_context, volume_id): with managed_session(autocommit=False) as session: # TODO(vish): do we have to use sql here? session.execute('update volumes set deleted=1 where id=:id', @@ -518,8 +526,8 @@ def volume_destroy(context, volume_id): session.commit() -def volume_detached(context, volume_id): - volume_ref = volume_get(context, volume_id) +def volume_detached(_context, volume_id): + volume_ref = volume_get(_context, volume_id) volume_ref['instance_id'] = None volume_ref['mountpoint'] = None volume_ref['status'] = 'available' @@ -527,15 +535,15 @@ def volume_detached(context, volume_id): volume_ref.save() -def volume_get(context, volume_id): +def volume_get(_context, volume_id): return models.Volume.find(volume_id) -def volume_get_all(context): +def volume_get_all(_context): return models.Volume.all() -def volume_get_by_project(context, project_id): +def volume_get_by_project(_context, project_id): with managed_session() as session: return session.query(models.Volume) \ .filter_by(project_id=project_id) \ @@ -543,16 +551,16 @@ def volume_get_by_project(context, project_id): .all() -def volume_get_by_str(context, str_id): +def volume_get_by_str(_context, str_id): return models.Volume.find_by_str(str_id) -def volume_get_host(context, volume_id): - volume_ref = volume_get(context, volume_id) +def volume_get_host(_context, volume_id): + volume_ref = volume_get(_context, volume_id) return volume_ref['host'] -def volume_get_shelf_and_blade(context, volume_id): +def volume_get_shelf_and_blade(_context, 
volume_id): with managed_session() as session: export_device = session.query(models.ExportDevice) \ .filter_by(volume_id=volume_id) \ @@ -562,8 +570,8 @@ def volume_get_shelf_and_blade(context, volume_id): return (export_device.shelf_id, export_device.blade_id) -def volume_update(context, volume_id, values): - volume_ref = volume_get(context, volume_id) +def volume_update(_context, volume_id, values): + volume_ref = volume_get(_context, volume_id) for (key, value) in values.iteritems(): volume_ref[key] = value volume_ref.save() diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py index 201948328..70e3212e1 100644 --- a/nova/db/sqlalchemy/session.py +++ b/nova/db/sqlalchemy/session.py @@ -15,6 +15,9 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +""" +Session Handling for SQLAlchemy backend +""" import logging @@ -27,18 +30,19 @@ FLAGS = flags.FLAGS def managed_session(autocommit=True): + """Helper method to grab session manager""" return SessionExecutionManager(autocommit=autocommit) class SessionExecutionManager: + """Session manager supporting with .. 
as syntax""" _engine = None _session = None def __init__(self, autocommit): - cls = SessionExecutionManager - if not cls._engine: - cls._engine = create_engine(FLAGS.sql_connection, echo=False) - self._session = create_session(bind=cls._engine, + if not self._engine: + self._engine = create_engine(FLAGS.sql_connection, echo=False) + self._session = create_session(bind=self._engine, autocommit=autocommit) def __enter__(self): -- cgit From 975861fd0b8fe7c89ccb6a31b0d0c89948c18252 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 31 Aug 2010 17:35:04 -0700 Subject: pylint clean of manager and service --- nova/manager.py | 6 ++---- nova/service.py | 5 +++-- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/nova/manager.py b/nova/manager.py index 20b58bd13..4cc27f05b 100644 --- a/nova/manager.py +++ b/nova/manager.py @@ -15,7 +15,6 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
- """ Base class for managers of different parts of the system """ @@ -33,6 +32,5 @@ class Manager(object): """DB driver is injected in the init method""" def __init__(self, db_driver=None): if not db_driver: - db_driver=FLAGS.db_driver - self.db = utils.import_object(db_driver) - + db_driver = FLAGS.db_driver + self.db = utils.import_object(db_driver) # pylint: disable-msg=C0103 diff --git a/nova/service.py b/nova/service.py index d39a5cf10..a6df7335b 100644 --- a/nova/service.py +++ b/nova/service.py @@ -80,7 +80,7 @@ class Service(object, service.Service): if not manager: manager = FLAGS.get('%s_manager' % topic, None) manager_ref = utils.import_object(manager) - logging.warn("Starting %s node" % topic) + logging.warn("Starting %s node", topic) service_ref = cls(manager_ref) conn = rpc.Connection.instance() consumer_all = rpc.AdapterConsumer( @@ -127,7 +127,8 @@ class Service(object, service.Service): self.model_disconnected = False logging.error("Recovered model server connection!") - except Exception, ex: #FIXME this should only be connection error + # TODO(vish): this should probably only catch connection errors + except: # pylint: disable-msg=W0702 if not getattr(self, "model_disconnected", False): self.model_disconnected = True logging.exception("model server went away") -- cgit