28 files changed, 971 insertions, 221 deletions
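The headline change is asynchronous dispatch: an overlord Client built with async=True returns a job id immediately instead of blocking, and the caller polls job_status() until one of the jobthing.JOB_ID_* codes reports completion. A minimal sketch of that calling pattern, modeled on the test/async_test.py changes below (the "*" minion glob and the test.add call are illustrative only):

    import time
    import func.jobthing as jobthing
    from func.overlord.client import Client

    client = Client("*", nforks=10, async=True)  # async=True: calls return a job id
    job_id = client.test.add(1, 2)               # dispatched on minions as async.test.add

    while True:
        (code, results) = client.job_status(job_id)
        if code in (jobthing.JOB_ID_RUNNING, jobthing.JOB_ID_ASYNC_PARTIAL):
            time.sleep(1)  # not all minions have reported back yet; poll again
        else:
            # JOB_ID_ASYNC_FINISHED carries a hostname -> result hash;
            # JOB_ID_LOST_IN_SPACE means the job id is unknown or expired
            print results
            break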
@@ -87,6 +87,11 @@ pyflakes: money: clean -sloccount --addlang "makefile" $(TOPDIR) $(PYDIRS) $(EXAMPLEDIR) $(INITDIR) +async: install + /sbin/service funcd restart + sleep 4 + python test/async_test.py + testit: clean -cd test; sh test-it.sh diff --git a/func/overlord/forkbomb.py b/func/forkbomb.py index c30cc9e..3dc12c8 100644 --- a/func/overlord/forkbomb.py +++ b/func/forkbomb.py @@ -20,6 +20,7 @@ import bsddb import sys import tempfile import fcntl +import utils DEFAULT_FORKS = 4 DEFAULT_CACHE_DIR = "/var/lib/func" @@ -52,11 +53,16 @@ def __access_buckets(filename,clear,new_key=None,new_value=None): if not storage.has_key("data"): storage["data"] = {} + else: + # print "DEBUG: existing: %s" % storage["data"] + pass if new_key is not None: # bsdb is a bit weird about this newish = storage["data"].copy() + new_value = utils.remove_exceptions(new_value) newish[new_key] = new_value + # print "DEBUG: newish: %s" % newish storage["data"] = newish rc = storage["data"].copy() @@ -118,7 +124,7 @@ def __demo(bucket_number, buckets, my_item): This is a demo handler for test purposes. It just multiplies all numbers by 1000, but slowly. """ - print ">> I am fork (%s) and I am processing item (%s)" % (bucket_number, my_item) + # print ">> I am fork (%s) and I am processing item (%s)" % (bucket_number, my_item) # just to verify forks are not sequential sleep = random.randrange(0,4) time.sleep(sleep) @@ -133,6 +139,7 @@ def batch_run(pool,callback,nforks=DEFAULT_FORKS,cachedir=DEFAULT_CACHE_DIR): if nforks <= 1: # modulus voodoo gets crazy otherwise and bad things happen nforks = 2 + # print "DEBUG: nforks=%s" % 2 shelf_file = __get_storage(cachedir) __access_buckets(shelf_file,True,None) buckets = __bucketize(pool, nforks) diff --git a/func/jobthing.py b/func/jobthing.py new file mode 100644 index 0000000..ca8ee38 --- /dev/null +++ b/func/jobthing.py @@ -0,0 +1,215 @@ +# jobthing is a module that allows for background execution of a task, and +# getting status of that task. The ultimate goal is to allow ajaxyness +# of GUI apps using Func, and also for extremely long running tasks that +# we don't want to block on as called by scripts using the FunC API. The +# CLI should not use this. +# +# Copyright 2007, Red Hat, Inc +# Michael DeHaan <mdehaan@redhat.com> +# +# This software may be freely redistributed under the terms of the GNU +# general public license. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + +import os +import random # for testing only +import time # for testing only +import shelve +import bsddb +import sys +import tempfile +import fcntl +import forkbomb +import utils + +JOB_ID_RUNNING = 0 +JOB_ID_FINISHED = 1 +JOB_ID_LOST_IN_SPACE = 2 +JOB_ID_ASYNC_PARTIAL = 3 +JOB_ID_ASYNC_FINISHED = 4 + +REMOTE_ERROR = utils.REMOTE_CANARY + +# how long to retain old job records in the job id database +RETAIN_INTERVAL = 60 * 60 + +# where to store the internal job id database +CACHE_DIR = "/var/lib/func" + +def __update_status(jobid, status, results, clear=False): + return __access_status(jobid=jobid, status=status, results=results, write=True) + +def __get_status(jobid): + return __access_status(jobid=jobid, write=False) + +def purge_old_jobs(): + return __access_status(purge=True) + +def __purge_old_jobs(storage): + """ + Deletes jobs older than RETAIN_INTERVAL seconds. 
+ MINOR FIXME: this probably should be a more intelligent algorithm that only + deletes jobs if the database is too big, and then only the oldest jobs, + but this will work just as well. + """ + nowtime = time.time() + for x in storage.keys(): + # minion jobs have "-minion" in the job id for disambiguation, so we need to remove that + jobkey = x.replace("-","").replace("minion","") + create_time = float(jobkey) + if nowtime - create_time > RETAIN_INTERVAL: + del storage[x] + +def __access_status(jobid=0, status=0, results=0, clear=False, write=False, purge=False): + + dir = os.path.expanduser(CACHE_DIR) + if not os.path.exists(dir): + os.makedirs(dir) + filename = os.path.join(dir,"status-%s" % os.getuid()) + + internal_db = bsddb.btopen(filename, 'c', 0644 ) + handle = open(filename,"r") + fcntl.flock(handle.fileno(), fcntl.LOCK_EX) + storage = shelve.BsdDbShelf(internal_db) + + + if clear: + storage.clear() + storage.close() + fcntl.flock(handle.fileno(), fcntl.LOCK_UN) + return {} + + if purge or write: + __purge_old_jobs(storage) + + if write: + results = utils.remove_exceptions(results) + # print "DEBUG: status=%s" % status + # print "DEBUG: results=%s" % results + storage[str(jobid)] = (status, results) + rc = jobid + elif not purge: + if storage.has_key(str(jobid)): + # tuple of (status, results) + + rc = storage[str(jobid)] + else: + rc = (JOB_ID_LOST_IN_SPACE, 0) + else: + rc = 0 + + storage.close() + fcntl.flock(handle.fileno(), fcntl.LOCK_UN) + + return rc + +def batch_run(server, process_server, nforks): + """ + This is the method used by the overlord side usage of jobthing. + Minion side usage will use minion_async_run instead. + + Given an array of items (pool), call callback on each one, but divide + the workload over nfork forks. Temporary files used during the + operation will be created in cachedir and subsequently deleted. + """ + + job_id = time.time() + pid = os.fork() + if pid != 0: + #print "DEBUG: UPDATE STATUS: r1: %s" % job_id + __update_status(job_id, JOB_ID_RUNNING, -1) + return job_id + else: + # kick off the job + __update_status(job_id, JOB_ID_RUNNING, -1) + results = forkbomb.batch_run(server, process_server, nforks) + + # we now have a list of job id's for each minion, kill the task + __update_status(job_id, JOB_ID_ASYNC_PARTIAL, results) + sys.exit(0) + +def minion_async_run(function_ref, args): + """ + This is a simpler invocation for minion side async usage. + """ + # to avoid confusion of job id's (we use the same job database) + # minion jobs contain the string "minion". + job_id = "%s-minion" % time.time() + pid = os.fork() + if pid != 0: + __update_status(job_id, JOB_ID_RUNNING, -1) + return job_id + else: + __update_status(job_id, JOB_ID_RUNNING, -1) + try: + results = function_ref(*args) + except Exception, e: + # FIXME: we need to make sure this is logged + # NOTE: we need a return here, else the async engine won't get any results, + # so that is the reason for the catch + # FIXME: it would be nice to store the string data from the exception here so that the caller + # can read the exception data; however, we can't store the exception directly in the DB. + # some care must be taken to also make this not suck for the user of the API, + # when they are iterating over batch results, so they can tell good data from exceptions that + # are represented as strings. Ideally, reconstructing the exceptions back into objects would be shiny, + # but if we go there I will need more caffeine first.
+ __update_status(job_id, JOB_ID_FINISHED, REMOTE_ERROR) + __update_status(job_id, JOB_ID_FINISHED, results) + sys.exit(0) + +def job_status(jobid, client_class=None): + + # NOTE: client_class is here to get around some evil circular reference + # type stuff. This is intended to be called by minions (who can leave it None) + # or by the Client module code (which does not need to be worried about it). API + # users should not be calling jobthing.py methods directly. + + got_status = __get_status(jobid) + + # if the status comes back as JOB_ID_ASYNC_PARTIAL what we have is actually a hash + # of hostname/minion-jobid pairs. Instantiate a client handle for each and poll them + # for their actual status, filling in only the ones that are actually done. + + (interim_rc, interim_results) = got_status + + if interim_rc == JOB_ID_ASYNC_PARTIAL: + + partial_results = {} + + # print "DEBUG: partial results for batch task: %s" % interim_results + + for host in interim_results.keys(): + + minion_job = interim_results[host] + client = client_class(host, noglobs=True, async=False) + minion_result = client.jobs.job_status(minion_job) + # print "DEBUG: background task on minion (%s) has status %s" % (minion_job, minion_result) + + (minion_interim_rc, minion_interim_result) = minion_result + + some_missing = False + if minion_interim_rc not in [ JOB_ID_RUNNING ]: + if minion_interim_rc in [ JOB_ID_LOST_IN_SPACE ]: + partial_results[host] = REMOTE_ERROR + else: + partial_results[host] = minion_interim_result + else: + some_missing = True + + if some_missing: + return (JOB_ID_ASYNC_PARTIAL, partial_results) + else: + return (JOB_ID_ASYNC_FINISHED, partial_results) + + else: + return got_status + + # of job id's on the minion in results. + +if __name__ == "__main__": + __test() + + diff --git a/func/minion/modules/jobs.py b/func/minion/modules/jobs.py new file mode 100644 index 0000000..69fb75f --- /dev/null +++ b/func/minion/modules/jobs.py @@ -0,0 +1,36 @@ +## (Largely internal) module for access to asynchronously dispatched +## module job ID's. The Func Client() module wraps most of this usage +## so it's not entirely relevant to folks using the CLI or Func API +## directly. +## +## Copyright 2008, Red Hat, Inc +## Michael DeHaan <mdehaan@redhat.com> +## +## This software may be freely redistributed under the terms of the GNU +## general public license. +## +## You should have received a copy of the GNU General Public License +## along with this program; if not, write to the Free Software +## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +## + +import codes +from func import jobthing +import func_module + +# ================================= + +class JobsModule(func_module.FuncModule): + + version = "0.0.1" + api_version = "0.0.1" + description = "Internal module for tracking background minion tasks." + + def job_status(self, job_id): + """ + Returns job status in the form of (status, datastruct). + Datastruct is undefined for unfinished jobs. See jobthing.py and + the Wiki details on async invocation for more information. + """ + return jobthing.job_status(job_id) + diff --git a/func/minion/modules/sysctl.py b/func/minion/modules/sysctl.py new file mode 100644 index 0000000..1f11d55 --- /dev/null +++ b/func/minion/modules/sysctl.py @@ -0,0 +1,31 @@ +# Copyright 2008, Red Hat, Inc +# Luke Macken <lmacken@redhat.com> +# +# This software may be freely redistributed under the terms of the GNU +# general public license.
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + +import func_module +import sub_process + +class SysctlModule(func_module.FuncModule): + + version = "0.0.1" + description = "Configure kernel parameters at runtime" + + def __run(self, cmd): + cmd = sub_process.Popen(cmd.split(), stdout=sub_process.PIPE, + stderr=sub_process.PIPE, shell=False) + return [line for line in cmd.communicate()[0].strip().split('\n')] + + def list(self): + return self.__run("/sbin/sysctl -a") + + def get(self, name): + return self.__run("/sbin/sysctl -n %s" % name) + + def set(self, name, value): + return self.__run("/sbin/sysctl -w %s=%s" % (name, value)) diff --git a/func/minion/modules/test.py b/func/minion/modules/test.py index 6718fed..6f7c5fa 100644 --- a/func/minion/modules/test.py +++ b/func/minion/modules/test.py @@ -1,5 +1,6 @@ import func_module import time +import exceptions class Test(func_module.FuncModule): version = "11.11.11" @@ -20,3 +21,9 @@ class Test(func_module.FuncModule): t = int(t) time.sleep(t) return time.time() + + def explode(self): + """ + Testing remote exception handling is useful + """ + raise exceptions.Exception("khhhhhhaaaaaan!!!!!!") diff --git a/func/minion/server.py b/func/minion/server.py index 6e55e70..7187783 100755 --- a/func/minion/server.py +++ b/func/minion/server.py @@ -28,6 +28,7 @@ from func.config import read_config from func.commonconfig import FuncdConfig from func import logger from func import certs +import func.jobthing as jobthing # our modules import AuthedXMLRPCServer @@ -196,6 +197,16 @@ class FuncSSLXMLRPCServer(AuthedXMLRPCServer.AuthedSSLXMLRPCServer, peer_cert = r.get_peer_certificate() ip = a[0] + + # generally calling conventions are: hardware.info + # async convention is async.hardware.info + # here we parse out the async to decide how to invoke it. + # see the async docs on the Wiki for further info. + async_dispatch = False + if method.startswith("async."): + async_dispatch = True + method = method.replace("async.","",1) + if not self._check_acl(peer_cert, ip, method, params): raise codes.AccessToMethodDenied @@ -207,7 +218,11 @@ class FuncSSLXMLRPCServer(AuthedXMLRPCServer.AuthedSSLXMLRPCServer, sub_hash = peer_cert.subject_name_hash() self.audit_logger.log_call(ip, cn, sub_hash, method, params) - return self.get_dispatch_method(method)(*params) + if not async_dispatch: + return self.get_dispatch_method(method)(*params) + else: + meth = self.get_dispatch_method(method) + return jobthing.minion_async_run(meth, params) def auth_cb(self, request, client_address): peer_cert = request.get_peer_certificate() diff --git a/func/overlord/client.py b/func/overlord/client.py index 60b5c24..f33bc4b 100755 --- a/func/overlord/client.py +++ b/func/overlord/client.py @@ -22,8 +22,10 @@ from func.config import read_config, CONFIG_FILE import sslclient import command -import forkbomb -import jobthing +import groups +import func.forkbomb as forkbomb +import func.jobthing as jobthing + # =================================== # defaults @@ -68,6 +70,11 @@ def expand_servers(spec, port=51234, noglobs=None, verbose=None, just_fqdns=Fals Given a regex/blob of servers, expand to a list of server ids. """ + + + # FIXME: we need to refactor expand_servers, it seems to do + # weird things, reload the config and groups config everytime it's + # called for one, which may or may not be bad... 
-akl config = read_config(CONFIG_FILE, CMConfig) if noglobs: @@ -76,9 +83,23 @@ def expand_servers(spec, port=51234, noglobs=None, verbose=None, just_fqdns=Fals else: return spec + group_class = groups.Groups() + group_dict = group_class.get_groups() + all_hosts = [] all_certs = [] seperate_gloobs = spec.split(";") + new_hosts = [] + + # we notate groups with @foo annotation, so look for that in the hostnamegoo + for each_gloob in seperate_gloobs: + if each_gloob[0] == '@': + if group_dict.has_key(each_gloob[1:]): + new_hosts = new_hosts + group_dict[each_gloob[1:]] + else: + print "group %s not defined" % each_gloob + + seperate_gloobs = seperate_gloobs + new_hosts for each_gloob in seperate_gloobs: actual_gloob = "%s/%s.cert" % (config.certroot, each_gloob) certs = glob.glob(actual_gloob) @@ -166,7 +187,7 @@ class Client(object): """ Use this to acquire status from jobs when using run with async client handles """ - return jobthing.job_status(jobid) + return jobthing.job_status(jobid, client_class=Client) # ----------------------------------------------- @@ -200,7 +221,16 @@ class Client(object): # we can't call "call" on s, since thats a rpc, so # we call gettatr around it. meth = "%s.%s" % (module, method) + + # async calling signature has an "imaginary" prefix + # so async.abc.def does abc.def as a background task. + # see Wiki docs for details + if self.async: + meth = "async.%s" % meth + + # this is the point at which we make the remote call. retval = getattr(conn, meth)(*args[:]) + if self.interactive: print retval except Exception, e: diff --git a/func/overlord/groups.py b/func/overlord/groups.py new file mode 100644 index 0000000..8eaf28e --- /dev/null +++ b/func/overlord/groups.py @@ -0,0 +1,95 @@ +#!/usr/bin/python + +## func command line interface & client lib +## +## Copyright 2007,2008 Red Hat, Inc +## Adrian Likins <alikins@redhat.com> +## +AUTHORS +## +## This software may be freely redistributed under the terms of the GNU +## general public license. +## +## You should have received a copy of the GNU General Public License +## along with this program; if not, write to the Free Software +## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +## + + +# this module lets you define groups of systems to work with from the +# commandline. 
It uses an "ini" style config parser like: + +#[groupname] +#host = foobar, baz, blip +#subgroup = blippy + + +import ConfigParser +import os + + +class Groups(object): + + def __init__(self, filename="/etc/func/groups"): + self.filename = filename + self.group_names = {} + self.groups = {} + self.__parse() + + def __parse(self): + + self.cp = ConfigParser.SafeConfigParser() + self.cp.read(self.filename) + + for section in self.cp.sections(): + self.add_group(section) + options = self.cp.options(section) + for option in options: + if option == "host": + self.add_hosts_to_group(section, self.cp.get(section, option)) + if option == "subgroup": + pass + + + def show(self): + print self.cp.sections() + print self.groups + + def add_group(self, group): + pass + + def __parse_hoststrings(self, hoststring): + hosts = [] + bits = hoststring.split(';') + for bit in bits: + blip = bit.strip().split(' ') + for host in blip: + if host not in hosts: + hosts.append(host.strip()) + + return hosts + + def add_hosts_to_group(self, group, hoststring): + hosts = self.__parse_hoststrings(hoststring) + for host in hosts: + self.add_host_to_group(group, host) + + + + def add_host_to_group(self, group, host): + if not self.groups.has_key(group): + self.groups[group] = [] + self.groups[group].append(host) + + def get_groups(self): + return self.groups + + + +def main(): + g = Groups("/tmp/testgroups") + print g.show() + + + +if __name__ == "__main__": + main() diff --git a/func/overlord/jobthing.py b/func/overlord/jobthing.py deleted file mode 100644 index e405616..0000000 --- a/func/overlord/jobthing.py +++ /dev/null @@ -1,118 +0,0 @@ -# jobthing is a module that allows for background execution of a task, and -# getting status of that task. The ultimate goal is to allow ajaxyness -# of GUI apps using Func, and also for extremely long running tasks that -# we don't want to block on as called by scripts using the FunC API. The -# CLI should not use this. -# -# Copyright 2007, Red Hat, Inc -# Michael DeHaan <mdehaan@redhat.com> -# -# This software may be freely redistributed under the terms of the GNU -# general public license. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - -import os -import random # for testing only -import time # for testing only -import shelve -import bsddb -import sys -import tempfile -import fcntl -import forkbomb - -JOB_ID_RUNNING = 0 -JOB_ID_FINISHED = 1 -JOB_ID_LOST_IN_SPACE = 2 - -# how long to retain old job records in the job id database -RETAIN_INTERVAL = 60 * 60 - -# where to store the internal job id database -CACHE_DIR = "/var/lib/func" - -def __update_status(jobid, status, results, clear=False): - return __access_status(jobid=jobid, status=status, results=results, write=True) - -def __get_status(jobid): - return __access_status(jobid=jobid, write=False) - - -def __purge_old_jobs(storage): - """ - Deletes jobs older than RETAIN_INTERVAL seconds. - MINOR FIXME: this probably should be a more intelligent algorithm that only - deletes jobs if the database is too big and then only the oldest jobs - but this will work just as well. 
- """ - nowtime = time.time() - for x in storage.keys(): - create_time = float(x) - if nowtime - create_time > RETAIN_INTERVAL: - del storage[x] - -def __access_status(jobid=0, status=0, results=0, clear=False, write=False): - - dir = os.path.expanduser(CACHE_DIR) - if not os.path.exists(dir): - os.makedirs(dir) - filename = os.path.join(dir,"status-%s" % os.getuid()) - - internal_db = bsddb.btopen(filename, 'c', 0644 ) - handle = open(filename,"r") - fcntl.flock(handle.fileno(), fcntl.LOCK_EX) - storage = shelve.BsdDbShelf(internal_db) - - if clear: - storage.clear() - storage.close() - fcntl.flock(handle.fileno(), fcntl.LOCK_UN) - return {} - - if write: - __purge_old_jobs(storage) - storage[str(jobid)] = (status, results) - rc = jobid - else: - if storage.has_key(str(jobid)): - # tuple of (status, results) - rc = storage[str(jobid)] - else: - rc = (JOB_ID_LOST_IN_SPACE, 0) - - storage.close() - fcntl.flock(handle.fileno(), fcntl.LOCK_UN) - - return rc - -def batch_run(server, process_server, nforks): - """ - Given an array of items (pool), call callback in each one, but divide - the workload over nfork forks. Temporary files used during the - operation will be created in cachedir and subsequently deleted. - """ - - job_id = time.time() - pid = os.fork() - if pid != 0: - #print "DEBUG: UPDATE STATUS: r1: %s" % job_id - __update_status(job_id, JOB_ID_RUNNING, -1) - return job_id - else: - #print "DEBUG: UPDATE STATUS: r2: %s" % job_id - __update_status(job_id, JOB_ID_RUNNING, -1) - results = forkbomb.batch_run(server, process_server, nforks) - #print "DEBUG: UPDATE STATUS: f1: %s" % job_id - __update_status(job_id, JOB_ID_FINISHED, results) - sys.exit(0) - -def job_status(jobid): - return __get_status(jobid) - -if __name__ == "__main__": - __test() - - diff --git a/func/utils.py b/func/utils.py index 4149885..c2fbb9f 100755 --- a/func/utils.py +++ b/func/utils.py @@ -14,7 +14,9 @@ import os import string import sys import traceback +import xmlrpclib +REMOTE_CANARY = "***REMOTE_ERROR***" # this is kind of handy, so keep it around for now # but we really need to fix out server side logging and error @@ -44,3 +46,36 @@ def daemonize(pidfile=None): if pidfile is not None: open(pidfile, "w").write(str(pid)) sys.exit(0) + +def remove_exceptions(results): + """ + Used by forkbomb/jobthing to avoid storing exceptions in database + because you know those don't serialize so well :) + # FIXME: this needs cleanup + """ + + if results is None: + return REMOTE_CANARY + + if str(results).startswith("<Fault"): + return REMOTE_CANARY + + if type(results) == xmlrpclib.Fault: + return REMOTE_CANARY + + if type(results) == dict: + new_results = {} + for x in results.keys(): + value = results[x] + if str(value).find("<Fault") == -1: + # there are interesting issues with the way it is imported and type() + # so that is why this hack is here. type(x) != xmlrpclib.Fault appears to miss some things + new_results[x] = value + else: + new_results[x] = REMOTE_CANARY + return new_results + + return results + + + diff --git a/funcweb/README.txt b/funcweb/README.txt index 3374563..4c52dcd 100644 --- a/funcweb/README.txt +++ b/funcweb/README.txt @@ -3,14 +3,29 @@ funcweb A TurboGears interface to func. -This project is currently under development, and should not be used in an -production environment. It employs no concept of security, and should only -be used for testing. +This project is currently under development, and is currently just a +proof-of-concept and should not be used in a production environment. 
Running ======= - # ./start-funcweb.py + # yum install TurboGears python-genshi python-elixir + $ python setup.py egg_info + $ tg-admin sql create + # ./start-funcweb.py + +Connect to http://localhost:8080 + +Creating a new user +=================== + +Currently funcweb only allows connections from 127.0.0.1 and from authenticated +users. So if you wish to grant other people access to your funcweb instance, +you can create new users easily: + + $ tg-admin shell + >>> user = User(user_name='name', password='password') + >>> ^D Authors ======= diff --git a/funcweb/dev.cfg b/funcweb/dev.cfg index 77efc8c..638c92b 100644 --- a/funcweb/dev.cfg +++ b/funcweb/dev.cfg @@ -1,28 +1,22 @@ [global] # This is where all of your settings go for your development environment # Settings that are the same for both development and production -# (such as template engine, encodings, etc.) all go in +# (such as template engine, encodings, etc.) all go in # funcweb/config/app.cfg # DATABASE +# driver://username:password@host:port/database + # pick the form for your database -# sqlobject.dburi="postgres://username@hostname/databasename" -# sqlobject.dburi="mysql://username:password@hostname:port/databasename" -# sqlobject.dburi="sqlite:///file_name_and_path" +# sqlalchemy.dburi="postgres://username@hostname/databasename" +# sqlalchemy.dburi="mysql://username:password@hostname:port/databasename" +# sqlalchemy.dburi="sqlite://%(current_dir_uri)s/devdata.sqlite" # If you have sqlite, here's a simple default to get you started # in development -sqlobject.dburi="sqlite://%(current_dir_uri)s/devdata.sqlite" - - -# if you are using a database or table type without transactions -# (MySQL default, for example), you should turn off transactions -# by prepending notrans_ on the uri -# sqlobject.dburi="notrans_mysql://username:password@hostname:port/databasename" +sqlalchemy.dburi="sqlite:///devdata.sqlite" -# for Windows users, sqlite URIs look like: -# sqlobject.dburi="sqlite:///drive_letter:/path/to/file" # SERVER @@ -32,6 +26,7 @@ sqlobject.dburi="sqlite://%(current_dir_uri)s/devdata.sqlite" # Enable the debug output at the end on pages. # log_debug_info_filter.on = False +server.socket_host="127.0.0.1" server.environment="development" autoreload.package="funcweb" @@ -64,3 +59,16 @@ level='INFO' qualname='turbogears.access' handlers=['access_out'] propagate=0 + +[[[identity]]] +level='INFO' +qualname='turbogears.identity' +handlers=['access_out'] +propagate=0 + +[[[database]]] +# Set to INFO to make SQLAlchemy display SQL commands +level='ERROR' +qualname='sqlalchemy.engine' +handlers=['debug_out'] +propagate=0 diff --git a/funcweb/funcweb/commands.py b/funcweb/funcweb/commands.py new file mode 100644 index 0000000..043ce1f --- /dev/null +++ b/funcweb/funcweb/commands.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +"""This module contains functions called from console script entry points.""" + +import os +import sys + +from os.path import dirname, exists, join + +import pkg_resources +pkg_resources.require("TurboGears") + +import turbogears +import cherrypy + +cherrypy.lowercase_api = True + +class ConfigurationError(Exception): + pass + +def start(): + """Start the CherryPy application server.""" + + setupdir = dirname(dirname(__file__)) + curdir = os.getcwd() + + # First look on the command line for a desired config file, + # if it's not on the command line, then look for 'setup.py' + # in the current directory. If there, load configuration + # from a file called 'dev.cfg'. 
If it's not there, the project + # is probably installed and we'll look first for a file called + # 'prod.cfg' in the current directory and then for a default + # config file called 'default.cfg' packaged in the egg. + if len(sys.argv) > 1: + configfile = sys.argv[1] + elif exists(join(setupdir, "setup.py")): + configfile = join(setupdir, "dev.cfg") + elif exists(join(curdir, "prod.cfg")): + configfile = join(curdir, "prod.cfg") + else: + try: + configfile = pkg_resources.resource_filename( + pkg_resources.Requirement.parse("funcweb"), + "config/default.cfg") + except pkg_resources.DistributionNotFound: + raise ConfigurationError("Could not find default configuration.") + + turbogears.update_config(configfile=configfile, + modulename="funcweb.config") + + from funcweb.controllers import Root + + turbogears.start_server(Root()) diff --git a/funcweb/funcweb/config/app.cfg b/funcweb/funcweb/config/app.cfg index 24e0807..504f018 100644 --- a/funcweb/funcweb/config/app.cfg +++ b/funcweb/funcweb/config/app.cfg @@ -19,16 +19,109 @@ tg.defaultview = "genshi" # Allow every exposed function to be called as json, # tg.allow_json = False +# Suppress the inclusion of the shipped MochiKit version, which is rather outdated. +# Attention: setting this to True and listing 'turbogears.mochikit' in 'tg.include_widgets' +# is a contradiction. This option will overrule the default-inclusion to prevent version +# mismatch bugs. +# tg.mochikit_suppress = True + # List of Widgets to include on every page. -# for exemple ['turbogears.mochikit'] +# for example ['turbogears.mochikit'] # tg.include_widgets = [] # Set to True if the scheduler should be started # tg.scheduler = False +# Set to True to allow paginate decorator redirects when page number gets +# out of bound. Useful for getting the real page id in the url +# paginate.redirect_on_out_of_range = True + +# Set to True to allow paginate decorator redirects when last page is requested. +# This is useful for getting the real last page id in the url +# paginate.redirect_on_last_page = True + # Set session or cookie # session_filter.on = True +# VISIT TRACKING +# Each visit to your application will be assigned a unique visit ID tracked via +# a cookie sent to the visitor's browser. +# -------------- + +# Enable Visit tracking +visit.on=True + +# Number of minutes a visit may be idle before it expires. +# visit.timeout=20 + +# The name of the cookie to transmit to the visitor's browser. +# visit.cookie.name="tg-visit" + +# Domain name to specify when setting the cookie (must begin with . according to +# RFC 2109). The default (None) should work for most cases and will default to +# the machine to which the request was made. NOTE: localhost is NEVER a valid +# value and will NOT WORK. +# visit.cookie.domain=None + +# Specific path for the cookie +# visit.cookie.path="/" + +# The name of the VisitManager plugin to use for visitor tracking. +visit.manager="sqlalchemy" + +# Database class to use for visit tracking +visit.saprovider.model = "funcweb.model.Visit" +identity.saprovider.model.visit = "funcweb.model.VisitIdentity" + +# IDENTITY +# General configuration of the TurboGears Identity management module +# -------- + +# Switch to turn on or off the Identity management module +identity.on=True + +# [REQUIRED] URL to which CherryPy will internally redirect when an access +# control check fails. If Identity management is turned on, a value for this +# option must be specified. 
+identity.failure_url="/login" + +identity.provider='sqlalchemy' + +# The names of the fields on the login form containing the visitor's user ID +# and password. In addition, the submit button is specified simply so its +# existence may be stripped out prior to passing the form data to the target +# controller. +# identity.form.user_name="user_name" +# identity.form.password="password" +# identity.form.submit="login" + +# What sources should the identity provider consider when determining the +# identity associated with a request? Comma separated list of identity sources. +# Valid sources: form, visit, http_auth +# identity.source="form,http_auth,visit" + +# SqlAlchemyIdentityProvider +# Configuration options for the default IdentityProvider +# ------------------------- + +# The classes you wish to use for your Identity model. Remember to not use reserved +# SQL keywords for class names (at least unless you specify a different table +# name using sqlmeta). +identity.saprovider.model.user="funcweb.model.User" +identity.saprovider.model.group="funcweb.model.Group" +identity.saprovider.model.permission="funcweb.model.Permission" + +# The password encryption algorithm used when comparing passwords against what's +# stored in the database. Valid values are 'md5' or 'sha1'. If you do not +# specify an encryption algorithm, passwords are expected to be clear text. +# The SqlAlchemyProvider *will* encrypt passwords supplied as part of your login +# form. If you set the password through the password property, like: +# my_user.password = 'secret' +# the password will be encrypted in the database, provided identity is up and +# running, or you have loaded the configuration specifying what encryption to +# use (in situations where identity may not yet be running, like tests). + +# identity.saprovider.encryption_algorithm=None # compress the data sends to the web browser # [/] diff --git a/funcweb/funcweb/controllers.py b/funcweb/funcweb/controllers.py index 0d74a50..df4c05c 100644 --- a/funcweb/funcweb/controllers.py +++ b/funcweb/funcweb/controllers.py @@ -1,18 +1,24 @@ import logging log = logging.getLogger(__name__) -from turbogears import controllers, expose, flash +from turbogears import controllers, expose, flash, identity, redirect from func.overlord.client import Client class Root(controllers.RootController): @expose(template="funcweb.templates.minions") - def minions(self): - """ Return a list of our minions """ - fc = Client("*") + @identity.require(identity.Any( + identity.from_host("127.0.0.1"), identity.not_anonymous())) + def minions(self, glob='*'): + """ Return a list of our minions that match a given glob """ + fc = Client(glob) return dict(minions=fc.system.list_methods()) + index = minions # start with our minion view, for now + @expose(template="funcweb.templates.minion") + @identity.require(identity.Any( + identity.from_host("127.0.0.1"), identity.not_anonymous())) def minion(self, name, module=None, method=None): """ Display module or method details for a specific minion. 
@@ -34,11 +40,43 @@ class Root(controllers.RootController): return dict(modules=modules, module=module, tg_template="funcweb.templates.module") - index = minions # start with our minion view, for now @expose(template="funcweb.templates.run") + @identity.require(identity.Any( + identity.from_host("127.0.0.1"), identity.not_anonymous())) def run(self, minion="*", module=None, method=None, arguments=''): fc = Client(minion) results = getattr(getattr(fc, module), method)(*arguments.split()) cmd = "%s.%s.%s(%s)" % (minion, module, method, arguments) return dict(cmd=cmd, results=results) + + @expose(template="funcweb.templates.login") + def login(self, forward_url=None, previous_url=None, *args, **kw): + from cherrypy import request, response + if not identity.current.anonymous \ + and identity.was_login_attempted() \ + and not identity.get_identity_errors(): + raise redirect(forward_url) + + forward_url=None + previous_url= request.path + + if identity.was_login_attempted(): + msg=_("The credentials you supplied were not correct or " + "did not grant access to this resource.") + elif identity.get_identity_errors(): + msg=_("You must provide your credentials before accessing " + "this resource.") + else: + msg=_("Please log in.") + forward_url= request.headers.get("Referer", "/") + + response.status=403 + return dict(message=msg, previous_url=previous_url, logging_in=True, + original_parameters=request.params, + forward_url=forward_url) + + @expose() + def logout(self): + identity.current.logout() + raise redirect("/") diff --git a/funcweb/funcweb/model.py b/funcweb/funcweb/model.py index c35e930..2997cf0 100644 --- a/funcweb/funcweb/model.py +++ b/funcweb/funcweb/model.py @@ -1 +1,99 @@ -# <insert SQLAlchemy hotness here> +from datetime import datetime +# import the basic Elixir classes and functions for declaring the data model +# (see http://elixir.ematia.de/trac/wiki/TutorialDivingIn) +from elixir import Entity, Field, OneToMany, ManyToOne, ManyToMany +from elixir import options_defaults, using_options, setup_all +# import some datatypes for table columns from Elixir +# (see http://www.sqlalchemy.org/docs/04/types.html for more) +from elixir import String, Unicode, Integer, DateTime +from turbogears import identity + +options_defaults['autosetup'] = False + + +# your data model + +# class YourDataClass(Entity): +# pass + + +# the identity model + + +class Visit(Entity): + """ + A visit to your site + """ + using_options(tablename='visit') + + visit_key = Field(String(40), primary_key=True) + created = Field(DateTime, nullable=False, default=datetime.now) + expiry = Field(DateTime) + + @classmethod + def lookup_visit(cls, visit_key): + return Visit.get(visit_key) + + +class VisitIdentity(Entity): + """ + A Visit that is linked to a User object + """ + using_options(tablename='visit_identity') + + visit_key = Field(String(40), primary_key=True) + user = ManyToOne('User', colname='user_id', use_alter=True) + + +class Group(Entity): + """ + An ultra-simple group definition. + """ + using_options(tablename='tg_group') + + group_id = Field(Integer, primary_key=True) + group_name = Field(Unicode(16), unique=True) + display_name = Field(Unicode(255)) + created = Field(DateTime, default=datetime.now) + users = ManyToMany('User', tablename='user_group') + permissions = ManyToMany('Permission', tablename='group_permission') + + +class User(Entity): + """ + Reasonably basic User definition. Probably would want additional attributes.
+ """ + using_options(tablename='tg_user') + + user_id = Field(Integer, primary_key=True) + user_name = Field(Unicode(16), unique=True) + email_address = Field(Unicode(255), unique=True) + display_name = Field(Unicode(255)) + password = Field(Unicode(40)) + created = Field(DateTime, default=datetime.now) + groups = ManyToMany('Group', tablename='user_group') + + @property + def permissions(self): + perms = set() + for g in self.groups: + perms |= set(g.permissions) + return perms + + +class Permission(Entity): + """ + A relationship that determines what each Group can do + """ + using_options(tablename='permission') + + permission_id = Field(Integer, primary_key=True) + permission_name = Field(Unicode(16), unique=True) + description = Field(Unicode(255)) + groups = ManyToMany('Group', tablename='group_permission') + + +# Set up all Elixir entities declared above + +setup_all() diff --git a/funcweb/funcweb/release.py b/funcweb/funcweb/release.py new file mode 100644 index 0000000..b6593d4 --- /dev/null +++ b/funcweb/funcweb/release.py @@ -0,0 +1,9 @@ +# Release information about funcweb + +version = "0.1" +description = "A web interface to func" +author = "Luke Macken" +email = "lmacken@redhat.com" +copyright = "2008 Red Hat, Inc." +url = "http://fedorahosted.org/func" +license = "GPL" diff --git a/funcweb/funcweb/templates/login.html b/funcweb/funcweb/templates/login.html index 4fd6755..e93b271 100644 --- a/funcweb/funcweb/templates/login.html +++ b/funcweb/funcweb/templates/login.html @@ -1,7 +1,7 @@ -<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" - "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> +<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml" - xmlns:py="http://purl.org/kid/ns#"> + xmlns:py="http://genshi.edgewall.org/" + xmlns:xi="http://www.w3.org/2001/XInclude"> <head> <meta content="text/html; charset=UTF-8" diff --git a/funcweb/funcweb/templates/master.html b/funcweb/funcweb/templates/master.html index 8d09254..ab14ca0 100644 --- a/funcweb/funcweb/templates/master.html +++ b/funcweb/funcweb/templates/master.html @@ -34,6 +34,7 @@ </div> </div> <div class="content"> + <!-- <div py:if="tg.config('identity.on',False) and not 'logging_in' in locals()" id="pageLogin" class="usernav"> <span py:if="tg.identity.anonymous"> You are not logged in yet <a class="loginButton" href="${tg.url('/login/')}">login</a> @@ -43,7 +44,7 @@ <a class="loginButton" href="${tg.url('/logout/')}">logout</a> </span> </div> - + --> <div py:replace="select('*|text()')" /> </div> </div> diff --git a/funcweb/funcweb/templates/minion.html b/funcweb/funcweb/templates/minion.html index 2223d57..f171b00 100644 --- a/funcweb/funcweb/templates/minion.html +++ b/funcweb/funcweb/templates/minion.html @@ -5,7 +5,7 @@ <body> <div id="modules" class="modules"> <ul py:for="minion, mods in modules.items()"> - <h2>${minion}</h2> + <h2>${minion[:13]}</h2> <li py:for="module in mods"> <a href="#" onclick="$('#col4').hide();$('#col3').hide().load('/minion/${minion}/${module}').show('slow');">${module}</a> </li> diff --git a/funcweb/sample-prod.cfg b/funcweb/sample-prod.cfg index 6ca2b4e..7f6cc6e 100644 --- a/funcweb/sample-prod.cfg +++ b/funcweb/sample-prod.cfg @@ -8,23 +8,16 @@ # DATABASE +# driver://username:password@host:port/database + # pick the form for your database -# sqlobject.dburi="postgres://username@hostname/databasename" -# 
sqlobject.dburi="mysql://username:password@hostname:port/databasename" -# sqlobject.dburi="sqlite:///file_name_and_path" +# sqlalchemy.dburi="postgres://username@hostname/databasename" +# sqlalchemy.dburi="mysql://username:password@hostname:port/databasename" +# sqlalchemy.dburi="sqlite:///file_name_and_path" # If you have sqlite, here's a simple default to get you started # in development -sqlobject.dburi="sqlite://%(current_dir_uri)s/devdata.sqlite" - - -# if you are using a database or table type without transactions -# (MySQL default, for example), you should turn off transactions -# by prepending notrans_ on the uri -# sqlobject.dburi="notrans_mysql://username:password@hostname:port/databasename" - -# for Windows users, sqlite URIs look like: -# sqlobject.dburi="sqlite:///drive_letter:/path/to/file" +sqlalchemy.dburi="sqlite:///%(current_dir_uri)s/devdata.sqlite" # SERVER diff --git a/funcweb/setup.py b/funcweb/setup.py index 7ba1a53..9bde340 100644 --- a/funcweb/setup.py +++ b/funcweb/setup.py @@ -1,7 +1,10 @@ +# -*- coding: utf-8 -*- + from setuptools import setup, find_packages from turbogears.finddata import find_package_data import os +execfile(os.path.join("funcweb", "release.py")) packages=find_packages() package_data = find_package_data(where='funcweb', @@ -14,18 +17,16 @@ if os.path.isdir('locales'): setup( name="funcweb", version=version, - - #description=description, - author="Luke Macken", - author_email="lmacken@redhat.com", - #url=url, - #download_url=download_url, - #license=license, + description=description, + author=author, + author_email=email, + url=url, + license=license, install_requires=[ - "TurboGears >= 1.0.3.2", + "TurboGears >= 1.0.4.2", + "SQLAlchemy>=0.3.10", ], - scripts=["start-funcweb.py"], zip_safe=False, packages=packages, package_data=package_data, @@ -63,5 +64,12 @@ setup( # 'Framework :: TurboGears :: Widgets', ], test_suite='nose.collector', + entry_points = { + 'console_scripts': [ + 'start-funcweb = funcweb.commands:start', + ], + }, + # Uncomment next line and create a default.cfg file in your project dir + # if you want to package a default configuration in your egg. + #data_files = [('config', ['default.cfg'])], ) - diff --git a/funcweb/start-funcweb.py b/funcweb/start-funcweb.py index 604cf19..3d375a3 100755 --- a/funcweb/start-funcweb.py +++ b/funcweb/start-funcweb.py @@ -1,25 +1,18 @@ #!/usr/bin/python -__requires__="TurboGears" -import pkg_resources +# -*- coding: utf-8 -*- +"""Start script for the funcweb TurboGears project. -from turbogears import config, update_config, start_server -import cherrypy -cherrypy.lowercase_api = True -from os.path import * -import sys +This script is only needed during development for running from the project +directory. When the project is installed, easy_install will create a +proper start script. +""" -# first look on the command line for a desired config file, -# if it's not on the command line, then -# look for setup.py in this directory. 
If it's not there, this script is -# probably installed -if len(sys.argv) > 1: - update_config(configfile=sys.argv[1], - modulename="funcweb.config") -elif exists(join(dirname(__file__), "setup.py")): - update_config(configfile="dev.cfg",modulename="funcweb.config") -else: - update_config(configfile="prod.cfg",modulename="funcweb.config") -config.update(dict(package="funcweb")) +import sys +from funcweb.commands import start, ConfigurationError -from funcweb.controllers import Root -start_server(Root()) +if __name__ == "__main__": + try: + start() + except ConfigurationError, exc: + sys.stderr.write(str(exc)) + sys.exit(1) diff --git a/funcweb/test.cfg b/funcweb/test.cfg new file mode 100644 index 0000000..4140d5a --- /dev/null +++ b/funcweb/test.cfg @@ -0,0 +1,32 @@ +[global] +# You can place test-specific configuration options here (like test db uri, etc) + +# DATABASE + +sqlalchemy.dburi = "sqlite:///:memory:" + +# LOGGING + +[logging] + +[[formatters]] +[[[full_content]]] +format='*(asctime)s *(name)s *(levelname)s *(message)s' + +[[handlers]] +[[[test_out]]] +class='StreamHandler' +level='DEBUG' +args='(sys.stdout,)' +formatter='full_content' + +[[loggers]] +[[[funcweb]]] +level='DEBUG' +qualname='funcweb' +handlers=['test_out'] + +[[[turbogears]]] +level='INFO' +qualname='turbogears' +handlers=['test_out'] diff --git a/test/async_test.py b/test/async_test.py index 4c1acf5..39a22b1 100644 --- a/test/async_test.py +++ b/test/async_test.py @@ -1,17 +1,38 @@ from func.overlord.client import Client -import func.overlord.jobthing as jobthing +import func.jobthing as jobthing import time import sys TEST_SLEEP = 5 EXTRA_SLEEP = 5 -def __tester(async): +SLOW_COMMAND = 1 +QUICK_COMMAND = 2 +RAISES_EXCEPTION_COMMAND = 3 +FAKE_COMMAND = 4 +TESTS = [ SLOW_COMMAND, QUICK_COMMAND, RAISES_EXCEPTION_COMMAND, FAKE_COMMAND ] + +def __tester(async,test): if async: client = Client("*",nforks=10,async=True) oldtime = time.time() - print "asking minion to sleep for %s seconds" % TEST_SLEEP - job_id = client.test.sleep(TEST_SLEEP) + + job_id = -411 + print "======================================================" + if test == SLOW_COMMAND: + print "TESTING command that sleeps %s seconds" % TEST_SLEEP + job_id = client.test.sleep(TEST_SLEEP) + elif test == QUICK_COMMAND: + print "TESTING a quick command" + job_id = client.test.add(1,2) + elif test == RAISES_EXCEPTION_COMMAND: + print "TESTING a command that deliberately raises an exception" + job_id = client.test.explode() # doesn't work yet + elif test == FAKE_COMMAND: + print "TESTING a command that does not exist" + job_id = client.test.does_not_exist(1,2) # ditto + print "======================================================" + print "job_id = %s" % job_id while True: status = client.job_status(job_id) @@ -20,19 +41,27 @@ def __tester(async): delta = int(nowtime - oldtime) if nowtime > oldtime + TEST_SLEEP + EXTRA_SLEEP: print "time expired, test failed" - sys.exit(1) + return if code == jobthing.JOB_ID_RUNNING: print "task is still running, %s elapsed ..." 
% delta + elif code == jobthing.JOB_ID_ASYNC_PARTIAL: + print "task reports partial status, %s elapsed, results = %s" % (delta, results) elif code == jobthing.JOB_ID_FINISHED: - print "task complete, %s elapsed, results = %s" % (delta, results) - sys.exit(0) + print "(non-async) task complete, %s elapsed, results = %s" % (delta, results) + return + elif code == jobthing.JOB_ID_ASYNC_FINISHED: + print "(async) task complete, %s elapsed, results = %s" % (delta, results) + return else: print "job not found: %s, %s elapsed" % (code, delta) time.sleep(1) else: print Client("*",nforks=10,async=False).test.sleep(5) -# __tester(False) -__tester(True) +for t in TESTS: + __tester(True,t) +print "=======================================================" +print "Testing non-async call" +print __tester(False,-1) diff --git a/test/test-it.sh b/test/test-it.sh index 72f3224..fd93d62 100755 --- a/test/test-it.sh +++ b/test/test-it.sh @@ -273,25 +273,13 @@ run_unittests run_async_test stop_the_func -# see if funcd is running -# see if certmaster is installed -# see if cermtasterd is running - -# setup certs -## see if we have certs set up properly +# leaving the test cases with func not running is kind of +# annoying, so restart it +start_the_func ### probably do some stuff to test bad/no/malformed/unauthed certs as well -# see if we can connect to funcd with the overloard - -# see what modules we have available -# for each module, call the info stuff on them - -# / start walking over the modules, doing commandliney stuff to each, and -# trying to check return data and return code as much as possible - -# test shut down of init scripts diff --git a/test/unittest/test_client.py b/test/unittest/test_client.py index f201da5..f4d56cc 100644 --- a/test/unittest/test_client.py +++ b/test/unittest/test_client.py @@ -6,17 +6,23 @@ import unittest import xmlrpclib import func.overlord.client as fc +import socket class BaseTest: # assume we are talking to localhost th = socket.gethostname() + nforks=1 + async=False + def __init__(self): pass def setUp(self): - self.client = fc.Client(self.th) + self.client = fc.Client(self.th, + nforks=self.nforks, + async=self.async) def test_module_version(self): mod = getattr(self.client, self.module) @@ -238,6 +244,16 @@ class TestSmart(BaseTest): self.assert_on_fault(result) +class TestSysctl(BaseTest): + module = "sysctl" + def test_list(self): + result = self.client.sysctl.list() + self.assert_on_fault(result) + + def test_get(self): + result = self.client.sysctl.get("kernel.max_lock_depth") + self.assert_on_fault(result) + class TestYum(BaseTest): module = "yumcmd" def test_check_update(self): @@ -273,3 +289,22 @@ class TestSystem(BaseTest): +class TestAsyncTest(BaseTest): + module = "async.test" + nforks=4 + async=True + def test_sleep_async(self): + job_id = self.client.test.sleep(5) + print "job_id", job_id + (return_code, results) = self.client.job_status(job_id) +# self.assert_on_fault(results) + print "return_code", return_code + print "results", results + + def test_add_async(self): + job_id = self.client.test.add(1,5) + print "job_id", job_id + (return_code, results) = self.client.job_status(job_id) +# self.assert_on_fault(results) + print "return_code", return_code + print "results", results
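A usage note on the new group support: groups.py plus the "@group" handling in expand_servers() let a server spec reference named host groups from the "ini"-style file described in the groups.py header above. A sketch with hypothetical hostnames — given an /etc/func/groups containing a [webservers] section with "host = web1.example.org web2.example.org", an overlord script could address the group like this:

    from func.overlord.client import Client

    # "@webservers" is expanded via Groups.get_groups(); entries after ";"
    # are ordinary globs, so single hosts can ride along with a group.
    client = Client("@webservers;db1.example.org")
    print client.sysctl.get("kernel.hostname")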