author     John Eckersberg <jeckersb@redhat.com>    2008-01-29 20:16:40 -0500
committer  John Eckersberg <jeckersb@redhat.com>    2008-01-29 20:16:40 -0500
commit     70bdbfd625ae24e07ec85a190d88a496b5240545 (patch)
tree       4b306445d44504ec64feec57278599490f781e64
parent     60e28e9577353c1ee2dd3f9901bfed5fbfcfb3c5 (diff)
parent     a25c78935c275d8b2e21da49264899b8a5821cef (diff)
Merge branch 'master' into netapp
-rw-r--r--   AUTHORS                          |    1
-rwxr-xr-x   Makefile                         |    2
-rw-r--r--   examples/drive_checker.py        |    5
-rw-r--r--   examples/find_recalled_parts.py  |    5
-rw-r--r--   examples/service_checker.py      |    1
-rwxr-xr-x   func/certmaster.py               |   10
-rw-r--r--   func/certs.py                    |    7
-rw-r--r--   func/forkbomb.py                 |    8
-rw-r--r--   func/jobthing.py                 |   35
-rwxr-xr-x   func/minion/server.py            |   26
-rwxr-xr-x   func/overlord/client.py          |   42
-rwxr-xr-x   func/overlord/inventory.py       |   18
-rwxr-xr-x   func/utils.py                    |   58
-rwxr-xr-x   test/Makefile                    |    7
-rw-r--r--   test/async_test.py               |    2
-rwxr-xr-x   test/test-it.sh                  |  103
-rwxr-xr-x   test/unittest/Makefile           |    7
-rw-r--r--   test/unittest/test_client.py     |   40
-rw-r--r--   test/unittest/test_groups.py     |   22
19 files changed, 234 insertions(+), 165 deletions(-)
diff --git a/AUTHORS b/AUTHORS
index a4f989b..10daf6f 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -15,6 +15,7 @@ Additional patches and contributions by ...
John Eckersberg <jeckersb@redhat.com>
Scott Henson <shenson@redhat.com>
Vito Laurenza <vitolaurenza@gmail.com>
+ Brenton Leanhardt <bleanhar@redhat.com>
Luke Macken <lmacken@redhat.com>
Steve Milner <smilner@redhat.com>
Stephen Nelson-Smith <atalanta.systems@googlemail.com>
diff --git a/Makefile b/Makefile
index 974ca4f..187e7c9 100755
--- a/Makefile
+++ b/Makefile
@@ -5,7 +5,7 @@ NEWRELEASE = $(shell echo $$(($(RELEASE) + 1)))
MESSAGESPOT=po/messages.pot
TOPDIR = $(shell pwd)
-DIRS = func docs scripts
+DIRS = func docs scripts test test/unittest
PYDIRS = func scripts
EXAMPLEDIR = examples
INITDIR = init-scripts
diff --git a/examples/drive_checker.py b/examples/drive_checker.py
index 77e9181..f563c76 100644
--- a/examples/drive_checker.py
+++ b/examples/drive_checker.py
@@ -5,14 +5,15 @@
# ===============================================
import func.overlord.client as fc
+import func.utils as utils
info = fc.Client("*").smart.info()
failures = 0
for (host,details) in info.iteritems():
- if type(details) != list:
- print "%s had an error : %s" % (host,str(details))
+ if utils.is_error(details):
+ print "%s had an error : %s" % (host,details[1:3])
break
(rc, list_of_output) = details
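The slice details[1:3] works because failed calls now come back as a plain list of the form [ REMOTE_ERROR, exception type, exception value, traceback string ], built by the new func.utils.nice_exception() further down in this diff. A minimal sketch of the pattern the updated examples rely on, using a hypothetical error value and host name:

    import func.utils as utils

    # hypothetical error result, shaped like the list nice_exception() builds
    details = ["REMOTE_ERROR", "exceptions.IOError",
               "[Errno 2] No such file or directory", "<formatted stack>"]

    if utils.is_error(details):
        # prints the exception type and value, but not the traceback
        print "%s had an error : %s" % ("minion1.example.com", details[1:3])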
diff --git a/examples/find_recalled_parts.py b/examples/find_recalled_parts.py
index fc1a3b1..45eb48e 100644
--- a/examples/find_recalled_parts.py
+++ b/examples/find_recalled_parts.py
@@ -4,6 +4,7 @@
# ===============================================
import func.overlord.client as fc
+import func.utils as utils
bad = open("./part_data.txt").read().split()
@@ -11,8 +12,8 @@ info = fc.Client("*").hardware.hal_info()
for (host,details) in info.iteritems():
- if type(details) != dict:
- print "%s had an error : %s" % (host,str(details))
+ if utils.is_error(details):
+ print "%s had an error : %s" % (host,details[1:3])
break
for (device, full_output) in details.iteritems():
diff --git a/examples/service_checker.py b/examples/service_checker.py
index 543290d..5541619 100644
--- a/examples/service_checker.py
+++ b/examples/service_checker.py
@@ -21,7 +21,6 @@
import sys
import func.overlord.client as fc
-
if __name__ == '__main__':
try:
service = sys.argv[1]
diff --git a/func/certmaster.py b/func/certmaster.py
index 1cde806..b74c8d2 100755
--- a/func/certmaster.py
+++ b/func/certmaster.py
@@ -36,7 +36,15 @@ CERTMASTER_LISTEN_PORT = 51235
class CertMaster(object):
def __init__(self, conf_file):
self.cfg = read_config(conf_file, CMConfig)
- mycn = '%s-CA-KEY' % socket.getfqdn()
+
+ fqdn = socket.getfqdn()
+ host = socket.gethostname()
+ if fqdn.find(host) != -1:
+ usename = fqdn
+ else:
+ usename = host
+
+ mycn = '%s-CA-KEY' % usename
self.ca_key_file = '%s/funcmaster.key' % self.cfg.cadir
self.ca_cert_file = '%s/funcmaster.crt' % self.cfg.cadir
try:
diff --git a/func/certs.py b/func/certs.py
index 22af866..413f9ce 100644
--- a/func/certs.py
+++ b/func/certs.py
@@ -48,7 +48,12 @@ def make_csr(pkey, dest=None, cn=None):
if cn:
subj.CN = cn
else:
- subj.CN = socket.getfqdn()
+ fqdn = socket.getfqdn()
+ host = socket.gethostname()
+ if fqdn.find(host) != -1:
+ subj.CN = fqdn
+ else:
+ subj.CN = host
subj.emailAddress = 'root@%s' % subj.CN
req.set_pubkey(pkey)
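The same FQDN-versus-hostname check now appears in both func/certmaster.py and func/certs.py. A sketch of that shared logic as a standalone helper (the name pick_cert_cn is hypothetical and not part of this patch):

    import socket

    def pick_cert_cn():
        # prefer the FQDN, but only when it actually contains the short
        # hostname; otherwise fall back to the bare hostname
        fqdn = socket.getfqdn()
        host = socket.gethostname()
        if fqdn.find(host) != -1:
            return fqdn
        return host

    print pick_cert_cn()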
diff --git a/func/forkbomb.py b/func/forkbomb.py
index 3dc12c8..3dfa6f2 100644
--- a/func/forkbomb.py
+++ b/func/forkbomb.py
@@ -21,6 +21,7 @@ import sys
import tempfile
import fcntl
import utils
+import xmlrpclib
DEFAULT_FORKS = 4
DEFAULT_CACHE_DIR = "/var/lib/func"
@@ -54,15 +55,12 @@ def __access_buckets(filename,clear,new_key=None,new_value=None):
if not storage.has_key("data"):
storage["data"] = {}
else:
- # print "DEBUG: existing: %s" % storage["data"]
pass
if new_key is not None:
# bsdb is a bit weird about this
newish = storage["data"].copy()
- new_value = utils.remove_exceptions(new_value)
newish[new_key] = new_value
- # print "DEBUG: newish: %s" % newish
storage["data"] = newish
rc = storage["data"].copy()
@@ -78,14 +76,12 @@ def __bucketize(pool, slots):
"""
buckets = {}
count = 0
- # print "DEBUG: slots: %s" % slots
for key in pool:
count = count + 1
slot = count % slots
if not buckets.has_key(slot):
buckets[slot] = []
buckets[slot].append(key)
- # print "DEBUG: buckets: %s" % buckets
return buckets
def __with_my_bucket(bucket_number,buckets,what_to_do,filename):
@@ -139,11 +135,9 @@ def batch_run(pool,callback,nforks=DEFAULT_FORKS,cachedir=DEFAULT_CACHE_DIR):
if nforks <= 1:
# modulus voodoo gets crazy otherwise and bad things happen
nforks = 2
- # print "DEBUG: nforks=%s" % 2
shelf_file = __get_storage(cachedir)
__access_buckets(shelf_file,True,None)
buckets = __bucketize(pool, nforks)
- # print "DEBUG: buckets: %s" % buckets
__forkbomb(1,buckets,callback,shelf_file)
rc = __access_buckets(shelf_file,False,None)
os.remove(shelf_file)
diff --git a/func/jobthing.py b/func/jobthing.py
index ca8ee38..67ad1a6 100644
--- a/func/jobthing.py
+++ b/func/jobthing.py
@@ -24,6 +24,7 @@ import tempfile
import fcntl
import forkbomb
import utils
+import traceback
JOB_ID_RUNNING = 0
JOB_ID_FINISHED = 1
@@ -31,8 +32,6 @@ JOB_ID_LOST_IN_SPACE = 2
JOB_ID_ASYNC_PARTIAL = 3
JOB_ID_ASYNC_FINISHED = 4
-REMOTE_ERROR = utils.REMOTE_CANARY
-
# how long to retain old job records in the job id database
RETAIN_INTERVAL = 60 * 60
@@ -86,9 +85,6 @@ def __access_status(jobid=0, status=0, results=0, clear=False, write=False, purg
__purge_old_jobs(storage)
if write:
- results = utils.remove_exceptions(results)
- # print "DEBUG: status=%s" % status
- # print "DEBUG: results=%s" % results
storage[str(jobid)] = (status, results)
rc = jobid
elif not purge:
@@ -119,7 +115,6 @@ def batch_run(server, process_server, nforks):
job_id = time.time()
pid = os.fork()
if pid != 0:
- #print "DEBUG: UPDATE STATUS: r1: %s" % job_id
__update_status(job_id, JOB_ID_RUNNING, -1)
return job_id
else:
@@ -131,12 +126,14 @@ def batch_run(server, process_server, nforks):
__update_status(job_id, JOB_ID_ASYNC_PARTIAL, results)
sys.exit(0)
-def minion_async_run(function_ref, args):
+def minion_async_run(retriever, method, args):
"""
This is a simpler invocation for minion side async usage.
"""
# to avoid confusion of job id's (we use the same job database)
# minion jobs contain the string "minion".
+
+
job_id = "%s-minion" % time.time()
pid = os.fork()
if pid != 0:
@@ -145,19 +142,13 @@ def minion_async_run(function_ref, args):
else:
__update_status(job_id, JOB_ID_RUNNING, -1)
try:
- results = function_ref(*args)
+ function_ref = retriever(method)
+ rc = function_ref(*args)
except Exception, e:
- # FIXME: we need to make sure this is logged
- # NOTE: we need a return here, else the async engine won't get any results
- # so that is the reason for the catch
- # FIXME: it would be nice to store the string data from the exception here so that the caller
- # can read the exception data, however we can't store the exception directly in the DB.
- # some care must be made to also make this not suck for the user of the API,
- # when they are iterating over batch results, so they can tell good data from exceptions that
- # are represented as strings. Ideally, reconstructing the exceptions back into objects would be shiny
- # but if we go there I will need more caffeine first.
- __update_status(job_id, JOB_ID_FINISHED, REMOTE_ERROR)
- __update_status(job_id, JOB_ID_FINISHED, results)
+ (t, v, tb) = sys.exc_info()
+ rc = utils.nice_exception(t,v,tb)
+
+ __update_status(job_id, JOB_ID_FINISHED, rc)
sys.exit(0)
def job_status(jobid, client_class=None):
@@ -179,21 +170,19 @@ def job_status(jobid, client_class=None):
partial_results = {}
- # print "DEBUG: partial results for batch task: %s" % interim_results
+ some_missing = False
for host in interim_results.keys():
minion_job = interim_results[host]
client = client_class(host, noglobs=True, async=False)
minion_result = client.jobs.job_status(minion_job)
- # print "DEBUG: background task on minion (%s) has status %s" % (minion_job, minion_result)
(minion_interim_rc, minion_interim_result) = minion_result
- some_missing = False
if minion_interim_rc not in [ JOB_ID_RUNNING ]:
if minion_interim_rc in [ JOB_ID_LOST_IN_SPACE ]:
- partial_results[host] = REMOTE_ERROR
+ partial_results[host] = [ utils.REMOTE_ERROR, "lost job" ]
else:
partial_results[host] = minion_interim_result
else:
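Note the signature change above: minion_async_run() now takes (retriever, method, args) instead of a pre-bound function reference, so the method lookup happens inside the forked child and any failure there is converted with utils.nice_exception() as well. A sketch of the new calling convention (fake_retriever is a stand-in for the real dispatcher passed in by func/minion/server.py below; actually running this needs a configured funcd environment):

    import func.jobthing as jobthing

    def fake_retriever(method):
        # resolves a dotted method name to a callable, like get_dispatch_method does
        table = {"test.add": lambda a, b: a + b}
        return table[method]

    job_id = jobthing.minion_async_run(fake_retriever, "test.add", (1, 5))
    # poll later with jobthing.job_status(job_id) on the minion side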
diff --git a/func/minion/server.py b/func/minion/server.py
index 7187783..f1b827f 100755
--- a/func/minion/server.py
+++ b/func/minion/server.py
@@ -29,12 +29,13 @@ from func.commonconfig import FuncdConfig
from func import logger
from func import certs
import func.jobthing as jobthing
+import utils
# our modules
import AuthedXMLRPCServer
import codes
import module_loader
-import utils
+import func.utils as futils
@@ -131,11 +132,12 @@ class FuncApiMethod:
rc = self.__method(*args)
except codes.FuncException, e:
self.__log_exc()
- rc = e
+ (t, v, tb) = sys.exc_info()
+ rc = futils.nice_exception(t,v,tb)
except:
- self.logger.debug("Not a Func-specific exception")
self.__log_exc()
- raise
+ (t, v, tb) = sys.exc_info()
+ rc = futils.nice_exception(t,v,tb)
self.logger.debug("Return code for %s: %s" % (self.__name, rc))
return rc
@@ -218,11 +220,15 @@ class FuncSSLXMLRPCServer(AuthedXMLRPCServer.AuthedSSLXMLRPCServer,
sub_hash = peer_cert.subject_name_hash()
self.audit_logger.log_call(ip, cn, sub_hash, method, params)
- if not async_dispatch:
- return self.get_dispatch_method(method)(*params)
- else:
- meth = self.get_dispatch_method(method)
- return jobthing.minion_async_run(meth, params)
+ try:
+ if not async_dispatch:
+ return self.get_dispatch_method(method)(*params)
+ else:
+ return jobthing.minion_async_run(self.get_dispatch_method, method, params)
+ except:
+ (t, v, tb) = sys.exc_info()
+ rc = futils.nice_exception(t, v, tb)
+ return rc
def auth_cb(self, request, client_address):
peer_cert = request.get_peer_certificate()
@@ -261,7 +267,7 @@ def main(argv):
"""
if "daemon" in sys.argv or "--daemon" in sys.argv:
- utils.daemonize("/var/run/funcd.pid")
+ futils.daemonize("/var/run/funcd.pid")
else:
print "serving...\n"
diff --git a/func/overlord/client.py b/func/overlord/client.py
index f33bc4b..3d00302 100755
--- a/func/overlord/client.py
+++ b/func/overlord/client.py
@@ -25,7 +25,7 @@ import command
import groups
import func.forkbomb as forkbomb
import func.jobthing as jobthing
-
+import func.utils as utils
# ===================================
# defaults
@@ -62,6 +62,23 @@ class CommandAutomagic(object):
return self.clientref.run(module,method,args,nforks=self.nforks)
+def get_groups():
+ group_class = groups.Groups()
+ return group_class.get_groups()
+
+
+def get_hosts_by_groupgoo(groups, groupgoo):
+ group_gloobs = groupgoo.split(':')
+ hosts = []
+ for group_gloob in group_gloobs:
+ if not group_gloob[0] == "@":
+ continue
+ if groups.has_key(group_gloob[1:]):
+ hosts = hosts + groups[group_gloob[1:]]
+ else:
+ print "group %s not defined" % each_gloob
+ return hosts
+
# ===================================
# this is a module level def so we can use it and isServer() from
# other modules with a Client class
@@ -83,22 +100,15 @@ def expand_servers(spec, port=51234, noglobs=None, verbose=None, just_fqdns=Fals
else:
return spec
- group_class = groups.Groups()
- group_dict = group_class.get_groups()
+ group_dict = get_groups()
all_hosts = []
all_certs = []
seperate_gloobs = spec.split(";")
- new_hosts = []
-
- # we notate groups with @foo annotation, so look for that in the hostnamegoo
- for each_gloob in seperate_gloobs:
- if each_gloob[0] == '@':
- if group_dict.has_key(each_gloob[1:]):
- new_hosts = new_hosts + group_dict[each_gloob[1:]]
- else:
- print "group %s not defined" % each_gloob
+
+ new_hosts = get_hosts_by_groupgoo(group_dict, spec)
+ seperate_gloobs = spec.split(";")
seperate_gloobs = seperate_gloobs + new_hosts
for each_gloob in seperate_gloobs:
actual_gloob = "%s/%s.cert" % (config.certroot, each_gloob)
@@ -132,7 +142,7 @@ def isServer(server_string):
class Client(object):
def __init__(self, server_spec, port=DEFAULT_PORT, interactive=False,
- verbose=False, noglobs=False, nforks=1, config=None, async=False, noexceptions=True):
+ verbose=False, noglobs=False, nforks=1, config=None, async=False):
"""
Constructor.
@server_spec -- something like "*.example.org" or "foosball"
@@ -153,7 +163,6 @@ class Client(object):
self.noglobs = noglobs
self.nforks = nforks
self.async = async
- self.noexceptions= noexceptions
self.servers = expand_servers(self.server_spec, port=self.port, noglobs=self.noglobs,verbose=self.verbose)
@@ -234,12 +243,11 @@ class Client(object):
if self.interactive:
print retval
except Exception, e:
- retval = e
+ (t, v, tb) = sys.exc_info()
+ retval = utils.nice_exception(t,v,tb)
if self.interactive:
sys.stderr.write("remote exception on %s: %s\n" %
(server, str(e)))
- if self.noglob and not self.noexceptions:
- raise(e)
if self.noglobs:
return retval
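The group handling that used to live inline in expand_servers() is now exposed as module-level helpers, which the new test/unittest/test_groups.py at the end of this diff also exercises. A short sketch (the "@webservers" group name is hypothetical):

    import func.overlord.client as fc

    # get_groups() returns a dict mapping group name -> list of hosts
    group_dict = fc.get_groups()
    hosts = fc.get_hosts_by_groupgoo(group_dict, "@webservers")
    print hosts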
diff --git a/func/overlord/inventory.py b/func/overlord/inventory.py
index 47fff6a..c2a1d30 100755
--- a/func/overlord/inventory.py
+++ b/func/overlord/inventory.py
@@ -21,8 +21,8 @@ import sys
import pprint
import xmlrpclib
from func.minion import sub_process
-
import func.overlord.client as func_client
+import func.utils as utils
DEFAULT_TREE = "/var/lib/func/inventory/"
@@ -80,19 +80,21 @@ class FuncInventory(object):
# call all remote info methods and handle them
if options.verbose:
- # print "- DEBUG: %s" % host_methods
print "- scanning ..."
# for (host, modules) in host_modules.iteritems():
for (host, methods) in host_methods.iteritems():
-
- for each_method in methods:
+ if utils.is_error(methods):
+ print "-- connection refused: %s" % host
+ break
+
+ for each_method in methods:
- if type(each_method) == int:
- if self.options.verbose:
- print "-- connection refused: %s" % host
- break
+ #if type(each_method) == int:
+ # if self.options.verbose:
+ # print "-- connection refused: %s" % host
+ # break
(module_name, method_name) = each_method.split(".")
diff --git a/func/utils.py b/func/utils.py
index c2fbb9f..1a4abb7 100755
--- a/func/utils.py
+++ b/func/utils.py
@@ -16,17 +16,13 @@ import sys
import traceback
import xmlrpclib
-REMOTE_CANARY = "***REMOTE_ERROR***"
+REMOTE_ERROR = "REMOTE_ERROR"
-# this is kind of handy, so keep it around for now
-# but we really need to fix out server side logging and error
-# reporting so we don't need it
def trace_me():
x = traceback.extract_stack()
bar = string.join(traceback.format_list(x))
return bar
-
def daemonize(pidfile=None):
"""
Daemonize this process with the UNIX double-fork trick.
@@ -41,41 +37,27 @@ def daemonize(pidfile=None):
os.umask(0)
pid = os.fork()
-
if pid > 0:
if pidfile is not None:
open(pidfile, "w").write(str(pid))
sys.exit(0)
-def remove_exceptions(results):
- """
- Used by forkbomb/jobthing to avoid storing exceptions in database
- because you know those don't serialize so well :)
- # FIXME: this needs cleanup
- """
-
- if results is None:
- return REMOTE_CANARY
-
- if str(results).startswith("<Fault"):
- return REMOTE_CANARY
-
- if type(results) == xmlrpclib.Fault:
- return REMOTE_CANARY
-
- if type(results) == dict:
- new_results = {}
- for x in results.keys():
- value = results[x]
- if str(value).find("<Fault") == -1:
- # there are interesting issues with the way it is imported and type()
- # so that is why this hack is here. type(x) != xmlrpclib.Fault appears to miss some things
- new_results[x] = value
- else:
- new_results[x] = REMOTE_CANARY
- return new_results
-
- return results
-
-
-
+def nice_exception(etype, evalue, etb):
+ etype = str(etype)
+ lefti = etype.index("'") + 1
+ righti = etype.rindex("'")
+ nicetype = etype[lefti:righti]
+ nicestack = string.join(traceback.format_list(traceback.extract_tb(etb)))
+ return [ REMOTE_ERROR, nicetype, str(evalue), nicestack ]
+
+def is_error(result):
+ if type(result) != list:
+ return False
+ if len(result) == 0:
+ return False
+ if result[0] == REMOTE_ERROR:
+ return True
+ return False
+
+
+
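Taken together, nice_exception() and is_error() replace the old remove_exceptions()/REMOTE_CANARY scheme: instead of throwing exception data away, the minion flattens it into a plain list that serializes cleanly over XML-RPC and through the job shelve, and callers test for it explicitly. A sketch of the round trip under Python 2 (which is what the func code base targets):

    import sys
    import func.utils as utils

    try:
        raise ValueError("boom")
    except Exception:
        (t, v, tb) = sys.exc_info()
        result = utils.nice_exception(t, v, tb)

    # result[:3] == ["REMOTE_ERROR", "exceptions.ValueError", "boom"];
    # result[3] holds the formatted stack trace
    if utils.is_error(result):
        print "error type and value:", result[1:3]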
diff --git a/test/Makefile b/test/Makefile
new file mode 100755
index 0000000..ede53b5
--- /dev/null
+++ b/test/Makefile
@@ -0,0 +1,7 @@
+
+
+clean::
+ @rm -fv *.pyc *~ .*~ *.pyo
+ @find . -name .\#\* -exec rm -fv {} \;
+ @rm -fv *.rpm
+ @rm -fv *.gz
diff --git a/test/async_test.py b/test/async_test.py
index 39a22b1..04f6fd5 100644
--- a/test/async_test.py
+++ b/test/async_test.py
@@ -57,6 +57,8 @@ def __tester(async,test):
time.sleep(1)
else:
print Client("*",nforks=10,async=False).test.sleep(5)
+ print Client("*",nforks=10,async=False).test.bork(5)
+ print Client("*",nforks=1,async=False).test.bork(5)
for t in TESTS:
__tester(True,t)
diff --git a/test/test-it.sh b/test/test-it.sh
index fd93d62..d700a41 100755
--- a/test/test-it.sh
+++ b/test/test-it.sh
@@ -22,12 +22,36 @@ RPM_PATH=`pwd`
BUILD=Y
# do we do a fresh pull from git to build
-BUILD_FROM_FRESH_CHECKOUT=Y
+BUILD_FROM_FRESH_CHECKOUT=N
+
+# do we build/uninstall via rpms?
+INSTALL_VIA_RPMS=N
# should we backup existing func pki setup, since
# we are going to be deleting it from the normal spot?
BACKUP_FUNC_PKI="N"
+# do we want to run the unit tests as well
+RUN_UNITTESTS="Y"
+# you can put conf stuff in test-it.conf
+# so you don't have to worry about checking in config stuff
+if [ -f "test-it.conf" ] ; then
+ source test-it.conf
+fi
+
+
+show_config()
+{
+ echo "BUILD_PATH=$BUILD_PATH"
+ echo "RPM_PATH=$RPM_PATH"
+ echo "BUILD=$BUILD"
+ echo "BUILD_FROM_FRESH_CHECKOUT=$BUILD_FROM_FRESH_CHECKOUT"
+ echo "INSTALL_VIA_RPMS=$INSTALL_VIA_RPMS"
+ echo "BACKUP_FUNC_PKL=$BACKUP_FUNC_PKI"
+ echo "RUN_UNITTESTS=$RUN_UNITTESTS"
+
+}
+
rm -rf $RPM_PATH/rpms
rm -rf $RPM_PATH/srpms
rm -rf $RPM_PATH/tars
@@ -57,6 +81,8 @@ check_out_code()
build_rpm()
{
+
+
PKG=$1
BRT=$2
echo;echo;echo
@@ -82,20 +108,29 @@ build_rpm()
fi
}
-uninstall_the_func()
+uninstall_the_func_rpm()
{
+ msg "Removing the func rpm, if there is one"
# just one package for now, easy enough
rpm -e func
}
-install_the_func()
+install_the_func_rpm()
{
+ msg "Installing the func rpm"
rpm -Uvh $RPM_PATH/rpms/func*
STATUS=$?
# do something with the status
}
+install_the_func()
+{
+ msg "Installing func directly"
+ pushd $1
+ make install
+}
+
find_the_func()
{
INSTALLED_FUNC=`rpm -q func`
@@ -172,27 +207,6 @@ sign_the_certmaster_certs()
}
-pound_on_the_threads()
-{
- msg "Trying to poke at the threads a bit"
- THREAD_COUNT=5
- for i in $MINION_CERTS
- do
- for Q in `seq 1 $THREAD_COUNT`
- do
- # background these so they run more or less in parallel to
- # each minion
- echo "test add $Q 6" for $i
- func $i call test add "$Q" "6" &
- done
- done
-
- # this is kind of dumb and ugly, but it gives a change for all the
- # connections to complete before we shut the server down
- sleep 10
-
-}
-
# just some random "poke at func and make sure it works stuff"
test_funcd()
{
@@ -214,37 +228,56 @@ test_funcd()
run_async_test()
{
+ msg "Running async_test.py to test async/forking"
+ pushd $BUILD_PATH/test
python async_test.py
}
run_unittests()
{
+ msg "Running the unittest suite"
+ pushd $BUILD_PATH/test/unittest
nosetests -v -w unittest/
}
+
+
+# start doing stuff
+
+show_config
+
+
if [ "$BUILD" == "Y" ] ; then
if [ "$BUILD_FROM_FRESH_CHECKOUT" == "Y" ] ; then
check_out_code
else
# assume we are running from the test dir
- BUILD_PATH="`pwd`/../../"
+ BUILD_PATH="`pwd`/../"
fi
- # FIXME: red hat specifc
- build_rpm func $BUILD_PATH
- #if we are building, then we should remove the installed
- # versiones as well, and install the new
- uninstall_the_func
+ if [ "$INSTALL_VIA_RPMS" == "Y" ] ; then
+ # FIXME: red hat specifc
+ build_rpm func $BUILD_PATH
+
+ #if we are building, then we should remove the installed
+ # versiones as well, and install the new
+ uninstall_the_func_rpm
- install_the_func
+ install_the_func_rpm
+ else
+ uninstall_the_func_rpm
+ install_the_func $BUILD_PATH
+ fi
fi
# see if func is install
# see if funcd is install
-find_the_func
+if [ "$INSTALL_VIA_RPMS" == "Y" ] ; then
+ find_the_func
+fi
if [ "$BACKUP_FUNC_PKI" == "Y" ] ; then
backup_the_secret_of_the_func
@@ -266,9 +299,11 @@ sign_the_certmaster_certs
test_funcd
-pound_on_the_threads
-run_unittests
+
+if [ "$RUN_UNITTEST" == "Y" ] ; then
+ run_unittests
+fi
run_async_test
diff --git a/test/unittest/Makefile b/test/unittest/Makefile
new file mode 100755
index 0000000..ede53b5
--- /dev/null
+++ b/test/unittest/Makefile
@@ -0,0 +1,7 @@
+
+
+clean::
+ @rm -fv *.pyc *~ .*~ *.pyo
+ @find . -name .\#\* -exec rm -fv {} \;
+ @rm -fv *.rpm
+ @rm -fv *.gz
diff --git a/test/unittest/test_client.py b/test/unittest/test_client.py
index f4d56cc..ed13c6a 100644
--- a/test/unittest/test_client.py
+++ b/test/unittest/test_client.py
@@ -288,23 +288,23 @@ class TestSystem(BaseTest):
pass
-
-class TestAsyncTest(BaseTest):
- module = "async.test"
- nforks=4
- async=True
- def test_sleep_async(self):
- job_id = self.client.test.sleep(5)
- print "job_id", job_id
- (return_code, results) = self.client.job_status(job_id)
-# self.assert_on_fault(results)
- print "return_code", return_code
- print "results", results
-
- def test_add_async(self):
- job_id = self.client.test.add(1,5)
- print "job_id", job_id
- (return_code, results) = self.client.job_status(job_id)
-# self.assert_on_fault(results)
- print "return_code", return_code
- print "results", results
+#import time
+#class TestAsyncTest(BaseTest):
+# module = "async.test"
+# nforks=1
+# async=True
+# def test_sleep_async(self):
+# job_id = self.client.test.sleep(5)
+# print "job_id", job_id
+# time.sleep(5)
+# (return_code, results) = self.client.job_status(job_id)
+# print "return_code", return_code
+# print "results", results
+#
+# def test_add_async(self):
+# job_id = self.client.test.add(1,5)
+# print "job_id", job_id
+# time.sleep(6)
+# (return_code, results) = self.client.job_status(job_id)
+# print "return_code", return_code
+ # print "results", results
diff --git a/test/unittest/test_groups.py b/test/unittest/test_groups.py
new file mode 100644
index 0000000..639022a
--- /dev/null
+++ b/test/unittest/test_groups.py
@@ -0,0 +1,22 @@
+#!/usr/bin/python
+
+# unit tests for group functionality in func
+
+import func.overlord.client as fc
+
+
+
+class TestGroups(object):
+
+ def test_expand_servers(self):
+ result = fc.expand_servers("*")
+ print result
+
+ def test_get_groups(self):
+ result = fc.get_groups()
+ print result
+
+ def test_get_hosts_by_groupgoo(self):
+ group_dict = fc.get_groups()
+ hosts = fc.get_hosts_by_groupgoo(group_dict, "@blippy")
+ print hosts