From cf0f0aac7c14f51ec1c9341c70219fb2fcd2f58c Mon Sep 17 00:00:00 2001 From: "Thierry bordaz (tbordaz)" Date: Mon, 21 Oct 2013 15:40:16 +0200 Subject: [PATCH] Ticket ticket47566 - Initial import of DSadmin into 389-test repos Bug Description: This commit is done with a merge of https://github.com/richm/dsadmin/pull/5 that is a refactoring of dsadmin (introducing replication/backend/replica/suffix) At the time of this commit https://github.com/richm/dsadmin/pull/5 was not yet push in master https://github.com/richm/dsadmin Changes regarding https://github.com/richm/dsadmin are: - removal of instance - support local host name different than localhost - renaming of dsadmin/dsadmin into lib389/lib389 - support of sudoers https://fedorahosted.org/389/ticket/47566 Reviewed by: ? Platforms tested: Flag Day: no Doc impact: no --- .gitignore | 6 + bug_harness.py | 100 +++ dsadmin.pylintrc | 236 ++++++ lib389/__init__.py | 1373 +++++++++++++++++++++++++++++++++++ lib389/_constants.py | 50 ++ lib389/_entry.py | 178 +++++ lib389/_ldifconn.py | 43 ++ lib389/_replication.py | 175 +++++ lib389/brooker.py | 842 +++++++++++++++++++++ lib389/tools.py | 560 ++++++++++++++ lib389/utils.py | 480 ++++++++++++ tests/backend_test.py | 110 +++ tests/config.py | 37 + tests/config_test.py | 81 +++ tests/dsadmin_basic_test.py | 111 +++ tests/dsadmin_create_remove_test.py | 83 +++ tests/dsadmin_test.py | 240 ++++++ tests/entry_test.py | 87 +++ tests/replica_test.py | 318 ++++++++ tests/utils_test.py | 124 ++++ 20 files changed, 5234 insertions(+) create mode 100644 .gitignore create mode 100644 bug_harness.py create mode 100644 dsadmin.pylintrc create mode 100644 lib389/_constants.py create mode 100644 lib389/_entry.py create mode 100644 lib389/_ldifconn.py create mode 100644 lib389/_replication.py create mode 100644 lib389/brooker.py create mode 100644 lib389/tools.py create mode 100644 lib389/utils.py create mode 100644 tests/backend_test.py create mode 100644 tests/config.py create mode 
"""An harness for bug replication.

Specializes DSAdmin/DSAdminTools with defaults (replication credentials,
progressive replica ids, backend names) suitable for reproducing bugs.
"""
import os

import ldap  # BUGFIX: ldap.ALREADY_EXISTS was used below but never imported

# BUGFIX: the original read "from bug_harness import DSAdminHarness as
# DSAdmin" -- the module imported *itself*, a circular import that fails at
# load time. DSAdmin must come from the dsadmin package instead.
from dsadmin import DSAdmin, Entry
from dsadmin.tools import DSAdminTools
# MASTER_TYPE was referenced but never imported -- assumed to live in the
# dsadmin package; TODO confirm its home module.
from dsadmin import MASTER_TYPE

# Default replication bind credentials (intentionally blank placeholders).
REPLBINDDN = ''
REPLBINDPW = ''


def static_var(varname, value):
    """Attach a mutable "static" attribute to the decorated function.

    The original module used @static_var without defining or importing it;
    it is provided here so the module is self-contained.
    """
    def decorate(func):
        setattr(func, varname, value)
        return func
    return decorate


@static_var("REPLICAID", 1)
def get_next_replicaid(replica_id=None, replica_type=None):
    """Return a replica id for a new replica.

    @param replica_id - explicit id to use (and remember) if given
    @param replica_type - masters (MASTER_TYPE) get a progressive id;
                          consumers always get 0
    """
    # BUGFIX: the original assigned to a *local* REPLICAID, so an explicit
    # id was never remembered and "REPLICAID += 1" raised UnboundLocalError.
    # The counter lives on the function attribute set by @static_var.
    if replica_id:
        get_next_replicaid.REPLICAID = replica_id
        return replica_id
    # get a default replica_id if it's a MASTER, or 0 if consumer
    if replica_type == MASTER_TYPE:
        get_next_replicaid.REPLICAID += 1
        return get_next_replicaid.REPLICAID
    return 0


class DSAdminHarness(DSAdmin, DSAdminTools):
    """Harness wrapper around dsadmin.

    Specialize the DSAdmin behavior (No, I don't care about Liskov ;))
    """
    def setupSSL(self, secport, sourcedir=os.environ['SECDIR'], secargs=None):
        """Bug scripts requires SECDIR.

        NOTE: the default is evaluated at import time, so SECDIR must be in
        the environment when this module is loaded (intentional here).
        """
        return DSAdminTools.setupSSL(self, secport, sourcedir, secargs)

    def setupAgreement(self, repoth, args):
        """Set default replica credentials."""
        args.setdefault('binddn', REPLBINDDN)
        args.setdefault('bindpw', REPLBINDPW)
        return DSAdmin.setupAgreement(self, repoth, args)

    def setupReplica(self, args):
        """Set default replica credentials and a progressive replica id."""
        args.setdefault('binddn', REPLBINDDN)
        args.setdefault('bindpw', REPLBINDPW)
        # manage a progressive REPLICAID
        args.setdefault(
            'id', get_next_replicaid(args.get('id'), args.get('type')))
        return DSAdmin.setupReplica(self, args)

    def setupBindDN(self, binddn=REPLBINDDN, bindpw=REPLBINDPW):
        """Create the bind entry with the harness default credentials."""
        return DSAdmin.setupBindDN(self, binddn, bindpw)

    def setupReplBindDN(self, binddn=REPLBINDDN, bindpw=REPLBINDPW):
        """Alias for setupBindDN with the replication defaults."""
        return self.setupBindDN(binddn, bindpw)

    def setupBackend(self, suffix, binddn=None, bindpw=None, urls=None,
                     attrvals=None, benamebase=None, verbose=False):
        """Create a backend using the first available cn.

        If benamebase is given, it is first tried verbatim; on
        ldap.ALREADY_EXISTS a numeric suffix is appended (benamebase1,
        benamebase2, ...) until creation succeeds.
        @return whatever DSAdmin.setupBackend returns (the backend name)
        """
        # if benamebase is set, benum starts at 0 so the first attempt
        # tries the plain benamebase; otherwise start numbering at 1
        benum = 0 if benamebase else 1

        # figure out what type of be based on args
        if binddn and bindpw and urls:  # its a chaining be
            benamebase = benamebase or "chaindb"
        else:  # its a ldbm be
            benamebase = benamebase or "localdb"

        while True:
            # e.g. localdb1 on retry
            benamebase_tmp = benamebase + str(benum) if benum else benamebase
            try:
                # BUGFIX: the original called the unbound superclass method
                # without `self` (always a TypeError) and passed `benamebase`
                # instead of `benamebase_tmp`, so the retry loop never
                # actually tried a new name.
                return DSAdmin.setupBackend(self, suffix, binddn, bindpw,
                                            urls, attrvals, benamebase_tmp,
                                            verbose)
            except ldap.ALREADY_EXISTS:
                benum += 1

    def createInstance(self, args):
        """Create an instance, defaulting prefix/sroot from the environment.

        BUGFIX: `self` was missing from the original signature, so any call
        through an instance raised TypeError (args received the instance).
        """
        # eventually set prefix
        args.setdefault('prefix', os.environ.get('PREFIX', None))
        args.setdefault('sroot', os.environ.get('SERVER_ROOT', None))
        DSAdminTools.createInstance(args)
+files-output=no + +# Tells whether to display a full report or only the messages +reports=yes + +# Python expression which should return a note less than 10 (10 is the highest +# note). You have access to the variables errors warning, statement which +# respectively contain the number of errors / warnings messages and the total +# number of statements analyzed. This is used by the global evaluation report +# (R0004). +evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) + +# Add a comment according to your evaluation note. This is used by the global +# evaluation report (R0004). +comment=no + + +[SIMILARITIES] + +# Minimum lines number of a similarity. +min-similarity-lines=4 + +# Ignore comments when computing similarities. +ignore-comments=yes + +# Ignore docstrings when computing similarities. +ignore-docstrings=yes + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME,XXX,TODO + + +[FORMAT] + +# Maximum number of characters on a single line. +max-line-length=150 + +# Maximum number of lines in a module +max-module-lines=3000 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + + +[VARIABLES] + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# A regular expression matching names used for dummy variables (i.e. not used). +dummy-variables-rgx=_|dummy + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid to define new builtins when possible. 
+additional-builtins= + + +[BASIC] + +# Required attributes for module, separated by a comma +required-attributes= + +# List of builtins function names that should not be used, separated by a comma +bad-functions=map,filter,apply,input + +# Regular expression which should only match correct module names +module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ + +# Regular expression which should only match correct module level names +const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ + +# Regular expression which should only match correct class names +class-rgx=[A-Z_][a-zA-Z0-9]+$ + +# Regular expression which should only match correct function names +function-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct method names +method-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct instance attribute names +attr-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct argument names +argument-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct variable names +variable-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct list comprehension / +# generator expression variable names +inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ + +# Good variable names which should always be accepted, separated by a comma +good-names=i,j,k,ex,Run,_,dn + +# Bad variable names which should always be refused, separated by a comma +bad-names=foo,bar,baz,toto,tutu,tata + +# Regular expression which should only match functions or classes name which do +# not require a docstring +no-docstring-rgx=__.*__ + + +[TYPECHECK] + +# Tells whether missing members accessed in mixin class should be ignored. A +# mixin class is detected if its name ends with "mixin" (case insensitive). +ignore-mixin-members=yes + +# List of classes names for which member attributes should not be checked +# (useful for classes with attributes dynamically set). 
+ignored-classes=SQLObject + +# When zope mode is activated, add a predefined set of Zope acquired attributes +# to generated-members. +zope=no + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E0201 when accessed. +generated-members=REQUEST,acl_users,aq_parent + + +[IMPORTS] + +# Deprecated modules which should not be used, separated by a comma +deprecated-modules=regsub,string,TERMIOS,Bastion,rexec + +# Create a graph of every (i.e. internal and external) dependencies in the +# given file (report RP0402 must not be disabled) +import-graph= + +# Create a graph of external dependencies in the given file (report RP0402 must +# not be disabled) +ext-import-graph= + +# Create a graph of internal dependencies in the given file (report RP0402 must +# not be disabled) +int-import-graph= + + +[CLASSES] + +# List of interface methods to ignore, separated by a comma. This is used for +# instance to not check methods defines in Zope's Interface base class. +ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__,__new__,setUp + + +[DESIGN] + +# Maximum number of arguments for function / method +max-args=5 + +# Argument names that match this expression will be ignored. Default to name +# with leading underscore +ignored-argument-names=_.* + +# Maximum number of locals for function / method body +max-locals=15 + +# Maximum number of return / yield for function / method body +max-returns=6 + +# Maximum number of branch for function / method body +max-branchs=12 + +# Maximum number of statements in function / method body +max-statements=50 + +# Maximum number of parents for a class (see R0901). 
+max-parents=7 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 diff --git a/lib389/__init__.py b/lib389/__init__.py index e69de29..9e6187a 100644 --- a/lib389/__init__.py +++ b/lib389/__init__.py @@ -0,0 +1,1373 @@ +"""The dsadmin module. + + + IMPORTANT: Ternary operator syntax is unsupported on RHEL5 + x if cond else y #don't! + + The DSAdmin functionalities are split in various classes + defined in brookers.py + + TODO: reorganize method parameters according to SimpleLDAPObject + naming: filterstr, attrlist +""" +try: + from subprocess import Popen, PIPE, STDOUT + HASPOPEN = True +except ImportError: + import popen2 + HASPOPEN = False + +import sys +import os +import os.path +import base64 +import urllib +import urllib2 +import socket +import ldif +import re +import ldap +import cStringIO +import time +import operator +import shutil +import datetime +import select +import logging + +from ldap.ldapobject import SimpleLDAPObject +from ldapurl import LDAPUrl +from ldap.cidict import cidict +from ldap import LDAPError +# file in this package + +from lib389._constants import * +from lib389._entry import Entry +from lib389._replication import CSN, RUV +from lib389._ldifconn import LDIFConn +from lib389.utils import ( + isLocalHost, + is_a_dn, + normalizeDN, + suffixfilt, + escapeDNValue + ) + +# mixin +#from lib389.tools import DSAdminTools + +RE_DBMONATTR = re.compile(r'^([a-zA-Z]+)-([1-9][0-9]*)$') +RE_DBMONATTRSUN = re.compile(r'^([a-zA-Z]+)-([a-zA-Z]+)$') + + + +# My logger +log = logging.getLogger(__name__) + + +class Error(Exception): + pass + + +class InvalidArgumentError(Error): + pass + +class AlreadyExists(ldap.ALREADY_EXISTS): + pass + +class NoSuchEntryError(ldap.NO_SUCH_OBJECT): + pass + + +class MissingEntryError(NoSuchEntryError): + """When just added 
entries are missing.""" + pass + + +class DsError(Error): + """Generic DS Error.""" + pass + + + + +def wrapper(f, name): + """Wrapper of all superclass methods using lib389.Entry. + @param f - DSAdmin method inherited from SimpleLDAPObject + @param name - method to call + + This seems to need to be an unbound method, that's why it's outside of DSAdmin. Perhaps there + is some way to do this with the new classmethod or staticmethod of 2.4. + + We replace every call to a method in SimpleLDAPObject (the superclass + of DSAdmin) with a call to inner. The f argument to wrapper is the bound method + of DSAdmin (which is inherited from the superclass). Bound means that it will implicitly + be called with the self argument, it is not in the args list. name is the name of + the method to call. If name is a method that returns entry objects (e.g. result), + we wrap the data returned by an Entry class. If name is a method that takes an entry + argument, we extract the raw data from the entry object to pass in.""" + def inner(*args, **kargs): + if name == 'result': + objtype, data = f(*args, **kargs) + # data is either a 2-tuple or a list of 2-tuples + # print data + if data: + if isinstance(data, tuple): + return objtype, Entry(data) + elif isinstance(data, list): + # AD sends back these search references +# if objtype == ldap.RES_SEARCH_RESULT and \ +# isinstance(data[-1],tuple) and \ +# not data[-1][0]: +# print "Received search reference: " +# pprint.pprint(data[-1][1]) +# data.pop() # remove the last non-entry element + + return objtype, [Entry(x) for x in data] + else: + raise TypeError("unknown data type %s returned by result" % + type(data)) + else: + return objtype, data + elif name.startswith('add'): + # the first arg is self + # the second and third arg are the dn and the data to send + # We need to convert the Entry into the format used by + # python-ldap + ent = args[0] + if isinstance(ent, Entry): + return f(ent.dn, ent.toTupleList(), *args[2:]) + else: + return 
f(*args, **kargs) + else: + return f(*args, **kargs) + return inner + + + +class DSAdmin(SimpleLDAPObject): + + def getDseAttr(self, attrname): + """Return a given attribute from dse.ldif. + TODO can we take it from "cn=config" ? + """ + conffile = self.confdir + '/dse.ldif' + try: + dse_ldif = LDIFConn(conffile) + cnconfig = dse_ldif.get(DN_CONFIG) + if cnconfig: + return cnconfig.getValue(attrname) + return None + except IOError, err: # except..as.. doedn't work on python 2.4 + log.error("could not read dse config file") + raise err + + def __initPart2(self): + """Initialize the DSAdmin structure filling various fields, like: + - dbdir + - errlog + - confdir + + """ + if self.binddn and len(self.binddn) and not hasattr(self, 'sroot'): + try: + # XXX this fields are stale and not continuously updated + # do they have sense? + ent = self.getEntry(DN_CONFIG, attrlist=[ + 'nsslapd-instancedir', + 'nsslapd-errorlog', + 'nsslapd-certdir', + 'nsslapd-schemadir']) + self.errlog = ent.getValue('nsslapd-errorlog') + self.confdir = ent.getValue('nsslapd-certdir') + + if self.isLocal: + if not self.confdir or not os.access(self.confdir + '/dse.ldif', os.R_OK): + self.confdir = ent.getValue('nsslapd-schemadir') + if self.confdir: + self.confdir = os.path.dirname(self.confdir) + instdir = ent.getValue('nsslapd-instancedir') + if not instdir and self.isLocal: + # get instance name from errorlog + # move re outside + self.inst = re.match( + r'(.*)[\/]slapd-([^/]+)/errors', self.errlog).group(2) + if self.isLocal and self.confdir: + instdir = self.getDseAttr('nsslapd-instancedir') + else: + instdir = re.match(r'(.*/slapd-.*)/logs/errors', + self.errlog).group(1) + if not instdir: + instdir = self.confdir + if self.verbose: + log.debug("instdir=%r" % instdir) + log.debug("Entry: %r" % ent) + match = re.match(r'(.*)[\/]slapd-([^/]+)$', instdir) + if match: + self.sroot, self.inst = match.groups() + else: + self.sroot = self.inst = '' + ent = self.getEntry('cn=config,' + DN_LDBM, + 
attrlist=['nsslapd-directory']) + self.dbdir = os.path.dirname(ent.getValue('nsslapd-directory')) + except (ldap.INSUFFICIENT_ACCESS, ldap.CONNECT_ERROR, NoSuchEntryError): + log.exception("Skipping exception during initialization") + except ldap.OPERATIONS_ERROR, e: + log.exception("Skipping exception: Probably Active Directory") + except ldap.LDAPError, e: + log.exception("Error during initialization") + raise + + def __localinit__(self): + uri = self.toLDAPURL() + + SimpleLDAPObject.__init__(self, uri) + + # see if binddn is a dn or a uid that we need to lookup + if self.binddn and not is_a_dn(self.binddn): + self.simple_bind_s("", "") # anon + ent = self.getEntry(CFGSUFFIX, ldap.SCOPE_SUBTREE, + "(uid=%s)" % self.binddn, + ['uid']) + if ent: + self.binddn = ent.dn + else: + log.error("Error: could not find %s under %s" % ( + self.binddn, CFGSUFFIX)) + if not self.nobind: + needtls = False + while True: + try: + if needtls: + self.start_tls_s() + try: + self.simple_bind_s(self.binddn, self.bindpw) + except ldap.SERVER_DOWN, e: + # TODO add server info in exception + log.error("Cannot connect to %r" % uri) + raise e + break + except ldap.CONFIDENTIALITY_REQUIRED: + needtls = True + self.__initPart2() + + def rebind(self): + """Reconnect to the DS + + @raise ldap.CONFIDENTIALITY_REQUIRED - missing TLS: + """ + SimpleLDAPObject.__init__(self, self.toLDAPURL()) + #self.start_tls_s() + self.simple_bind_s(self.binddn, self.bindpw) + + def __add_brookers__(self): + from lib389.brooker import ( + Replica, + Backend, + Config) + self.replica = Replica(self) + self.backend = Backend(self) + self.config = Config(self) + + def __init__(self, host='localhost', port=389, binddn='', bindpw='', serverId=None, nobind=False, sslport=0, verbose=False): # default to anon bind + """We just set our instance variables and wrap the methods. + The real work is done in the following methods, reused during + instance creation & co. + * __localinit__ + * __initPart2 + + e.g. 
when using the start command, we just need to reconnect, + not create a new instance""" + log.info("Initializing %s with %s:%s" % (self.__class__, + host, sslport or port)) + self.__wrapmethods() + self.verbose = verbose + self.port = port + self.sslport = sslport + self.host = host + self.binddn = binddn + self.bindpw = bindpw + self.nobind = nobind + self.isLocal = isLocalHost(host) + self.serverId = serverId + + # + # dict caching DS structure + # + self.suffixes = {} + self.agmt = {} + # the real init + self.__localinit__() + self.log = log + # add brookers + self.__add_brookers__() + + + def __str__(self): + """XXX and in SSL case?""" + return self.host + ":" + str(self.port) + + def toLDAPURL(self): + """Return the uri ldap[s]://host:[ssl]port.""" + if self.sslport: + return "ldaps://%s:%d/" % (self.host, self.sslport) + else: + return "ldap://%s:%d/" % (self.host, self.port) + + def getServerId(self): + return self.serverId + + # + # Get entries + # + def getEntry(self, *args, **kwargs): + """Wrapper around SimpleLDAPObject.search. It is common to just get one entry. + @param - entry dn + @param - search scope, in ldap.SCOPE_BASE (default), ldap.SCOPE_SUB, ldap.SCOPE_ONE + @param filterstr - filterstr, default '(objectClass=*)' from SimpleLDAPObject + @param attrlist - list of attributes to retrieve. eg ['cn', 'uid'] + @oaram attrsonly - default None from SimpleLDAPObject + eg. getEntry(dn, scope, filter, attributes) + + XXX This cannot return None + """ + log.debug("Retrieving entry with %r" % [args]) + if len(args) == 1 and 'scope' not in kwargs: + args += (ldap.SCOPE_BASE, ) + + res = self.search(*args, **kwargs) + restype, obj = self.result(res) + # TODO: why not test restype? 
+ if not obj: + raise NoSuchEntryError("no such entry for %r" % [args]) + + log.info("Retrieved entry %r" % obj) + if isinstance(obj, Entry): + return obj + else: # assume list/tuple + if obj[0] is None: + raise NoSuchEntryError("Entry is None") + return obj[0] + + def _test_entry(self, dn, scope=ldap.SCOPE_BASE): + try: + entry = self.getEntry(dn, scope) + log.info("Found entry %s" % entry) + return entry + except NoSuchEntryError: + log.exception("Entry %s was added successfully, but I cannot search it" % dn) + raise MissingEntryError("Entry %s was added successfully, but I cannot search it" % dn) + + def getMTEntry(self, suffix, attrs=None): + """Given a suffix, return the mapping tree entry for it. If attrs is + given, only fetch those attributes, otherwise, get all attributes. + """ + attrs = attrs or [] + filtr = suffixfilt(suffix) + try: + entry = self.getEntry( + DN_MAPPING_TREE, ldap.SCOPE_ONELEVEL, filtr, attrs) + return entry + except NoSuchEntryError: + raise NoSuchEntryError( + "Cannot find suffix in mapping tree: %r " % suffix) + except ldap.FILTER_ERROR, e: + log.error("Error searching for %r" % filtr) + raise e + + def __wrapmethods(self): + """This wraps all methods of SimpleLDAPObject, so that we can intercept + the methods that deal with entries. 
Instead of using a raw list of tuples + of lists of hashes of arrays as the entry object, we want to wrap entries + in an Entry class that provides some useful methods""" + for name in dir(self.__class__.__bases__[0]): + attr = getattr(self, name) + if callable(attr): + setattr(self, name, wrapper(attr, name)) + + def startTask(self, entry, verbose=False): + # start the task + dn = entry.dn + self.add_s(entry) + + if verbose: + self._test_entry(dn, ldap.SCOPE_BASE) + + return True + + def checkTask(self, entry, dowait=False, verbose=False): + '''check task status - task is complete when the nsTaskExitCode attr is set + return a 2 tuple (true/false,code) first is false if task is running, true if + done - if true, second is the exit code - if dowait is True, this function + will block until the task is complete''' + attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode', + 'nsTaskCurrentItem', 'nsTaskTotalItems'] + done = False + exitCode = 0 + dn = entry.dn + while not done: + entry = self.getEntry(dn, attrlist=attrlist) + log.debug("task entry %r" % entry) + + if entry.nsTaskExitCode: + exitCode = int(entry.nsTaskExitCode) + done = True + if dowait: + time.sleep(1) + else: + break + return (done, exitCode) + + def startTaskAndWait(self, entry, verbose=False): + self.startTask(entry, verbose) + (done, exitCode) = self.checkTask(entry, True, verbose) + return exitCode + + def importLDIF(self, ldiffile, suffix, be=None, verbose=False): + cn = "import" + str(int(time.time())) + dn = "cn=%s,cn=import,cn=tasks,cn=config" % cn + entry = Entry(dn) + entry.setValues('objectclass', 'top', 'extensibleObject') + entry.setValues('cn', cn) + entry.setValues('nsFilename', ldiffile) + if be: + entry.setValues('nsInstance', be) + else: + entry.setValues('nsIncludeSuffix', suffix) + + rc = self.startTaskAndWait(entry, verbose) + + if rc: + if verbose: + log.error("Error: import task %s for file %s exited with %d" % ( + cn, ldiffile, rc)) + else: + if verbose: + log.info("Import 
task %s for file %s completed successfully" % ( + cn, ldiffile)) + return rc + + def exportLDIF(self, ldiffile, suffix, be=None, forrepl=False, verbose=False): + cn = "export%d" % time.time() + dn = "cn=%s,cn=export,cn=tasks,cn=config" % cn + entry = Entry(dn) + entry.update({ + 'objectclass': ['top', 'extensibleObject'], + 'cn': cn, + 'nsFilename': ldiffile + }) + if be: + entry.setValues('nsInstance', be) + else: + entry.setValues('nsIncludeSuffix', suffix) + if forrepl: + entry.setValues('nsExportReplica', 'true') + + rc = self.startTaskAndWait(entry, verbose) + + if rc: + if verbose: + log.error("Error: export task %s for file %s exited with %d" % ( + cn, ldiffile, rc)) + else: + if verbose: + log.info("Export task %s for file %s completed successfully" % ( + cn, ldiffile)) + return rc + + def createIndex(self, suffix, attr, verbose=False): + entries_backend = self.getBackendsForSuffix(suffix, ['cn']) + cn = "index%d" % time.time() + dn = "cn=%s,cn=index,cn=tasks,cn=config" % cn + entry = Entry(dn) + entry.update({ + 'objectclass': ['top', 'extensibleObject'], + 'cn': cn, + 'nsIndexAttribute': attr, + 'nsInstance': entries_backend[0].cn + }) + # assume 1 local backend + rc = self.startTaskAndWait(entry, verbose) + + if rc: + log.error("Error: index task %s for file %s exited with %d" % ( + cn, ldiffile, rc)) + else: + log.info("Index task %s for file %s completed successfully" % ( + cn, ldiffile)) + return rc + + def fixupMemberOf(self, suffix, filt=None, verbose=False): + cn = "fixupmemberof%d" % time.time() + dn = "cn=%s,cn=memberOf task,cn=tasks,cn=config" % cn + entry = Entry(dn) + entry.setValues('objectclass', 'top', 'extensibleObject') + entry.setValues('cn', cn) + entry.setValues('basedn', suffix) + if filt: + entry.setValues('filter', filt) + rc = self.startTaskAndWait(entry, verbose) + + if rc: + if verbose: + log.error("Error: fixupMemberOf task %s for basedn %s exited with %d" % (cn, suffix, rc)) + else: + if verbose: + log.info("fixupMemberOf task 
%s for basedn %s completed successfully" % (cn, suffix)) + return rc + + def addLDIF(self, input_file, cont=False): + class LDIFAdder(ldif.LDIFParser): + def __init__(self, input_file, conn, cont=False, + ignored_attr_types=None, max_entries=0, process_url_schemes=None + ): + myfile = input_file + if isinstance(input_file, basestring): + myfile = open(input_file, "r") + self.conn = conn + self.cont = cont + ldif.LDIFParser.__init__(self, myfile, ignored_attr_types, + max_entries, process_url_schemes) + self.parse() + if isinstance(input_file, basestring): + myfile.close() + + def handle(self, dn, entry): + if not dn: + dn = '' + newentry = Entry((dn, entry)) + try: + self.conn.add_s(newentry) + except ldap.LDAPError, e: + if not self.cont: + raise e + log.exception("Error: could not add entry %s" % dn) + + adder = LDIFAdder(input_file, self, cont) + + def getSuffixes(self): + """@return a list of cn suffixes""" + ents = self.search_s(DN_MAPPING_TREE, ldap.SCOPE_ONELEVEL) + sufs = [] + for ent in ents: + unquoted = None + quoted = None + for val in ent.getValues('cn'): + if val.find('"') < 0: # prefer the one that is not quoted + unquoted = val + else: + quoted = val + if unquoted: # preferred + sufs.append(unquoted) + elif quoted: # strip + sufs.append(quoted.strip('"')) + else: + raise Exception( + "Error: mapping tree entry %r has no suffix" % ent.dn) + return sufs + + def setupBackend(self, suffix, binddn=None, bindpw=None, urls=None, attrvals=None, benamebase='localdb', verbose=False): + """Setup a backend and return its dn. Blank on error + + NOTE This won't create a suffix nor its related entry in + the tree!!! + + XXX Deprecated! @see lib389.brooker.Backend.add + + """ + return self.backend.add(suffix=suffix, binddn=binddn, bindpw=bindpw, + urls=urls, attrvals=attrvals, benamebase=benamebase, + setupmt=False, parent=None) + + + def setupSuffix(self, suffix, bename, parent="", verbose=False): + """Setup a suffix with the given backend-name. 
+ + XXX Deprecated! @see lib389.brooker.Backend.setup_mt + + """ + return self.backend.setup_mt(suffix, bename, parent) + + + def getBackendsForSuffix(self, suffix, attrs=None): + # TESTME removed try..except and raise if NoSuchEntryError + attrs = attrs or [] + nsuffix = normalizeDN(suffix) + entries = self.search_s("cn=plugins,cn=config", ldap.SCOPE_SUBTREE, + "(&(objectclass=nsBackendInstance)(|(nsslapd-suffix=%s)(nsslapd-suffix=%s)))" % (suffix, nsuffix), + attrs) + return entries + + def getSuffixForBackend(self, bename, attrs=None): + """Return the mapping tree entry of `bename` or None if not found""" + attrs = attrs or [] + try: + entry = self.getEntry("cn=plugins,cn=config", ldap.SCOPE_SUBTREE, + "(&(objectclass=nsBackendInstance)(cn=%s))" % bename, + ['nsslapd-suffix']) + suffix = entry.getValue('nsslapd-suffix') + return self.getMTEntry(suffix, attrs) + except NoSuchEntryError: + log.warning("Could not find an entry for backend %r" % bename) + return None + + def findParentSuffix(self, suffix): + """see if the given suffix has a parent suffix""" + rdns = ldap.explode_dn(suffix) + del rdns[0] + + while len(rdns) > 0: + suffix = ','.join(rdns) + try: + mapent = self.getMTEntry(suffix) + return suffix + except NoSuchEntryError: + del rdns[0] + + return "" + + def addSuffix(self, suffix, binddn=None, bindpw=None, urls=None, bename=None, beattrs=None): + """Create and return a suffix and its backend. 
+ + @param suffix + @param urls + @param bename - name of the backed (eventually created) + @param beattrs - parametes to create the backend + @param binddn + @param bindpw + Uses: setupBackend and SetupSuffix + Requires: adding a matching entry in the tree + TODO: test return values and error codes + + `beattrs`: see setupBacked + + """ + benames = [] + entries_backend = self.getBackendsForSuffix(suffix, ['cn']) + # no backends for this suffix yet - create one + if not entries_backend: + # if not bename, self.setupBackend raises + bename = self.setupBackend( + suffix, binddn, bindpw, urls, benamebase=bename, attrvals=beattrs) + else: # use existing backend(s) + benames = [entry.cn for entry in entries_backend] + bename = benames.pop(0) # do I need to modify benames + + try: + parent = self.findParentSuffix(suffix) + return self.setupSuffix(suffix, bename, parent) + except NoSuchEntryError: + log.exception( + "Couldn't create suffix for %s %s" % (bename, suffix)) + raise + + def getDBStats(self, suffix, bename=''): + if bename: + dn = ','.join(("cn=monitor,cn=%s" % bename, DN_LDBM)) + else: + entries_backend = self.getBackendsForSuffix(suffix) + dn = "cn=monitor," + entries_backend[0].dn + dbmondn = "cn=monitor," + DN_LDBM + dbdbdn = "cn=database,cn=monitor," + DN_LDBM + try: + # entrycache and dncache stats + ent = self.getEntry(dn, ldap.SCOPE_BASE) + monent = self.getEntry(dbmondn, ldap.SCOPE_BASE) + dbdbent = self.getEntry(dbdbdn, ldap.SCOPE_BASE) + ret = "cache available ratio count unitsize\n" + mecs = ent.maxentrycachesize or "0" + cecs = ent.currententrycachesize or "0" + rem = int(mecs) - int(cecs) + ratio = ent.entrycachehitratio or "0" + ratio = int(ratio) + count = ent.currententrycachecount or "0" + count = int(count) + if count: + size = int(cecs) / count + else: + size = 0 + ret += "entry % 11d % 3d % 8d % 5d" % (rem, ratio, count, size) + if ent.maxdncachesize: + mdcs = ent.maxdncachesize or "0" + cdcs = ent.currentdncachesize or "0" + rem = 
int(mdcs) - int(cdcs) + dct = ent.dncachetries or "0" + tries = int(dct) + if tries: + ratio = (100 * int(ent.dncachehits)) / tries + else: + ratio = 0 + count = ent.currentdncachecount or "0" + count = int(count) + if count: + size = int(cdcs) / count + else: + size = 0 + ret += "\ndn % 11d % 3d % 8d % 5d" % ( + rem, ratio, count, size) + + if ent.hasAttr('entrycache-hashtables'): + ret += "\n\n" + ent.getValue('entrycache-hashtables') + + # global db stats + ret += "\n\nglobal db stats" + dbattrs = 'dbcachehits dbcachetries dbcachehitratio dbcachepagein dbcachepageout dbcacheroevict dbcacherwevict'.split(' ') + cols = {'dbcachehits': [len('cachehits'), 'cachehits'], 'dbcachetries': [10, 'cachetries'], + 'dbcachehitratio': [5, 'ratio'], 'dbcachepagein': [6, 'pagein'], + 'dbcachepageout': [7, 'pageout'], 'dbcacheroevict': [7, 'roevict'], + 'dbcacherwevict': [7, 'rwevict']} + dbrec = {} + for attr, vals in monent.iterAttrs(): + if attr.startswith('dbcache'): + val = vals[0] + dbrec[attr] = val + vallen = len(val) + if vallen > cols[attr][0]: + cols[attr][0] = vallen + # construct the format string based on the field widths + fmtstr = '' + ret += "\n" + for attr in dbattrs: + fmtstr += ' %%(%s)%ds' % (attr, cols[attr][0]) + ret += ' %*s' % tuple(cols[attr]) + ret += "\n" + (fmtstr % dbrec) + + # other db stats + skips = {'nsslapd-db-cache-hit': 'nsslapd-db-cache-hit', 'nsslapd-db-cache-try': 'nsslapd-db-cache-try', + 'nsslapd-db-page-write-rate': 'nsslapd-db-page-write-rate', + 'nsslapd-db-page-read-rate': 'nsslapd-db-page-read-rate', + 'nsslapd-db-page-ro-evict-rate': 'nsslapd-db-page-ro-evict-rate', + 'nsslapd-db-page-rw-evict-rate': 'nsslapd-db-page-rw-evict-rate'} + + hline = '' # header line + vline = '' # val line + for attr, vals in dbdbent.iterAttrs(): + if attr in skips: + continue + if attr.startswith('nsslapd-db-'): + short = attr.replace('nsslapd-db-', '') + val = vals[0] + width = max(len(short), len(val)) + if len(hline) + width > 70: + ret += "\n" + 
hline + "\n" + vline + hline = vline = '' + hline += ' %*s' % (width, short) + vline += ' %*s' % (width, val) + + # per file db stats + ret += "\n\nper file stats" + # key is number + # data is dict - key is attr name without the number - val is the attr val + dbrec = {} + dbattrs = ['dbfilename', 'dbfilecachehit', + 'dbfilecachemiss', 'dbfilepagein', 'dbfilepageout'] + # cols maps dbattr name to column header and width + cols = {'dbfilename': [len('dbfilename'), 'dbfilename'], 'dbfilecachehit': [9, 'cachehits'], + 'dbfilecachemiss': [11, 'cachemisses'], 'dbfilepagein': [6, 'pagein'], + 'dbfilepageout': [7, 'pageout']} + for attr, vals in ent.iterAttrs(): + match = RE_DBMONATTR.match(attr) + if match: + name = match.group(1) + num = match.group(2) + val = vals[0] + if name == 'dbfilename': + val = val.split('/')[-1] + dbrec.setdefault(num, {})[name] = val + vallen = len(val) + if vallen > cols[name][0]: + cols[name][0] = vallen + match = RE_DBMONATTRSUN.match(attr) + if match: + name = match.group(1) + if name == 'entrycache': + continue + num = match.group(2) + val = vals[0] + if name == 'dbfilename': + val = val.split('/')[-1] + dbrec.setdefault(num, {})[name] = val + vallen = len(val) + if vallen > cols[name][0]: + cols[name][0] = vallen + # construct the format string based on the field widths + fmtstr = '' + ret += "\n" + for attr in dbattrs: + fmtstr += ' %%(%s)%ds' % (attr, cols[attr][0]) + ret += ' %*s' % tuple(cols[attr]) + for dbf in dbrec.itervalues(): + ret += "\n" + (fmtstr % dbf) + return ret + except Exception, e: + print "caught exception", str(e) + return '' + + def waitForEntry(self, dn, timeout=7200, attr='', quiet=True): + scope = ldap.SCOPE_BASE + filt = "(objectclass=*)" + attrlist = [] + if attr: + filt = "(%s=*)" % attr + attrlist.append(attr) + timeout += int(time.time()) + + if isinstance(dn, Entry): + dn = dn.dn + + # wait for entry and/or attr to show up + if not quiet: + sys.stdout.write("Waiting for %s %s:%s " % (self, dn, attr)) + 
sys.stdout.flush() + entry = None + while not entry and int(time.time()) < timeout: + try: + entry = self.getEntry(dn, scope, filt, attrlist) + except NoSuchEntryError: + pass # found entry, but no attr + except ldap.NO_SUCH_OBJECT: + pass # no entry yet + except ldap.LDAPError, e: # badness + print "\nError reading entry", dn, e + break + if not entry: + if not quiet: + sys.stdout.write(".") + sys.stdout.flush() + time.sleep(1) + + if not entry and int(time.time()) > timeout: + print "\nwaitForEntry timeout for %s for %s" % (self, dn) + elif entry: + if not quiet: + print "\nThe waited for entry is:", entry + else: + print "\nError: could not read entry %s from %s" % (dn, self) + + return entry + + def addIndex(self, suffix, attr, indexTypes, *matchingRules): + """Specify the suffix (should contain 1 local database backend), + the name of the attribute to index, and the types of indexes + to create e.g. "pres", "eq", "sub" + """ + entries_backend = self.getBackendsForSuffix(suffix, ['cn']) + # assume 1 local backend + dn = "cn=%s,cn=index,%s" % (attr, entries_backend[0].dn) + entry = Entry(dn) + entry.setValues('objectclass', 'top', 'nsIndex') + entry.setValues('cn', attr) + entry.setValues('nsSystemIndex', "false") + entry.setValues('nsIndexType', indexTypes) + if matchingRules: + entry.setValues('nsMatchingRule', matchingRules) + try: + self.add_s(entry) + except ldap.ALREADY_EXISTS: + print "Index for attr %s for backend %s already exists" % ( + attr, dn) + + def modIndex(self, suffix, attr, mod): + """just a wrapper around a plain old ldap modify, but will + find the correct index entry based on the suffix and attribute""" + entries_backend = self.getBackendsForSuffix(suffix, ['cn']) + # assume 1 local backend + dn = "cn=%s,cn=index,%s" % (attr, entries_backend[0].dn) + self.modify_s(dn, mod) + + def requireIndex(self, suffix): + entries_backend = self.getBackendsForSuffix(suffix, ['cn']) + # assume 1 local backend + dn = entries_backend[0].dn + replace = 
[(ldap.MOD_REPLACE, 'nsslapd-require-index', 'on')] + self.modify_s(dn, replace) + + def addSchema(self, attr, val): + dn = "cn=schema" + self.modify_s(dn, [(ldap.MOD_ADD, attr, val)]) + + def addAttr(self, *attributes): + return self.addSchema('attributeTypes', attributes) + + def addObjClass(self, *objectclasses): + return self.addSchema('objectClasses', objectclasses) + + + + + + def setupChainingIntermediate(self): + confdn = ','.join(("cn=config", DN_CHAIN)) + try: + self.modify_s(confdn, [(ldap.MOD_ADD, 'nsTransmittedControl', + ['2.16.840.1.113730.3.4.12', '1.3.6.1.4.1.1466.29539.12'])]) + except ldap.TYPE_OR_VALUE_EXISTS: + log.error("chaining backend config already has the required controls") + + def setupChainingMux(self, suffix, isIntermediate, binddn, bindpw, urls): + self.addSuffix(suffix, binddn, bindpw, urls) + if isIntermediate: + self.setupChainingIntermediate() + + def setupChainingFarm(self, suffix, binddn, bindpw): + # step 1 - create the bind dn to use as the proxy + self.setupBindDN(binddn, bindpw) + self.addSuffix(suffix) # step 2 - create the suffix + # step 3 - add the proxy ACI to the suffix + try: + acival = "(targetattr = \"*\")(version 3.0; acl \"Proxied authorization for database links\"" + \ + "; allow (proxy) userdn = \"ldap:///%s\";)" % binddn + self.modify_s(suffix, [(ldap.MOD_ADD, 'aci', [acival])]) + except ldap.TYPE_OR_VALUE_EXISTS: + log.error("proxy aci already exists in suffix %s for %s" % ( + suffix, binddn)) + + def setupChaining(self, to, suffix, isIntermediate): + """Setup chaining from self to to - self is the mux, to is the farm + if isIntermediate is set, this server will chain requests from another server to to + """ + bindcn = "chaining user" + binddn = "cn=%s,cn=config" % bindcn + bindpw = "chaining" + + to.setupChainingFarm(suffix, binddn, bindpw) + self.setupChainingMux( + suffix, isIntermediate, binddn, bindpw, to.toLDAPURL()) + + + def enableChainOnUpdate(self, suffix, bename): + # first, get the mapping tree 
entry to modify + mtent = self.getMTEntry(suffix, ['cn']) + dn = mtent.dn + + # next, get the path of the replication plugin + e_plugin = self.getEntry( + "cn=Multimaster Replication Plugin,cn=plugins,cn=config", + attrlist=['nsslapd-pluginPath']) + path = e_plugin.getValue('nsslapd-pluginPath') + + mod = [(ldap.MOD_REPLACE, 'nsslapd-state', 'backend'), + (ldap.MOD_ADD, 'nsslapd-backend', bename), + (ldap.MOD_ADD, 'nsslapd-distribution-plugin', path), + (ldap.MOD_ADD, 'nsslapd-distribution-funct', 'repl_chain_on_update')] + + try: + self.modify_s(dn, mod) + except ldap.TYPE_OR_VALUE_EXISTS: + print "chainOnUpdate already enabled for %s" % suffix + + def setupConsumerChainOnUpdate(self, suffix, isIntermediate, binddn, bindpw, urls, beargs=None): + beargs = beargs or {} + # suffix should already exist + # we need to create a chaining backend + if not 'nsCheckLocalACI' in beargs: + beargs['nsCheckLocalACI'] = 'on' # enable local db aci eval. + chainbe = self.setupBackend(suffix, binddn, bindpw, urls, beargs) + # do the stuff for intermediate chains + if isIntermediate: + self.setupChainingIntermediate() + # enable the chain on update + return self.enableChainOnUpdate(suffix, chainbe) + + + def setupBindDN(self, binddn, bindpw, attrs=None): + """ Return - eventually creating - a person entry with the given dn and pwd. + + binddn can be a lib389.Entry + """ + try: + assert binddn + if isinstance(binddn, Entry): + assert binddn.dn + binddn = binddn.dn + except AssertionError: + raise AssertionError("Error: entry dn should be set!" 
% binddn) + + ent = Entry(binddn) + ent.setValues('objectclass', "top", "person") + ent.setValues('userpassword', bindpw) + ent.setValues('sn', "bind dn pseudo user") + ent.setValues('cn', "bind dn pseudo user") + + # support for uid + attribute, value = binddn.split(",")[0].split("=", 1) + if attribute == 'uid': + ent.setValues('objectclass', "top", "person", 'inetOrgPerson') + ent.setValues('uid', value) + + if attrs: + ent.update(attrs) + + try: + self.add_s(ent) + except ldap.ALREADY_EXISTS: + log.warn("Entry %s already exists" % binddn) + + try: + entry = self._test_entry(binddn, ldap.SCOPE_BASE) + return entry + except MissingEntryError: + log.exception("This entry should exist!") + raise + + def setupWinSyncAgmt(self, args, entry): + if 'winsync' not in args: + return + + suffix = args['suffix'] + entry.setValues("objectclass", "nsDSWindowsReplicationAgreement") + entry.setValues("nsds7WindowsReplicaSubtree", + args.get("win_subtree", + "cn=users," + suffix)) + entry.setValues("nsds7DirectoryReplicaSubtree", + args.get("ds_subtree", + "ou=People," + suffix)) + entry.setValues( + "nsds7NewWinUserSyncEnabled", args.get('newwinusers', 'true')) + entry.setValues( + "nsds7NewWinGroupSyncEnabled", args.get('newwingroups', 'true')) + windomain = '' + if 'windomain' in args: + windomain = args['windomain'] + else: + windomain = '.'.join(ldap.explode_dn(suffix, 1)) + entry.setValues("nsds7WindowsDomain", windomain) + if 'interval' in args: + entry.setValues("winSyncInterval", args['interval']) + if 'onewaysync' in args: + if args['onewaysync'].lower() == 'fromwindows' or \ + args['onewaysync'].lower() == 'towindows': + entry.setValues("oneWaySync", args['onewaysync']) + else: + raise Exception("Error: invalid value %s for oneWaySync: must be fromWindows or toWindows" % args['onewaysync']) + + # args - DSAdmin consumer (repoth), suffix, binddn, bindpw, timeout + # also need an auto_init argument + def setupAgreement(self, consumer, args, cn_format=r'meTo_%s:%s', 
description_format=r'me to %s:%s'): + """Create (and return) a replication agreement from self to consumer. + - self is the supplier, + - consumer is a DSAdmin object (consumer can be a master) + - cn_format - use this string to format the agreement name + + consumer: + * a DSAdmin object if chaining + * an object with attributes: host, port, sslport, __str__ + + args = { + 'suffix': "dc=example,dc=com", + 'bename': "userRoot", + 'binddn': "cn=replrepl,cn=config", + 'bindcn': "replrepl", # so I need it? + 'bindpw': "replrepl", + 'bindmethod': 'simple', + 'log' : True. + 'timeout': 120 + } + + self.suffixes is of the form { + 'o=suffix1': 'ldaps://consumer.example.com:636', + 'o=suffix2': 'ldap://consumer.example.net:3890' + } + """ + assert args.get('binddn') and args.get('bindpw') + suffix = args['suffix'] + binddn = args.get('binddn') + bindpw = args.get('bindpw') + + nsuffix = normalizeDN(suffix) + othhost, othport, othsslport = ( + consumer.host, consumer.port, consumer.sslport) + othport = othsslport or othport + + # adding agreement to previously created replica + # eventually setting self.suffixes dict. 
+ if not nsuffix in self.suffixes: + replica_entries = self.replica.list(suffix) + if not replica_entries: + raise NoSuchEntryError( + "Error: no replica set up for suffix " + suffix) + replent = replica_entries[0] + self.suffixes[nsuffix] = { + 'dn': replent.dn, + 'type': int(replent.nsds5replicatype) + } + # define agreement entry + cn = cn_format % (othhost, othport) + dn_agreement = "cn=%s,%s" % (cn, self.suffixes[nsuffix]['dn']) + try: + entry = self.getEntry(dn_agreement, ldap.SCOPE_BASE) + except ldap.NO_SUCH_OBJECT: + entry = None + if entry: + log.warn("Agreement exists:", dn_agreement) + self.suffixes.setdefault(nsuffix, {})[str(consumer)] = dn_agreement + return dn_agreement + if (nsuffix in self.agmt) and (consumer in self.agmt[nsuffix]): + log.warn("Agreement exists:", dn_agreement) + return dn_agreement + + # In a separate function in this scope? + entry = Entry(dn_agreement) + entry.update({ + 'objectclass': ["top", "nsds5replicationagreement"], + 'cn': cn, + 'nsds5replicahost': othhost, + 'nsds5replicatimeout': str(args.get('timeout', 120)), + 'nsds5replicabinddn': binddn, + 'nsds5replicacredentials': bindpw, + 'nsds5replicabindmethod': args.get('bindmethod', 'simple'), + 'nsds5replicaroot': nsuffix, + 'nsds5replicaupdateschedule': '0000-2359 0123456', + 'description': description_format % (othhost, othport) + }) + if 'starttls' in args: + entry.setValues('nsds5replicatransportinfo', 'TLS') + entry.setValues('nsds5replicaport', str(othport)) + elif othsslport: + entry.setValues('nsds5replicatransportinfo', 'SSL') + entry.setValues('nsds5replicaport', str(othsslport)) + else: + entry.setValues('nsds5replicatransportinfo', 'LDAP') + entry.setValues('nsds5replicaport', str(othport)) + if 'fractional' in args: + entry.setValues('nsDS5ReplicatedAttributeList', args['fractional']) + if 'auto_init' in args: + entry.setValues('nsds5BeginReplicaRefresh', 'start') + if 'fractional' in args: + entry.setValues('nsDS5ReplicatedAttributeList', args['fractional']) 
+ if 'stripattrs' in args: + entry.setValues('nsds5ReplicaStripAttrs', args['stripattrs']) + + if 'winsync' in args: # state it clearly! + self.setupWinSyncAgmt(args, entry) + + try: + log.debug("Adding replica agreement: [%s]" % entry) + self.add_s(entry) + except: + # TODO check please! + raise + entry = self.waitForEntry(dn_agreement) + if entry: + self.suffixes.setdefault(nsuffix, {})[str(consumer)] = dn_agreement + # More verbose but shows what's going on + if 'chain' in args: + chain_args = { + 'suffix': suffix, + 'binddn': binddn, + 'bindpw': bindpw + } + # Work on `self` aka producer + if self.suffixes[nsuffix]['type'] == MASTER_TYPE: + self.setupChainingFarm(**chain_args) + # Work on `consumer` + # TODO - is it really required? + if consumer.suffixes[nsuffix]['type'] == LEAF_TYPE: + chain_args.update({ + 'isIntermediate': 0, + 'urls': self.toLDAPURL(), + 'args': args['chainargs'] + }) + consumer.setupConsumerChainOnUpdate(**chain_args) + elif consumer.suffixes[nsuffix]['type'] == HUB_TYPE: + chain_args.update({ + 'isIntermediate': 1, + 'urls': self.toLDAPURL(), + 'args': args['chainargs'] + }) + consumer.setupConsumerChainOnUpdate(**chain_args) + self.agmt.setdefault(nsuffix, {})[consumer] = dn_agreement + return dn_agreement + + + + + # moved to Replica + def setupReplica(self, args): + """Deprecated, use replica.add + """ + return self.replica.add(**args) + + def startReplication_async(self, agmtdn): + return self.replica.start_async(agmtdn) + + def checkReplInit(self, agmtdn): + return self.replica.check_init(agmtdn) + + def waitForReplInit(self, agmtdn): + return self.replica.wait_init(agmtdn) + + def startReplication(self, agmtdn): + return self.replica.start_and_wait(agmtdn) + + + def replicaSetupAll(self, repArgs): + """setup everything needed to enable replication for a given suffix. 
+ 1- eventually create the suffix + 2- enable replication logging + 3- create changelog + 4- create replica user + repArgs is a dict with the following fields: + { + suffix - suffix to set up for replication (eventually create) + optional fields and their default values + bename - name of backend corresponding to suffix, otherwise + it will use the *first* backend found (isn't that dangerous?) + parent - parent suffix if suffix is a sub-suffix - default is undef + ro - put database in read only mode - default is read write + type - replica type (MASTER_TYPE, HUB_TYPE, LEAF_TYPE) - default is master + legacy - make this replica a legacy consumer - default is no + + binddn - bind DN of the replication manager user - default is REPLBINDDN + bindpw - bind password of the repl manager - default is REPLBINDPW + + log - if true, replication logging is turned on - default false + id - the replica ID - default is an auto incremented number + } + + TODO: passing the repArgs as an object or as a **repArgs could be + a better documentation choiche + eg. replicaSetupAll(self, suffix, type=MASTER_TYPE, log=False, ...) + """ + + repArgs.setdefault('type', MASTER_TYPE) + user = repArgs.get('binddn'), repArgs.get('bindpw') + + # eventually create the suffix (Eg. 
o=userRoot) + # TODO should I check the addSuffix output as it doesn't raise + self.addSuffix(repArgs['suffix']) + if 'bename' not in repArgs: + entries_backend = self.getBackendsForSuffix( + repArgs['suffix'], ['cn']) + # just use first one + repArgs['bename'] = entries_backend[0].cn + if repArgs.get('log', False): + self.enableReplLogging() + + # enable changelog for master and hub + if repArgs['type'] != LEAF_TYPE: + self.replica.changelog() + # create replica user without timeout and expiration issues + try: + attrs = list(user) + attrs.append({ + 'nsIdleTimeout': '0', + 'passwordExpirationTime': '20381010000000Z' + }) + self.setupBindDN(*attrs) + except ldap.ALREADY_EXISTS: + log.warn("User already exists: %r " % user) + + # setup replica + repArgs['rtype'], repArgs['rid'] = repArgs['type'], repArgs['id'] + + # remove invalid arguments from replica.add + for invalid_arg in 'type id bename'.split(): + del repArgs[invalid_arg] + if 'log' in repArgs: + del repArgs['log'] + + ret = self.replica.add(**repArgs) + if 'legacy' in repArgs: + self.setupLegacyConsumer(*user) + + return ret + + def subtreePwdPolicy(self, basedn, pwdpolicy, verbose=False, **pwdargs): + args = {'basedn': basedn, 'escdn': escapeDNValue( + normalizeDN(basedn))} + condn = "cn=nsPwPolicyContainer,%(basedn)s" % args + poldn = "cn=cn\\=nsPwPolicyEntry\\,%(escdn)s,cn=nsPwPolicyContainer,%(basedn)s" % args + temdn = "cn=cn\\=nsPwTemplateEntry\\,%(escdn)s,cn=nsPwPolicyContainer,%(basedn)s" % args + cosdn = "cn=nsPwPolicy_cos,%(basedn)s" % args + conent = Entry(condn) + conent.setValues('objectclass', 'nsContainer') + polent = Entry(poldn) + polent.setValues('objectclass', ['ldapsubentry', 'passwordpolicy']) + tement = Entry(temdn) + tement.setValues('objectclass', ['extensibleObject', + 'costemplate', 'ldapsubentry']) + tement.setValues('cosPriority', '1') + tement.setValues('pwdpolicysubentry', poldn) + cosent = Entry(cosdn) + cosent.setValues('objectclass', ['ldapsubentry', + 'cosSuperDefinition', 
'cosPointerDefinition']) + cosent.setValues('cosTemplateDn', temdn) + cosent.setValues( + 'cosAttribute', 'pwdpolicysubentry default operational-default') + for ent in (conent, polent, tement, cosent): + try: + self.add_s(ent) + if verbose: + print "created subtree pwpolicy entry", ent.dn + except ldap.ALREADY_EXISTS: + print "subtree pwpolicy entry", ent.dn, "already exists - skipping" + self.setPwdPolicy({'nsslapd-pwpolicy-local': 'on'}) + self.setDNPwdPolicy(poldn, pwdpolicy, **pwdargs) + + def userPwdPolicy(self, user, pwdpolicy, verbose=False, **pwdargs): + ary = ldap.explode_dn(user) + par = ','.join(ary[1:]) + escuser = escapeDNValue(normalizeDN(user)) + args = {'par': par, 'udn': user, 'escudn': escuser} + condn = "cn=nsPwPolicyContainer,%(par)s" % args + poldn = "cn=cn\\=nsPwPolicyEntry\\,%(escudn)s,cn=nsPwPolicyContainer,%(par)s" % args + conent = Entry(condn) + conent.setValues('objectclass', 'nsContainer') + polent = Entry(poldn) + polent.setValues('objectclass', ['ldapsubentry', 'passwordpolicy']) + for ent in (conent, polent): + try: + self.add_s(ent) + if verbose: + print "created user pwpolicy entry", ent.dn + except ldap.ALREADY_EXISTS: + print "user pwpolicy entry", ent.dn, "already exists - skipping" + mod = [(ldap.MOD_REPLACE, 'pwdpolicysubentry', poldn)] + self.modify_s(user, mod) + self.setPwdPolicy({'nsslapd-pwpolicy-local': 'on'}) + self.setDNPwdPolicy(poldn, pwdpolicy, **pwdargs) + + def setPwdPolicy(self, pwdpolicy, **pwdargs): + self.setDNPwdPolicy(DN_CONFIG, pwdpolicy, **pwdargs) + + def setDNPwdPolicy(self, dn, pwdpolicy, **pwdargs): + """input is dict of attr/vals""" + mods = [] + for (attr, val) in pwdpolicy.iteritems(): + mods.append((ldap.MOD_REPLACE, attr, str(val))) + if pwdargs: + for (attr, val) in pwdargs.iteritems(): + mods.append((ldap.MOD_REPLACE, attr, str(val))) + self.modify_s(dn, mods) + + + # Moved to config + # replaced by loglevel + def enableReplLogging(self): + """Enable logging of replication stuff (1<<13)""" + val 
= LOG_REPLICA + return self.config.loglevel([val]) + + def disableReplLogging(self): + return self.config.loglevel() + + def setLogLevel(self, *vals): + """Set nsslapd-errorlog-level and return its value.""" + return self.config.loglevel(vals) + + def setAccessLogLevel(self, *vals): + """Set nsslapd-accesslog-level and return its value.""" + return self.config.loglevel(vals, level='access') + + def configSSL(self, secport=636, secargs=None): + """Configure SSL support into cn=encryption,cn=config. + + secargs is a dict like { + 'nsSSLPersonalitySSL': 'Server-Cert' + } + + XXX moved to brooker.Config + """ + return self.config.enable_ssl(secport, secargs) + diff --git a/lib389/_constants.py b/lib389/_constants.py new file mode 100644 index 0000000..3103587 --- /dev/null +++ b/lib389/_constants.py @@ -0,0 +1,50 @@ +# replicatype @see https://access.redhat.com/knowledge/docs/en-US/Red_Hat_Directory_Server/8.1/html/Administration_Guide/Managing_Replication-Configuring-Replication-cmd.html +# 2 for consumers and hubs (read-only replicas) +# 3 for both single and multi-master suppliers (read-write replicas) +# TODO: let's find a way to be consistent - eg. 
using bitwise operator +(MASTER_TYPE, + HUB_TYPE, + LEAF_TYPE) = range(3) + +REPLICA_RDONLY_TYPE = 2 # CONSUMER and HUB +REPLICA_WRONLY_TYPE = 1 # SINGLE and MULTI MASTER +REPLICA_RDWR_TYPE = REPLICA_RDONLY_TYPE | REPLICA_WRONLY_TYPE + + +CFGSUFFIX = "o=NetscapeRoot" +DEFAULT_USER = "nobody" + +# Some DN constants +DN_DM = "cn=Directory Manager" +DN_CONFIG = "cn=config" +DN_LDBM = "cn=ldbm database,cn=plugins,cn=config" +DN_MAPPING_TREE = "cn=mapping tree,cn=config" +DN_CHAIN = "cn=chaining database,cn=plugins,cn=config" +DN_CHANGELOG = "cn=changelog5,cn=config" + +# +# constants +# +DEFAULT_USER = "nobody" + +# +# LOG: see https://access.redhat.com/site/documentation/en-US/Red_Hat_Directory_Server/9.0/html/Administration_Guide/Configuring_Logs.html +# The default log level is 16384 +# +( +LOG_TRACE, +LOG_TRACE_PACKETS, +LOG_TRACE_HEAVY, +LOG_CONNECT, +LOG_PACKET, +LOG_SEARCH_FILTER, +LOG_CONFIG_PARSER, +LOG_ACL, +LOG_ENTRY_PARSER, +LOG_HOUSEKEEPING, +LOG_REPLICA, +LOG_DEFAULT, +LOG_CACHE, +LOG_PLUGIN, +LOG_MICROSECONDS, +LOG_ACL_SUMMARY) = [ 1< (dn, {dict...} ) + * or a reference -> (None, reference) + * or None. + + If creating a new empty entry, data is the string DN. 
+ """ + self.ref = None + if entrydata: + if isinstance(entrydata, tuple): + if entrydata[0] is None: + self.ref = entrydata[1] # continuation reference + else: + self.dn = entrydata[0] + self.data = cidict(entrydata[1]) + elif isinstance(entrydata, basestring): + if not '=' in entrydata: + raise ValueError('Entry dn must contain "="') + + self.dn = entrydata + self.data = cidict() + else: + # + self.dn = '' + self.data = cidict() + + def __nonzero__(self): + """This allows us to do tests like if entry: returns false if there is no data, + true otherwise""" + return self.data is not None and len(self.data) > 0 + + def hasAttr(self, name): + """Return True if this entry has an attribute named name, False otherwise""" + return self.data and name in self.data + + def __getattr__(self, name): + """If name is the name of an LDAP attribute, return the first value for that + attribute - equivalent to getValue - this allows the use of + entry.cn + instead of + entry.getValue('cn') + This also allows us to return None if an attribute is not found rather than + throwing an exception""" + if name == 'dn' or name == 'data': + return self.__dict__.get(name, None) + return self.getValue(name) + + def getValues(self, name): + """Get the list (array) of values for the attribute named name""" + return self.data.get(name, []) + + def getValue(self, name): + """Get the first value for the attribute named name""" + return self.data.get(name, [None])[0] + + def hasValue(self, name, val=None): + """True if the given attribute is present and has the given value + + TODO: list comparison preserves order: should I use a set? 
+ """ + if not self.hasAttr(name): + return False + if not val: + return True + if isinstance(val, list): + return val == self.data.get(name) + if isinstance(val, tuple): + return list(val) == self.data.get(name) + return val in self.data.get(name) + + def hasValueCase(self, name, val): + """True if the given attribute is present and has the given value - case insensitive value match""" + if not self.hasAttr(name): + return False + return val.lower() in [x.lower() for x in self.data.get(name)] + + def setValue(self, name, *value): + """Value passed in may be a single value, several values, or a single sequence. + For example: + ent.setValue('name', 'value') + ent.setValue('name', 'value1', 'value2', ..., 'valueN') + ent.setValue('name', ['value1', 'value2', ..., 'valueN']) + ent.setValue('name', ('value1', 'value2', ..., 'valueN')) + Since *value is a tuple, we may have to extract a list or tuple from that + tuple as in the last two examples above""" + if isinstance(value[0], list) or isinstance(value[0], tuple): + self.data[name] = value[0] + else: + self.data[name] = value + + def getAttrs(self): + if not self.data: + return [] + return self.data.keys() + + def iterAttrs(self, attrsOnly=False): + if attrsOnly: + return self.data.iterkeys() + else: + return self.data.iteritems() + + setValues = setValue + + def toTupleList(self): + """Convert the attrs and values to a list of 2-tuples. The first element + of the tuple is the attribute name. 
The second element is either a + single value or a list of values.""" + return self.data.items() + + def getref(self): + return self.ref + + def __str__(self): + """Convert the Entry to its LDIF representation""" + return self.__repr__() + + def update(self, dct): + """Update passthru to the data attribute.""" + log.debug("update dn: %r with %r" % (self.dn, dct)) + for k, v in dct.items(): + if hasattr(v, '__iter__'): + self.data[k] = v + else: + self.data[k] = [v] + + def __repr__(self): + """Convert the Entry to its LDIF representation""" + sio = cStringIO.StringIO() + # what's all this then? the unparse method will currently only accept + # a list or a dict, not a class derived from them. self.data is a + # cidict, so unparse barfs on it. I've filed a bug against python-ldap, + # but in the meantime, we have to convert to a plain old dict for printing + # I also don't want to see wrapping, so set the line width really high (1000) + newdata = {} + newdata.update(self.data) + ldif.LDIFWriter( + sio, Entry.base64_attrs, 1000).unparse(self.dn, newdata) + return sio.getvalue() diff --git a/lib389/_ldifconn.py b/lib389/_ldifconn.py new file mode 100644 index 0000000..1be9069 --- /dev/null +++ b/lib389/_ldifconn.py @@ -0,0 +1,43 @@ +__all__ = ['LDIFConn'] +import ldif +from lib389._entry import Entry +from lib389.utils import normalizeDN + +class LDIFConn(ldif.LDIFParser): + def __init__( + self, + input_file, + ignored_attr_types=None, max_entries=0, process_url_schemes=None + ): + """ + See LDIFParser.__init__() + + Additional Parameters: + all_records + List instance for storing parsed records + """ + self.dndict = {} # maps dn to Entry + self.dnlist = [] # contains entries in order read + myfile = input_file + if isinstance(input_file, basestring): + myfile = open(input_file, "r") + ldif.LDIFParser.__init__(self, myfile, ignored_attr_types, + max_entries, process_url_schemes) + self.parse() + if isinstance(input_file, basestring): + myfile.close() + + def 
handle(self, dn, entry): + """ + Append single record to dictionary of all records. + """ + if not dn: + dn = '' + newentry = Entry((dn, entry)) + self.dndict[normalizeDN(dn)] = newentry + self.dnlist.append(newentry) + + def get(self, dn): + ndn = normalizeDN(dn) + return self.dndict.get(ndn, Entry(None)) + diff --git a/lib389/_replication.py b/lib389/_replication.py new file mode 100644 index 0000000..fc9e8cb --- /dev/null +++ b/lib389/_replication.py @@ -0,0 +1,175 @@ + +class CSN(object): + """CSN is Change Sequence Number + csn.ts is the timestamp (time_t - seconds) + csn.seq is the sequence number (max 65535) + csn.rid is the replica ID of the originating master + csn.subseq is not currently used""" + import re + import time + csnpat = r'(.{8})(.{4})(.{4})(.{4})' + csnre = re.compile(csnpat) + + def __init__(self, csnstr): + match = CSN.csnre.match(csnstr) + self.ts = 0 + self.seq = 0 + self.rid = 0 + self.subseq = 0 + if match: + self.ts = int(match.group(1), 16) + self.seq = int(match.group(2), 16) + self.rid = int(match.group(3), 16) + self.subseq = int(match.group(4), 16) + elif csnstr: + self.ts = 0 + self.seq = 0 + self.rid = 0 + self.subseq = 0 + log.info("%r is not a valid CSN" % csnstr) + + def csndiff(self, oth): + return (oth.ts - self.ts, oth.seq - self.seq, oth.rid - self.rid, oth.subseq - self.subseq) + + def __cmp__(self, oth): + if self is oth: + return 0 + (tsdiff, seqdiff, riddiff, subseqdiff) = self.csndiff(oth) + + diff = tsdiff or seqdiff or riddiff or subseqdiff + ret = 0 + if diff > 0: + ret = 1 + elif diff < 0: + ret = -1 + return ret + + def __eq__(self, oth): + return cmp(self, oth) == 0 + + def diff2str(self, oth): + retstr = '' + diff = oth.ts - self.ts + if diff > 0: + td = datetime.timedelta(seconds=diff) + retstr = "is behind by %s" % td + elif diff < 0: + td = datetime.timedelta(seconds=-diff) + retstr = "is ahead by %s" % td + else: + diff = oth.seq - self.seq + if diff: + retstr = "seq differs by %d" % diff + elif self.rid != 
class RUV(object):
    """RUV is Replica Update Vector

    ruv.gen is the generation CSN
    ruv.rid[1] through ruv.rid[N] are dicts - the number (1-N) is the replica ID
    ruv.rid[N][url] is the purl
    ruv.rid[N][min] is the min csn
    ruv.rid[N][max] is the max csn
    ruv.rid[N][lastmod] is the last modified timestamp

    example ruv attr:
    nsds50ruv: {replicageneration} 3b0ebc7f000000010000
    nsds50ruv: {replica 1 ldap://myhost:51010} 3b0ebc9f000000010000 3b0ebef7000000010000
    nsruvReplicaLastModified: {replica 1 ldap://myhost:51010} 292398402093

    if the tryrepl flag is true, if getting the ruv from the suffix fails, try getting
    the ruv from the cn=replica entry
    """
    # class-level imports so the regexps can be compiled in the class body
    import re
    import time
    pre_gen = r'\{replicageneration\}\s+(\w+)'
    re_gen = re.compile(pre_gen)
    pre_ruv = r'\{replica\s+(\d+)\s+(.+?)\}\s*(\w*)\s*(\w*)'
    re_ruv = re.compile(pre_ruv)

    def __init__(self, ent):
        """Parse the RUV attributes out of an entry.

        @param ent - an Entry holding nsds50ruv (and optionally
            nsruvReplicaLastModified) values
        """
        # rid is a dict
        # key is replica ID - val is dict of url, min csn, max csn
        self.rid = {}
        for item in ent.getValues('nsds50ruv'):
            matchgen = RUV.re_gen.match(item)
            matchruv = RUV.re_ruv.match(item)
            if matchgen:
                self.gen = CSN(matchgen.group(1))
            elif matchruv:
                rid = int(matchruv.group(1))
                self.rid[rid] = {'url': matchruv.group(2),
                                 'min': CSN(matchruv.group(3)),
                                 'max': CSN(matchruv.group(4))}
            else:
                log.info("unknown RUV element %r" % item)
        # Bug fix: the attribute name was garbled ("nsre_ruvplicaLastModified");
        # Replica.ruv() retrieves "nsruvReplicaLastModified", so this loop
        # could never see any value under the old name.
        for item in ent.getValues('nsruvReplicaLastModified'):
            matchruv = RUV.re_ruv.match(item)
            if matchruv:
                rid = int(matchruv.group(1))
                # lastmod is a hex timestamp
                self.rid[rid]['lastmod'] = int(matchruv.group(3), 16)
            else:
                log.info("unknown nsruvReplicaLastModified item %r" % item)

    def __cmp__(self, oth):
        """Order RUVs by generation first, then by each rid's max/min CSNs."""
        if self is oth:
            return 0
        if not self:
            return -1  # None is less than something
        if not oth:
            return 1  # something is greater than None
        diff = cmp(self.gen, oth.gen)
        if diff:
            return diff
        for rid in self.rid.keys():
            for item in ('max', 'min'):
                csn = self.rid[rid][item]
                csnoth = oth.rid[rid][item]
                diff = cmp(csn, csnoth)
                if diff:
                    return diff
        return 0

    def __eq__(self, oth):
        return cmp(self, oth) == 0

    def getdiffs(self, oth):
        """Compare two ruvs and return the differences
        returns a tuple - the first element is the
        result of cmp() - the second element is a string
        """
        if self is oth:
            return (0, "\tRUVs are the same")
        if not self:
            return (-1, "\tfirst RUV is empty")
        if not oth:
            return (1, "\tsecond RUV is empty")
        diff = cmp(self.gen, oth.gen)
        if diff:
            return (diff, "\tgeneration [" + str(self.gen) + "] not equal to [" + str(oth.gen) + "]: likely not yet initialized")
        retstr = ''
        for rid in self.rid.keys():
            for item in ('max', 'min'):
                csn = self.rid[rid][item]
                csnoth = oth.rid[rid][item]
                csndiff = cmp(csn, csnoth)
                if csndiff:
                    if len(retstr):
                        retstr += "\n"
                    retstr += "\trid %d %scsn %s\n\t[%s] vs [%s]" % (rid, item, csn.diff2str(csnoth),
                                                                    csn, csnoth)
                    if not diff:
                        diff = csndiff
        if not diff:
            retstr = "\tup-to-date - RUVs are equal"
        return (diff, retstr)
+ Stuff is split in classes, like: + * Replica + * Backend + * Suffix + + You will access this from: + DSAdmin.backend.methodName() +""" +import ldap +import os +import re +import time + + +from lib389._constants import * +from lib389 import Entry, DSAdmin +from lib389.utils import normalizeDN, escapeDNValue, suffixfilt +from lib389 import ( + NoSuchEntryError +) + +from lib389._constants import ( + DN_CHANGELOG, + DN_MAPPING_TREE, + DN_CHAIN, DN_LDBM, + MASTER_TYPE, + HUB_TYPE, + LEAF_TYPE, + REPLICA_RDONLY_TYPE, + REPLICA_RDWR_TYPE +) + +from lib389._replication import RUV, CSN +from lib389._entry import FormatDict + + +class Replica(object): + proxied_methods = 'search_s getEntry'.split() + STOP = '2358-2359 0' + START = '0000-2359 0123456' + ALWAYS = None + + def __init__(self, conn): + """@param conn - a DSAdmin instance""" + self.conn = conn + self.log = conn.log + + def __getattr__(self, name): + if name in Replica.proxied_methods: + return DSAdmin.__getattr__(self.conn, name) + + def _get_mt_entry(self, suffix): + """Return the replica dn of the given suffix.""" + mtent = self.conn.getMTEntry(suffix) + return ','.join(("cn=replica", mtent.dn)) + + def changelog(self, dbname='changelogdb'): + """Add and return the replication changelog entry. + + If dbname starts with "/" then it's considered a full path, + otherwise it's relative to self.dbdir + """ + dn = DN_CHANGELOG + dirpath = os.path.join(self.conn.dbdir, dbname) + entry = Entry(dn) + entry.update({ + 'objectclass': ("top", "extensibleobject"), + 'cn': "changelog5", + 'nsslapd-changelogdir': dirpath + }) + self.log.debug("adding changelog entry: %r" % entry) + try: + self.conn.add_s(entry) + except ldap.ALREADY_EXISTS: + self.log.warn("entry %s already exists" % dn) + + return self.conn._test_entry(dn, ldap.SCOPE_BASE) + + def list(self, suffix=None): + """Return a list of replica entries under the given suffix. 
+ @param suffix - if suffix is None, return all replicas + """ + if suffix: + filtr = "(&(objectclass=nsds5Replica)(nsds5replicaroot=%s))" % suffix + else: + filtr = "(objectclass=nsds5Replica)" + ents = self.conn.search_s(DN_MAPPING_TREE, ldap.SCOPE_SUBTREE, filtr) + return ents + + def check_init(self, agmtdn): + """returns tuple - first element is done/not done, 2nd is no error/has error + @param agmtdn - the agreement dn + """ + done, hasError = False, 0 + attrlist = ['cn', 'nsds5BeginReplicaRefresh', 'nsds5replicaUpdateInProgress', + 'nsds5ReplicaLastInitStatus', 'nsds5ReplicaLastInitStart', + 'nsds5ReplicaLastInitEnd'] + try: + entry = self.conn.getEntry( + agmtdn, ldap.SCOPE_BASE, "(objectclass=*)", attrlist) + except NoSuchEntryError: + self.log.exception("Error reading status from agreement %r" % agmtdn) + hasError = 1 + else: + refresh = entry.nsds5BeginReplicaRefresh + inprogress = entry.nsds5replicaUpdateInProgress + status = entry.nsds5ReplicaLastInitStatus + if not refresh: # done - check status + if not status: + print "No status yet" + elif status.find("replica busy") > -1: + print "Update failed - replica busy - status", status + done = True + hasError = 2 + elif status.find("Total update succeeded") > -1: + print "Update succeeded: status ", status + done = True + elif inprogress.lower() == 'true': + print "Update in progress yet not in progress: status ", status + else: + print "Update failed: status", status + hasError = 1 + done = True + elif self.verbose: + print "Update in progress: status", status + + return done, hasError + + def start_and_wait(self, agmtdn): + """@param agmtdn - agreement dn""" + rc = self.start_async(agmtdn) + if not rc: + rc = self.wait_init(agmtdn) + if rc == 2: # replica busy - retry + rc = self.start_and_wait(agmtdn) + return rc + + def wait_init(self, agmtdn): + """Initialize replication and wait for completion. 
+ @oaram agmtdn - agreement dn + """ + done = False + haserror = 0 + while not done and not haserror: + time.sleep(1) # give it a few seconds to get going + done, haserror = self.check_init(agmtdn) + return haserror + + def start_async(self, agmtdn): + """Initialize replication without waiting. + @param agmtdn - agreement dn + """ + self.log.info("Starting async replication %s" % agmtdn) + mod = [(ldap.MOD_ADD, 'nsds5BeginReplicaRefresh', 'start')] + self.conn.modify_s(agmtdn, mod) + + def stop(self, agmtdn): + """Stop replication. + @param agmtdn - agreement dn + """ + self.log.info("Stopping replication %s" % agmtdn) + mod = [( + ldap.MOD_REPLACE, 'nsds5replicaupdateschedule', [Replica.STOP])] + self.conn.modify_s(agmtdn, mod) + + def restart(self, agmtdn, schedule=START): + """Schedules a new replication. + @param agmtdn - + @param schedule - default START + `schedule` allows to customize the replication instant. + see 389 documentation for further info + """ + self.log.info("Restarting replication %s" % agmtdn) + mod = [(ldap.MOD_REPLACE, 'nsds5replicaupdateschedule', [ + schedule])] + self.modify_s(agmtdn, mod) + + def keep_in_sync(self, agmtdn): + """ + @param agmtdn - + """ + self.log.info("Setting agreement for continuous replication") + raise NotImplementedError("Check nsds5replicaupdateschedule before writing!") + + def status(self, agreement_dn): + """Return a formatted string with the replica status. 
+ @param agreement_dn - + """ + + attrlist = ['cn', 'nsds5BeginReplicaRefresh', 'nsds5replicaUpdateInProgress', + 'nsds5ReplicaLastInitStatus', 'nsds5ReplicaLastInitStart', + 'nsds5ReplicaLastInitEnd', 'nsds5replicaReapActive', + 'nsds5replicaLastUpdateStart', 'nsds5replicaLastUpdateEnd', + 'nsds5replicaChangesSentSinceStartup', 'nsds5replicaLastUpdateStatus', + 'nsds5replicaChangesSkippedSinceStartup', 'nsds5ReplicaHost', + 'nsds5ReplicaPort'] + try: + ent = self.conn.getEntry( + agreement_dn, ldap.SCOPE_BASE, "(objectclass=*)", attrlist) + except NoSuchEntryError: + raise NoSuchEntryError( + "Error reading status from agreement", agreement_dn) + else: + retstr = ( + "Status for %(cn)s agmt %(nsDS5ReplicaHost)s:%(nsDS5ReplicaPort)s" "\n" + "Update in progress: %(nsds5replicaUpdateInProgress)s" "\n" + "Last Update Start: %(nsds5replicaLastUpdateStart)s" "\n" + "Last Update End: %(nsds5replicaLastUpdateEnd)s" "\n" + "Num. Changes Sent: %(nsds5replicaChangesSentSinceStartup)s" "\n" + "Num. changes Skipped: %(nsds5replicaChangesSkippedSinceStartup)s" "\n" + "Last update Status: %(nsds5replicaLastUpdateStatus)s" "\n" + "Init in progress: %(nsds5BeginReplicaRefresh)s" "\n" + "Last Init Start: %(nsds5ReplicaLastInitStart)s" "\n" + "Last Init End: %(nsds5ReplicaLastInitEnd)s" "\n" + "Last Init Status: %(nsds5ReplicaLastInitStatus)s" "\n" + "Reap Active: %(nsds5ReplicaReapActive)s" "\n" + ) + # FormatDict manages missing fields in string formatting + return retstr % FormatDict(ent.data) + + def add(self, suffix, binddn, bindpw, rtype=MASTER_TYPE, rid=None, tombstone_purgedelay=None, purgedelay=None, referrals=None, legacy=False): + """Setup a replica entry on an existing suffix. 
+ @param suffix - dn of suffix + @param binddn - the replication bind dn for this replica + can also be a list ["cn=r1,cn=config","cn=r2,cn=config"] + @param bindpw - used to eventually provision the replication entry + + @param rtype - master, hub, leaf (see above for values) - default is master + @param rid - replica id or - if not given - an internal sequence number will be assigned + + # further args + @param legacy - true or false - for legacy consumer + @param tombstone_purgedelay + @param purgedelay - changelog expiration time in seconds + @param referrals + + Ex. replica.add(**{ + 'suffix': "dc=example,dc=com", + 'type' : lib389.MASTER_TYPE, + 'binddn': "cn=replication manager,cn=config" + }) + binddn + TODO: this method does not update replica type + """ + # set default values + if rtype == MASTER_TYPE: + rtype = REPLICA_RDWR_TYPE + else: + rtype = REPLICA_RDONLY_TYPE + + if legacy: + legacy = 'on' + else: + legacy = 'off' + + # create replica entry in mapping-tree + nsuffix = normalizeDN(suffix) + mtent = self.conn.getMTEntry(suffix) + dn_replica = ','.join(("cn=replica", mtent.dn)) + try: + entry = self.conn.getEntry(dn_replica, ldap.SCOPE_BASE) + self.log.warn("Already setup replica for suffix %r" % suffix) + rec = self.conn.suffixes.setdefault(nsuffix, {}) + rec.update({'dn': dn_replica, 'type': rtype}) + return rec + except ldap.NO_SUCH_OBJECT: + entry = None + + # If a replica does not exist + binddnlist = [] + if hasattr(binddn, '__iter__'): + binddnlist = binddn + else: + binddnlist.append(binddn) + + entry = Entry(dn_replica) + entry.update({ + 'objectclass': ("top", "nsds5replica", "extensibleobject"), + 'cn': "replica", + 'nsds5replicaroot': nsuffix, + 'nsds5replicaid': str(rid), + 'nsds5replicatype': str(rtype), + 'nsds5replicalegacyconsumer': legacy, + 'nsds5replicabinddn': binddnlist + }) + if rtype != LEAF_TYPE: + entry.setValues('nsds5flags', "1") + + # other args + if tombstone_purgedelay is not None: + entry.setValues( + 
'nsds5replicatombstonepurgeinterval', str(tombstone_purgedelay)) + if purgedelay is not None: + entry.setValues('nsds5ReplicaPurgeDelay', str(purgedelay)) + if referrals: + entry.setValues('nsds5ReplicaReferral', referrals) + + self.conn.add_s(entry) + + # check if the entry exists TODO better to raise! + self.conn._test_entry(dn_replica, ldap.SCOPE_BASE) + + self.conn.suffixes[nsuffix] = {'dn': dn_replica, 'type': rtype} + return {'dn': dn_replica, 'type': rtype} + + def ruv(self, suffix, tryrepl=False): + """return a replica update vector for the given suffix. + + @param suffix - eg. 'o=netscapeRoot' + + @raises NoSuchEntryError if missing + """ + uuid = "ffffffff-ffffffff-ffffffff-ffffffff" + filt = "(&(nsUniqueID=%s)(objectclass=nsTombstone))" % uuid + attrs = ['nsds50ruv', 'nsruvReplicaLastModified'] + ents = self.conn.search_s(suffix, ldap.SCOPE_SUBTREE, filt, attrs) + ent = None + if ents and (len(ents) > 0): + ent = ents[0] + elif tryrepl: + self.log.warn("Could not get RUV from %r entry - trying cn=replica" % suffix) + ensuffix = escapeDNValue(normalizeDN(suffix)) + dn = ','.join(("cn=replica", "cn=%s" % ensuffix, DN_MAPPING_TREE)) + ents = self.conn.search_s(dn, ldap.SCOPE_BASE, "objectclass=*", attrs) + + if ents and (len(ents) > 0): + ent = ents[0] + self.log.debug("RUV entry is %r" % ent) + return RUV(ent) + + raise NoSuchEntryError("RUV not found: suffix: %r" % suffix) + + def agreements(self, filtr='', attrs=None, dn=True): + """Return a list of agreement dn. + @param filtr - get only agreements matching the given filter + eg. '(cn=*example.it*)' + @param attrs - attributes to retrieve + eg. 
use ['*'] for all, defaul is ['cn'] + @param dn - return a list of lib389.Entry if dn=False + + """ + attrs = attrs or ['cn'] + realfiltr = "(objectclass=nsds5ReplicationAgreement)" + if filtr: + realfiltr = "(&%s%s)" % (realfiltr, filtr) + + ents = self.conn.search_s( + DN_MAPPING_TREE, ldap.SCOPE_SUBTREE, realfiltr, attrs) + if dn: + return [ent.dn for ent in ents] + return ents + + def agreement_add(self, consumer, suffix=None, binddn=None, bindpw=None, cn_format=r'meTo_$host:$port', description_format=r'me to $host:$port', timeout=120, auto_init=False, bindmethod='simple', starttls=False, schedule=ALWAYS, args=None): + """Create (and return) a replication agreement from self to consumer. + - self is the supplier, + + @param consumer: one of the following (consumer can be a master) + * a DSAdmin object if chaining + * an object with attributes: host, port, sslport, __str__ + @param suffix - eg. 'dc=babel,dc=it' + @param binddn - + @param bindpw - + @param cn_format - string.Template to format the agreement name + @param timeout - replica timeout in seconds + @param auto_init - start replication immediately + @param bindmethod- 'simple' + @param starttls - True or False + @param schedule - when to schedule the replication. default: ALWAYS + @param args - further args dict. Allowed keys: + 'fractional', + 'stripattrs', + 'winsync' + + @raise NosuchEntryError - if a replica doesn't exist for that suffix + @raise ALREADY_EXISTS + @raise UNWILLING_TO_PERFORM if the database was previously + in read-only state. 
To create new agreements you + need to *restart* the directory server + + NOTE: this method doesn't cache connection entries + + TODO: test winsync + TODO: test chain + + """ + import string + assert binddn and bindpw and suffix + args = args or {} + + othhost, othport, othsslport = ( + consumer.host, consumer.port, consumer.sslport) + othport = othsslport or othport + nsuffix = normalizeDN(suffix) + + # adding agreement to previously created replica + replica_entries = self.list(suffix) + if not replica_entries: + raise NoSuchEntryError( + "Error: no replica set up for suffix " + suffix) + replica = replica_entries[0] + + # define agreement entry + cn = string.Template(cn_format).substitute({'host': othhost, 'port': othport}) + dn_agreement = ','.join(["cn=%s" % cn, replica.dn]) + + # This is probably unnecessary because + # we can just raise ALREADY_EXISTS + try: + entry = self.conn.getEntry(dn_agreement, ldap.SCOPE_BASE) + self.log.warn("Agreement exists: %r" % dn_agreement) + raise ldap.ALREADY_EXISTS + except ldap.NO_SUCH_OBJECT: + entry = None + + # In a separate function in this scope? 
+ entry = Entry(dn_agreement) + entry.update({ + 'objectclass': ["top", "nsds5replicationagreement"], + 'cn': cn, + 'nsds5replicahost': consumer.host, + 'nsds5replicatimeout': str(timeout), + 'nsds5replicabinddn': binddn, + 'nsds5replicacredentials': bindpw, + 'nsds5replicabindmethod': bindmethod, + 'nsds5replicaroot': nsuffix, + 'description': string.Template(description_format).substitute({'host': othhost, 'port': othport}) + }) + if schedule: + if not re.match(r'\d{4}-\d{4} [0-6]{1,7}', schedule): # TODO put the regexp in a separate variable + raise ValueError("Bad schedule format %r" % schedule) + entry.update({'nsds5replicaupdateschedule': schedule}) + if starttls: + entry.setValues('nsds5replicatransportinfo', 'TLS') + entry.setValues('nsds5replicaport', str(othport)) + elif othsslport: + entry.setValues('nsds5replicatransportinfo', 'SSL') + entry.setValues('nsds5replicaport', str(othsslport)) + else: + entry.setValues('nsds5replicatransportinfo', 'LDAP') + entry.setValues('nsds5replicaport', str(othport)) + + if auto_init: + entry.setValues('nsds5BeginReplicaRefresh', 'start') + + # further arguments + if 'fractional' in args: + entry.setValues('nsDS5ReplicatedAttributeList', args['fractional']) + if 'stripattrs' in args: + entry.setValues('nsds5ReplicaStripAttrs', args['stripattrs']) + if 'winsync' in args: # state it clearly! + self.conn.setupWinSyncAgmt(args, entry) + + try: + self.log.debug("Adding replica agreement: [%s]" % entry) + self.conn.add_s(entry) + except: + # FIXME check please! + raise + + entry = self.conn.waitForEntry(dn_agreement) + if entry: + # More verbose but shows what's going on + if 'chain' in args: + chain_args = { + 'suffix': suffix, + 'binddn': binddn, + 'bindpw': bindpw + } + # Work on `self` aka producer + if replica.nsds5replicatype == MASTER_TYPE: + self.setupChainingFarm(**chain_args) + # Work on `consumer` + # TODO - is it really required? 
+ if replica.nsds5replicatype == LEAF_TYPE: + chain_args.update({ + 'isIntermediate': 0, + 'urls': self.conn.toLDAPURL(), + 'args': args['chainargs'] + }) + consumer.setupConsumerChainOnUpdate(**chain_args) + elif replica.nsds5replicatype == HUB_TYPE: + chain_args.update({ + 'isIntermediate': 1, + 'urls': self.conn.toLDAPURL(), + 'args': args['chainargs'] + }) + consumer.setupConsumerChainOnUpdate(**chain_args) + + return dn_agreement + + def agreement_changes(self, agmtdn): + """Return a list of changes sent by this agreement.""" + retval = 0 + try: + ent = self.conn.getEntry( + agmtdn, ldap.SCOPE_BASE, "(objectclass=*)", + ['nsds5replicaChangesSentSinceStartup']) + except: + raise NoSuchEntryError( + "Error reading status from agreement", agmtdn) + + if ent.nsds5replicaChangesSentSinceStartup: + val = ent.nsds5replicaChangesSentSinceStartup + items = val.split(' ') + if len(items) == 1: + retval = int(items[0]) + else: + for item in items: + ary = item.split(":") + if ary and len(ary) > 1: + retval = retval + int(ary[1].split("/")[0]) + return retval + + +class Config(object): + """ + Manage "cn=config" tree, including: + - enable SSL + - set access and error logging + - get and set "cn=config" attributes + """ + def __init__(self, conn): + """@param conn - a DSAdmin instance """ + self.conn = conn + self.log = conn.log + + def set(self, key, value): + """Set a parameter under cn=config + @param key - the attribute name + @param value - attribute value as string + + eg. set('passwordExp', 'on') + """ + self.log.debug("set(%r, %r)" % (key, value)) + return self.conn.modify(DN_CONFIG, + [(ldap.MOD_REPLACE, key, value)]) + + def get(self, key): + """Get an attribute under cn=config""" + return self.conn.getEntry(DN_CONFIG).__getattr__(key) + + def loglevel(self, vals=(LOG_DEFAULT,), level='error', update=False): + """Set the access or error log level. + @param vals - a list of log level codes (eg. 
lib389.LOG_*) + defaults to LOG_DEFAULT + @param level - 'access' or 'error' + @param update - False for replace (default), True for update + + ex. loglevel([lib389.LOG_DEFAULT, lib389.LOG_ENTRY_PARSER]) + """ + level = 'nsslapd-%slog-level' % level + assert len(vals) > 0, "set at least one log level" + tot = 0 + for v in vals: + tot |= v + + if update: + old = int(self.get(level)) + tot |= old + self.log.debug("Update %s value: %r -> %r" % (level, old, tot)) + else: + self.log.debug("Replace %s with value: %r" % (level, tot)) + + self.set(level, str(tot)) + return tot + + def enable_ssl(self, secport=636, secargs=None): + """Configure SSL support into cn=encryption,cn=config. + + secargs is a dict like { + 'nsSSLPersonalitySSL': 'Server-Cert' + } + """ + self.log.debug("configuring SSL with secargs:%r" % secargs) + secargs = secargs or {} + + dn_enc = 'cn=encryption,cn=config' + ciphers = '-rsa_null_md5,+rsa_rc4_128_md5,+rsa_rc4_40_md5,+rsa_rc2_40_md5,+rsa_des_sha,' + \ + '+rsa_fips_des_sha,+rsa_3des_sha,+rsa_fips_3des_sha,' + \ + '+tls_rsa_export1024_with_rc4_56_sha,+tls_rsa_export1024_with_des_cbc_sha' + mod = [(ldap.MOD_REPLACE, 'nsSSL3', secargs.get('nsSSL3', 'on')), + (ldap.MOD_REPLACE, 'nsSSLClientAuth', + secargs.get('nsSSLClientAuth', 'allowed')), + (ldap.MOD_REPLACE, 'nsSSL3Ciphers', secargs.get('nsSSL3Ciphers', ciphers))] + self.conn.modify_s(dn_enc, mod) + + dn_rsa = 'cn=RSA,cn=encryption,cn=config' + e_rsa = Entry(dn_rsa) + e_rsa.update({ + 'objectclass': ['top', 'nsEncryptionModule'], + 'nsSSLPersonalitySSL': secargs.get('nsSSLPersonalitySSL', 'Server-Cert'), + 'nsSSLToken': secargs.get('nsSSLToken', 'internal (software)'), + 'nsSSLActivation': secargs.get('nsSSLActivation', 'on') + }) + try: + self.conn.add_s(e_rsa) + except ldap.ALREADY_EXISTS: + pass + + mod = [ + (ldap.MOD_REPLACE, + 'nsslapd-security', secargs.get('nsslapd-security', 'on')), + (ldap.MOD_REPLACE, + 'nsslapd-ssl-check-hostname', secargs.get('nsslapd-ssl-check-hostname', 'off')), + 
(ldap.MOD_REPLACE, + 'nsslapd-secureport', str(secport)) + ] + self.log.debug("trying to modify %r with %r" % (DN_CONFIG, mod)) + self.conn.modify_s(DN_CONFIG, mod) + + fields = 'nsslapd-security nsslapd-ssl-check-hostname'.split() + return self.conn.getEntry(DN_CONFIG, attrlist=fields) + + + + +class Backend(object): + proxied_methods = 'search_s getEntry'.split() + + def __init__(self, conn): + """@param conn - a DSAdmin instance""" + self.conn = conn + self.log = conn.log + + def __getattr__(self, name): + if name in Replica.proxied_methods: + return DSAdmin.__getattr__(self.conn, name) + + def list(self, name=None, suffix=None, attrs=None): + """Get backends by name or suffix + @param name - backend name + @param suffix - get backend for suffix + """ + attrs = attrs or [] + + # raise errors asap + if name and suffix: + raise ValueError("Can't specify both name and suffix") + + def _list_by_suffix(self, suffix, attrs=None): + if suffix: + nsuffix = normalizeDN(suffix) + else: + suffix = nsuffix = '*' + + entries = self.conn.search_s("cn=plugins,cn=config", ldap.SCOPE_SUBTREE, + "(&(objectclass=nsBackendInstance)(|(nsslapd-suffix=%s)(nsslapd-suffix=%s)))" % (suffix, nsuffix), + attrs) + return entries + + def _list_by_name(self, name, attrs=None): + backend_dn = ','.join(('cn=' + name, DN_LDBM)) + return self.conn.search_s(backend_dn, ldap.SCOPE_BASE, attrlist=attrs) + + if name: + return _list_by_name(self, name, attrs) + elif suffix: + return _list_by_suffix(self, suffix, attrs) + + return self.conn.search_s("cn=plugins,cn=config", + ldap.SCOPE_SUBTREE, "(objectclass=nsBackendInstance)", attrs) + + + def readonly(self, bename=None, readonly='on', suffix=None): + """Put a database in readonly mode + @param bename - the backend name (eg. addressbook1) + @param readonly- 'on' or 'off' + + NOTE: I can ldif2db to a read-only database. After the + import, the database will still be in readonly. 
+ + NOTE: When a db is read-only, it seems you need to restart + the directory server before creating further + agreements or initialize consumers + """ + if bename and suffix: + raise ValueError("Specify either bename or suffix") + + if suffix: + raise NotImplementedError() + + self.conn.modify_s(','.join(('cn=' + bename, DN_LDBM)), [ + (ldap.MOD_REPLACE, 'nsslapd-readonly', readonly) + ]) + + + def add(self, suffix, binddn=None, bindpw=None, urls=None, attrvals=None, benamebase='localdb', setupmt=False, parent=None): + """Setup a backend and return its dn. Blank on error XXX should RAISE! + @param suffix + @param benamebase - the backend common name + @param setupmt - eventually setup Mapping Tree entry + @param urls - a string of ldapurl - create a chaining backend + @oaram binddn - chaining proxy user + @param bindpw - chaining proxy password + @param attrvals: a dict with further params like + for ldbm { + 'nsslapd-cachememsize': '1073741824', + 'nsslapd-cachesize': '-1', + } + for chain { + 'nsmaxresponsedelay': '60', + 'nsslapd-sizelimit': '-1' + } + + ex. + benamebase="Isle0-0" + urls=[ + "ldaps://f0-ldap-vip.example.it:636/", + "ldaps://ldap-18.example.it:636/", + "ldaps://ldap-19.example.it:636/" + ] + + NOTE: The suffix attribute is a mere string for the backend. + the following action will work nicely: + c.backend.add(suffix="foo=example,dc=com",benamebase="db1") + + TODO: rename benamebase with cn + TODO: split CHAIN and LDBM ? eg. 
backend.add_chain + """ + attrvals = attrvals or {} + dnbase = "" + + # figure out what type of be based on args + if binddn and bindpw and urls: # its a chaining be + dnbase = DN_CHAIN + else: # its a ldbm be + dnbase = DN_LDBM + + nsuffix = normalizeDN(suffix) + try: + cn = benamebase + self.log.debug("create backend with cn: %s" % cn) + dn = "cn=" + cn + "," + dnbase + entry = Entry(dn) + entry.update({ + 'objectclass': ['top', 'extensibleObject', 'nsBackendInstance'], + 'cn': cn, + 'nsslapd-suffix': nsuffix + }) + + if binddn and bindpw and urls: # its a chaining be + entry.update({ + 'nsfarmserverurl': urls, + 'nsmultiplexorbinddn': binddn, + 'nsmultiplexorcredentials': bindpw + }) + + # set attrvals (but not cn, because it's in dn) + # TODO do it in Entry + if attrvals: + entry.update(attrvals) + + self.log.debug("adding entry: %r" % entry) + self.conn.add_s(entry) + except ldap.ALREADY_EXISTS, e: + self.log.error("Entry already exists: %r" % dn) + raise ldap.ALREADY_EXISTS("%s : %r" % (e, dn)) + except ldap.LDAPError, e: + self.log.error("Could not add backend entry: %r" % dn) + raise + + self.conn._test_entry(dn, ldap.SCOPE_BASE) + + # + # if setupmt creates the suffix entry + # + if setupmt: + self.log.debug("Setup Mapping Tree entry") + mtentry = self.setup_mt(suffix=suffix, bename=cn, parent=parent) + self.log.info("Created Mapping Tree entry %r" % mtentry) + return cn + + + def setup_mt(self, suffix, bename, parent=None): + """Setup a suffix with the given backend-name. + + @param suffix + @param bename + @param parent - the parent suffix + @param verbose - None + + This method does not create the matching entry in the tree, + nor the given backend. Both should be created apart. + + Ex. 
setup_mt(suffix='o=addressbook1', bename='addressbook1') + creates: + - the mapping in "cn=mapping tree,cn=config" + you have to create: + - the backend + - the ldap entry "o=addressbook1" *after* + """ + nsuffix = normalizeDN(suffix) + #escapedn = escapeDNValue(nsuffix) + if parent: + nparent = normalizeDN(parent) + else: + nparent = "" + + filt = suffixfilt(suffix) + # if suffix exists, return + try: + entry = self.conn.getEntry( + DN_MAPPING_TREE, ldap.SCOPE_SUBTREE, filt) + return entry + except NoSuchEntryError: + entry = None + + # fix me when we can actually used escaped DNs + #dn = "cn=%s,cn=mapping tree,cn=config" % escapedn + dn = ','.join(('cn="%s"' % nsuffix, DN_MAPPING_TREE)) + entry = Entry(dn) + entry.update({ + 'objectclass': ['top', 'extensibleObject', 'nsMappingTree'], + 'nsslapd-state': 'backend', + # the value in the dn has to be DN escaped + # internal code will add the quoted value - unquoted value is useful for searching + 'cn': nsuffix, + 'nsslapd-backend': bename + }) + #entry.setValues('cn', [escapedn, nsuffix]) # the value in the dn has to be DN escaped + # the other value can be the unescaped value + if parent: + entry.setValues('nsslapd-parent-suffix', nparent) + try: + self.log.debug("Creating entry: %r" % entry) + self.conn.add_s(entry) + except ldap.LDAPError, e: + raise ldap.LDAPError("Error adding suffix entry " + dn, e) + + ret = self.conn._test_entry(dn, ldap.SCOPE_BASE) + return ret + + def suffixes(self,unquote=False): + """Return a list of suffixes dn + @param unquote - remove ldap escaping + """ + suffixes = self.search_s(DN_MAPPING_TREE, ldap.SCOPE_ONELEVEL) + if unquote: + raise NotImplementedError() + return [x.dn.replace("\3D","=").replace("\2C",",") for x in suffixes] + return [x.dn for x in suffixes] diff --git a/lib389/tools.py b/lib389/tools.py new file mode 100644 index 0000000..2d2003e --- /dev/null +++ b/lib389/tools.py @@ -0,0 +1,560 @@ +"""Tools for creating and managing servers + + uses DSAdmin +""" +__all__ = 
['DSAdminTools'] +try: + from subprocess import Popen, PIPE, STDOUT + HASPOPEN = True +except ImportError: + import popen2 + HASPOPEN = False + +import sys +import os +import os.path +import base64 +import urllib +import urllib2 +import ldap +import operator +import select +import time +import shutil + +import lib389 +from lib389 import InvalidArgumentError + +from lib389.utils import ( + getcfgdsuserdn, + getcfgdsinfo, + getcfgdsuserdn, + update_newhost_with_fqdn, + get_sbin_dir, get_server_user, getdomainname, + isLocalHost, formatInfData, getserverroot, + + update_admin_domain,getadminport,getdefaultsuffix, + + ) +from lib389._ldifconn import LDIFConn +from lib389._constants import DN_DM + +import logging +logging.basicConfig(level=logging.DEBUG) +log = logging.getLogger(__name__) + +# Private constants +PATH_SETUP_DS_ADMIN = "/setup-ds-admin.pl" +PATH_SETUP_DS = "/setup-ds.pl" +PATH_ADM_CONF = "/etc/dirsrv/admin-serv/adm.conf" + +class DSAdminTools(object): + """DSAdmin mix-in.""" + + @staticmethod + def cgiFake(sroot, verbose, prog, args): + """Run the local program prog as a CGI using the POST method.""" + content = urllib.urlencode(args) + length = len(content) + # setup CGI environment + env = os.environ.copy() + env['REQUEST_METHOD'] = "POST" + env['NETSITE_ROOT'] = sroot + env['CONTENT_LENGTH'] = str(length) + progdir = os.path.dirname(prog) + if HASPOPEN: + pipe = Popen(prog, cwd=progdir, env=env, + stdin=PIPE, stdout=PIPE, stderr=STDOUT) + child_stdin = pipe.stdin + child_stdout = pipe.stdout + else: + saveenv = os.environ + os.environ = env + child_stdout, child_stdin = popen2.popen2(prog) + os.environ = saveenv + child_stdin.write(content) + child_stdin.close() + for line in child_stdout: + if verbose: + sys.stdout.write(line) + ary = line.split(":") + if len(ary) > 1 and ary[0] == 'NMC_Status': + exitCode = ary[1].strip() + break + child_stdout.close() + if HASPOPEN: + osCode = pipe.wait() + print "%s returned NMC code %s and OS code %s" % ( + prog, 
exitCode, osCode) + return exitCode + + @staticmethod + def cgiPost(host, port, username, password, uri, verbose, secure, args=None): + """Post the request to the admin server. + + Admin server requires authentication, so we use the auth handler classes. + + NOTE: the url classes in python use the deprecated + base64.encodestring() function, which truncates lines, + causing Apache to give us a 400 Bad Request error for the + Authentication string. So, we have to tell + base64.encodestring() not to truncate.""" + args = args or {} + prefix = 'http' + if secure: + prefix = 'https' + hostport = host + ":" + port + # construct our url + url = '%s://%s:%s%s' % (prefix, host, port, uri) + # tell base64 not to truncate lines + savedbinsize = base64.MAXBINSIZE + base64.MAXBINSIZE = 256 + # create the password manager - we don't care about the realm + passman = urllib2.HTTPPasswordMgrWithDefaultRealm() + # add our password + passman.add_password(None, hostport, username, password) + # create the auth handler + authhandler = urllib2.HTTPBasicAuthHandler(passman) + # create our url opener that handles basic auth + opener = urllib2.build_opener(authhandler) + # make admin server think we are the console + opener.addheaders = [('User-Agent', 'Fedora-Console/1.0')] + if verbose: + print "requesting url", url + sys.stdout.flush() + exitCode = 1 + try: + req = opener.open(url, urllib.urlencode(args)) + for line in req: + if verbose: + print line + ary = line.split(":") + if len(ary) > 1 and ary[0] == 'NMC_Status': + exitCode = ary[1].strip() + break + req.close() +# except IOError, e: +# print e +# print e.code +# print e.headers +# raise + finally: + # restore binsize + base64.MAXBINSIZE = savedbinsize + return exitCode + + @staticmethod + def serverCmd(self, cmd, verbose, timeout=120): + """NOTE: this tries to open the log! 
+ """ + instanceDir = os.path.join(self.sroot, "slapd-" + self.inst) + + errLog = instanceDir + '/logs/errors' + if hasattr(self, 'errlog'): + errLog = self.errlog + done = False + started = True + lastLine = "" + cmd = cmd.lower() + fullCmd = instanceDir + "/" + cmd + "-slapd" + if cmd == 'start': + cmdPat = 'slapd started.' + else: + cmdPat = 'slapd stopped.' + + if "USE_GDB" in os.environ or "USE_VALGRIND" in os.environ: + timeout = timeout * 3 + timeout += int(time.time()) + if cmd == 'stop': + log.warn("unbinding before stop") + self.unbind() + + log.info("Setup error log") + logfp = open(errLog, 'r') + logfp.seek(0, os.SEEK_END) # seek to end + pos = logfp.tell() # get current position + logfp.seek(pos, os.SEEK_SET) # reset the EOF flag + + log.warn("Running command: %r" % fullCmd) + rc = os.system(fullCmd) + while not done and int(time.time()) < timeout: + line = logfp.readline() + while not done and line: + lastLine = line + if verbose: + log.debug("current line: %r" % line.strip()) + if line.find(cmdPat) >= 0: + started += 1 + if started == 2: + done = True + elif line.find("Initialization Failed") >= 0: + # sometimes the server fails to start - try again + rc = os.system(fullCmd) + elif line.find("exiting.") >= 0: + # possible transient condition - try again + rc = os.system(fullCmd) + pos = logfp.tell() + line = logfp.readline() + if line.find("PR_Bind") >= 0: + # server port conflicts with another one, just report and punt + log.debug("last line: %r" % lastLine.strip()) + log.warn("This server cannot be started until the other server on this port is shutdown") + done = True + if not done: + time.sleep(2) + logfp.seek(pos, 0) + logfp.close() + if started < 2: + now = int(time.time()) + if now > timeout: + log.warn( + "Probable timeout: timeout=%d now=%d" % (timeout, now)) + + log.error("Error: could not %s server %s %s: %d" % ( + cmd, self.sroot, self.inst, rc)) + return 1 + else: + log.info("%s was successful for %s %s" % ( + cmd, self.sroot, 
self.inst)) + if cmd == 'start': + self.__localinit__() + return 0 + + @staticmethod + def stop(self, verbose=False, timeout=0): + """Stop server or raise.""" + if not self.isLocal and hasattr(self, 'asport'): + log.info("stopping remote server ", self) + self.unbind() + log.info("closed remote server ", self) + cgiargs = {} + rc = DSAdminTools.cgiPost(self.host, self.asport, self.cfgdsuser, + self.cfgdspwd, + "/slapd-%s/Tasks/Operation/stop" % self.inst, + verbose, cgiargs) + log.info("stopped remote server %s rc = %d" % (self, rc)) + return rc + else: + return DSAdminTools.serverCmd(self, 'stop', verbose, timeout) + + @staticmethod + def start(self, verbose=False, timeout=0): + if not self.isLocal and hasattr(self, 'asport'): + log.debug("starting remote server %s " % self) + cgiargs = {} + rc = DSAdminTools.cgiPost(self.host, self.asport, self.cfgdsuser, + self.cfgdspwd, + "/slapd-%s/Tasks/Operation/start" % self.inst, + verbose, cgiargs) + log.debug("connecting remote server %s" % self) + if not rc: + self.__localinit__() + log.info("started remote server %s rc = %d" % (self, rc)) + return rc + else: + log.debug("Starting server %r" % self) + return DSAdminTools.serverCmd(self, 'start', verbose, timeout) + + @staticmethod + def setupSSL(dsadmin, secport=636, sourcedir=None, secargs=None): + """configure and setup SSL with a given certificate and restart the server. 
+ + See DSAdmin.configSSL for the secargs values + """ + e = lib389.configSSL(secport, secargs) + log.info("entry is %r" % [e]) + dn_config = e.dn + # get our cert dir + e_config = lib389.getEntry( + dn_config, ldap.SCOPE_BASE, '(objectclass=*)') + certdir = e_config.getValue('nsslapd-certdir') + # have to stop the server before replacing any security files + DSAdminTools.stop(dsadmin) + # allow secport for selinux + if secport != 636: + log.debug("Configuring SELinux on port:", secport) + cmd = 'semanage port -a -t ldap_port_t -p tcp %s' % secport + os.system(cmd) + + # eventually copy security files from source dir to our cert dir + if sourcedir: + for ff in ['cert8.db', 'key3.db', 'secmod.db', 'pin.txt', 'certmap.conf']: + srcf = os.path.join(sourcedir, ff) + destf = os.path.join(certdir, ff) + # make sure dest is writable so we can copy over it + try: + log.info("Copying security files: %s to %s" % (srcf, destf)) + mode = os.stat(destf).st_mode + newmode = mode | 0600 + os.chmod(destf, newmode) + except Exception, e: + print e + pass # oh well + # copy2 will copy the mode too + shutil.copy2(srcf, destf) + + # now, restart the ds + DSAdminTools.start(dsadmin, True) + + @staticmethod + def runInfProg(prog, content, verbose): + """run a program that takes an .inf style file on stdin""" + cmd = [ '/usr/bin/sudo' ] + cmd.append('/usr/bin/perl') + cmd.append( prog ) + #cmd = [prog] + if verbose: + cmd.append('-ddd') + else: + cmd.extend(['-l', '/dev/null']) + cmd.extend(['-s', '-f', '-']) + print "running: %s " % cmd + if HASPOPEN: + pipe = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT) + child_stdin = pipe.stdin + child_stdout = pipe.stdout + else: + pipe = popen2.Popen4(cmd) + child_stdin = pipe.tochild + child_stdout = pipe.fromchild + child_stdin.write(content) + child_stdin.close() + while not pipe.poll(): + (rr, wr, xr) = select.select([child_stdout], [], [], 1.0) + if rr and len(rr) > 0: + line = rr[0].readline() + if not line: + break + if verbose: + 
sys.stdout.write(line) + elif verbose: + print "timed out waiting to read from", cmd + child_stdout.close() + exitCode = pipe.wait() + if verbose: + print "%s returned exit code %s" % (prog, exitCode) + return exitCode + + @staticmethod + def removeInstance(instance): + """run the remove instance command""" + cmd = "/usr/bin/sudo /usr/bin/perl /usr/sbin/remove-ds.pl -i slapd-%s" % instance + #print "running: %s " % cmd + try: + os.system(cmd) + except: + log.exception("error executing %r" % cmd) + + + @staticmethod + def createInstance(args, verbose=0): + """Create a new instance of directory server and return a connection to it. + + This function: + - guesses the hostname where to create the DS, using + localhost by default; + - figures out if the given hostname is the local host or not. + + @param args - a dict with the following values { + # new instance compulsory values + 'newinstance': 'rpolli', + 'newsuffix': 'dc=example,dc=com', + 'newhost': 'localhost.localdomain', + 'newport': 22389, + 'newrootpw': 'password', + + # optionally register instance on an admin tree + 'have_admin': True, + + # you can configure a new dirsrv-admin + 'setup_admin': True, + + # or you need the dirsrv-admin to be already setup + 'cfgdshost': 'localhost.localdomain', + 'cfgdsport': 22389, + 'cfgdsuser': 'admin', + 'cfgdspwd': 'admin', + + } + """ + cfgdn = lib389.CFGSUFFIX + isLocal = update_newhost_with_fqdn(args) + + # use prefix if binaries are relocated + sroot = args.get('sroot', '') + prefix = args.setdefault('prefix', '') + + # new style - prefix or FHS? + args['new_style'] = not args.get('sroot') + + # do we have ds only or ds+admin? 
+ if 'no_admin' not in args: + sbindir = get_sbin_dir(sroot, prefix) + if os.path.isfile(sbindir + PATH_SETUP_DS_ADMIN): + args['have_admin'] = True + + # set default values + args['have_admin'] = args.get('have_admin', False) + args['setup_admin'] = args.get('setup_admin', False) + + # get default values from adm.conf + if args['new_style'] and args['have_admin']: + admconf = LDIFConn( + args['prefix'] + PATH_ADM_CONF) + args['admconf'] = admconf.get('') + + # next, get the configuration ds host and port + if args['have_admin']: + args['cfgdshost'], args['cfgdsport'], cfgdn = getcfgdsinfo(args) + # + # if a Config DS is passed, get the userdn. This creates + # a connection to the given DS. If you don't want to connect + # to this server you should pass 'setup_admin' too. + # + if args['have_admin'] and not args['setup_admin']: + cfgconn = getcfgdsuserdn(cfgdn, args) + + # next, get the server root if not given + if not args['new_style']: + getserverroot(cfgconn, isLocal, args) + # next, get the admin domain + if args['have_admin']: + update_admin_domain(isLocal, args) + # next, get the admin server port and any other information - close the cfgconn + if args['have_admin'] and not args['setup_admin']: + asport, secure = getadminport(cfgconn, cfgdn, args) + # next, get the posix username + get_server_user(args) + # fixup and verify other args + args['newport'] = args.get('newport', 389) + args['newrootdn'] = args.get('newrootdn', DN_DM) + args['newsuffix'] = args.get('newsuffix', getdefaultsuffix(args['newhost'])) + + if not isLocal or 'cfgdshost' in args: + if 'admin_domain' not in args: + args['admin_domain'] = getdomainname(args['newhost']) + if isLocal and 'cfgdspwd' not in args: + args['cfgdspwd'] = "dummy" + if isLocal and 'cfgdshost' not in args: + args['cfgdshost'] = args['newhost'] + if isLocal and 'cfgdsport' not in args: + args['cfgdsport'] = 55555 + missing = False + for param in ('newhost', 'newport', 'newrootdn', 'newrootpw', 'newinstance', 
'newsuffix'): + if param not in args: + log.error("missing required argument: ", param) + missing = True + if missing: + raise InvalidArgumentError("missing required arguments") + + # try to connect with the given parameters + try: + newconn = lib389.DSAdmin(args['newhost'], args['newport'], + args['newrootdn'], args['newrootpw'], args['newinstance']) + newconn.isLocal = isLocal + if args['have_admin'] and not args['setup_admin']: + newconn.asport = asport + newconn.cfgdsuser = args['cfgdsuser'] + newconn.cfgdspwd = args['cfgdspwd'] + print "Warning: server at %s:%s already exists, returning connection to it" % \ + (args['newhost'], args['newport']) + return newconn + except ldap.SERVER_DOWN: + pass # not running - create new one + + if not isLocal or 'cfgdshost' in args: + for param in ('cfgdshost', 'cfgdsport', 'cfgdsuser', 'cfgdspwd', 'admin_domain'): + if param not in args: + print "missing required argument", param + missing = True + if not isLocal and not asport: + print "missing required argument admin server port" + missing = True + if missing: + raise InvalidArgumentError("missing required arguments") + + # construct a hash table with our CGI arguments - used with cgiPost + # and cgiFake + cgiargs = { + 'servname': args['newhost'], + 'servport': args['newport'], + 'rootdn': args['newrootdn'], + 'rootpw': args['newrootpw'], + 'servid': args['newinstance'], + 'suffix': args['newsuffix'], + 'servuser': args['newuserid'], + 'start_server': 1 + } + if 'cfgdshost' in args: + cgiargs['cfg_sspt_uid'] = args['cfgdsuser'] + cgiargs['cfg_sspt_uid_pw'] = args['cfgdspwd'] + cgiargs['ldap_url'] = "ldap://%s:%d/%s" % ( + args['cfgdshost'], args['cfgdsport'], cfgdn) + cgiargs['admin_domain'] = args['admin_domain'] + + if not isLocal: + DSAdminTools.cgiPost(args['newhost'], asport, args['cfgdsuser'], + args['cfgdspwd'], "/slapd/Tasks/Operation/Create", verbose, + secure, cgiargs) + elif not args['new_style']: + prog = sroot + "/bin/slapd/admin/bin/ds_create" + if not 
os.access(prog, os.X_OK): + prog = sroot + "/bin/slapd/admin/bin/ds_newinstance" + DSAdminTools.cgiFake(sroot, verbose, prog, cgiargs) + else: + prog = '' + if args['have_admin']: + prog = get_sbin_dir(sroot, prefix) + PATH_SETUP_DS_ADMIN + else: + prog = get_sbin_dir(sroot, prefix) + PATH_SETUP_DS + + if not os.path.isfile(prog): + log.error("Can't find file: %r, removing extension" % prog) + prog = prog[:-3] + + content = formatInfData(args) + DSAdminTools.runInfProg(prog, content, verbose) + + newconn = lib389.DSAdmin(args['newhost'], args['newport'], + args['newrootdn'], args['newrootpw'], args['newinstance']) + newconn.isLocal = isLocal + # Now the admin should have been created + # but still I should have taken all the required infos + # before. + if args['have_admin'] and not args['setup_admin']: + newconn.asport = asport + newconn.cfgdsuser = args['cfgdsuser'] + newconn.cfgdspwd = args['cfgdspwd'] + return newconn + + @staticmethod + def createAndSetupReplica(createArgs, repArgs): + # pass this sub two dicts - the first one is a dict suitable to create + # a new instance - see createInstance for more details + # the second is a dict suitable for replicaSetupAll - see replicaSetupAll + conn = DSAdminTools.createInstance(createArgs) + if not conn: + print "Error: could not create server", createArgs + return 0 + + conn.replicaSetupAll(repArgs) + return conn + + +class MockDSAdmin(object): + host = 'localhost' + port = 22389 + sslport = 0 + + def __init__(self, dict_=None): + if dict_: + self.host = dict_['host'] + self.port = dict_['port'] + if 'sslport' in dict_: + self.sslport = dict_['sslport'] + + def __str__(self): + if self.sslport: + return 'ldaps://%s:%s' % (self.host, self.sslport) + else: + return 'ldap://%s:%s' % (self.host, self.port) diff --git a/lib389/utils.py b/lib389/utils.py new file mode 100644 index 0000000..d5f3c5d --- /dev/null +++ b/lib389/utils.py @@ -0,0 +1,480 @@ +"""Utilities for DSAdmin. + + TODO put them in a module! 
+""" +try: + from subprocess import Popen as my_popen, PIPE +except ImportError: + from popen2 import popen2 + + def my_popen(cmd_l, stdout=None): + class MockPopenResult(object): + def wait(): + pass + p = MockPopenResult() + p.stdout, p.stdin = popen2(cmd_l) + return p + +import re +import os +import socket +import logging +logging.basicConfig(level=logging.DEBUG) +log = logging.getLogger(__name__) + +import socket +from socket import getfqdn + +from ldapurl import LDAPUrl +import ldap +import lib389 +from lib389 import DN_CONFIG +from lib389._constants import * + +# +# Decorator +# + + +def static_var(varname, value): + def decorate(func): + setattr(func, varname, value) + return func + return decorate + + +# +# Various searches to be used in getEntry +# eg getEntry(*searches['NAMINGCONTEXTS']) +# +searches = { + 'NAMINGCONTEXTS': ('', ldap.SCOPE_BASE, '(objectclass=*)', ['namingcontexts']), + 'ZOMBIE' : ('', ldap.SCOPE_SUBTREE, '(&(objectclass=glue)(objectclass=extensibleobject))', ['dn']) +} + +# +# Utilities +# + + +def is_a_dn(dn): + """Returns True if the given string is a DN, False otherwise.""" + return (dn.find("=") > 0) + + +def normalizeDN(dn, usespace=False): + # not great, but will do until we use a newer version of python-ldap + # that has DN utilities + ary = ldap.explode_dn(dn.lower()) + joinstr = "," + if usespace: + joinstr = ", " + return joinstr.join(ary) + + +def escapeDNValue(dn): + '''convert special characters in a DN into LDAPv3 escapes. + + e.g. + "dc=example,dc=com" -> \"dc\=example\,\ dc\=com\"''' + for cc in (' ', '"', '+', ',', ';', '<', '>', '='): + dn = dn.replace(cc, '\\' + cc) + return dn + + +def escapeDNFiltValue(dn): + '''convert special characters in a DN into LDAPv3 escapes + for use in search filters''' + for cc in (' ', '"', '+', ',', ';', '<', '>', '='): + dn = dn.replace(cc, '\\%x' % ord(cc)) + return dn + + +def suffixfilt(suffix): + """Return a filter matching any possible suffix form. + + eg. 
normalized, escaped, spaced... + """ + nsuffix = normalizeDN(suffix) + spacesuffix = normalizeDN(nsuffix, True) + escapesuffix = escapeDNFiltValue(nsuffix) + filt = '(|(cn=%s)(cn=%s)(cn=%s)(cn="%s")(cn="%s")(cn=%s)(cn="%s"))' % (escapesuffix, nsuffix, spacesuffix, nsuffix, spacesuffix, suffix, suffix) + return filt + +# +# path tools +# + + +def get_sbin_dir(sroot=None, prefix=None): + """Return the sbin directory (default /usr/sbin).""" + if sroot: + return "%s/bin/slapd/admin/bin" % sroot + elif prefix: + return "%s/sbin" % prefix + return "/usr/sbin" + + +# +# functions using sockets +# +def isLocalHost(host_name): + """True if host_name points to a local ip. + + Uses gethostbyname() + """ + # first see if this is a "well known" local hostname + if host_name == 'localhost' or host_name == 'localhost.localdomain' or host_name == socket.gethostname(): + return True + + # first lookup ip addr + try: + ip_addr = socket.gethostbyname(host_name) + if ip_addr.startswith("127."): + log.debug("this ip is on loopback, retain only the first octet") + ip_addr = '127.' + except socket.gaierror: + log.debug("no ip address for %r" % host_name) + return False + + # next, see if this IP addr is one of our + # local addresses + p = my_popen(['/sbin/ifconfig', '-a'], stdout=PIPE) + child_stdout = p.stdout.read() + found = ('inet addr:' + ip_addr) in child_stdout + p.wait() + + return found + + +def getdomainname(name=''): + fqdn = getfqdn(name) + index = fqdn.find('.') + if index >= 0: + return fqdn[index + 1:] + else: + return fqdn + + +def getdefaultsuffix(name=''): + dm = getdomainname(name) + if dm: + return "dc=" + dm.replace('.', ',dc=') + else: + return 'dc=localdomain' + + +def get_server_user(args): + """Return the unix username used from the server inspecting the following keys in args. 
+ + 'newuserid', 'admconf', 'sroot' -> ssusers.conf + + """ + if 'newuserid' not in args: + if 'admconf' in args: + args['newuserid'] = args['admconf'].SuiteSpotUserID + elif 'sroot' in args: + ssusers = open("%s/shared/config/ssusers.conf" % args['sroot']) + for line in ssusers: + ary = line.split() + if len(ary) > 1 and ary[0] == 'SuiteSpotUser': + args['newuserid'] = ary[-1] + ssusers.close() + if 'newuserid' not in args: + args['newuserid'] = os.environ['LOGNAME'] + if args['newuserid'] == 'root': + args['newuserid'] = DEFAULT_USER + + +def update_newhost_with_fqdn(args): + """Replace args['newhost'] with its fqdn and returns True if local. + + One of the arguments to createInstance is newhost. If this is specified, we need + to convert it to the fqdn. If not given, we need to figure out what the fqdn of the + local host is. This method sets newhost in args to the appropriate value and + returns True if newhost is the localhost, False otherwise""" + if 'newhost' in args: + args['newhost'] = getfqdn(args['newhost']) + isLocal = isLocalHost(args['newhost']) + else: + isLocal = True + args['newhost'] = getfqdn() + return isLocal + + +def getcfgdsuserdn(cfgdn, args): + """Return a DSAdmin object bound anonymously or to the admin user. + + If the config ds user ID was given, not the full DN, we need to figure + out the full DN. + + Try in order to: + 1- search the directory anonymously; + 2- look in ldap.conf; + 3- try the default DN. + + This may raise a file or LDAP exception. 
+ """ + # create a connection to the cfg ds + conn = lib389.DSAdmin(args['cfgdshost'], args['cfgdsport'], "", "", None) + # if the caller gave a password, but not the cfguser DN, look it up + if 'cfgdspwd' in args and \ + ('cfgdsuser' not in args or not is_a_dn(args['cfgdsuser'])): + if 'cfgdsuser' in args: + ent = conn.getEntry(cfgdn, ldap.SCOPE_SUBTREE, + "(uid=%s)" % args['cfgdsuser'], + ['dn']) + args['cfgdsuser'] = ent.dn + elif 'sroot' in args: + ldapconf = open( + "%s/shared/config/ldap.conf" % args['sroot'], 'r') + for line in ldapconf: + ary = line.split() # default split is all whitespace + if len(ary) > 1 and ary[0] == 'admnm': + args['cfgdsuser'] = ary[-1] + ldapconf.close() + elif 'admconf' in args: + args['cfgdsuser'] = args['admconf'].userdn + elif 'cfgdsuser' in args: + args['cfgdsuser'] = "uid=%s,ou=Administrators,ou=TopologyManagement,%s" % \ + (args['cfgdsuser'], cfgdn) + conn.unbind() + conn = lib389.DSAdmin( + args['cfgdshost'], args['cfgdsport'], args['cfgdsuser'], + args['cfgdspwd'], None) + return conn + + +def update_admin_domain(isLocal, args): + """Get the admin domain to use.""" + if isLocal and 'admin_domain' not in args: + if 'admconf' in args: + args['admin_domain'] = args['admconf'].admindomain + elif 'sroot' in args: + dsconf = open('%s/shared/config/ds.conf' % args['sroot'], 'r') + for line in dsconf: + ary = line.split(":") + if len(ary) > 1 and ary[0] == 'AdminDomain': + args['admin_domain'] = ary[1].strip() + dsconf.close() + + +def getoldcfgdsinfo(args): + """Use the old style sroot/shared/config/dbswitch.conf to get the info""" + dbswitch = open("%s/shared/config/dbswitch.conf" % args['sroot'], 'r') + try: + matcher = re.compile(r'^directory\s+default\s+') + for line in dbswitch: + m = matcher.match(line) + if m: + url = LDAPUrl(line[m.end():]) + ary = url.hostport.split(":") + if len(ary) < 2: + ary.append(389) + else: + ary[1] = int(ary[1]) + ary.append(url.dn) + return ary + finally: + dbswitch.close() + + +def 
getnewcfgdsinfo(new_instance_arguments): + """Use the new style prefix /etc/dirsrv/admin-serv/adm.conf. + + new_instance_arguments = {'admconf': obj } where obj.ldapurl != None + """ + try: + url = LDAPUrl(new_instance_arguments['admconf'].ldapurl) + except AttributeError: + log.error("missing ldapurl attribute in new_instance_arguments: %r" % new_instance_arguments) + raise + + ary = url.hostport.split(":") + if len(ary) < 2: + ary.append(389) + else: + ary[1] = int(ary[1]) + ary.append(url.dn) + return ary + + +def getcfgdsinfo(new_instance_arguments): + """Returns a 3-tuple consisting of the host, port, and cfg suffix. + + `new_instance_arguments` = { + 'cfgdshost': + 'cfgdsport': + 'new_style': + } + We need the host and port of the configuration directory server in order + to create an instance. If this was not given, read the dbswitch.conf file + to get the information. This method will raise an exception if the file + was not found or could not be open. This assumes new_instance_arguments contains the sroot + parameter for the server root path. If successful, """ + try: + return new_instance_arguments['cfgdshost'], int(new_instance_arguments['cfgdsport']), lib389.CFGSUFFIX + except KeyError: # if keys are missing... + if new_instance_arguments['new_style']: + return getnewcfgdsinfo(new_instance_arguments) + + return getoldcfgdsinfo(new_instance_arguments) + + +def getserverroot(cfgconn, isLocal, args): + """Grab the serverroot from the instance dir of the config ds if the user + did not specify a server root directory""" + if cfgconn and 'sroot' not in args and isLocal: + ent = cfgconn.getEntry( + DN_CONFIG, ldap.SCOPE_BASE, "(objectclass=*)", + ['nsslapd-instancedir']) + if ent: + args['sroot'] = os.path.dirname( + ent.getValue('nsslapd-instancedir')) + + +@staticmethod +def getadminport(cfgconn, cfgdn, args): + """Return a 2-tuple (asport, True) if the admin server is using SSL, False otherwise. + + Get the admin server port so we can contact it via http. 
We get this from + the configuration entry using the CFGSUFFIX and cfgconn. Also get any other + information we may need from that entry. The .""" + asport = 0 + secure = False + if cfgconn: + dn = cfgdn + if 'admin_domain' in args: + dn = "cn=%s,ou=%s, %s" % ( + args['newhost'], args['admin_domain'], cfgdn) + filt = "(&(objectclass=nsAdminServer)(serverHostName=%s)" % args[ + 'newhost'] + if 'sroot' in args: + filt += "(serverRoot=%s)" % args['sroot'] + filt += ")" + ent = cfgconn.getEntry( + dn, ldap.SCOPE_SUBTREE, filt, ['serverRoot']) + if ent: + if 'sroot' not in args and ent.serverRoot: + args['sroot'] = ent.serverRoot + if 'admin_domain' not in args: + ary = ldap.explode_dn(ent.dn, 1) + args['admin_domain'] = ary[-2] + dn = "cn=configuration, " + ent.dn + ent = cfgconn.getEntry(dn, ldap.SCOPE_BASE, '(objectclass=*)', + ['nsServerPort', 'nsSuiteSpotUser', 'nsServerSecurity']) + if ent: + asport = ent.nsServerPort + secure = (ent.nsServerSecurity and ( + ent.nsServerSecurity == 'on')) + if 'newuserid' not in args: + args['newuserid'] = ent.nsSuiteSpotUser + cfgconn.unbind() + return asport, secure + + +def formatInfData(args): + """Return the .inf data for a silence setup via setup-ds.pl. 
+ + args = { + # new instance values + newhost, newuserid, newport, newrootdn, newrootpw, newsuffix, + + # The following parameters require to register the new instance + # in the admin server + have_admin, cfgdshost, cfgdsport, cfgdsuser,cfgdspwd, admin_domain + + InstallLdifFile, AddOrgEntries, ConfigFile, SchemaFile, ldapifilepath + + # Setup the o=NetscapeRoot namingContext + setup_admin, + } + + @see https://access.redhat.com/site/documentation/en-US/Red_Hat_Directory_Server/8.2/html/Installation_Guide/Installation_Guide-Advanced_Configuration-Silent.html + [General] + FullMachineName= dir.example.com + SuiteSpotUserID= nobody + SuiteSpotGroup= nobody + AdminDomain= example.com + ConfigDirectoryAdminID= admin + ConfigDirectoryAdminPwd= admin + ConfigDirectoryLdapURL= ldap://dir.example.com:389/o=NetscapeRoot + + [slapd] + SlapdConfigForMC= Yes + UseExistingMC= 0 + ServerPort= 389 + ServerIdentifier= dir + Suffix= dc=example,dc=com + RootDN= cn=Directory Manager + RootDNPwd= password + ds_bename=exampleDB + AddSampleEntries= No + + [admin] + Port= 9830 + ServerIpAddress= 111.11.11.11 + ServerAdminID= admin + ServerAdminPwd= admin + + + """ + args = args.copy() + args['CFGSUFFIX'] = lib389.CFGSUFFIX + + content = ( + "[General]" "\n" + "FullMachineName= %(newhost)s" "\n" + "SuiteSpotUserID= %(newuserid)s" "\n" + ) % args + + # by default, use groupname=username + if 'SuiteSpotGroup' in args: + content += """\nSuiteSpotGroup= %s\n""" % args['SuiteSpotGroup'] + else: + content += """\nSuiteSpotGroup= %(newuserid)s\n""" % args + + if args.get('have_admin'): + content += ( + "AdminDomain= %(admin_domain)s" "\n" + "ConfigDirectoryLdapURL= ldap://%(cfgdshost)s:%(cfgdsport)d/%(CFGSUFFIX)s" "\n" + "ConfigDirectoryAdminID= %(cfgdsuser)s" "\n" + "ConfigDirectoryAdminPwd= %(cfgdspwd)s" "\n" + ) % args + + content += ("\n" "\n" + "[slapd]" "\n" + "ServerPort= %(newport)s" "\n" + "RootDN= %(newrootdn)s" "\n" + "RootDNPwd= %(newrootpw)s" "\n" + "ServerIdentifier= 
%(newinstance)s" "\n" + "Suffix= %(newsuffix)s" "\n" + ) % args + + + + # Create admin? + if args.get('setup_admin'): + content += ( + "SlapdConfigForMC= Yes" "\n" + "UseExistingMC= 0 " "\n" + ) + + + + if 'InstallLdifFile' in args: + content += """\nInstallLdifFile= %s\n""" % args['InstallLdifFile'] + if 'AddOrgEntries' in args: + content += """\nAddOrgEntries= %s\n""" % args['AddOrgEntries'] + if 'ConfigFile' in args: + for ff in args['ConfigFile']: + content += """\nConfigFile= %s\n""" % ff + if 'SchemaFile' in args: + for ff in args['SchemaFile']: + content += """\nSchemaFile= %s\n""" % ff + + if 'ldapifilepath' in args: + content += "\nldapifilepath=%s\n" % args['ldapifilepath'] + + + return content diff --git a/tests/backend_test.py b/tests/backend_test.py new file mode 100644 index 0000000..e12803d --- /dev/null +++ b/tests/backend_test.py @@ -0,0 +1,110 @@ +"""Brooker classes to organize ldap methods. + Stuff is split in classes, like: + * Replica + * Backend + * Suffix + + You will access this from: + DSAdmin.backend.methodName() +""" + +from nose import * +from nose.tools import * + +import config +from config import log +from config import * + +import ldap +import time +import sys +import dsadmin +from dsadmin import DSAdmin, Entry +from dsadmin import NoSuchEntryError +from dsadmin import utils +from dsadmin.tools import DSAdminTools +from subprocess import Popen +from random import randint +from dsadmin.brooker import Replica +from dsadmin import MASTER_TYPE, DN_MAPPING_TREE, DN_CHANGELOG, DN_LDBM +# Test harnesses +from dsadmin_test import drop_backend, addbackend_harn +from dsadmin_test import drop_added_entries + +conn = None +added_entries = None +added_backends = None + +MOCK_REPLICA_ID = '12' +MOCK_TESTREPLICA_DN = "cn=testReplica,cn=ldbm database,cn=plugins,cn=config" + +def setup(): + # uses an existing 389 instance + # add a suffix + # add an agreement + # This setup is quite verbose but to test dsadmin method we should + # do things manually. 
A better solution would be to use an LDIF. + global conn + conn = DSAdmin(**config.auth) + conn.verbose = True + conn.added_entries = [] + conn.added_backends = set(['o=mockbe1']) + conn.added_replicas = [] + + # add a backend for testing ruv and agreements + addbackend_harn(conn, 'testReplica') + + # add another backend for testing replica.add() + addbackend_harn(conn, 'testReplicaCreation') + + +def teardown(): + global conn + drop_added_entries(conn) + conn.delete_s(','.join(['cn="o=testreplica"', DN_MAPPING_TREE])) + drop_backend(conn, 'o=testreplica') + #conn.delete_s('o=testreplica') + +def list_test(): + ret = conn.backend.list() + ret = [x.dn for x in ret] + assert len(ret) >=2, "Bad result %r" % ret + + +def list_by_name_test(): + tests = [({'name': 'testreplica'}, MOCK_TESTREPLICA_DN)] + for params, result in tests: + ret = conn.backend.list(**params) + ret = [x.dn for x in ret] + assert result in ret, "Result was %r " % ret + + +def list_by_suffix_test(): + tests = [ + ({'suffix': 'o=testreplica'}, MOCK_TESTREPLICA_DN) + ] + + for params, result in tests: + ret = conn.backend.list(**params) + ret = [x.dn for x in ret] + assert result in ret , "Result was %r" % ret + +def list_suffixes(): + tests = [ 'o=testreplica' ] + suffixes = conn.backend.suffixes() + for params in tests: + assert params in suffixes, "Missing %r in %r" % (params, suffixes) + +def readonly_test(): + bename = 'testReplica' + backend_dn = ','.join(('cn=' + bename, DN_LDBM)) + try: + conn.backend.readonly(bename=bename, readonly='on') + e = conn.getEntry(backend_dn) + ret = e.getValue('nsslapd-readonly') + assert ret == 'on', "Readonly value mismatch: %r " % ret + finally: + conn.backend.readonly(bename=bename, readonly='off') + e = conn.getEntry(backend_dn) + ret = e.getValue('nsslapd-readonly') + assert ret == 'off', "Readonly value mismatch: %r " % ret diff --git a/tests/config.py b/tests/config.py new file mode 100644 index 0000000..0bb1e68 --- /dev/null +++ b/tests/config.py @@ -0,0 
+1,37 @@ +import logging +logging.basicConfig(level=logging.DEBUG) +log = logging.getLogger(__name__) + +DN_RMANAGER = 'uid=rmanager,cn=config' + +auth = {'host': 'localhost', + 'port': 22389, + 'binddn': 'cn=directory manager', + 'bindpw': 'password'} + + +class MockDSAdmin(object): + host = 'localhost' + port = 22389 + sslport = 0 + + def __str__(self): + if self.sslport: + return 'ldaps://%s:%s' % (self.host, self.sslport) + else: + return 'ldap://%s:%s' % (self.host, self.port) + + +def expect(entry, name, value): + assert entry, "Bad entry %r " % entry + assert entry.getValue(name) == value, "Bad value for entry %s. Expected %r vs %r" % (entry, entry.getValue(name), value) + + +def entry_equals(e1, e2): + """compare using str()""" + return str(e1) == str(e2) + + +def dfilter(my_dict, keys): + """Filter a dict in a 2.4-compatible way""" + return dict([(k, v) for k, v in my_dict.iteritems() if k in keys]) diff --git a/tests/config_test.py b/tests/config_test.py new file mode 100644 index 0000000..c676c2b --- /dev/null +++ b/tests/config_test.py @@ -0,0 +1,81 @@ +"""Brooker classes to organize ldap methods. + Stuff is split in classes, like: + * Replica + * Backend + * Suffix + + You will access this from: + DSAdmin.backend.methodName() +""" + + +import config +from config import log +from config import * + +import dsadmin +from dsadmin import DSAdmin, Entry +# Test harnesses +from dsadmin_test import drop_backend, addbackend_harn +from dsadmin_test import drop_added_entries + +conn = None +added_entries = None +added_backends = None + +MOCK_REPLICA_ID = '12' +MOCK_TESTREPLICA_DN = "cn=testReplica,cn=ldbm database,cn=plugins,cn=config" + +def setup(): + # uses an existing 389 instance + # add a suffix + # add an agreement + # This setup is quite verbose but to test dsadmin method we should + # do things manually. A better solution would be to use an LDIF. 
+ global conn + conn = DSAdmin(**config.auth) + conn.verbose = True + conn.added_entries = [] + conn.added_backends = set(['o=mockbe1']) + conn.added_replicas = [] + """ + # add a backend for testing ruv and agreements + addbackend_harn(conn, 'testReplica') + + # add another backend for testing replica.add() + addbackend_harn(conn, 'testReplicaCreation') + """ + +def teardown(): + global conn + conn.config.loglevel([dsadmin.LOG_CACHE]) + conn.config.loglevel([dsadmin.LOG_CACHE], level='access') + + """ + drop_added_entries(conn) + conn.delete_s(','.join(['cn="o=testreplica"', DN_MAPPING_TREE])) + drop_backend(conn, 'o=testreplica') + #conn.delete_s('o=testreplica') + """ + +def loglevel_test(): + vals = [dsadmin.LOG_CACHE, dsadmin.LOG_REPLICA, dsadmin.LOG_CONNECT] + expected = sum(vals) + assert conn.config.loglevel(vals) == expected + ret = conn.config.get('nsslapd-errorlog-level') + assert ret == str(expected), "expected: %r got: %r" % (expected, ret) + + +def loglevel_update_test(): + vals = [dsadmin.LOG_CACHE, dsadmin.LOG_CONNECT] + e = sum(vals) + assert conn.config.loglevel(vals) == e + vals = [dsadmin.LOG_REPLICA] + ret = conn.config.loglevel(vals, update=True) + assert ret == (e + sum(vals)), "expected %s got %s" % (e + sum(vals), ret) + + +def access_loglevel_test(): + vals = [dsadmin.LOG_CACHE, dsadmin.LOG_REPLICA, dsadmin.LOG_CONNECT] + assert conn.config.loglevel(vals, level='access') == sum(vals) + diff --git a/tests/dsadmin_basic_test.py b/tests/dsadmin_basic_test.py new file mode 100644 index 0000000..7730a8e --- /dev/null +++ b/tests/dsadmin_basic_test.py @@ -0,0 +1,111 @@ +""" Testing basic functionalities of DSAdmin + + +""" +import dsadmin +from dsadmin import DSAdmin, Entry +from dsadmin import NoSuchEntryError +import ldap +from ldap import * + +from nose import SkipTest +from nose.tools import * + +import config +from config import * + +conn = None +added_entries = None + + +def setup(): + global conn + try: + conn = DSAdmin(**config.auth) + 
conn.verbose = True + conn.added_entries = [] + except SERVER_DOWN, e: + log.error("To run tests you need a working 389 instance %s" % config.auth) + raise e + + +def tearDown(): + global conn + + # reduce log level + conn.config.loglevel(0) + conn.config.loglevel(0, level='access') + + for e in conn.added_entries: + try: + conn.delete_s(e) + except ldap.NO_SUCH_OBJECT: + log.warn("entry not found %r" % e) + + +def bind_test(): + print "conn: %s" % conn + + +def setupBindDN_UID_test(): + user = { + 'binddn': 'uid=rmanager1,cn=config', + 'bindpw': 'password' + } + e = conn.setupBindDN(**user) + conn.added_entries.append(e.dn) + + assert e.dn == user['binddn'], "Bad entry: %r " % e + expected = conn.getEntry(user['binddn'], ldap.SCOPE_BASE) + assert entry_equals( + e, expected), "Mismatching entry %r vs %r" % (e, expected) + + +def setupBindDN_CN_test(): + user = { + 'binddn': 'cn=rmanager1,cn=config', + 'bindpw': 'password' + } + e = conn.setupBindDN(**user) + conn.added_entries.append(e.dn) + assert e.dn == user['binddn'], "Bad entry: %r " % e + expected = conn.getEntry(user['binddn'], ldap.SCOPE_BASE) + assert entry_equals( + e, expected), "Mismatching entry %r vs %r" % (e, expected) + + +def setupChangelog_default_test(): + e = conn.replica.changelog() + conn.added_entries.append(e.dn) + assert e.dn, "Bad changelog entry: %r " % e + assert e.getValue('nsslapd-changelogdir').endswith("changelogdb"), "Mismatching entry %r " % e.data.get('nsslapd-changelogdir') + conn.delete_s("cn=changelog5,cn=config") + + +def setupChangelog_test(): + e = conn.replica.changelog(dbname="mockChangelogDb") + conn.added_entries.append(e.dn) + assert e.dn, "Bad changelog entry: %r " % e + assert e.getValue('nsslapd-changelogdir').endswith("mockChangelogDb"), "Mismatching entry %r " % e.data.get('nsslapd-changelogdir') + conn.delete_s("cn=changelog5,cn=config") + + +def setupChangelog_full_test(): + e = conn.replica.changelog(dbname="/tmp/mockChangelogDb") + 
conn.added_entries.append(e.dn) + + assert e.dn, "Bad changelog entry: %r " % e + expect(e, 'nsslapd-changelogdir', "/tmp/mockChangelogDb") + conn.delete_s("cn=changelog5,cn=config") + + +@raises(NoSuchEntryError) +def getMTEntry_missing_test(): + e = conn.getMTEntry('o=MISSING') + + +def getMTEntry_present_test(): + suffix = 'o=addressbook16' + e = conn.getMTEntry(suffix) + assert e, "Entry should be present %s" % suffix + diff --git a/tests/dsadmin_create_remove_test.py b/tests/dsadmin_create_remove_test.py new file mode 100644 index 0000000..ac0cd75 --- /dev/null +++ b/tests/dsadmin_create_remove_test.py @@ -0,0 +1,83 @@ +""" Test creation and deletion of instances +""" +import ldap +import os +from dsadmin import DSAdmin, DN_CONFIG +from dsadmin.tools import DSAdminTools +from nose import * + +added_instances = [] + + +def setup(): + global added_instances + + +def teardown(): + global added_instances + for instance in added_instances: + cmd = "remove-ds.pl -i slapd-%s" % instance + try: + os.system(cmd) + except: + log.exception("error executing %r" % cmd) + + +def default_test(): + host = 'localhost' + port = 10200 + binddn = "cn=directory manager" + bindpw = "password" + suffix = 'dc=example,dc=com' + basedn = DN_CONFIG + scope = ldap.SCOPE_BASE + filt = "(objectclass=*)" + instance_name = ['m1', 'm2'] + + instance_config = { + 'cfgdshost': host, + 'cfgdsport': port, + 'cfgdsuser': 'admin', + 'cfgdspwd': 'admin', + 'newrootpw': 'password', + 'newhost': host, + 'newport': port, + 'newinstance': instance_name[0], + 'newsuffix': suffix, + 'setup_admin': True, + } + try: + m1 = DSAdmin(host, port, binddn, bindpw) + except: + m1 = DSAdminTools.createInstance(instance_config, verbose=1) + added_instances.append(instance_config['newinstance']) + +# filename = "%s/slapd-%s/ldif/Example.ldif" % (m1.sroot, m1.inst) +# m1.importLDIF(filename, "dc=example,dc=com", None, True) +# m1.exportLDIF('/tmp/ldif', "dc=example,dc=com", False, True) + print m1.sroot, m1.inst, 
m1.errlog + ent = m1.getEntry(basedn, scope, filt, None) + if ent: + print ent.passwordmaxage + instance_config.update({ + 'newinstance': instance_name[1], + 'newport': port + 10, + + }) + m1 = DSAdminTools.createInstance(instance_config, verbose=1) + added_instances.append(instance_config['newinstance']) +# m1.stop(True) +# m1.start(True) + cn = m1.setupBackend("dc=example2,dc=com") + rc = m1.setupSuffix("dc=example2,dc=com", cn) + entry = m1.getEntry(DN_CONFIG, ldap.SCOPE_SUBTREE, "(cn=" + cn + ")") + print "new backend entry is:" + print entry + print entry.getValues('objectclass') + print entry.OBJECTCLASS + results = m1.search_s("cn=monitor", ldap.SCOPE_SUBTREE) + print results + results = m1.getBackendsForSuffix("dc=example,dc=com") + print results + + print "done" diff --git a/tests/dsadmin_test.py b/tests/dsadmin_test.py new file mode 100644 index 0000000..fadc6dd --- /dev/null +++ b/tests/dsadmin_test.py @@ -0,0 +1,240 @@ +from nose import * +from nose.tools import * + +import config +from config import log +from config import * + +import ldap +import time +import sys +import dsadmin +from dsadmin import DSAdmin, Entry +from dsadmin import NoSuchEntryError +from dsadmin import utils +from dsadmin.tools import DSAdminTools +from subprocess import Popen + + +conn = None +added_entries = None +added_backends = None + +def harn_nolog(): + conn.config.loglevel([dsadmin.LOG_DEFAULT]) + conn.config.loglevel([dsadmin.LOG_DEFAULT], level='access') + + +def setup(): + global conn + conn = DSAdmin(**config.auth) + conn.verbose = True + conn.added_entries = [] + conn.added_backends = set(['o=mockbe2']) + conn.added_replicas = [] + harn_nolog() + +def setup_backend(): + global conn + addbackend_harn(conn, 'addressbook6') + +def teardown(): + global conn + conn.rebind() + drop_added_entries(conn) + +def drop_added_entries(conn): + while conn.added_entries: + try: + e = conn.added_entries.pop() + log.info("removing entries %r" % conn.added_backends) + conn.delete_s(e) + 
except ldap.NOT_ALLOWED_ON_NONLEAF: + log.error("Entry is not a leaf: %r" % e) + except ldap.NO_SUCH_OBJECT: + log.error("Cannot remove entry: %r" % e) + + log.info("removing backends %r" % conn.added_backends) + for suffix in conn.added_backends: + try: + drop_backend(conn, suffix) + except: + log.exception("error removing %r" % suffix) + for r in conn.added_replicas: + try: + drop_backend(conn, suffix=None, bename=r) + except: + log.exception("error removing %r" % r) + + +def drop_backend(conn, suffix, bename=None, maxnum=50): + if not bename: + bename = [x.dn for x in conn.getBackendsForSuffix(suffix)] + + if not bename: + return None + + assert bename, "Missing bename for %r" % suffix + if not hasattr(bename, '__iter__'): + bename = [','.join(['cn=%s' % bename, dsadmin.DN_LDBM])] + for be in bename: + log.debug("removing entry from %r" % be) + leaves = [x.dn for x in conn.search_s( + be, ldap.SCOPE_SUBTREE, '(objectclass=*)', ['cn'])] + # start deleting the leaves - which have the max number of "," + leaves.sort(key=lambda x: x.count(",")) + while leaves and maxnum: + # to avoid infinite loops + # limit the iterations + maxnum -= 1 + try: + log.debug("removing %s" % leaves[-1]) + conn.delete_s(leaves[-1]) + leaves.pop() + except: + leaves.insert(0, leaves.pop()) + + if not maxnum: + raise Exception("BAD") + + +# +# Tests +# + + +def addbackend_harn(conn, name, beattrs=None): + """Create the suffix o=name and its backend.""" + suffix = "o=%s" % name + e = Entry((suffix, { + 'objectclass': ['top', 'organization'], + 'o': [name] + })) + + try: + ret = conn.addSuffix(suffix, bename=name, beattrs=beattrs) + except ldap.ALREADY_EXISTS: + raise + finally: + conn.added_backends.add(suffix) + + conn.add(e) + conn.added_entries.append(e.dn) + + return ret + + +def setupBackend_ok_test(): + "setupBackend_ok calls brooker.Backend.add" + try: + be = conn.setupBackend('o=mockbe5', benamebase='mockbe5') + assert be + except ldap.ALREADY_EXISTS: + raise + finally: + 
conn.added_backends.add('o=mockbe5') + + +@raises(ldap.ALREADY_EXISTS) +def setupBackend_double_test(): + "setupBackend_double calls brooker.Backend.add" + be1 = conn.setupBackend('o=mockbe3', benamebase='mockbe3') + conn.added_backends.add('o=mockbe3') + be11 = conn.setupBackend('o=mockbe3', benamebase='mockbe3') + + +def addsuffix_test(): + addbackend_harn(conn, 'addressbook16') + conn.added_backends.add('o=addressbook16') + + +def addreplica_write_test(): + name = 'ab3' + user = { + 'binddn': 'uid=rmanager,cn=config', + 'bindpw': 'password' + } + replica = { + 'suffix': 'o=%s' % name, + 'type': dsadmin.MASTER_TYPE, + 'id': 124 + } + replica.update(user) + addbackend_harn(conn, name) + ret = conn.replicaSetupAll(replica) + conn.added_replicas.append(ret['dn']) + assert ret != -1, "Error in setup replica: %s" % ret + + +def prepare_master_replica_test(): + """prepare_master_replica -> Replica.changelog""" + user = { + 'binddn': 'uid=rmanager,cn=config', + 'bindpw': 'password' + } + conn.enableReplLogging() + e = conn.setupBindDN(**user) + conn.added_entries.append(e.dn) + + # only for Writable + e = conn.replica.changelog() + conn.added_entries.append(e.dn) + + +@with_setup(setup_backend) +def setupAgreement_test(): + consumer = MockDSAdmin() + args = { + 'suffix': "o=addressbook6", + #'bename': "userRoot", + 'binddn': "uid=rmanager,cn=config", + 'bindpw': "password", + 'rtype': dsadmin.MASTER_TYPE, + 'rid': '1234' + } + conn.replica.add(**args) + conn.added_entries.append(args['binddn']) + + dn_replica = conn.setupAgreement(consumer, args) + print dn_replica + + +def stop_start_test(): + # dunno why DSAdmin.start|stop writes to dirsrv error-log + conn.errlog = "/tmp/dsadmin-errlog" + open(conn.errlog, "w").close() + DSAdminTools.stop(conn) + log.info("server stopped") + DSAdminTools.start(conn) + log.info("server start") + time.sleep(5) + # save and restore conn settings after restart + tmp = conn.added_backends, conn.added_entries + setup() + 
conn.added_backends, conn.added_entries = tmp + assert conn.search_s( + *utils.searches['NAMINGCONTEXTS']), "Missing namingcontexts" + + +def setupSSL_test(): + ssl_args = { + 'secport': 636, + 'sourcedir': None, + 'secargs': {'nsSSLPersonalitySSL': 'localhost'}, + } + cert_dir = conn.getDseAttr('nsslapd-certdir') + assert cert_dir, "Cannot retrieve cert dir" + + log.info("Initialize the cert store with an empty password: %r", cert_dir) + fd_null = open('/dev/null', 'w') + open('%s/pin.txt' % cert_dir, 'w').close() + cmd_initialize = 'certutil -d %s -N -f %s/pin.txt' % (cert_dir, cert_dir) + Popen(cmd_initialize.split(), stderr=fd_null) + + log.info("Creating a self-signed cert for the server in %r" % cert_dir) + cmd_mkcert = 'certutil -d %s -S -n localhost -t CTu,Cu,Cu -s cn=localhost -x' % cert_dir + Popen(cmd_mkcert.split(), stdin=open("/dev/urandom"), stderr=fd_null) + + log.info("Testing ssl configuration") + ssl_args.update({'dsadmin': conn}) + DSAdminTools.setupSSL(**ssl_args) diff --git a/tests/entry_test.py b/tests/entry_test.py new file mode 100644 index 0000000..c2f2b3d --- /dev/null +++ b/tests/entry_test.py @@ -0,0 +1,87 @@ +from dsadmin import Entry +import dsadmin +from nose import SkipTest +from nose.tools import raises + +import logging +logging.basicConfig(level=logging.DEBUG) +log = logging.getLogger(__name__) + + +class TestEntry(object): + """A properly initialized Entry: + - accepts well-formed or empty dn and tuples; + - refuses empty dn + """ + def test_init_empty(self): + e = Entry('') + assert not e.dn + + def test_init_with_str(self): + e = Entry('o=pippo') + assert e.dn == 'o=pippo' + + @raises(ValueError) + def test_init_badstr(self): + # This should not be allowed + e = Entry('no equal sign here') + + def test_init_with_tuple(self): + expected = 'pippo' + given = 'o=pippo' + t = (given, { + 'o': [expected], + 'objectclass': ['organization', 'top'] + }) + e = Entry(t) + assert e.dn == given + assert expected in e.o + + def 
test_update(self): + expected = 'pluto minnie' + given = {'cn': expected} + t = ('o=pippo', { + 'o': ['pippo'], + 'objectclass': ['organization', 'top'] + }) + + e = Entry(t) + e.update(given) + assert e.cn == expected, "Bad cn: %s, expected: %s" % (e.cn, expected) + + #@SkipTest + def test_update_complex(self): + # compare two entries created with different methods + nsuffix, replid, replicatype = "dc=example,dc=com", 5, dsadmin.REPLICA_RDWR_TYPE + binddnlist, legacy = ['uid=pippo, cn=config'], 'off' + dn = "dc=example,dc=com" + entry = Entry(dn) + entry.setValues( + 'objectclass', "top", "nsds5replica", "extensibleobject") + entry.setValues('cn', "replica") + entry.setValues('nsds5replicaroot', nsuffix) + entry.setValues('nsds5replicaid', str(replid)) + entry.setValues('nsds5replicatype', str(replicatype)) + entry.setValues('nsds5flags', "1") + entry.setValues('nsds5replicabinddn', binddnlist) + entry.setValues('nsds5replicalegacyconsumer', legacy) + + uentry = Entry(( + dn, { + 'objectclass': ["top", "nsds5replica", "extensibleobject"], + 'cn': ["replica"], + }) + ) + log.debug("Entry created with dict:", uentry) + # Entry.update *replaces*, so be careful with multi-valued attrs + uentry.update({ + 'nsds5replicaroot': nsuffix, + 'nsds5replicaid': str(replid), + 'nsds5replicatype': str(replicatype), + 'nsds5flags': '1', + 'nsds5replicabinddn': binddnlist, + 'nsds5replicalegacyconsumer': legacy + }) + uentry_s, entry_s = map(str, (uentry, entry)) + assert uentry_s == entry_s, "Mismatching entries [%r] vs [%r]" % ( + uentry, entry) diff --git a/tests/replica_test.py b/tests/replica_test.py new file mode 100644 index 0000000..0e3e0c6 --- /dev/null +++ b/tests/replica_test.py @@ -0,0 +1,318 @@ +"""Brooker classes to organize ldap methods. 
+ Stuff is split in classes, like: + * Replica + * Backend + * Suffix + + You will access this from: + DSAdmin.backend.methodName() +""" + +from nose import * +from nose.tools import * + +import config +from config import log +from config import * + +import ldap +import time +import sys +import dsadmin +from dsadmin import DSAdmin, Entry +from dsadmin import NoSuchEntryError +from dsadmin import utils +from dsadmin.tools import DSAdminTools +from subprocess import Popen +from random import randint +from dsadmin.brooker import Replica +from dsadmin import MASTER_TYPE, DN_MAPPING_TREE, DN_CHANGELOG +# Test harnesses +from dsadmin_test import drop_backend, addbackend_harn +from dsadmin_test import drop_added_entries + +conn = None +added_entries = None +added_backends = None + +MOCK_REPLICA_ID = '12' + + +def setup(): + # uses an existing 389 instance + # add a suffix + # add an agreement + # This setup is quite verbose but to test dsadmin method we should + # do things manually. A better solution would be to use an LDIF. 
+ global conn + conn = DSAdmin(**config.auth) + conn.verbose = True + conn.added_entries = [] + conn.added_backends = set(['o=mockbe1']) + conn.added_replicas = [] + + # add a backend for testing ruv and agreements + addbackend_harn(conn, 'testReplica') + + # add another backend for testing replica.add() + addbackend_harn(conn, 'testReplicaCreation') + + # replication needs changelog + conn.replica.changelog() + # add rmanager entry + try: + conn.add_s(Entry((DN_RMANAGER, { + 'objectclass': "top person inetOrgPerson".split(), + 'sn': ["bind dn pseudo user"], + 'cn': 'replication manager', + 'uid': 'rmanager' + })) + ) + conn.added_entries.append(DN_RMANAGER) + except ldap.ALREADY_EXISTS: + pass + + # add a master replica entry + # to test ruv and agreements + replica_dn = ','.join( + ['cn=replica', 'cn="o=testReplica"', DN_MAPPING_TREE]) + replica_e = Entry(replica_dn) + replica_e.update({ + 'objectclass': ["top", "nsds5replica", "extensibleobject"], + 'cn': "replica", + 'nsds5replicaroot': 'o=testReplica', + 'nsds5replicaid': MOCK_REPLICA_ID, + 'nsds5replicatype': '3', + 'nsds5flags': '1', + 'nsds5replicabinddn': DN_RMANAGER + }) + try: + conn.add_s(replica_e) + except ldap.ALREADY_EXISTS: + pass + conn.added_entries.append(replica_dn) + + agreement_dn = ','.join(('cn=testAgreement', replica_dn)) + agreement_e = Entry(agreement_dn) + agreement_e.update({ + 'objectclass': ["top", "nsds5replicationagreement"], + 'cn': 'testAgreement', + 'nsds5replicahost': 'localhost', + 'nsds5replicaport': '22389', + 'nsds5replicatimeout': '120', + 'nsds5replicabinddn': DN_RMANAGER, + 'nsds5replicacredentials': 'password', + 'nsds5replicabindmethod': 'simple', + 'nsds5replicaroot': 'o=testReplica', + 'nsds5replicaupdateschedule': '0000-2359 0123456', + 'description': 'testAgreement' + }) + try: + conn.add_s(agreement_e) + except ldap.ALREADY_EXISTS: + pass + conn.added_entries.append(agreement_dn) + conn.agreement_dn = agreement_dn + + +def teardown(): + global conn + 
drop_added_entries(conn) + conn.delete_s(','.join(['cn="o=testreplica"', DN_MAPPING_TREE])) + drop_backend(conn, 'o=testreplica') + conn.delete_s('o=testreplica') + + +def changelog(): + changelog_e = conn.replica.changelog(dbname='foo') + assert changelog_e.data['nsslapd-changelogdir'].endswith('foo') + + +def changelog_default_test(): + e = conn.replica.changelog() + conn.added_entries.append(e.dn) + assert e.dn, "Bad changelog entry: %r " % e + assert e.getValue('nsslapd-changelogdir').endswith("changelogdb"), "Mismatching entry %r " % e.data.get('nsslapd-changelogdir') + conn.delete_s("cn=changelog5,cn=config") + + +def changelog_customdb_test(): + e = conn.replica.changelog(dbname="mockChangelogDb") + conn.added_entries.append(e.dn) + assert e.dn, "Bad changelog entry: %r " % e + assert e.getValue('nsslapd-changelogdir').endswith("mockChangelogDb"), "Mismatching entry %r " % e.data.get('nsslapd-changelogdir') + conn.delete_s("cn=changelog5,cn=config") + + +def changelog_full_path_test(): + e = conn.replica.changelog(dbname="/tmp/mockChangelogDb") + conn.added_entries.append(e.dn) + + assert e.dn, "Bad changelog entry: %r " % e + expect(e, 'nsslapd-changelogdir', "/tmp/mockChangelogDb") + conn.delete_s("cn=changelog5,cn=config") + + +def check_init_test(): + raise NotImplementedError() + + +def disable_logging_test(): + raise NotImplementedError() + + +def enable_logging_test(): + raise NotImplementedError() + + +def status_test(): + status = conn.replica.status(conn.agreement_dn) + log.info(status) + assert status + + +def list_test(): + # was get_entries_test(): + replicas = conn.replica.list() + assert any(['testreplica' in x.dn.lower() for x in replicas]) + + +def ruv_test(): + ruv = conn.replica.ruv(suffix='o=testReplica') + assert ruv, "Missing RUV" + assert len(ruv.rid), "Missing RID" + assert int(MOCK_REPLICA_ID) in ruv.rid.keys() + + +@raises(ldap.NO_SUCH_OBJECT) +def ruv_missing_test(): + ruv = conn.replica.ruv(suffix='o=MISSING') + assert ruv, 
"Missing RUV" + assert len(ruv.rid), "Missing RID" + assert int(MOCK_REPLICA_ID) in ruv.rid.keys() + + +def start_test(): + raise NotImplementedError() + + +def stop_test(): + raise NotImplementedError() + + +def restart_test(): + raise NotImplementedError() + + +def start_async_test(): + raise NotImplementedError() + + +def wait_for_init_test(): + raise NotImplementedError() + + +def setup_agreement_default_test(): + user = { + 'binddn': DN_RMANAGER, + 'bindpw': "password" + } + params = {'consumer': MockDSAdmin(), 'suffix': "o=testReplica"} + params.update(user) + + agreement_dn = conn.replica.agreement_add(**params) + conn.added_entries.append(agreement_dn) + +@raises(ldap.ALREADY_EXISTS) +def setup_agreement_duplicate_test(): + user = { + 'binddn': DN_RMANAGER, + 'bindpw': "password" + } + params = { + 'consumer': MockDSAdmin(), + 'suffix': "o=testReplica", + 'cn_format': 'testAgreement', + 'description_format': 'testAgreement' + } + params.update(user) + conn.replica.agreement_add(**params) + + +def setup_agreement_test(): + user = { + 'binddn': DN_RMANAGER, + 'bindpw': "password" + } + params = {'consumer': MockDSAdmin(), 'suffix': "o=testReplica"} + params.update(user) + + conn.replica.agreement_add(**params) + # timeout=120, auto_init=False, bindmethod='simple', starttls=False, args=None): + raise NotImplementedError() + +def setup_agreement_fractional_test(): + # TODO: fractiona replicates only a subset of attributes + # + user = { + 'binddn': DN_RMANAGER, + 'bindpw': "password" + } + params = {'consumer': MockDSAdmin(), 'suffix': "o=testReplica"} + params.update(user) + + #conn.replica.agreement_add(**params) + #cn_format=r'meTo_%s:%s', description_format=r'me to %s:%s', timeout=120, auto_init=False, bindmethod='simple', starttls=False, args=None): + raise NotImplementedError() + + +def find_agreements_test(): + agreements = conn.replica.agreements(dn=False) + assert any(['testagreement' in x.dn.lower( + ) for x in agreements]), "Missing agreement" + + 
+def find_agreements_dn_test(): + agreements_dn = conn.replica.agreements() + assert any(['testagreement' in x.lower( + ) for x in agreements_dn]), "Missing agreement" + + +def setup_replica_test(): + args = { + 'suffix': "o=testReplicaCreation", + 'binddn': DN_RMANAGER, + 'bindpw': "password", + 'rtype': dsadmin.MASTER_TYPE, + 'rid': MOCK_REPLICA_ID + } + # create a replica entry + replica_e = conn.replica.add(**args) + assert 'dn' in replica_e, "Missing dn in replica" + conn.added_entries.append(replica_e['dn']) + + +def setup_replica_hub_test(): + args = { + 'suffix': "o=testReplicaCreation", + 'binddn': DN_RMANAGER, + 'bindpw': "password", + 'rtype': dsadmin.HUB_TYPE, + 'rid': MOCK_REPLICA_ID + } + # create a replica entry + replica_e = conn.replica.add(**args) + assert 'dn' in replica_e, "Missing dn in replica" + conn.added_entries.append(replica_e['dn']) + + +def setup_replica_referrals_test(): + #tombstone_purgedelay=None, purgedelay=None, referrals=None, legacy=False + raise NotImplementedError() + + +def setup_all_replica_test(): + raise NotImplementedError() + +def replica_keep_in_sync_test(): + raise NotImplementedError() diff --git a/tests/utils_test.py b/tests/utils_test.py new file mode 100644 index 0000000..36f6c87 --- /dev/null +++ b/tests/utils_test.py @@ -0,0 +1,124 @@ + +from nose import * +import dsadmin +from dsadmin.utils import * + + +def normalizeDN_test(): + test = [ + (r'dc=example,dc=com', r'dc=example,dc=com'), + (r'dc=example, dc=com', r'dc=example,dc=com'), + (r'cn="dc=example,dc=com",cn=config', + 'cn=dc\\=example\\,dc\\=com,cn=config'), + ] + for k, v in test: + r = normalizeDN(k) + assert r == v, "Mismatch %r vs %r" % (r, v) + + +def escapeDNValue_test(): + test = [(r'"dc=example, dc=com"', r'\"dc\=example\,\ dc\=com\"')] + for k, v in test: + r = escapeDNValue(k) + assert r == v, "Mismatch %r vs %r" % (r, v) + + +def escapeDNFiltValue_test(): + test = [(r'"dc=example, dc=com"', + '\\22dc\\3dexample\\2c\\20dc\\3dcom\\22')] + for k, 
v in test: + r = escapeDNFiltValue(k) + assert r == v, "Mismatch %r vs %r" % (r, v) + +# +# socket related functions +# +import socket + + +def isLocalHost_test(): + test = [ + ('localhost', True), + ('localhost.localdomain', True), + (socket.gethostname(), True), + ('www.google.it', False)] + for k, v in test: + r = isLocalHost(k) + assert r == v, "Mismatch %r vs %r on %r" % (r, v, k) + + +def update_newhost_with_fqdn_test(): + test = [ + ({'newhost':'localhost'}, ('localhost.localdomain', True)), + ({'newhost': 'remote'}, ('remote', False)), + ] + for k, v in test: + old = k.copy() + expected_host, expected_r = v + r = update_newhost_with_fqdn(k) + assert expected_r == r, "Mismatch %r vs %r for %r" % ( + r, expected_r, old) + assert expected_host == k['newhost'], "Mismatch %r vs %r for %r" % ( + k['newhost'], expected_host, old) + + +def formatInfData_test(): + ret = formatInfData({ + 'newhost': 'localhost.localdomain', + 'newuserid': 'dirsrv', + 'newport' : 12345, + 'newrootdn': 'cn=directory manager', + 'newrootpw': 'password', + 'newsuffix': 'o=base1', + 'newinstance': 'rpolli1' + }) + log.info("content: %r" % ret) + + +def formatInfData_withadmin_test(): + instance_params = { + 'newhost': 'localhost.localdomain', + 'newuserid': 'dirsrv', + 'newport' : 12346, + 'newrootdn': 'cn=directory manager', + 'newrootpw': 'password', + 'newsuffix': 'o=base1', + 'newinstance': 'rpolli2', + } + admin_params = { + 'have_admin': True, + 'create_admin': True, + 'admin_domain': 'example.com', + 'cfgdshost': 'localhost', + 'cfgdsport': 12346, + 'cfgdsuser': 'admin', + 'cfgdspwd': 'admin', + + } + instance_params.update(admin_params) + ret = formatInfData(instance_params) + log.info("content: %r" % ret) + +def formatInfData_withconfigserver_test(): + instance_params = { + 'newhost': 'localhost.localdomain', + 'newuserid': 'dirsrv', + 'newport' : 12346, + 'newrootdn': 'cn=directory manager', + 'newrootpw': 'password', + 'newsuffix': 'o=base1', + 'newinstance': 'rpolli2', + } + 
admin_params = { + 'have_admin': True, + 'cfgdshost': 'localhost', + 'cfgdsport': 12346, + 'cfgdsuser': 'admin', + 'cfgdspwd': 'admin', + 'admin_domain': 'example.com', + + } + instance_params.update(admin_params) + ret = formatInfData(instance_params) + log.info("content: %r" % ret) + -- 1.7.11.7