report.html

Report generated on 22-Oct-2020 at 23:19:01 by pytest-html v2.1.1

Environment

389-ds-base 1.4.4.4-20201022git1f5aecb.fc32
Packages {"pluggy": "0.13.1", "py": "1.9.0", "pytest": "5.4.3"}
Platform Linux-5.7.7-200.fc32.x86_64-x86_64-with-glibc2.2.5
Plugins {"html": "2.1.1", "libfaketime": "0.1.2", "metadata": "1.10.0"}
Python 3.8.6
cyrus-sasl 2.1.27-4.fc32
nspr 4.29.0-1.fc32
nss 3.57.0-1.fc32
openldap 2.4.47-5.fc32

Summary

2038 tests ran in 14517.87 seconds.

1954 passed, 49 skipped, 57 failed, 4 errors, 19 expected failures, 8 unexpected passes

Results

Result Test Duration Links
Error suites/replication/changelog_test.py::test_changelog_maxage::setup 6.93
topo = <lib389.topologies.TopologyMain object at 0x7f4d2d3a54c0>

@pytest.fixture(scope="module")
def changelog_init(topo):
""" changlog dir is not configuarable, just
enable cn=Retro Changelog Plugin,cn=plugins,cn=config
"""
log.info('Testing Ticket 47669 - Test duration syntax in the changelogs')

# bind as directory manager
topo.ms["master1"].log.info("Bind as %s" % DN_DM)
topo.ms["master1"].simple_bind_s(DN_DM, PASSWORD)

if not ds_supports_new_changelog():
try:
changelogdir = os.path.join(os.path.dirname(topo.ms["master1"].dbdir), 'changelog')
topo.ms["master1"].modify_s(CHANGELOG, [(ldap.MOD_REPLACE, 'nsslapd-changelogdir',
ensure_bytes(changelogdir))])
except ldap.LDAPError as e:
log.error('Failed to modify ' + CHANGELOG + ': error {}'.format(get_ldap_error_msg(e,'desc')))
assert False

try:
topo.ms["master1"].modify_s(RETROCHANGELOG, [(ldap.MOD_REPLACE, 'nsslapd-pluginEnabled', b'on')])
except ldap.LDAPError as e:
log.error('Failed to enable ' + RETROCHANGELOG + ': error {}'.format(get_ldap_error_msg(e, 'desc')))
assert False

# restart the server
> topo.ms["master1"].restart(timeout=10)

suites/replication/changelog_test.py:166:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:1276: in restart
self.start(timeout, post_open)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:1135: in start
subprocess.check_output(["systemctl", "start", "dirsrv@%s" % self.serverid], stderr=subprocess.STDOUT)
/usr/lib64/python3.8/subprocess.py:411: in check_output
return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

input = None, capture_output = False, timeout = None, check = True
popenargs = (['systemctl', 'start', 'dirsrv@master1'],)
kwargs = {'stderr': -2, 'stdout': -1}
process = <subprocess.Popen object at 0x7f4d2daa84f0>
stdout = b'Job for dirsrv@master1.service failed because the control process exited with error code.\nSee "systemctl status dirsrv@master1.service" and "journalctl -xe" for details.\n'
stderr = None, retcode = 1

def run(*popenargs,
input=None, capture_output=False, timeout=None, check=False, **kwargs):
"""Run command with arguments and return a CompletedProcess instance.

The returned instance will have attributes args, returncode, stdout and
stderr. By default, stdout and stderr are not captured, and those attributes
will be None. Pass stdout=PIPE and/or stderr=PIPE in order to capture them.

If check is True and the exit code was non-zero, it raises a
CalledProcessError. The CalledProcessError object will have the return code
in the returncode attribute, and output & stderr attributes if those streams
were captured.

If timeout is given, and the process takes too long, a TimeoutExpired
exception will be raised.

There is an optional argument "input", allowing you to
pass bytes or a string to the subprocess's stdin. If you use this argument
you may not also use the Popen constructor's "stdin" argument, as
it will be used internally.

By default, all communication is in bytes, and therefore any "input" should
be bytes, and the stdout and stderr will be bytes. If in text mode, any
"input" should be a string, and stdout and stderr will be strings decoded
according to locale encoding, or by "encoding" if set. Text mode is
triggered by setting any of text, encoding, errors or universal_newlines.

The other arguments are the same as for the Popen constructor.
"""
if input is not None:
if kwargs.get('stdin') is not None:
raise ValueError('stdin and input arguments may not both be used.')
kwargs['stdin'] = PIPE

if capture_output:
if kwargs.get('stdout') is not None or kwargs.get('stderr') is not None:
raise ValueError('stdout and stderr arguments may not be used '
'with capture_output.')
kwargs['stdout'] = PIPE
kwargs['stderr'] = PIPE

with Popen(*popenargs, **kwargs) as process:
try:
stdout, stderr = process.communicate(input, timeout=timeout)
except TimeoutExpired as exc:
process.kill()
if _mswindows:
# Windows accumulates the output in a single blocking
# read() call run on child threads, with the timeout
# being done in a join() on those threads. communicate()
# _after_ kill() is required to collect that and add it
# to the exception.
exc.stdout, exc.stderr = process.communicate()
else:
# POSIX _communicate already populated the output so
# far into the TimeoutExpired exception.
process.wait()
raise
except: # Including KeyboardInterrupt, communicate handled that.
process.kill()
# We don't call process.wait() as .__exit__ does that for us.
raise
retcode = process.poll()
if check and retcode:
> raise CalledProcessError(retcode, process.args,
output=stdout, stderr=stderr)
E subprocess.CalledProcessError: Command '['systemctl', 'start', 'dirsrv@master1']' returned non-zero exit status 1.

/usr/lib64/python3.8/subprocess.py:512: CalledProcessError
-------------------------------Captured log setup-------------------------------
INFO  tests.suites.replication.changelog_test:changelog_test.py:144 Testing Ticket 47669 - Test duration syntax in the changelogs INFO  lib389:changelog_test.py:147 Bind as cn=Directory Manager
Error suites/replication/changelog_test.py::test_ticket47669_changelog_triminterval::setup 0.00
topo = <lib389.topologies.TopologyMain object at 0x7f4d2d3a54c0>

@pytest.fixture(scope="module")
def changelog_init(topo):
""" changlog dir is not configuarable, just
enable cn=Retro Changelog Plugin,cn=plugins,cn=config
"""
log.info('Testing Ticket 47669 - Test duration syntax in the changelogs')

# bind as directory manager
topo.ms["master1"].log.info("Bind as %s" % DN_DM)
topo.ms["master1"].simple_bind_s(DN_DM, PASSWORD)

if not ds_supports_new_changelog():
try:
changelogdir = os.path.join(os.path.dirname(topo.ms["master1"].dbdir), 'changelog')
topo.ms["master1"].modify_s(CHANGELOG, [(ldap.MOD_REPLACE, 'nsslapd-changelogdir',
ensure_bytes(changelogdir))])
except ldap.LDAPError as e:
log.error('Failed to modify ' + CHANGELOG + ': error {}'.format(get_ldap_error_msg(e,'desc')))
assert False

try:
topo.ms["master1"].modify_s(RETROCHANGELOG, [(ldap.MOD_REPLACE, 'nsslapd-pluginEnabled', b'on')])
except ldap.LDAPError as e:
log.error('Failed to enable ' + RETROCHANGELOG + ': error {}'.format(get_ldap_error_msg(e, 'desc')))
assert False

# restart the server
> topo.ms["master1"].restart(timeout=10)

suites/replication/changelog_test.py:166:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:1276: in restart
self.start(timeout, post_open)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:1135: in start
subprocess.check_output(["systemctl", "start", "dirsrv@%s" % self.serverid], stderr=subprocess.STDOUT)
/usr/lib64/python3.8/subprocess.py:411: in check_output
return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

input = None, capture_output = False, timeout = None, check = True
popenargs = (['systemctl', 'start', 'dirsrv@master1'],)
kwargs = {'stderr': -2, 'stdout': -1}
process = <subprocess.Popen object at 0x7f4d2daa84f0>
stdout = b'Job for dirsrv@master1.service failed because the control process exited with error code.\nSee "systemctl status dirsrv@master1.service" and "journalctl -xe" for details.\n'
stderr = None, retcode = 1

def run(*popenargs,
input=None, capture_output=False, timeout=None, check=False, **kwargs):
"""Run command with arguments and return a CompletedProcess instance.

The returned instance will have attributes args, returncode, stdout and
stderr. By default, stdout and stderr are not captured, and those attributes
will be None. Pass stdout=PIPE and/or stderr=PIPE in order to capture them.

If check is True and the exit code was non-zero, it raises a
CalledProcessError. The CalledProcessError object will have the return code
in the returncode attribute, and output & stderr attributes if those streams
were captured.

If timeout is given, and the process takes too long, a TimeoutExpired
exception will be raised.

There is an optional argument "input", allowing you to
pass bytes or a string to the subprocess's stdin. If you use this argument
you may not also use the Popen constructor's "stdin" argument, as
it will be used internally.

By default, all communication is in bytes, and therefore any "input" should
be bytes, and the stdout and stderr will be bytes. If in text mode, any
"input" should be a string, and stdout and stderr will be strings decoded
according to locale encoding, or by "encoding" if set. Text mode is
triggered by setting any of text, encoding, errors or universal_newlines.

The other arguments are the same as for the Popen constructor.
"""
if input is not None:
if kwargs.get('stdin') is not None:
raise ValueError('stdin and input arguments may not both be used.')
kwargs['stdin'] = PIPE

if capture_output:
if kwargs.get('stdout') is not None or kwargs.get('stderr') is not None:
raise ValueError('stdout and stderr arguments may not be used '
'with capture_output.')
kwargs['stdout'] = PIPE
kwargs['stderr'] = PIPE

with Popen(*popenargs, **kwargs) as process:
try:
stdout, stderr = process.communicate(input, timeout=timeout)
except TimeoutExpired as exc:
process.kill()
if _mswindows:
# Windows accumulates the output in a single blocking
# read() call run on child threads, with the timeout
# being done in a join() on those threads. communicate()
# _after_ kill() is required to collect that and add it
# to the exception.
exc.stdout, exc.stderr = process.communicate()
else:
# POSIX _communicate already populated the output so
# far into the TimeoutExpired exception.
process.wait()
raise
except: # Including KeyboardInterrupt, communicate handled that.
process.kill()
# We don't call process.wait() as .__exit__ does that for us.
raise
retcode = process.poll()
if check and retcode:
> raise CalledProcessError(retcode, process.args,
output=stdout, stderr=stderr)
E subprocess.CalledProcessError: Command '['systemctl', 'start', 'dirsrv@master1']' returned non-zero exit status 1.

/usr/lib64/python3.8/subprocess.py:512: CalledProcessError
Error suites/replication/changelog_test.py::test_retrochangelog_maxage::setup 0.00
topo = <lib389.topologies.TopologyMain object at 0x7f4d2d3a54c0>

@pytest.fixture(scope="module")
def changelog_init(topo):
""" changlog dir is not configuarable, just
enable cn=Retro Changelog Plugin,cn=plugins,cn=config
"""
log.info('Testing Ticket 47669 - Test duration syntax in the changelogs')

# bind as directory manager
topo.ms["master1"].log.info("Bind as %s" % DN_DM)
topo.ms["master1"].simple_bind_s(DN_DM, PASSWORD)

if not ds_supports_new_changelog():
try:
changelogdir = os.path.join(os.path.dirname(topo.ms["master1"].dbdir), 'changelog')
topo.ms["master1"].modify_s(CHANGELOG, [(ldap.MOD_REPLACE, 'nsslapd-changelogdir',
ensure_bytes(changelogdir))])
except ldap.LDAPError as e:
log.error('Failed to modify ' + CHANGELOG + ': error {}'.format(get_ldap_error_msg(e,'desc')))
assert False

try:
topo.ms["master1"].modify_s(RETROCHANGELOG, [(ldap.MOD_REPLACE, 'nsslapd-pluginEnabled', b'on')])
except ldap.LDAPError as e:
log.error('Failed to enable ' + RETROCHANGELOG + ': error {}'.format(get_ldap_error_msg(e, 'desc')))
assert False

# restart the server
> topo.ms["master1"].restart(timeout=10)

suites/replication/changelog_test.py:166:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:1276: in restart
self.start(timeout, post_open)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:1135: in start
subprocess.check_output(["systemctl", "start", "dirsrv@%s" % self.serverid], stderr=subprocess.STDOUT)
/usr/lib64/python3.8/subprocess.py:411: in check_output
return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

input = None, capture_output = False, timeout = None, check = True
popenargs = (['systemctl', 'start', 'dirsrv@master1'],)
kwargs = {'stderr': -2, 'stdout': -1}
process = <subprocess.Popen object at 0x7f4d2daa84f0>
stdout = b'Job for dirsrv@master1.service failed because the control process exited with error code.\nSee "systemctl status dirsrv@master1.service" and "journalctl -xe" for details.\n'
stderr = None, retcode = 1

def run(*popenargs,
input=None, capture_output=False, timeout=None, check=False, **kwargs):
"""Run command with arguments and return a CompletedProcess instance.

The returned instance will have attributes args, returncode, stdout and
stderr. By default, stdout and stderr are not captured, and those attributes
will be None. Pass stdout=PIPE and/or stderr=PIPE in order to capture them.

If check is True and the exit code was non-zero, it raises a
CalledProcessError. The CalledProcessError object will have the return code
in the returncode attribute, and output & stderr attributes if those streams
were captured.

If timeout is given, and the process takes too long, a TimeoutExpired
exception will be raised.

There is an optional argument "input", allowing you to
pass bytes or a string to the subprocess's stdin. If you use this argument
you may not also use the Popen constructor's "stdin" argument, as
it will be used internally.

By default, all communication is in bytes, and therefore any "input" should
be bytes, and the stdout and stderr will be bytes. If in text mode, any
"input" should be a string, and stdout and stderr will be strings decoded
according to locale encoding, or by "encoding" if set. Text mode is
triggered by setting any of text, encoding, errors or universal_newlines.

The other arguments are the same as for the Popen constructor.
"""
if input is not None:
if kwargs.get('stdin') is not None:
raise ValueError('stdin and input arguments may not both be used.')
kwargs['stdin'] = PIPE

if capture_output:
if kwargs.get('stdout') is not None or kwargs.get('stderr') is not None:
raise ValueError('stdout and stderr arguments may not be used '
'with capture_output.')
kwargs['stdout'] = PIPE
kwargs['stderr'] = PIPE

with Popen(*popenargs, **kwargs) as process:
try:
stdout, stderr = process.communicate(input, timeout=timeout)
except TimeoutExpired as exc:
process.kill()
if _mswindows:
# Windows accumulates the output in a single blocking
# read() call run on child threads, with the timeout
# being done in a join() on those threads. communicate()
# _after_ kill() is required to collect that and add it
# to the exception.
exc.stdout, exc.stderr = process.communicate()
else:
# POSIX _communicate already populated the output so
# far into the TimeoutExpired exception.
process.wait()
raise
except: # Including KeyboardInterrupt, communicate handled that.
process.kill()
# We don't call process.wait() as .__exit__ does that for us.
raise
retcode = process.poll()
if check and retcode:
> raise CalledProcessError(retcode, process.args,
output=stdout, stderr=stderr)
E subprocess.CalledProcessError: Command '['systemctl', 'start', 'dirsrv@master1']' returned non-zero exit status 1.

/usr/lib64/python3.8/subprocess.py:512: CalledProcessError
Error suites/replication/changelog_test.py::test_retrochangelog_trimming_crash::setup 0.00
topo = <lib389.topologies.TopologyMain object at 0x7f4d2d3a54c0>

@pytest.fixture(scope="module")
def changelog_init(topo):
""" changlog dir is not configuarable, just
enable cn=Retro Changelog Plugin,cn=plugins,cn=config
"""
log.info('Testing Ticket 47669 - Test duration syntax in the changelogs')

# bind as directory manager
topo.ms["master1"].log.info("Bind as %s" % DN_DM)
topo.ms["master1"].simple_bind_s(DN_DM, PASSWORD)

if not ds_supports_new_changelog():
try:
changelogdir = os.path.join(os.path.dirname(topo.ms["master1"].dbdir), 'changelog')
topo.ms["master1"].modify_s(CHANGELOG, [(ldap.MOD_REPLACE, 'nsslapd-changelogdir',
ensure_bytes(changelogdir))])
except ldap.LDAPError as e:
log.error('Failed to modify ' + CHANGELOG + ': error {}'.format(get_ldap_error_msg(e,'desc')))
assert False

try:
topo.ms["master1"].modify_s(RETROCHANGELOG, [(ldap.MOD_REPLACE, 'nsslapd-pluginEnabled', b'on')])
except ldap.LDAPError as e:
log.error('Failed to enable ' + RETROCHANGELOG + ': error {}'.format(get_ldap_error_msg(e, 'desc')))
assert False

# restart the server
> topo.ms["master1"].restart(timeout=10)

suites/replication/changelog_test.py:166:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:1276: in restart
self.start(timeout, post_open)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:1135: in start
subprocess.check_output(["systemctl", "start", "dirsrv@%s" % self.serverid], stderr=subprocess.STDOUT)
/usr/lib64/python3.8/subprocess.py:411: in check_output
return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

input = None, capture_output = False, timeout = None, check = True
popenargs = (['systemctl', 'start', 'dirsrv@master1'],)
kwargs = {'stderr': -2, 'stdout': -1}
process = <subprocess.Popen object at 0x7f4d2daa84f0>
stdout = b'Job for dirsrv@master1.service failed because the control process exited with error code.\nSee "systemctl status dirsrv@master1.service" and "journalctl -xe" for details.\n'
stderr = None, retcode = 1

def run(*popenargs,
input=None, capture_output=False, timeout=None, check=False, **kwargs):
"""Run command with arguments and return a CompletedProcess instance.

The returned instance will have attributes args, returncode, stdout and
stderr. By default, stdout and stderr are not captured, and those attributes
will be None. Pass stdout=PIPE and/or stderr=PIPE in order to capture them.

If check is True and the exit code was non-zero, it raises a
CalledProcessError. The CalledProcessError object will have the return code
in the returncode attribute, and output & stderr attributes if those streams
were captured.

If timeout is given, and the process takes too long, a TimeoutExpired
exception will be raised.

There is an optional argument "input", allowing you to
pass bytes or a string to the subprocess's stdin. If you use this argument
you may not also use the Popen constructor's "stdin" argument, as
it will be used internally.

By default, all communication is in bytes, and therefore any "input" should
be bytes, and the stdout and stderr will be bytes. If in text mode, any
"input" should be a string, and stdout and stderr will be strings decoded
according to locale encoding, or by "encoding" if set. Text mode is
triggered by setting any of text, encoding, errors or universal_newlines.

The other arguments are the same as for the Popen constructor.
"""
if input is not None:
if kwargs.get('stdin') is not None:
raise ValueError('stdin and input arguments may not both be used.')
kwargs['stdin'] = PIPE

if capture_output:
if kwargs.get('stdout') is not None or kwargs.get('stderr') is not None:
raise ValueError('stdout and stderr arguments may not be used '
'with capture_output.')
kwargs['stdout'] = PIPE
kwargs['stderr'] = PIPE

with Popen(*popenargs, **kwargs) as process:
try:
stdout, stderr = process.communicate(input, timeout=timeout)
except TimeoutExpired as exc:
process.kill()
if _mswindows:
# Windows accumulates the output in a single blocking
# read() call run on child threads, with the timeout
# being done in a join() on those threads. communicate()
# _after_ kill() is required to collect that and add it
# to the exception.
exc.stdout, exc.stderr = process.communicate()
else:
# POSIX _communicate already populated the output so
# far into the TimeoutExpired exception.
process.wait()
raise
except: # Including KeyboardInterrupt, communicate handled that.
process.kill()
# We don't call process.wait() as .__exit__ does that for us.
raise
retcode = process.poll()
if check and retcode:
> raise CalledProcessError(retcode, process.args,
output=stdout, stderr=stderr)
E subprocess.CalledProcessError: Command '['systemctl', 'start', 'dirsrv@master1']' returned non-zero exit status 1.

/usr/lib64/python3.8/subprocess.py:512: CalledProcessError
Failed suites/ds_logs/ds_logs_test.py::test_log_base_dn_when_invalid_attr_request 3.37
topology_st = <lib389.topologies.TopologyMain object at 0x7f4d302dda30>
disable_access_log_buffering = <function disable_access_log_buffering at 0x7f4d3184d4c0>

@pytest.mark.xfail(ds_is_older('1.3.10.1'), reason="May fail because of bug 1662461")
@pytest.mark.bz1662461
@pytest.mark.ds50428
@pytest.mark.ds49969
def test_log_base_dn_when_invalid_attr_request(topology_st, disable_access_log_buffering):
"""Test that DS correctly logs the base dn when a search with invalid attribute request is performed

:id: 859de962-c261-4ffb-8705-97bceab1ba2c
:setup: Standalone instance
:steps:
1. Disable the accesslog-logbuffering config parameter
2. Delete the previous access log
3. Perform a base search on the DEFAULT_SUFFIX, using invalid "" "" attribute request
4. Check the access log file for 'invalid attribute request'
5. Check the access log file for 'SRCH base="\(null\)"'
6. Check the access log file for 'SRCH base="DEFAULT_SUFFIX"'
:expectedresults:
1. Operations are visible in the access log in real time
2. Fresh new access log is created
3. The search operation raises a Protocol error
4. The access log should have an 'invalid attribute request' message
5. The access log should not have "\(null\)" as value for the Search base dn
6. The access log should have the value of DEFAULT_SUFFIX as Search base dn
"""

entry = DSLdapObject(topology_st.standalone, DEFAULT_SUFFIX)

log.info('delete the previous access logs to get a fresh new one')
topology_st.standalone.deleteAccessLogs()

log.info("Search the default suffix, with invalid '\"\" \"\"' attribute request")
log.info("A Protocol error exception should be raised, see https://github.com/389ds/389-ds-base/issues/3028")
# A ldap.PROTOCOL_ERROR exception is expected
with pytest.raises(ldap.PROTOCOL_ERROR):
> assert entry.get_attrs_vals_utf8(['', ''])
E Failed: DID NOT RAISE <class 'ldap.PROTOCOL_ERROR'>

suites/ds_logs/ds_logs_test.py:891: Failed
-------------------------------Captured log setup-------------------------------
INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:200 Disable access log buffering
-------------------------------Captured log call--------------------------------
INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:884 delete the previous access logs to get a fresh new one INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:887 Search the default suffix, with invalid '"" ""' attribute request INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:888 A Protocol error exception should be raised, see https://github.com/389ds/389-ds-base/issues/3028
Failed suites/dynamic_plugins/dynamic_plugins_test.py::test_acceptance 17.56
topology_m2 = <lib389.topologies.TopologyMain object at 0x7f4d2ffaf490>

def test_acceptance(topology_m2):
"""Exercise each plugin and its main features, while
changing the configuration without restarting the server.

:id: 96136538-0151-4b09-9933-0e0cbf2c786c
:setup: 2 Master Instances
:steps:
1. Pause all replication
2. Set nsslapd-dynamic-plugins to on
3. Try to update LDBM config entry
4. Go through all plugin basic functionality
5. Resume replication
6. Go through all plugin basic functionality again
7. Check that data in sync and replication is working
:expectedresults:
1. Success
2. Success
3. Success
4. Success
5. Success
6. Success
7. Success
"""

m1 = topology_m2.ms["master1"]
msg = ' (no replication)'
replication_run = False

# First part of the test should be without replication
topology_m2.pause_all_replicas()

# First enable dynamic plugins
m1.config.replace('nsslapd-dynamic-plugins', 'on')

# Test that critical plugins can be updated even though the change might not be applied
ldbm_config = LDBMConfig(m1)
ldbm_config.replace('description', 'test')

while True:
# First run the tests with replication disabled, then rerun them with replication set up

############################################################################
# Test plugin functionality
############################################################################

log.info('####################################################################')
log.info('Testing Dynamic Plugins Functionality' + msg + '...')
log.info('####################################################################\n')

> acceptance_test.check_all_plugins(topology_m2)

suites/dynamic_plugins/dynamic_plugins_test.py:121:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
suites/plugins/acceptance_test.py:1807: in check_all_plugins
func(topo, args)
suites/plugins/acceptance_test.py:1588: in test_retrocl
plugin.enable()
/usr/local/lib/python3.8/site-packages/lib389/plugins.py:58: in enable
self.set('nsslapd-pluginEnabled', 'on')
/usr/local/lib/python3.8/site-packages/lib389/_mapped_object.py:446: in set
return self._instance.modify_ext_s(self._dn, [(action, key, value)],
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:613: in modify_ext_s
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all=1,timeout=self.timeout)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:764: in result3
resp_type, resp_data, resp_msgid, decoded_resp_ctrls, retoid, retval = self.result4(
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:774: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:340: in _ldap_call
reraise(exc_type, exc_value, exc_traceback)
/usr/local/lib64/python3.8/site-packages/ldap/compat.py:46: in reraise
raise exc_value
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d302ce3a0>
func = <built-in method result4 of LDAP object at 0x7f4d30261e10>
args = (411, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
exc_type = None, exc_value = None, exc_traceback = None

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E ldap.UNWILLING_TO_PERFORM: {'msgtype': 103, 'msgid': 411, 'result': 53, 'desc': 'Server is unwilling to perform', 'ctrls': [], 'info': 'Failed to start plugin "Retro Changelog Plugin". See errors log.'}

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:324: UNWILLING_TO_PERFORM
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39001, 'ldap-secureport': 63701, 'server-id': 'master1', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39002, 'ldap-secureport': 63702, 'server-id': 'master2', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:148 Creating replication topology. INFO  lib389.topologies:topologies.py:162 Joining master master2 to master1 ... INFO  lib389.replica:replica.py:2101 SUCCESS: bootstrap to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 completed INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is was created INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is was created INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is NOT working (expect 35867916-9e1d-4be9-a3bb-17e97e046745 / got description=None) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is working INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is NOT working (expect 4f5ce5bf-6475-4d6c-ac47-37e99d73b832 / got description=35867916-9e1d-4be9-a3bb-17e97e046745) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to 
ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is working INFO  lib389.replica:replica.py:2170 SUCCESS: joined master from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 INFO  lib389.topologies:topologies.py:170 Ensuring master master1 to master2 ... INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 already exists INFO  lib389.topologies:topologies.py:170 Ensuring master master2 to master1 ... INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 already exists
Failed suites/dynamic_plugins/dynamic_plugins_test.py::test_memory_corruption 18.47
topology_m2 = <lib389.topologies.TopologyMain object at 0x7f4d2ffaf490>

def test_memory_corruption(topology_m2):
"""Check the plugins for memory corruption issues while
dynamic plugins option is enabled

:id: 96136538-0151-4b09-9933-0e0cbf2c7862
:setup: 2 Master Instances
:steps:
1. Pause all replication
2. Set nsslapd-dynamic-plugins to on
3. Try to update LDBM config entry
4. Restart the plugin many times in a linked list fashion
restarting previous and preprevious plugins in the list of all plugins
5. Run the functional test
6. Repeat 4 and 5 steps for all plugins
7. Resume replication
8. Go through 4-6 steps once more
9. Check that data in sync and replication is working
:expectedresults:
1. Success
2. Success
3. Success
4. Success
5. Success
6. Success
7. Success
8. Success
9. Success
"""


m1 = topology_m2.ms["master1"]
msg = ' (no replication)'
replication_run = False

# First part of the test should be without replication
topology_m2.pause_all_replicas()

# First enable dynamic plugins
m1.config.replace('nsslapd-dynamic-plugins', 'on')

# Test that critical plugins can be updated even though the change might not be applied
ldbm_config = LDBMConfig(m1)
ldbm_config.replace('description', 'test')

while True:
# First run the tests with replication disabled, then rerun them with replication set up

############################################################################
# Test the stability by exercising the internal lists, callbacks, and task handlers
############################################################################

log.info('####################################################################')
log.info('Testing Dynamic Plugins for Memory Corruption' + msg + '...')
log.info('####################################################################\n')
prev_plugin_test = None
prev_prev_plugin_test = None

for plugin_test in acceptance_test.func_tests:
#
# Restart the plugin several times (and prev plugins) - work that linked list
#
> plugin_test(topology_m2, "restart")

suites/dynamic_plugins/dynamic_plugins_test.py:205:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
suites/plugins/acceptance_test.py:1588: in test_retrocl
plugin.enable()
/usr/local/lib/python3.8/site-packages/lib389/plugins.py:58: in enable
self.set('nsslapd-pluginEnabled', 'on')
/usr/local/lib/python3.8/site-packages/lib389/_mapped_object.py:446: in set
return self._instance.modify_ext_s(self._dn, [(action, key, value)],
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:613: in modify_ext_s
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all=1,timeout=self.timeout)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:764: in result3
resp_type, resp_data, resp_msgid, decoded_resp_ctrls, retoid, retval = self.result4(
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:774: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:340: in _ldap_call
reraise(exc_type, exc_value, exc_traceback)
/usr/local/lib64/python3.8/site-packages/ldap/compat.py:46: in reraise
raise exc_value
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d302ce3a0>
func = <built-in method result4 of LDAP object at 0x7f4d30261e10>
args = (849, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
exc_type = None, exc_value = None, exc_traceback = None

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E ldap.UNWILLING_TO_PERFORM: {'msgtype': 103, 'msgid': 849, 'result': 53, 'desc': 'Server is unwilling to perform', 'ctrls': [], 'info': 'Failed to start plugin "Retro Changelog Plugin". See errors log.'}

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:324: UNWILLING_TO_PERFORM
Failed suites/dynamic_plugins/dynamic_plugins_test.py::test_stress 17.42
topology_m2 = <lib389.topologies.TopologyMain object at 0x7f4d2ffaf490>

@pytest.mark.tier2
def test_stress(topology_m2):
"""Test plugins while under a big load. Perform the test 5 times

:id: 96136538-0151-4b09-9933-0e0cbf2c7863
:setup: 2 Master Instances
:steps:
1. Pause all replication
2. Set nsslapd-dynamic-plugins to on
3. Try to update LDBM config entry
4. Do one run through all tests
5. Enable Referential integrity and MemberOf plugins
6. Launch three new threads to add a bunch of users
7. While we are adding users restart the MemberOf and
Linked Attributes plugins many times
8. Wait for the 'adding' threads to complete
9. Now launch three threads to delete the users
10. Restart both the MemberOf, Referential integrity and
Linked Attributes plugins during these deletes
11. Wait for the 'deleting' threads to complete
12. Now make sure both the MemberOf and Referential integrity plugins still work correctly
13. Cleanup the stress tests (delete the group entry)
14. Perform 4-13 steps five times
15. Resume replication
16. Go through 4-14 steps once more
17. Check that data in sync and replication is working
:expectedresults:
1. Success
2. Success
3. Success
4. Success
5. Success
6. Success
7. Success
8. Success
9. Success
10. Success
11. Success
12. Success
13. Success
14. Success
15. Success
16. Success
17. Success
"""

m1 = topology_m2.ms["master1"]
msg = ' (no replication)'
replication_run = False
stress_max_runs = 5

# First part of the test should be without replication
topology_m2.pause_all_replicas()

# First enable dynamic plugins
m1.config.replace('nsslapd-dynamic-plugins', 'on')

# Test that critical plugins can be updated even though the change might not be applied
ldbm_config = LDBMConfig(m1)
ldbm_config.replace('description', 'test')

while True:
# First run the tests with replication disabled, then rerun them with replication set up

log.info('Do one run through all tests ' + msg + '...')
> acceptance_test.check_all_plugins(topology_m2)

suites/dynamic_plugins/dynamic_plugins_test.py:310:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
suites/plugins/acceptance_test.py:1807: in check_all_plugins
func(topo, args)
suites/plugins/acceptance_test.py:1588: in test_retrocl
plugin.enable()
/usr/local/lib/python3.8/site-packages/lib389/plugins.py:58: in enable
self.set('nsslapd-pluginEnabled', 'on')
/usr/local/lib/python3.8/site-packages/lib389/_mapped_object.py:446: in set
return self._instance.modify_ext_s(self._dn, [(action, key, value)],
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:613: in modify_ext_s
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all=1,timeout=self.timeout)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:764: in result3
resp_type, resp_data, resp_msgid, decoded_resp_ctrls, retoid, retval = self.result4(
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:774: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:340: in _ldap_call
reraise(exc_type, exc_value, exc_traceback)
/usr/local/lib64/python3.8/site-packages/ldap/compat.py:46: in reraise
raise exc_value
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d302ce3a0>
func = <built-in method result4 of LDAP object at 0x7f4d30261e10>
args = (1211, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
exc_type = None, exc_value = None, exc_traceback = None

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E ldap.UNWILLING_TO_PERFORM: {'msgtype': 103, 'msgid': 1211, 'result': 53, 'desc': 'Server is unwilling to perform', 'ctrls': [], 'info': 'Failed to start plugin "Retro Changelog Plugin". See errors log.'}

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:324: UNWILLING_TO_PERFORM
Failed suites/import/import_test.py::test_fast_slow_import 10.75
topo = <lib389.topologies.TopologyMain object at 0x7f4d2e416dc0>
_toggle_private_import_mem = None, _import_clean = None

def test_fast_slow_import(topo, _toggle_private_import_mem, _import_clean):
"""With nsslapd-db-private-import-mem: on is faster import.

:id: 3044331c-9c0e-11ea-ac9f-8c16451d917b
:setup: Standalone Instance
:steps:
1. Let's set nsslapd-db-private-import-mem:on, nsslapd-import-cache-autosize: 0
2. Measure offline import time duration total_time1
3. Now nsslapd-db-private-import-mem:off
4. Measure offline import time duration total_time2
5. total_time1 < total_time2
6. Set nsslapd-db-private-import-mem:on, nsslapd-import-cache-autosize: -1
7. Measure offline import time duration total_time1
8. Now nsslapd-db-private-import-mem:off
9. Measure offline import time duration total_time2
10. total_time1 < total_time2
:expectedresults:
1. Operation successful
2. Operation successful
3. Operation successful
4. Operation successful
5. Operation successful
6. Operation successful
7. Operation successful
8. Operation successful
9. Operation successful
10. Operation successful
"""
# Let's set nsslapd-db-private-import-mem:on, nsslapd-import-cache-autosize: 0
config = LDBMConfig(topo.standalone)
# Measure offline import time duration total_time1
total_time1 = _import_offline(topo, 20)
# Now nsslapd-db-private-import-mem:off
config.replace('nsslapd-db-private-import-mem', 'off')
accounts = Accounts(topo.standalone, DEFAULT_SUFFIX)
for i in accounts.filter('(uid=*)'):
UserAccount(topo.standalone, i.dn).delete()
# Measure offline import time duration total_time2
total_time2 = _import_offline(topo, 20)
# total_time1 < total_time2
> assert total_time1 < total_time2
E assert 2.2138659954071045 < 2.2032835483551025

suites/import/import_test.py:307: AssertionError
Failed suites/password/regression_test.py::test_unhashed_pw_switch 4.20
topo_master = <lib389.topologies.TopologyMain object at 0x7f4d2d8c00d0>

@pytest.mark.ds49789
def test_unhashed_pw_switch(topo_master):
"""Check that nsslapd-unhashed-pw-switch works corrently

:id: e5aba180-d174-424d-92b0-14fe7bb0b92a
:setup: Master Instance
:steps:
1. A Master is created, enable retrocl (not used here)
2. Create a set of users
3. update userpassword of user1 and check that unhashed#user#password is not logged (default)
4. update userpassword of user2 and check that unhashed#user#password is not logged ('nolog')
5. update userpassword of user3 and check that unhashed#user#password is logged ('on')
:expectedresults:
1. Success
2. Success
3. Success (unhashed#user#password is not logged in the replication changelog)
4. Success (unhashed#user#password is not logged in the replication changelog)
5. Success (unhashed#user#password is logged in the replication changelog)
"""
MAX_USERS = 10
PEOPLE_DN = ("ou=people," + DEFAULT_SUFFIX)

inst = topo_master.ms["master1"]
inst.modify_s("cn=Retro Changelog Plugin,cn=plugins,cn=config",
[(ldap.MOD_REPLACE, 'nsslapd-changelogmaxage', b'2m'),
(ldap.MOD_REPLACE, 'nsslapd-changelog-trim-interval', b"5s"),
(ldap.MOD_REPLACE, 'nsslapd-logAccess', b'on')])
inst.config.loglevel(vals=[256 + 4], service='access')
inst.restart()
# If you need any test suite initialization,
# please, write additional fixture for that (including finalizer).
# Topology for suites are predefined in lib389/topologies.py.

# enable dynamic plugins, memberof and retro cl plugin
#
log.info('Enable plugins...')
try:
inst.modify_s(DN_CONFIG,
[(ldap.MOD_REPLACE,
'nsslapd-dynamic-plugins',
b'on')])
except ldap.LDAPError as e:
ldap.error('Failed to enable dynamic plugins! ' + e.message['desc'])
assert False

#topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
> inst.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)

suites/password/regression_test.py:268:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/local/lib/python3.8/site-packages/lib389/plugins.py:2105: in enable
plugin.enable()
/usr/local/lib/python3.8/site-packages/lib389/plugins.py:58: in enable
self.set('nsslapd-pluginEnabled', 'on')
/usr/local/lib/python3.8/site-packages/lib389/_mapped_object.py:446: in set
return self._instance.modify_ext_s(self._dn, [(action, key, value)],
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:613: in modify_ext_s
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all=1,timeout=self.timeout)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:764: in result3
resp_type, resp_data, resp_msgid, decoded_resp_ctrls, retoid, retval = self.result4(
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:774: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:340: in _ldap_call
reraise(exc_type, exc_value, exc_traceback)
/usr/local/lib64/python3.8/site-packages/ldap/compat.py:46: in reraise
raise exc_value
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d2d8c0160>
func = <built-in method result4 of LDAP object at 0x7f4d2d9c3cf0>
args = (5, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
exc_type = None, exc_value = None, exc_traceback = None

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E ldap.UNWILLING_TO_PERFORM: {'msgtype': 103, 'msgid': 5, 'result': 53, 'desc': 'Server is unwilling to perform', 'ctrls': [], 'info': 'Failed to start plugin "Retro Changelog Plugin". See errors log.'}

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:324: UNWILLING_TO_PERFORM
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39001, 'ldap-secureport': 63701, 'server-id': 'master1', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:148 Creating replication topology.
-------------------------------Captured log call--------------------------------
INFO  tests.suites.password.regression_test:regression_test.py:257 Enable plugins...
Failed suites/plugins/acceptance_test.py::test_retrocl 6.84
topo = <lib389.topologies.TopologyMain object at 0x7f4d30ffa5e0>, args = None

def test_retrocl(topo, args=None):
"""Test Retro Changelog basic functionality

:id: 9b87493b-0493-46f9-8364-6099d0e5d810
:setup: Standalone Instance
:steps:
1. Enable the plugin
2. Restart the instance
3. Gather the current change count (it's not 1 once we start the stability tests)
4. Add a user
5. Check we logged this in the retro cl
6. Change the config - disable plugin
7. Delete the user
8. Check we didn't log this in the retro cl
9. Check nsslapd-plugin-depends-on-named for the plugin
10. Clean up
:expectedresults:
1. Success
2. Success
3. Success
4. Success
5. Success
6. Success
7. Success
8. Success
9. Success
10. Success
"""

inst = topo[0]

# stop the plugin, and start it
plugin = RetroChangelogPlugin(inst)
plugin.disable()
plugin.enable()

if args == "restart":
return

# If args is None then we run the test suite as pytest standalone and it's not dynamic
if args is None:
> inst.restart()

suites/plugins/acceptance_test.py:1595:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:1276: in restart
self.start(timeout, post_open)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:1135: in start
subprocess.check_output(["systemctl", "start", "dirsrv@%s" % self.serverid], stderr=subprocess.STDOUT)
/usr/lib64/python3.8/subprocess.py:411: in check_output
return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

input = None, capture_output = False, timeout = None, check = True
popenargs = (['systemctl', 'start', 'dirsrv@standalone1'],)
kwargs = {'stderr': -2, 'stdout': -1}
process = <subprocess.Popen object at 0x7f4d2da0b610>
stdout = b'Job for dirsrv@standalone1.service failed because the control process exited with error code.\nSee "systemctl status dirsrv@standalone1.service" and "journalctl -xe" for details.\n'
stderr = None, retcode = 1

def run(*popenargs,
input=None, capture_output=False, timeout=None, check=False, **kwargs):
"""Run command with arguments and return a CompletedProcess instance.

The returned instance will have attributes args, returncode, stdout and
stderr. By default, stdout and stderr are not captured, and those attributes
will be None. Pass stdout=PIPE and/or stderr=PIPE in order to capture them.

If check is True and the exit code was non-zero, it raises a
CalledProcessError. The CalledProcessError object will have the return code
in the returncode attribute, and output & stderr attributes if those streams
were captured.

If timeout is given, and the process takes too long, a TimeoutExpired
exception will be raised.

There is an optional argument "input", allowing you to
pass bytes or a string to the subprocess's stdin. If you use this argument
you may not also use the Popen constructor's "stdin" argument, as
it will be used internally.

By default, all communication is in bytes, and therefore any "input" should
be bytes, and the stdout and stderr will be bytes. If in text mode, any
"input" should be a string, and stdout and stderr will be strings decoded
according to locale encoding, or by "encoding" if set. Text mode is
triggered by setting any of text, encoding, errors or universal_newlines.

The other arguments are the same as for the Popen constructor.
"""
if input is not None:
if kwargs.get('stdin') is not None:
raise ValueError('stdin and input arguments may not both be used.')
kwargs['stdin'] = PIPE

if capture_output:
if kwargs.get('stdout') is not None or kwargs.get('stderr') is not None:
raise ValueError('stdout and stderr arguments may not be used '
'with capture_output.')
kwargs['stdout'] = PIPE
kwargs['stderr'] = PIPE

with Popen(*popenargs, **kwargs) as process:
try:
stdout, stderr = process.communicate(input, timeout=timeout)
except TimeoutExpired as exc:
process.kill()
if _mswindows:
# Windows accumulates the output in a single blocking
# read() call run on child threads, with the timeout
# being done in a join() on those threads. communicate()
# _after_ kill() is required to collect that and add it
# to the exception.
exc.stdout, exc.stderr = process.communicate()
else:
# POSIX _communicate already populated the output so
# far into the TimeoutExpired exception.
process.wait()
raise
except: # Including KeyboardInterrupt, communicate handled that.
process.kill()
# We don't call process.wait() as .__exit__ does that for us.
raise
retcode = process.poll()
if check and retcode:
> raise CalledProcessError(retcode, process.args,
output=stdout, stderr=stderr)
E subprocess.CalledProcessError: Command '['systemctl', 'start', 'dirsrv@standalone1']' returned non-zero exit status 1.

/usr/lib64/python3.8/subprocess.py:512: CalledProcessError
Failed suites/plugins/acceptance_test.py::test_rootdn 0.00
topo = <lib389.topologies.TopologyMain object at 0x7f4d30ffa5e0>, args = None

def test_rootdn(topo, args=None):
"""Test Root DNA Access control basic functionality

:id: 9b87493b-0493-46f9-8364-6099d0e5d811
:setup: Standalone Instance
:steps:
1. Enable the plugin
2. Restart the instance
3. Add an user and aci to open up cn=config
4. Set an aci so we can modify the plugin after we deny the root dn
5. Set allowed IP to an unknown host - blocks root dn
6. Bind as Root DN
7. Bind as the user who can make updates to the config
8. Test that invalid plugin changes are rejected
9. Remove the restriction
10. Bind as Root DN
11. Check nsslapd-plugin-depends-on-named for the plugin
12. Clean up
:expectedresults:
1. Success
2. Success
3. Success
4. Success
5. Success
6. Success
7. Success
8. Success
9. Success
10. Success
11. Success
12. Success
"""

inst = topo[0]

# stop the plugin, and start it
plugin = RootDNAccessControlPlugin(inst)
> plugin.disable()

suites/plugins/acceptance_test.py:1704:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/local/lib/python3.8/site-packages/lib389/plugins.py:63: in disable
self.set('nsslapd-pluginEnabled', 'off')
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.plugins.RootDNAccessControlPlugin object at 0x7f4d2db28a90>
key = 'nsslapd-pluginEnabled', value = 'off', action = 2

def set(self, key, value, action=ldap.MOD_REPLACE):
"""Perform a specified action on a key with value

:param key: an attribute name
:type key: str
:param value: an attribute value
:type value: str
:param action: - ldap.MOD_REPLACE - by default
- ldap.MOD_ADD
- ldap.MOD_DELETE
:type action: int

:returns: result of modify_s operation
:raises: ValueError - if instance is not online
"""

if action == ldap.MOD_ADD:
action_txt = "ADD"
elif action == ldap.MOD_REPLACE:
action_txt = "REPLACE"
elif action == ldap.MOD_DELETE:
action_txt = "DELETE"
else:
# This should never happen (bug!)
action_txt = "UNKNOWN"

if value is None or len(value) < 512:
self._log.debug("%s set %s: (%r, %r)" % (self._dn, action_txt, key, display_log_value(key, value)))
else:
self._log.debug("%s set %s: (%r, value too large)" % (self._dn, action_txt, key))
if self._instance.state != DIRSRV_STATE_ONLINE:
> raise ValueError("Invalid state. Cannot set properties on instance that is not ONLINE.")
E ValueError: Invalid state. Cannot set properties on instance that is not ONLINE.

/usr/local/lib/python3.8/site-packages/lib389/_mapped_object.py:438: ValueError
Failed suites/replication/conflict_resolve_test.py::TestTwoMasters::test_complex_add_modify_modrdn_delete 84.29
self = <tests.suites.replication.conflict_resolve_test.TestTwoMasters object at 0x7f4d2d50a370>
topology_m2 = <lib389.topologies.TopologyMain object at 0x7f4d2d4f7790>
base_m2 = <lib389.idm.nscontainer.nsContainer object at 0x7f4d2d50a550>

def test_complex_add_modify_modrdn_delete(self, topology_m2, base_m2):
"""Check that conflict properly resolved for complex operations
which involve add, modify, modrdn and delete

:id: 77f09b18-03d1-45da-940b-1ad2c2908eb1
:setup: Two master replication, test container for entries, enable plugin logging,
audit log, error log for replica and access log for internal
:steps:
1. Add ten users to m1 and wait for replication to happen
2. Pause replication
3. Test add-del on m1 and add on m2
4. Test add-mod on m1 and add on m2
5. Test add-modrdn on m1 and add on m2
6. Test multiple add, modrdn
7. Test Add-del on both masters
8. Test modrdn-modrdn
9. Test modrdn-del
10. Resume replication
11. Check that the entries on both masters are the same and replication is working
:expectedresults:
1. It should pass
2. It should pass
3. It should pass
4. It should pass
5. It should pass
6. It should pass
7. It should pass
8. It should pass
9. It should pass
10. It should pass
11. It should pass
"""

M1 = topology_m2.ms["master1"]
M2 = topology_m2.ms["master2"]

test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None)
test_users_m2 = UserAccounts(M2, base_m2.dn, rdn=None)
repl = ReplicationManager(SUFFIX)

for user_num in range(1100, 1110):
_create_user(test_users_m1, user_num)

repl.test_replication(M1, M2)
topology_m2.pause_all_replicas()

log.info("Test add-del on M1 and add on M2")
user_num += 1
_create_user(test_users_m1, user_num)
_delete_user(test_users_m1, user_num, sleep=True)
_create_user(test_users_m2, user_num, sleep=True)

user_num += 1
_create_user(test_users_m1, user_num, sleep=True)
_create_user(test_users_m2, user_num, sleep=True)
_delete_user(test_users_m1, user_num, sleep=True)

user_num += 1
_create_user(test_users_m2, user_num, sleep=True)
_create_user(test_users_m1, user_num)
_delete_user(test_users_m1, user_num)

log.info("Test add-mod on M1 and add on M2")
user_num += 1
_create_user(test_users_m1, user_num)
_modify_user(test_users_m1, user_num, sleep=True)
_create_user(test_users_m2, user_num, sleep=True)

user_num += 1
_create_user(test_users_m1, user_num, sleep=True)
_create_user(test_users_m2, user_num, sleep=True)
_modify_user(test_users_m1, user_num, sleep=True)

user_num += 1
_create_user(test_users_m2, user_num, sleep=True)
_create_user(test_users_m1, user_num)
_modify_user(test_users_m1, user_num)

log.info("Test add-modrdn on M1 and add on M2")
user_num += 1
_create_user(test_users_m1, user_num)
_rename_user(test_users_m1, user_num, user_num+20, sleep=True)
_create_user(test_users_m2, user_num, sleep=True)

user_num += 1
_create_user(test_users_m1, user_num, sleep=True)
_create_user(test_users_m2, user_num, sleep=True)
_rename_user(test_users_m1, user_num, user_num+20, sleep=True)

user_num += 1
_create_user(test_users_m2, user_num, sleep=True)
_create_user(test_users_m1, user_num)
_rename_user(test_users_m1, user_num, user_num+20)

log.info("Test multiple add, modrdn")
user_num += 1
_create_user(test_users_m1, user_num, sleep=True)
_create_user(test_users_m2, user_num, sleep=True)
_rename_user(test_users_m1, user_num, user_num+20)
_create_user(test_users_m1, user_num, sleep=True)
_modify_user(test_users_m2, user_num, sleep=True)

log.info("Add - del on both masters")
user_num += 1
_create_user(test_users_m1, user_num)
_delete_user(test_users_m1, user_num, sleep=True)
_create_user(test_users_m2, user_num)
_delete_user(test_users_m2, user_num, sleep=True)

log.info("Test modrdn - modrdn")
user_num += 1
_rename_user(test_users_m1, 1109, 1129, sleep=True)
_rename_user(test_users_m2, 1109, 1129, sleep=True)

log.info("Test modrdn - del")
user_num += 1
_rename_user(test_users_m1, 1100, 1120, sleep=True)
_delete_user(test_users_m2, 1100)

user_num += 1
_delete_user(test_users_m2, 1101, sleep=True)
_rename_user(test_users_m1, 1101, 1121)

topology_m2.resume_all_replicas()

repl.test_replication_topology(topology_m2)
time.sleep(30)

user_dns_m1 = [user.dn for user in test_users_m1.list()]
user_dns_m2 = [user.dn for user in test_users_m2.list()]
> assert set(user_dns_m1) == set(user_dns_m2)
E AssertionError: assert {'uid=test_us...,dc=com', ...} == {'uid=test_us...,dc=com', ...}
E Extra items in the left set:
E 'uid=test_user_1117,cn=test_container,dc=example,dc=com'
E 'uid=test_user_1111,cn=test_container,dc=example,dc=com'
E 'uid=test_user_1112,cn=test_container,dc=example,dc=com'
E Full diff:
E {
E 'uid=test_user_1102,cn=test_container,dc=example,dc=com',...
E
E ...Full output truncated (24 lines hidden), use '-vv' to show

suites/replication/conflict_resolve_test.py:369: AssertionError
-------------------------------Captured log call--------------------------------
INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is NOT working (expect a8a6b6a1-ffc9-42fe-bbdc-27ab967c7a2d / got description=122662f2-67b8-4525-8952-fb63bbb486f3) INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is NOT working (expect a8a6b6a1-ffc9-42fe-bbdc-27ab967c7a2d / got description=122662f2-67b8-4525-8952-fb63bbb486f3) INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is NOT working (expect a8a6b6a1-ffc9-42fe-bbdc-27ab967c7a2d / got description=122662f2-67b8-4525-8952-fb63bbb486f3) INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is NOT working (expect a8a6b6a1-ffc9-42fe-bbdc-27ab967c7a2d / got description=122662f2-67b8-4525-8952-fb63bbb486f3) INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is NOT working (expect a8a6b6a1-ffc9-42fe-bbdc-27ab967c7a2d / got description=122662f2-67b8-4525-8952-fb63bbb486f3) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is working INFO  tests.suites.replication.conflict_resolve_test:conflict_resolve_test.py:285 Test add-del on M1 and add on M2 INFO  tests.suites.replication.conflict_resolve_test:conflict_resolve_test.py:301 Test add-mod on M1 and add on M2 INFO  
tests.suites.replication.conflict_resolve_test:conflict_resolve_test.py:317 Test add-modrdn on M1 and add on M2 INFO  tests.suites.replication.conflict_resolve_test:conflict_resolve_test.py:333 Test multiple add, modrdn INFO  tests.suites.replication.conflict_resolve_test:conflict_resolve_test.py:341 Add - del on both masters INFO  tests.suites.replication.conflict_resolve_test:conflict_resolve_test.py:348 Test modrdn - modrdn INFO  tests.suites.replication.conflict_resolve_test:conflict_resolve_test.py:353 Test modrdn - del INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is NOT working (expect b040d0d1-f35b-44a2-b106-4d05cb15cf3c / got description=a8a6b6a1-ffc9-42fe-bbdc-27ab967c7a2d) INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is NOT working (expect b040d0d1-f35b-44a2-b106-4d05cb15cf3c / got description=a8a6b6a1-ffc9-42fe-bbdc-27ab967c7a2d) INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is NOT working (expect b040d0d1-f35b-44a2-b106-4d05cb15cf3c / got description=a8a6b6a1-ffc9-42fe-bbdc-27ab967c7a2d) INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is NOT working (expect b040d0d1-f35b-44a2-b106-4d05cb15cf3c / got description=a8a6b6a1-ffc9-42fe-bbdc-27ab967c7a2d) INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is NOT working (expect b040d0d1-f35b-44a2-b106-4d05cb15cf3c / 
got description=a8a6b6a1-ffc9-42fe-bbdc-27ab967c7a2d) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is working INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is NOT working (expect 82ee0620-ffc2-4789-aa74-48c35e46c7d1 / got description=b040d0d1-f35b-44a2-b106-4d05cb15cf3c) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is working
Failed suites/syncrepl_plugin/basic_test.py::test_syncrepl_basic 6.95
topology = <lib389.topologies.TopologyMain object at 0x7f4d2d358130>

def test_syncrepl_basic(topology):
""" Test basic functionality of the SyncRepl interface

:id: f9fea826-8ae2-412a-8e88-b8e0ba939b06

:setup: Standalone instance

:steps:
1. Enable Retro Changelog
2. Enable Syncrepl
3. Run the syncstate test to check refresh, add, delete, mod.

:expectedresults:
1. Success
1. Success
1. Success
"""
st = topology.standalone
# Enable RetroChangelog.
rcl = RetroChangelogPlugin(st)
rcl.enable()
# Set the default targetid
rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId')
# Enable sync repl
csp = ContentSyncPlugin(st)
csp.enable()
# Restart DS
> st.restart()

suites/syncrepl_plugin/basic_test.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:1276: in restart
self.start(timeout, post_open)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:1135: in start
subprocess.check_output(["systemctl", "start", "dirsrv@%s" % self.serverid], stderr=subprocess.STDOUT)
/usr/lib64/python3.8/subprocess.py:411: in check_output
return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

input = None, capture_output = False, timeout = None, check = True
popenargs = (['systemctl', 'start', 'dirsrv@standalone1'],)
kwargs = {'stderr': -2, 'stdout': -1}
process = <subprocess.Popen object at 0x7f4d2d490850>
stdout = b'Job for dirsrv@standalone1.service failed because the control process exited with error code.\nSee "systemctl status dirsrv@standalone1.service" and "journalctl -xe" for details.\n'
stderr = None, retcode = 1

def run(*popenargs,
input=None, capture_output=False, timeout=None, check=False, **kwargs):
"""Run command with arguments and return a CompletedProcess instance.

The returned instance will have attributes args, returncode, stdout and
stderr. By default, stdout and stderr are not captured, and those attributes
will be None. Pass stdout=PIPE and/or stderr=PIPE in order to capture them.

If check is True and the exit code was non-zero, it raises a
CalledProcessError. The CalledProcessError object will have the return code
in the returncode attribute, and output & stderr attributes if those streams
were captured.

If timeout is given, and the process takes too long, a TimeoutExpired
exception will be raised.

There is an optional argument "input", allowing you to
pass bytes or a string to the subprocess's stdin. If you use this argument
you may not also use the Popen constructor's "stdin" argument, as
it will be used internally.

By default, all communication is in bytes, and therefore any "input" should
be bytes, and the stdout and stderr will be bytes. If in text mode, any
"input" should be a string, and stdout and stderr will be strings decoded
according to locale encoding, or by "encoding" if set. Text mode is
triggered by setting any of text, encoding, errors or universal_newlines.

The other arguments are the same as for the Popen constructor.
"""
if input is not None:
if kwargs.get('stdin') is not None:
raise ValueError('stdin and input arguments may not both be used.')
kwargs['stdin'] = PIPE

if capture_output:
if kwargs.get('stdout') is not None or kwargs.get('stderr') is not None:
raise ValueError('stdout and stderr arguments may not be used '
'with capture_output.')
kwargs['stdout'] = PIPE
kwargs['stderr'] = PIPE

with Popen(*popenargs, **kwargs) as process:
try:
stdout, stderr = process.communicate(input, timeout=timeout)
except TimeoutExpired as exc:
process.kill()
if _mswindows:
# Windows accumulates the output in a single blocking
# read() call run on child threads, with the timeout
# being done in a join() on those threads. communicate()
# _after_ kill() is required to collect that and add it
# to the exception.
exc.stdout, exc.stderr = process.communicate()
else:
# POSIX _communicate already populated the output so
# far into the TimeoutExpired exception.
process.wait()
raise
except: # Including KeyboardInterrupt, communicate handled that.
process.kill()
# We don't call process.wait() as .__exit__ does that for us.
raise
retcode = process.poll()
if check and retcode:
> raise CalledProcessError(retcode, process.args,
output=stdout, stderr=stderr)
E subprocess.CalledProcessError: Command '['systemctl', 'start', 'dirsrv@standalone1']' returned non-zero exit status 1.

/usr/lib64/python3.8/subprocess.py:512: CalledProcessError
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
Failed suites/syncrepl_plugin/basic_test.py::test_sync_repl_mep 0.00
topology = <lib389.topologies.TopologyMain object at 0x7f4d2d358130>
request = <FixtureRequest for <Function test_sync_repl_mep>>

def test_sync_repl_mep(topology, request):
"""Test sync repl with MEP plugin that triggers several
updates on the same entry

:id: d9515930-293e-42da-9835-9f255fa6111b
:setup: Standalone Instance
:steps:
1. enable retro/sync_repl/mep
2. Add mep Template and definition entry
3. start sync_repl client
4. Add users with PosixAccount ObjectClass (mep will update it several times)
5. Check that the received cookie are progressing
:expected results:
1. Success
2. Success
3. Success
4. Success
5. Success
"""
inst = topology[0]

# Enable/configure retroCL
plugin = RetroChangelogPlugin(inst)
> plugin.disable()

suites/syncrepl_plugin/basic_test.py:174:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/local/lib/python3.8/site-packages/lib389/plugins.py:63: in disable
self.set('nsslapd-pluginEnabled', 'off')
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.plugins.RetroChangelogPlugin object at 0x7f4d2d3d6130>
key = 'nsslapd-pluginEnabled', value = 'off', action = 2

def set(self, key, value, action=ldap.MOD_REPLACE):
"""Perform a specified action on a key with value

:param key: an attribute name
:type key: str
:param value: an attribute value
:type value: str
:param action: - ldap.MOD_REPLACE - by default
- ldap.MOD_ADD
- ldap.MOD_DELETE
:type action: int

:returns: result of modify_s operation
:raises: ValueError - if instance is not online
"""

if action == ldap.MOD_ADD:
action_txt = "ADD"
elif action == ldap.MOD_REPLACE:
action_txt = "REPLACE"
elif action == ldap.MOD_DELETE:
action_txt = "DELETE"
else:
# This should never happen (bug!)
action_txt = "UNKNOWN"

if value is None or len(value) < 512:
self._log.debug("%s set %s: (%r, %r)" % (self._dn, action_txt, key, display_log_value(key, value)))
else:
self._log.debug("%s set %s: (%r, value too large)" % (self._dn, action_txt, key))
if self._instance.state != DIRSRV_STATE_ONLINE:
> raise ValueError("Invalid state. Cannot set properties on instance that is not ONLINE.")
E ValueError: Invalid state. Cannot set properties on instance that is not ONLINE.

/usr/local/lib/python3.8/site-packages/lib389/_mapped_object.py:438: ValueError
Failed suites/syncrepl_plugin/basic_test.py::test_sync_repl_cookie 0.00
topology = <lib389.topologies.TopologyMain object at 0x7f4d2d358130>
request = <FixtureRequest for <Function test_sync_repl_cookie>>

def test_sync_repl_cookie(topology, request):
"""Test sync_repl cookie are progressing is an increasing order
when there are nested updates

:id: d7fbde25-5702-46ac-b38e-169d7a68e97c
:setup: Standalone Instance
:steps:
1.: enable retroCL
2.: configure retroCL to log nsuniqueid as targetUniqueId
3.: enable content_sync plugin
4.: enable automember
5.: create (2) groups. Few groups can help to reproduce the concurrent updates problem.
6.: configure automember to provision those groups with 'member'
7.: enable and configure memberof plugin
8.: enable plugin log level
9.: restart the server
10.: create a thread dedicated to run a sync repl client
11.: Create (9) users that will generate nested updates (automember/memberof)
12.: stop sync repl client and collect the list of cookie.change_no
13.: check that cookies.change_no are in increasing order
:expectedresults:
1.: succeeds
2.: succeeds
3.: succeeds
4.: succeeds
5.: succeeds
6.: succeeds
7.: succeeds
8.: succeeds
9.: succeeds
10.: succeeds
11.: succeeds
12.: succeeds
13.: succeeds
"""
inst = topology[0]

# Enable/configure retroCL
plugin = RetroChangelogPlugin(inst)
> plugin.disable()

suites/syncrepl_plugin/basic_test.py:275:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/local/lib/python3.8/site-packages/lib389/plugins.py:63: in disable
self.set('nsslapd-pluginEnabled', 'off')
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.plugins.RetroChangelogPlugin object at 0x7f4d2d27e9a0>
key = 'nsslapd-pluginEnabled', value = 'off', action = 2

def set(self, key, value, action=ldap.MOD_REPLACE):
"""Perform a specified action on a key with value

:param key: an attribute name
:type key: str
:param value: an attribute value
:type value: str
:param action: - ldap.MOD_REPLACE - by default
- ldap.MOD_ADD
- ldap.MOD_DELETE
:type action: int

:returns: result of modify_s operation
:raises: ValueError - if instance is not online
"""

if action == ldap.MOD_ADD:
action_txt = "ADD"
elif action == ldap.MOD_REPLACE:
action_txt = "REPLACE"
elif action == ldap.MOD_DELETE:
action_txt = "DELETE"
else:
# This should never happen (bug!)
action_txt = "UNKNOWN"

if value is None or len(value) < 512:
self._log.debug("%s set %s: (%r, %r)" % (self._dn, action_txt, key, display_log_value(key, value)))
else:
self._log.debug("%s set %s: (%r, value too large)" % (self._dn, action_txt, key))
if self._instance.state != DIRSRV_STATE_ONLINE:
> raise ValueError("Invalid state. Cannot set properties on instance that is not ONLINE.")
E ValueError: Invalid state. Cannot set properties on instance that is not ONLINE.

/usr/local/lib/python3.8/site-packages/lib389/_mapped_object.py:438: ValueError
Failed suites/syncrepl_plugin/basic_test.py::test_sync_repl_cookie_add_del 0.00
topology = <lib389.topologies.TopologyMain object at 0x7f4d2d358130>
request = <FixtureRequest for <Function test_sync_repl_cookie_add_del>>

def test_sync_repl_cookie_add_del(topology, request):
"""Test sync_repl cookie are progressing is an increasing order
when there add and del

:id: 83e11038-6ed0-4a5b-ac77-e44887ab11e3
:setup: Standalone Instance
:steps:
1.: enable retroCL
2.: configure retroCL to log nsuniqueid as targetUniqueId
3.: enable content_sync plugin
4.: enable automember
5.: create (2) groups. Few groups can help to reproduce the concurrent updates problem.
6.: configure automember to provision those groups with 'member'
7.: enable and configure memberof plugin
8.: enable plugin log level
9.: restart the server
10.: create a thread dedicated to run a sync repl client
11.: Create (3) users that will generate nested updates (automember/memberof)
12.: Delete (3) users
13.: stop sync repl client and collect the list of cookie.change_no
14.: check that cookies.change_no are in increasing order
:expectedresults:
1.: succeeds
2.: succeeds
3.: succeeds
4.: succeeds
5.: succeeds
6.: succeeds
7.: succeeds
8.: succeeds
9.: succeeds
10.: succeeds
11.: succeeds
12.: succeeds
13.: succeeds
14.: succeeds
"""
inst = topology[0]

# Enable/configure retroCL
plugin = RetroChangelogPlugin(inst)
> plugin.disable()

suites/syncrepl_plugin/basic_test.py:407:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/local/lib/python3.8/site-packages/lib389/plugins.py:63: in disable
self.set('nsslapd-pluginEnabled', 'off')
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.plugins.RetroChangelogPlugin object at 0x7f4d2d499ac0>
key = 'nsslapd-pluginEnabled', value = 'off', action = 2

def set(self, key, value, action=ldap.MOD_REPLACE):
"""Perform a specified action on a key with value

:param key: an attribute name
:type key: str
:param value: an attribute value
:type value: str
:param action: - ldap.MOD_REPLACE - by default
- ldap.MOD_ADD
- ldap.MOD_DELETE
:type action: int

:returns: result of modify_s operation
:raises: ValueError - if instance is not online
"""

if action == ldap.MOD_ADD:
action_txt = "ADD"
elif action == ldap.MOD_REPLACE:
action_txt = "REPLACE"
elif action == ldap.MOD_DELETE:
action_txt = "DELETE"
else:
# This should never happen (bug!)
action_txt = "UNKNOWN"

if value is None or len(value) < 512:
self._log.debug("%s set %s: (%r, %r)" % (self._dn, action_txt, key, display_log_value(key, value)))
else:
self._log.debug("%s set %s: (%r, value too large)" % (self._dn, action_txt, key))
if self._instance.state != DIRSRV_STATE_ONLINE:
> raise ValueError("Invalid state. Cannot set properties on instance that is not ONLINE.")
E ValueError: Invalid state. Cannot set properties on instance that is not ONLINE.

/usr/local/lib/python3.8/site-packages/lib389/_mapped_object.py:438: ValueError
Failed suites/syncrepl_plugin/basic_test.py::test_sync_repl_cookie_with_failure 0.00
topology = <lib389.topologies.TopologyMain object at 0x7f4d2d358130>
request = <FixtureRequest for <Function test_sync_repl_cookie_with_failure>>

def test_sync_repl_cookie_with_failure(topology, request):
"""Test sync_repl cookie are progressing is the right order
when there is a failure in nested updates

:id: e0103448-170e-4080-8f22-c34606447ce2
:setup: Standalone Instance
:steps:
1.: enable retroCL
2.: configure retroCL to log nsuniqueid as targetUniqueId
3.: enable content_sync plugin
4.: enable automember
5.: create (4) groups.
make group2 groupOfUniqueNames so the automember
will fail to add 'member' (uniqueMember expected)
6.: configure automember to provision those groups with 'member'
7.: enable and configure memberof plugin
8.: enable plugin log level
9.: restart the server
10.: create a thread dedicated to run a sync repl client
11.: Create a group that will be the only update received by sync repl client
12.: Create (9) users that will generate nested updates (automember/memberof)
13.: stop sync repl client and collect the list of cookie.change_no
14.: check that the list of cookie.change_no contains only the group 'step 11'
:expectedresults:
1.: succeeds
2.: succeeds
3.: succeeds
4.: succeeds
5.: succeeds
6.: succeeds
7.: succeeds
8.: succeeds
9.: succeeds
10.: succeeds
11.: succeeds
12.: Fails (expected)
13.: succeeds
14.: succeeds
"""
inst = topology[0]

# Enable/configure retroCL
plugin = RetroChangelogPlugin(inst)
> plugin.disable()

suites/syncrepl_plugin/basic_test.py:539:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/local/lib/python3.8/site-packages/lib389/plugins.py:63: in disable
self.set('nsslapd-pluginEnabled', 'off')
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.plugins.RetroChangelogPlugin object at 0x7f4d2c725280>
key = 'nsslapd-pluginEnabled', value = 'off', action = 2

def set(self, key, value, action=ldap.MOD_REPLACE):
"""Perform a specified action on a key with value

:param key: an attribute name
:type key: str
:param value: an attribute value
:type value: str
:param action: - ldap.MOD_REPLACE - by default
- ldap.MOD_ADD
- ldap.MOD_DELETE
:type action: int

:returns: result of modify_s operation
:raises: ValueError - if instance is not online
"""

if action == ldap.MOD_ADD:
action_txt = "ADD"
elif action == ldap.MOD_REPLACE:
action_txt = "REPLACE"
elif action == ldap.MOD_DELETE:
action_txt = "DELETE"
else:
# This should never happen (bug!)
action_txt = "UNKNOWN"

if value is None or len(value) < 512:
self._log.debug("%s set %s: (%r, %r)" % (self._dn, action_txt, key, display_log_value(key, value)))
else:
self._log.debug("%s set %s: (%r, value too large)" % (self._dn, action_txt, key))
if self._instance.state != DIRSRV_STATE_ONLINE:
> raise ValueError("Invalid state. Cannot set properties on instance that is not ONLINE.")
E ValueError: Invalid state. Cannot set properties on instance that is not ONLINE.

/usr/local/lib/python3.8/site-packages/lib389/_mapped_object.py:438: ValueError
Failed suites/syncrepl_plugin/openldap_test.py::test_syncrepl_openldap 6.82
topology = <lib389.topologies.TopologyMain object at 0x7f4d2d514460>

@pytest.mark.skipif(ds_is_older('1.4.4.0'), reason="Sync repl does not support openldap compat in older versions")
def test_syncrepl_openldap(topology):
""" Test basic functionality of the openldap syncrepl
compatability handler.

:id: 03039178-2cc6-40bd-b32c-7d6de108828b

:setup: Standalone instance

:steps:
1. Enable Retro Changelog
2. Enable Syncrepl
3. Run the syncstate test to check refresh, add, delete, mod.

:expectedresults:
1. Success
1. Success
1. Success
"""
st = topology.standalone
# Enable RetroChangelog.
rcl = RetroChangelogPlugin(st)
rcl.enable()
# Set the default targetid
rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId')
# Enable sync repl
csp = ContentSyncPlugin(st)
csp.enable()
# Restart DS
> st.restart()

suites/syncrepl_plugin/openldap_test.py:57:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:1276: in restart
self.start(timeout, post_open)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:1135: in start
subprocess.check_output(["systemctl", "start", "dirsrv@%s" % self.serverid], stderr=subprocess.STDOUT)
/usr/lib64/python3.8/subprocess.py:411: in check_output
return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

input = None, capture_output = False, timeout = None, check = True
popenargs = (['systemctl', 'start', 'dirsrv@standalone1'],)
kwargs = {'stderr': -2, 'stdout': -1}
process = <subprocess.Popen object at 0x7f4d2d45fe80>
stdout = b'Job for dirsrv@standalone1.service failed because the control process exited with error code.\nSee "systemctl status dirsrv@standalone1.service" and "journalctl -xe" for details.\n'
stderr = None, retcode = 1

def run(*popenargs,
input=None, capture_output=False, timeout=None, check=False, **kwargs):
"""Run command with arguments and return a CompletedProcess instance.

The returned instance will have attributes args, returncode, stdout and
stderr. By default, stdout and stderr are not captured, and those attributes
will be None. Pass stdout=PIPE and/or stderr=PIPE in order to capture them.

If check is True and the exit code was non-zero, it raises a
CalledProcessError. The CalledProcessError object will have the return code
in the returncode attribute, and output & stderr attributes if those streams
were captured.

If timeout is given, and the process takes too long, a TimeoutExpired
exception will be raised.

There is an optional argument "input", allowing you to
pass bytes or a string to the subprocess's stdin. If you use this argument
you may not also use the Popen constructor's "stdin" argument, as
it will be used internally.

By default, all communication is in bytes, and therefore any "input" should
be bytes, and the stdout and stderr will be bytes. If in text mode, any
"input" should be a string, and stdout and stderr will be strings decoded
according to locale encoding, or by "encoding" if set. Text mode is
triggered by setting any of text, encoding, errors or universal_newlines.

The other arguments are the same as for the Popen constructor.
"""
if input is not None:
if kwargs.get('stdin') is not None:
raise ValueError('stdin and input arguments may not both be used.')
kwargs['stdin'] = PIPE

if capture_output:
if kwargs.get('stdout') is not None or kwargs.get('stderr') is not None:
raise ValueError('stdout and stderr arguments may not be used '
'with capture_output.')
kwargs['stdout'] = PIPE
kwargs['stderr'] = PIPE

with Popen(*popenargs, **kwargs) as process:
try:
stdout, stderr = process.communicate(input, timeout=timeout)
except TimeoutExpired as exc:
process.kill()
if _mswindows:
# Windows accumulates the output in a single blocking
# read() call run on child threads, with the timeout
# being done in a join() on those threads. communicate()
# _after_ kill() is required to collect that and add it
# to the exception.
exc.stdout, exc.stderr = process.communicate()
else:
# POSIX _communicate already populated the output so
# far into the TimeoutExpired exception.
process.wait()
raise
except: # Including KeyboardInterrupt, communicate handled that.
process.kill()
# We don't call process.wait() as .__exit__ does that for us.
raise
retcode = process.poll()
if check and retcode:
> raise CalledProcessError(retcode, process.args,
output=stdout, stderr=stderr)
E subprocess.CalledProcessError: Command '['systemctl', 'start', 'dirsrv@standalone1']' returned non-zero exit status 1.

/usr/lib64/python3.8/subprocess.py:512: CalledProcessError
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
Failed suites/vlv/regression_test.py::test_bulk_import_when_the_backend_with_vlv_was_recreated 0.38
self = <lib389.mappingTree.MappingTreeLegacy object at 0x7f4d2d2a9f40>
suffix = 'dc=example,dc=com', bename = 'userRoot', parent = None

def create(self, suffix=None, bename=None, parent=None):
'''
Create a mapping tree entry (under "cn=mapping tree,cn=config"),
for the 'suffix' and that is stored in 'bename' backend.
'bename' backend must exist before creating the mapping tree entry.

If a 'parent' is provided that means that we are creating a
sub-suffix mapping tree.

@param suffix - suffix mapped by this mapping tree entry. It will
be the common name ('cn') of the entry
@param benamebase - backend common name (e.g. 'userRoot')
@param parent - if provided is a parent suffix of 'suffix'

@return DN of the mapping tree entry

@raise ldap.NO_SUCH_OBJECT - if the backend entry or parent mapping
tree does not exist
ValueError - if missing a parameter,

'''
# Check suffix is provided
if not suffix:
raise ValueError("suffix is mandatory")
else:
nsuffix = normalizeDN(suffix)

# Check backend name is provided
if not bename:
raise ValueError("backend name is mandatory")

# Check that if the parent suffix is provided then
# it exists a mapping tree for it
if parent:
nparent = normalizeDN(parent)
filt = suffixfilt(parent)
try:
entry = self.conn.getEntry(DN_MAPPING_TREE, ldap.SCOPE_SUBTREE,
filt)
pass
except NoSuchEntryError:
raise ValueError("parent suffix has no mapping tree")
else:
nparent = ""

# Check if suffix exists, return
filt = suffixfilt(suffix)
try:
entry = self.conn.getEntry(DN_MAPPING_TREE, ldap.SCOPE_SUBTREE,
filt)
return entry
except ldap.NO_SUCH_OBJECT:
entry = None

#
# Now start the real work
#

# fix me when we can actually used escaped DNs
dn = ','.join(('cn="%s"' % nsuffix, DN_MAPPING_TREE))
entry = Entry(dn)
entry.update({
'objectclass': ['top', 'extensibleObject', MT_OBJECTCLASS_VALUE],
'nsslapd-state': 'backend',
# the value in the dn has to be DN escaped
# internal code will add the quoted value - unquoted value is
# useful for searching.
MT_PROPNAME_TO_ATTRNAME[MT_SUFFIX]: nsuffix,
MT_PROPNAME_TO_ATTRNAME[MT_BACKEND]: bename
})

# possibly add the parent
if parent:
entry.setValues(MT_PROPNAME_TO_ATTRNAME[MT_PARENT_SUFFIX], nparent)

try:
self.log.debug("Creating entry: %s", entry.dn)
self.log.info("Entry %r", entry)
> self.conn.add_s(entry)

/usr/local/lib/python3.8/site-packages/lib389/mappingTree.py:155:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = (dn: cn="dc=example,dc=com",cn=mapping tree,cn=config
cn: dc=example,dc=com
nsslapd-backend: userRoot
nsslapd-state: backend
objectclass: top
objectclass: extensibleObject
objectclass: nsMappingTree

,)
kwargs = {}
c_stack = [FrameInfo(frame=<frame at 0x7f4d30c07040, file '/usr/local/lib/python3.8/site-packages/lib389/__init__.py', line 176,...mbda>', code_context=[' self._inner_hookexec = lambda hook, methods, kwargs: hook.multicall(\n'], index=0), ...]
frame = FrameInfo(frame=<frame at 0x55a9416a2b60, file '/usr/local/lib/python3.8/site-packages/lib389/mappingTree.py', line 15.../lib389/mappingTree.py', lineno=155, function='create', code_context=[' self.conn.add_s(entry)\n'], index=0)
ent = dn: cn="dc=example,dc=com",cn=mapping tree,cn=config
cn: dc=example,dc=com
nsslapd-backend: userRoot
nsslapd-state: backend
objectclass: top
objectclass: extensibleObject
objectclass: nsMappingTree



def inner(*args, **kwargs):
if name in [
'add_s',
'bind_s',
'delete_s',
'modify_s',
'modrdn_s',
'rename_s',
'sasl_interactive_bind_s',
'search_s',
'search_ext_s',
'simple_bind_s',
'unbind_s',
'getEntry',
] and not ('escapehatch' in kwargs and kwargs['escapehatch'] == 'i am sure'):
c_stack = inspect.stack()
frame = c_stack[1]

warnings.warn(DeprecationWarning("Use of raw ldap function %s. This will be removed in a future release. "
"Found in: %s:%s" % (name, frame.filename, frame.lineno)))
# Later, we will add a sleep here to make it even more painful.
# Finally, it will raise an exception.
elif 'escapehatch' in kwargs:
kwargs.pop('escapehatch')

if name == 'result':
objtype, data = f(*args, **kwargs)
# data is either a 2-tuple or a list of 2-tuples
# print data
if data:
if isinstance(data, tuple):
return objtype, Entry(data)
elif isinstance(data, list):
# AD sends back these search references
# if objtype == ldap.RES_SEARCH_RESULT and \
# isinstance(data[-1],tuple) and \
# not data[-1][0]:
# print "Received search reference: "
# pprint.pprint(data[-1][1])
# data.pop() # remove the last non-entry element

return objtype, [Entry(x) for x in data]
else:
raise TypeError("unknown data type %s returned by result" %
type(data))
else:
return objtype, data
elif name.startswith('add'):
# the first arg is self
# the second and third arg are the dn and the data to send
# We need to convert the Entry into the format used by
# python-ldap
ent = args[0]
if isinstance(ent, Entry):
> return f(ent.dn, ent.toTupleList(), *args[2:])

/usr/local/lib/python3.8/site-packages/lib389/__init__.py:176:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d2c687580>
dn = 'cn="dc=example,dc=com",cn=mapping tree,cn=config'
modlist = [('objectclass', [b'top', b'extensibleObject', b'nsMappingTree']), ('nsslapd-state', [b'backend']), ('cn', [b'dc=example,dc=com']), ('nsslapd-backend', [b'userRoot'])]

def add_s(self,dn,modlist):
> return self.add_ext_s(dn,modlist,None,None)

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:439:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('cn="dc=example,dc=com",cn=mapping tree,cn=config', [('objectclass', [b'top', b'extensibleObject', b'nsMappingTree']), ('nsslapd-state', [b'backend']), ('cn', [b'dc=example,dc=com']), ('nsslapd-backend', [b'userRoot'])], None, None)
kwargs = {}, ent = 'cn="dc=example,dc=com",cn=mapping tree,cn=config'

def inner(*args, **kwargs):
if name in [
'add_s',
'bind_s',
'delete_s',
'modify_s',
'modrdn_s',
'rename_s',
'sasl_interactive_bind_s',
'search_s',
'search_ext_s',
'simple_bind_s',
'unbind_s',
'getEntry',
] and not ('escapehatch' in kwargs and kwargs['escapehatch'] == 'i am sure'):
c_stack = inspect.stack()
frame = c_stack[1]

warnings.warn(DeprecationWarning("Use of raw ldap function %s. This will be removed in a future release. "
"Found in: %s:%s" % (name, frame.filename, frame.lineno)))
# Later, we will add a sleep here to make it even more painful.
# Finally, it will raise an exception.
elif 'escapehatch' in kwargs:
kwargs.pop('escapehatch')

if name == 'result':
objtype, data = f(*args, **kwargs)
# data is either a 2-tuple or a list of 2-tuples
# print data
if data:
if isinstance(data, tuple):
return objtype, Entry(data)
elif isinstance(data, list):
# AD sends back these search references
# if objtype == ldap.RES_SEARCH_RESULT and \
# isinstance(data[-1],tuple) and \
# not data[-1][0]:
# print "Received search reference: "
# pprint.pprint(data[-1][1])
# data.pop() # remove the last non-entry element

return objtype, [Entry(x) for x in data]
else:
raise TypeError("unknown data type %s returned by result" %
type(data))
else:
return objtype, data
elif name.startswith('add'):
# the first arg is self
# the second and third arg are the dn and the data to send
# We need to convert the Entry into the format used by
# python-ldap
ent = args[0]
if isinstance(ent, Entry):
return f(ent.dn, ent.toTupleList(), *args[2:])
else:
> return f(*args, **kwargs)

/usr/local/lib/python3.8/site-packages/lib389/__init__.py:178:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d2c687580>
dn = 'cn="dc=example,dc=com",cn=mapping tree,cn=config'
modlist = [('objectclass', [b'top', b'extensibleObject', b'nsMappingTree']), ('nsslapd-state', [b'backend']), ('cn', [b'dc=example,dc=com']), ('nsslapd-backend', [b'userRoot'])]
serverctrls = None, clientctrls = None

def add_ext_s(self,dn,modlist,serverctrls=None,clientctrls=None):
msgid = self.add_ext(dn,modlist,serverctrls,clientctrls)
> resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all=1,timeout=self.timeout)

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:425:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = (76,), kwargs = {'all': 1, 'timeout': -1}

def inner(*args, **kwargs):
if name in [
'add_s',
'bind_s',
'delete_s',
'modify_s',
'modrdn_s',
'rename_s',
'sasl_interactive_bind_s',
'search_s',
'search_ext_s',
'simple_bind_s',
'unbind_s',
'getEntry',
] and not ('escapehatch' in kwargs and kwargs['escapehatch'] == 'i am sure'):
c_stack = inspect.stack()
frame = c_stack[1]

warnings.warn(DeprecationWarning("Use of raw ldap function %s. This will be removed in a future release. "
"Found in: %s:%s" % (name, frame.filename, frame.lineno)))
# Later, we will add a sleep here to make it even more painful.
# Finally, it will raise an exception.
elif 'escapehatch' in kwargs:
kwargs.pop('escapehatch')

if name == 'result':
objtype, data = f(*args, **kwargs)
# data is either a 2-tuple or a list of 2-tuples
# print data
if data:
if isinstance(data, tuple):
return objtype, Entry(data)
elif isinstance(data, list):
# AD sends back these search references
# if objtype == ldap.RES_SEARCH_RESULT and \
# isinstance(data[-1],tuple) and \
# not data[-1][0]:
# print "Received search reference: "
# pprint.pprint(data[-1][1])
# data.pop() # remove the last non-entry element

return objtype, [Entry(x) for x in data]
else:
raise TypeError("unknown data type %s returned by result" %
type(data))
else:
return objtype, data
elif name.startswith('add'):
# the first arg is self
# the second and third arg are the dn and the data to send
# We need to convert the Entry into the format used by
# python-ldap
ent = args[0]
if isinstance(ent, Entry):
return f(ent.dn, ent.toTupleList(), *args[2:])
else:
return f(*args, **kwargs)
else:
> return f(*args, **kwargs)

/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d2c687580>, msgid = 76, all = 1
timeout = -1, resp_ctrl_classes = None

def result3(self,msgid=ldap.RES_ANY,all=1,timeout=None,resp_ctrl_classes=None):
> resp_type, resp_data, resp_msgid, decoded_resp_ctrls, retoid, retval = self.result4(
msgid,all,timeout,
add_ctrls=0,add_intermediates=0,add_extop=0,
resp_ctrl_classes=resp_ctrl_classes
)

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:764:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = (76, 1, -1)
kwargs = {'add_ctrls': 0, 'add_extop': 0, 'add_intermediates': 0, 'resp_ctrl_classes': None}

def inner(*args, **kwargs):
if name in [
'add_s',
'bind_s',
'delete_s',
'modify_s',
'modrdn_s',
'rename_s',
'sasl_interactive_bind_s',
'search_s',
'search_ext_s',
'simple_bind_s',
'unbind_s',
'getEntry',
] and not ('escapehatch' in kwargs and kwargs['escapehatch'] == 'i am sure'):
c_stack = inspect.stack()
frame = c_stack[1]

warnings.warn(DeprecationWarning("Use of raw ldap function %s. This will be removed in a future release. "
"Found in: %s:%s" % (name, frame.filename, frame.lineno)))
# Later, we will add a sleep here to make it even more painful.
# Finally, it will raise an exception.
elif 'escapehatch' in kwargs:
kwargs.pop('escapehatch')

if name == 'result':
objtype, data = f(*args, **kwargs)
# data is either a 2-tuple or a list of 2-tuples
# print data
if data:
if isinstance(data, tuple):
return objtype, Entry(data)
elif isinstance(data, list):
# AD sends back these search references
# if objtype == ldap.RES_SEARCH_RESULT and \
# isinstance(data[-1],tuple) and \
# not data[-1][0]:
# print "Received search reference: "
# pprint.pprint(data[-1][1])
# data.pop() # remove the last non-entry element

return objtype, [Entry(x) for x in data]
else:
raise TypeError("unknown data type %s returned by result" %
type(data))
else:
return objtype, data
elif name.startswith('add'):
# the first arg is self
# the second and third arg are the dn and the data to send
# We need to convert the Entry into the format used by
# python-ldap
ent = args[0]
if isinstance(ent, Entry):
return f(ent.dn, ent.toTupleList(), *args[2:])
else:
return f(*args, **kwargs)
else:
> return f(*args, **kwargs)

/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d2c687580>, msgid = 76, all = 1
timeout = -1, add_ctrls = 0, add_intermediates = 0, add_extop = 0
resp_ctrl_classes = None

def result4(self,msgid=ldap.RES_ANY,all=1,timeout=None,add_ctrls=0,add_intermediates=0,add_extop=0,resp_ctrl_classes=None):
if timeout is None:
timeout = self.timeout
> ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:774:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = (<built-in method result4 of LDAP object at 0x7f4d2d582f00>, 76, 1, -1, 0, 0, ...)
kwargs = {}

def inner(*args, **kwargs):
if name in [
'add_s',
'bind_s',
'delete_s',
'modify_s',
'modrdn_s',
'rename_s',
'sasl_interactive_bind_s',
'search_s',
'search_ext_s',
'simple_bind_s',
'unbind_s',
'getEntry',
] and not ('escapehatch' in kwargs and kwargs['escapehatch'] == 'i am sure'):
c_stack = inspect.stack()
frame = c_stack[1]

warnings.warn(DeprecationWarning("Use of raw ldap function %s. This will be removed in a future release. "
"Found in: %s:%s" % (name, frame.filename, frame.lineno)))
# Later, we will add a sleep here to make it even more painful.
# Finally, it will raise an exception.
elif 'escapehatch' in kwargs:
kwargs.pop('escapehatch')

if name == 'result':
objtype, data = f(*args, **kwargs)
# data is either a 2-tuple or a list of 2-tuples
# print data
if data:
if isinstance(data, tuple):
return objtype, Entry(data)
elif isinstance(data, list):
# AD sends back these search references
# if objtype == ldap.RES_SEARCH_RESULT and \
# isinstance(data[-1],tuple) and \
# not data[-1][0]:
# print "Received search reference: "
# pprint.pprint(data[-1][1])
# data.pop() # remove the last non-entry element

return objtype, [Entry(x) for x in data]
else:
raise TypeError("unknown data type %s returned by result" %
type(data))
else:
return objtype, data
elif name.startswith('add'):
# the first arg is self
# the second and third arg are the dn and the data to send
# We need to convert the Entry into the format used by
# python-ldap
ent = args[0]
if isinstance(ent, Entry):
return f(ent.dn, ent.toTupleList(), *args[2:])
else:
return f(*args, **kwargs)
else:
> return f(*args, **kwargs)

/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d2c687580>
func = <built-in method result4 of LDAP object at 0x7f4d2d582f00>
args = (76, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
exc_type = None, exc_value = None, exc_traceback = None

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
result = func(*args,**kwargs)
if __debug__ and self._trace_level>=2:
if func.__name__!="unbind_ext":
diagnostic_message_success = self._l.get_option(ldap.OPT_DIAGNOSTIC_MESSAGE)
finally:
self._ldap_object_lock.release()
except LDAPError as e:
exc_type,exc_value,exc_traceback = sys.exc_info()
try:
if 'info' not in e.args[0] and 'errno' in e.args[0]:
e.args[0]['info'] = strerror(e.args[0]['errno'])
except IndexError:
pass
if __debug__ and self._trace_level>=2:
self._trace_file.write('=> LDAPError - %s: %s\n' % (e.__class__.__name__,str(e)))
try:
> reraise(exc_type, exc_value, exc_traceback)

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:340:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

exc_type = <class 'ldap.UNWILLING_TO_PERFORM'>
exc_value = UNWILLING_TO_PERFORM({'msgtype': 105, 'msgid': 76, 'result': 53, 'desc': 'Server is unwilling to perform', 'ctrls': []})
exc_traceback = <traceback object at 0x7f4d2dacbf80>

def reraise(exc_type, exc_value, exc_traceback):
"""Re-raise an exception given information from sys.exc_info()

Note that unlike six.reraise, this does not support replacing the
traceback. All arguments must come from a single sys.exc_info() call.
"""
# In Python 3, all exception info is contained in one object.
> raise exc_value

/usr/local/lib64/python3.8/site-packages/ldap/compat.py:46:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d2c687580>
func = <built-in method result4 of LDAP object at 0x7f4d2d582f00>
args = (76, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
exc_type = None, exc_value = None, exc_traceback = None

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E ldap.UNWILLING_TO_PERFORM: {'msgtype': 105, 'msgid': 76, 'result': 53, 'desc': 'Server is unwilling to perform', 'ctrls': []}

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:324: UNWILLING_TO_PERFORM

During handling of the above exception, another exception occurred:

topology_m2 = <lib389.topologies.TopologyMain object at 0x7f4d2d2a5e20>

@pytest.mark.DS47966
def test_bulk_import_when_the_backend_with_vlv_was_recreated(topology_m2):
"""
Testing bulk import when the backend with VLV was recreated.
If the test passes without the server crash, 47966 is verified.

:id: 512963fa-fe02-11e8-b1d3-8c16451d917b
:setup: Replication with two masters.
:steps:
1. Generate vlvSearch entry
2. Generate vlvIndex entry
3. Delete the backend instance on Master 2
4. Delete the agreement, replica, and mapping tree, too.
5. Recreate the backend and the VLV index on Master 2.
6. Recreating vlvSrchDn and vlvIndexDn on Master 2.
:expectedresults:
1. Should Success.
2. Should Success.
3. Should Success.
4. Should Success.
5. Should Success.
6. Should Success.
"""
M1 = topology_m2.ms["master1"]
M2 = topology_m2.ms["master2"]
# generate vlvSearch entry
properties_for_search = {
"objectclass": ["top", "vlvSearch"],
"cn": "vlvSrch",
"vlvbase": DEFAULT_SUFFIX,
"vlvfilter": "(|(objectclass=*)(objectclass=ldapsubentry))",
"vlvscope": "2",
}
vlv_searches = VLVSearch(M2)
userroot_vlvsearch = vlv_searches.create(
basedn="cn=userRoot,cn=ldbm database,cn=plugins,cn=config",
properties=properties_for_search,
)
assert "cn=vlvSrch,cn=userRoot,cn=ldbm database,cn=plugins,cn=config" in M2.getEntry(
"cn=vlvSrch,cn=userRoot,cn=ldbm database,cn=plugins,cn=config").dn
# generate vlvIndex entry
properties_for_index = {
"objectclass": ["top", "vlvIndex"],
"cn": "vlvIdx",
"vlvsort": "cn ou sn",
}
vlv_index = VLVIndex(M2)
userroot_index = vlv_index.create(
basedn="cn=vlvSrch,cn=userRoot,cn=ldbm database,cn=plugins,cn=config",
properties=properties_for_index,
)
assert "cn=vlvIdx,cn=vlvSrch,cn=userRoot,cn=ldbm database,cn=plugins,cn=config" in M2.getEntry(
"cn=vlvIdx,cn=vlvSrch,cn=userRoot,cn=ldbm database,cn=plugins,cn=config").dn
# Delete the backend instance on Master 2."
userroot_index.delete()
userroot_vlvsearch.delete_all()
# delete the agreement, replica, and mapping tree, too.
repl = ReplicationManager(DEFAULT_SUFFIX)
repl.remove_master(M2)
MappingTrees(M2).list()[0].delete()
Backends(M2).list()[0].delete()
# Recreate the backend and the VLV index on Master 2.
> M2.mappingtree.create(DEFAULT_SUFFIX, "userRoot")

suites/vlv/regression_test.py:87:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.mappingTree.MappingTreeLegacy object at 0x7f4d2d2a9f40>
suffix = 'dc=example,dc=com', bename = 'userRoot', parent = None

def create(self, suffix=None, bename=None, parent=None):
'''
Create a mapping tree entry (under "cn=mapping tree,cn=config"),
for the 'suffix' and that is stored in 'bename' backend.
'bename' backend must exist before creating the mapping tree entry.

If a 'parent' is provided that means that we are creating a
sub-suffix mapping tree.

@param suffix - suffix mapped by this mapping tree entry. It will
be the common name ('cn') of the entry
@param benamebase - backend common name (e.g. 'userRoot')
@param parent - if provided is a parent suffix of 'suffix'

@return DN of the mapping tree entry

@raise ldap.NO_SUCH_OBJECT - if the backend entry or parent mapping
tree does not exist
ValueError - if missing a parameter,

'''
# Check suffix is provided
if not suffix:
raise ValueError("suffix is mandatory")
else:
nsuffix = normalizeDN(suffix)

# Check backend name is provided
if not bename:
raise ValueError("backend name is mandatory")

# Check that if the parent suffix is provided then
# it exists a mapping tree for it
if parent:
nparent = normalizeDN(parent)
filt = suffixfilt(parent)
try:
entry = self.conn.getEntry(DN_MAPPING_TREE, ldap.SCOPE_SUBTREE,
filt)
pass
except NoSuchEntryError:
raise ValueError("parent suffix has no mapping tree")
else:
nparent = ""

# Check if suffix exists, return
filt = suffixfilt(suffix)
try:
entry = self.conn.getEntry(DN_MAPPING_TREE, ldap.SCOPE_SUBTREE,
filt)
return entry
except ldap.NO_SUCH_OBJECT:
entry = None

#
# Now start the real work
#

# fix me when we can actually used escaped DNs
dn = ','.join(('cn="%s"' % nsuffix, DN_MAPPING_TREE))
entry = Entry(dn)
entry.update({
'objectclass': ['top', 'extensibleObject', MT_OBJECTCLASS_VALUE],
'nsslapd-state': 'backend',
# the value in the dn has to be DN escaped
# internal code will add the quoted value - unquoted value is
# useful for searching.
MT_PROPNAME_TO_ATTRNAME[MT_SUFFIX]: nsuffix,
MT_PROPNAME_TO_ATTRNAME[MT_BACKEND]: bename
})

# possibly add the parent
if parent:
entry.setValues(MT_PROPNAME_TO_ATTRNAME[MT_PARENT_SUFFIX], nparent)

try:
self.log.debug("Creating entry: %s", entry.dn)
self.log.info("Entry %r", entry)
self.conn.add_s(entry)
except ldap.LDAPError as e:
> raise ldap.LDAPError("Error adding suffix entry " + dn, e)
E ldap.LDAPError: ('Error adding suffix entry cn="dc=example,dc=com",cn=mapping tree,cn=config', UNWILLING_TO_PERFORM({'msgtype': 105, 'msgid': 76, 'result': 53, 'desc': 'Server is unwilling to perform', 'ctrls': []}))

/usr/local/lib/python3.8/site-packages/lib389/mappingTree.py:157: LDAPError
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39001, 'ldap-secureport': 63701, 'server-id': 'master1', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39002, 'ldap-secureport': 63702, 'server-id': 'master2', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:148 Creating replication topology. INFO  lib389.topologies:topologies.py:162 Joining master master2 to master1 ... INFO  lib389.replica:replica.py:2101 SUCCESS: bootstrap to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 completed INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is was created INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is was created INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is NOT working (expect beb8e4be-572c-470d-a644-fcdfd9325dd8 / got description=None) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is working INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is NOT working (expect 96c72a8c-f624-4b23-a4c0-1357f34581fc / got description=beb8e4be-572c-470d-a644-fcdfd9325dd8) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to 
ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is working INFO  lib389.replica:replica.py:2170 SUCCESS: joined master from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 INFO  lib389.topologies:topologies.py:170 Ensuring master master1 to master2 ... INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 already exists INFO  lib389.topologies:topologies.py:170 Ensuring master master2 to master1 ... INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 already exists
------------------------------Captured stdout call------------------------------
deleting vlv search: cn=vlvSrch,cn=userRoot,cn=ldbm database,cn=plugins,cn=config deleting vlv search entry...
-------------------------------Captured log call--------------------------------
INFO  lib389:mappingTree.py:154 Entry dn: cn="dc=example,dc=com",cn=mapping tree,cn=config cn: dc=example,dc=com nsslapd-backend: userRoot nsslapd-state: backend objectclass: top objectclass: extensibleObject objectclass: nsMappingTree
Failed tickets/ticket47619_test.py::test_ticket47619_init 6.09
topology_m1c1 = <lib389.topologies.TopologyMain object at 0x7f4d2d4998e0>

def test_ticket47619_init(topology_m1c1):
"""
Initialize the test environment
"""
topology_m1c1.ms["master1"].plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
# topology_m1c1.ms["master1"].plugins.enable(name=PLUGIN_MEMBER_OF)
# topology_m1c1.ms["master1"].plugins.enable(name=PLUGIN_REFER_INTEGRITY)
topology_m1c1.ms["master1"].stop(timeout=10)
> topology_m1c1.ms["master1"].start(timeout=10)

tickets/ticket47619_test.py:46:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:1135: in start
subprocess.check_output(["systemctl", "start", "dirsrv@%s" % self.serverid], stderr=subprocess.STDOUT)
/usr/lib64/python3.8/subprocess.py:411: in check_output
return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

input = None, capture_output = False, timeout = None, check = True
popenargs = (['systemctl', 'start', 'dirsrv@master1'],)
kwargs = {'stderr': -2, 'stdout': -1}
process = <subprocess.Popen object at 0x7f4d31132730>
stdout = b'Job for dirsrv@master1.service failed because the control process exited with error code.\nSee "systemctl status dirsrv@master1.service" and "journalctl -xe" for details.\n'
stderr = None, retcode = 1

def run(*popenargs,
input=None, capture_output=False, timeout=None, check=False, **kwargs):
"""Run command with arguments and return a CompletedProcess instance.

The returned instance will have attributes args, returncode, stdout and
stderr. By default, stdout and stderr are not captured, and those attributes
will be None. Pass stdout=PIPE and/or stderr=PIPE in order to capture them.

If check is True and the exit code was non-zero, it raises a
CalledProcessError. The CalledProcessError object will have the return code
in the returncode attribute, and output & stderr attributes if those streams
were captured.

If timeout is given, and the process takes too long, a TimeoutExpired
exception will be raised.

There is an optional argument "input", allowing you to
pass bytes or a string to the subprocess's stdin. If you use this argument
you may not also use the Popen constructor's "stdin" argument, as
it will be used internally.

By default, all communication is in bytes, and therefore any "input" should
be bytes, and the stdout and stderr will be bytes. If in text mode, any
"input" should be a string, and stdout and stderr will be strings decoded
according to locale encoding, or by "encoding" if set. Text mode is
triggered by setting any of text, encoding, errors or universal_newlines.

The other arguments are the same as for the Popen constructor.
"""
if input is not None:
if kwargs.get('stdin') is not None:
raise ValueError('stdin and input arguments may not both be used.')
kwargs['stdin'] = PIPE

if capture_output:
if kwargs.get('stdout') is not None or kwargs.get('stderr') is not None:
raise ValueError('stdout and stderr arguments may not be used '
'with capture_output.')
kwargs['stdout'] = PIPE
kwargs['stderr'] = PIPE

with Popen(*popenargs, **kwargs) as process:
try:
stdout, stderr = process.communicate(input, timeout=timeout)
except TimeoutExpired as exc:
process.kill()
if _mswindows:
# Windows accumulates the output in a single blocking
# read() call run on child threads, with the timeout
# being done in a join() on those threads. communicate()
# _after_ kill() is required to collect that and add it
# to the exception.
exc.stdout, exc.stderr = process.communicate()
else:
# POSIX _communicate already populated the output so
# far into the TimeoutExpired exception.
process.wait()
raise
except: # Including KeyboardInterrupt, communicate handled that.
process.kill()
# We don't call process.wait() as .__exit__ does that for us.
raise
retcode = process.poll()
if check and retcode:
> raise CalledProcessError(retcode, process.args,
output=stdout, stderr=stderr)
E subprocess.CalledProcessError: Command '['systemctl', 'start', 'dirsrv@master1']' returned non-zero exit status 1.

/usr/lib64/python3.8/subprocess.py:512: CalledProcessError
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39001, 'ldap-secureport': 63701, 'server-id': 'master1', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39201, 'ldap-secureport': 63901, 'server-id': 'consumer1', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:148 Creating replication topology. INFO  lib389.topologies:topologies.py:175 Joining consumer consumer1 from master1 ... INFO  lib389.replica:replica.py:2101 SUCCESS: bootstrap to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39201 completed INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39201 is was created INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39201 is NOT working (expect d6384e42-dace-4ff1-a2d6-2045a7034d1b / got description=None) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39201 is working INFO  lib389.replica:replica.py:2285 SUCCESS: joined consumer from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39201 INFO  lib389.topologies:topologies.py:180 Ensuring consumer consumer1 from master1 ... INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39201 already exists
Failed tickets/ticket47619_test.py::test_ticket47619_create_index 0.00
topology_m1c1 = <lib389.topologies.TopologyMain object at 0x7f4d2d4998e0>

def test_ticket47619_create_index(topology_m1c1):
args = {INDEX_TYPE: 'eq'}
for attr in ATTRIBUTES:
> topology_m1c1.ms["master1"].index.create(suffix=RETROCL_SUFFIX, attr=attr, args=args)

tickets/ticket47619_test.py:73:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/local/lib/python3.8/site-packages/lib389/index.py:210: in create
return self.addIndex(suffix, be_name, attr, indexTypes=indexTypes,
/usr/local/lib/python3.8/site-packages/lib389/index.py:224: in addIndex
entries_backend = self.conn.backend.list(suffix=suffix)
/usr/local/lib/python3.8/site-packages/lib389/backend.py:100: in list
ents = self.conn.search_s(base, scope, filt)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:870: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:864: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:148: in inner
objtype, data = f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:756: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:760: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:764: in result3
resp_type, resp_data, resp_msgid, decoded_resp_ctrls, retoid, retval = self.result4(
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:774: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:340: in _ldap_call
reraise(exc_type, exc_value, exc_traceback)
/usr/local/lib64/python3.8/site-packages/ldap/compat.py:46: in reraise
raise exc_value
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d2d57d8e0>
func = <built-in method result4 of LDAP object at 0x7f4d3063f270>
args = (41, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
exc_type = None, exc_value = None, exc_traceback = None

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E ldap.SERVER_DOWN: {'result': -1, 'desc': "Can't contact LDAP server", 'ctrls': []}

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:324: SERVER_DOWN
-------------------------------Captured log call--------------------------------
INFO  lib389:backend.py:80 List backend with suffix=cn=changelog
Failed tickets/ticket47619_test.py::test_ticket47619_reindex 0.00
topology_m1c1 = <lib389.topologies.TopologyMain object at 0x7f4d2d4998e0>

def test_ticket47619_reindex(topology_m1c1):
'''
Reindex all the attributes in ATTRIBUTES
'''
args = {TASK_WAIT: True}
for attr in ATTRIBUTES:
> rc = topology_m1c1.ms["master1"].tasks.reindex(suffix=RETROCL_SUFFIX, attrname=attr, args=args)

tickets/ticket47619_test.py:83:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/local/lib/python3.8/site-packages/lib389/tasks.py:723: in reindex
entries_backend = self.conn.backends.list()
/usr/local/lib/python3.8/site-packages/lib389/_mapped_object.py:1048: in list
results = self._instance.search_ext_s(
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:863: in search_ext_s
msgid = self.search_ext(base,scope,filterstr,attrlist,attrsonly,serverctrls,clientctrls,timeout,sizelimit)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:853: in search_ext
return self._ldap_call(
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:340: in _ldap_call
reraise(exc_type, exc_value, exc_traceback)
/usr/local/lib64/python3.8/site-packages/ldap/compat.py:46: in reraise
raise exc_value
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d2d57d8e0>
func = <built-in method search_ext of LDAP object at 0x7f4d3063f270>
args = ('cn=ldbm database,cn=plugins,cn=config', 2, '(&(objectclass=nsBackendInstance))', ['dn'], 0, None, ...)
kwargs = {}, diagnostic_message_success = None, exc_type = None
exc_value = None, exc_traceback = None

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E ldap.SERVER_DOWN: {'result': -1, 'desc': "Can't contact LDAP server", 'ctrls': []}

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:324: SERVER_DOWN
Failed tickets/ticket47619_test.py::test_ticket47619_check_indexed_search 0.00
topology_m1c1 = <lib389.topologies.TopologyMain object at 0x7f4d2d4998e0>

def test_ticket47619_check_indexed_search(topology_m1c1):
for attr in ATTRIBUTES:
> ents = topology_m1c1.ms["master1"].search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, "(%s=hello)" % attr)

tickets/ticket47619_test.py:89:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:870: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:863: in search_ext_s
msgid = self.search_ext(base,scope,filterstr,attrlist,attrsonly,serverctrls,clientctrls,timeout,sizelimit)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:853: in search_ext
return self._ldap_call(
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:340: in _ldap_call
reraise(exc_type, exc_value, exc_traceback)
/usr/local/lib64/python3.8/site-packages/ldap/compat.py:46: in reraise
raise exc_value
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d2d57d8e0>
func = <built-in method search_ext of LDAP object at 0x7f4d3063f270>
args = ('cn=changelog', 2, '(street=hello)', None, 0, None, ...), kwargs = {}
diagnostic_message_success = None, exc_type = None, exc_value = None
exc_traceback = None

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E ldap.SERVER_DOWN: {'result': -1, 'desc': "Can't contact LDAP server", 'ctrls': []}

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:324: SERVER_DOWN
Failed tickets/ticket47781_test.py::test_ticket47781 3.70
topology_st = <lib389.topologies.TopologyMain object at 0x7f4d2c6f5940>

def test_ticket47781(topology_st):
"""
Testing for a deadlock after doing an online import of an LDIF with
replication data. The replication agreement should be invalid.
"""

log.info('Testing Ticket 47781 - Testing for deadlock after importing LDIF with replication data')

master = topology_st.standalone
repl = ReplicationManager(DEFAULT_SUFFIX)
repl.create_first_master(master)

properties = {RA_NAME: r'meTo_$host:$port',
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
# The agreement should point to a server that does NOT exist (invalid port)
repl_agreement = master.agreement.create(suffix=DEFAULT_SUFFIX,
host=master.host,
port=5555,
properties=properties)

#
# add two entries
#
log.info('Adding two entries...')

master.add_s(Entry(('cn=entry1,dc=example,dc=com', {
'objectclass': 'top person'.split(),
'sn': 'user',
'cn': 'entry1'})))

master.add_s(Entry(('cn=entry2,dc=example,dc=com', {
'objectclass': 'top person'.split(),
'sn': 'user',
'cn': 'entry2'})))

#
# export the replication ldif
#
log.info('Exporting replication ldif...')
args = {EXPORT_REPL_INFO: True}
exportTask = Tasks(master)
exportTask.exportLDIF(DEFAULT_SUFFIX, None, "/tmp/export.ldif", args)

#
# Restart the server
#
log.info('Restarting server...')
master.stop()
master.start()

#
# Import the ldif
#
log.info('Import replication LDIF file...')
importTask = Tasks(master)
args = {TASK_WAIT: True}
> importTask.importLDIF(DEFAULT_SUFFIX, None, "/tmp/export.ldif", args)

tickets/ticket47781_test.py:85:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.tasks.Tasks object at 0x7f4d2c7260d0>
suffix = 'dc=example,dc=com', benamebase = None, input_file = '/tmp/export.ldif'
args = {'wait': True}

def importLDIF(self, suffix=None, benamebase=None, input_file=None,
args=None):
'''
Import from a LDIF format a given 'suffix' (or 'benamebase' that stores
that suffix). It uses an internal task to achieve this request.

If 'suffix' and 'benamebase' are specified, it uses 'benamebase' first
else 'suffix'.
If both 'suffix' and 'benamebase' are missing it raise ValueError

'input_file' is the ldif input file

@param suffix - suffix of the backend
@param benamebase - 'commonname'/'cn' of the backend (e.g. 'userRoot')
@param ldif_input - file that will contain the entries in LDIF format
to import
@param args - is a dictionary that contains modifier of the import task
wait: True/[False] - If True, 'export' waits for the completion
of the task before to return

@return None

@raise ValueError

'''
if self.conn.state != DIRSRV_STATE_ONLINE:
raise ValueError("Invalid Server State %s! Must be online" % self.conn.state)

# Checking the parameters
if not benamebase and not suffix:
raise ValueError("Specify either bename or suffix")

if not input_file:
raise ValueError("input_file is mandatory")

if not os.path.exists(input_file):
> raise ValueError("Import file (%s) does not exist" % input_file)
E ValueError: Import file (/tmp/export.ldif) does not exist

/usr/local/lib/python3.8/site-packages/lib389/tasks.py:473: ValueError
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
-------------------------------Captured log call--------------------------------
INFO  lib389:tasks.py:567 Export task export_10222020_220851 for file /tmp/export.ldif completed successfully
Failed tickets/ticket47871_test.py::test_ticket47871_init 5.42
topology_m1c1 = <lib389.topologies.TopologyMain object at 0x7f4d2d423a30>

def test_ticket47871_init(topology_m1c1):
"""
Initialize the test environment
"""
topology_m1c1.ms["master1"].plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
mod = [(ldap.MOD_REPLACE, 'nsslapd-changelogmaxage', b"10s"), # 10 second trimming
(ldap.MOD_REPLACE, 'nsslapd-changelog-trim-interval', b"5s")]
topology_m1c1.ms["master1"].modify_s("cn=%s,%s" % (PLUGIN_RETRO_CHANGELOG, DN_PLUGIN), mod)
# topology_m1c1.ms["master1"].plugins.enable(name=PLUGIN_MEMBER_OF)
# topology_m1c1.ms["master1"].plugins.enable(name=PLUGIN_REFER_INTEGRITY)
topology_m1c1.ms["master1"].stop(timeout=10)
> topology_m1c1.ms["master1"].start(timeout=10)

tickets/ticket47871_test.py:51:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:1135: in start
subprocess.check_output(["systemctl", "start", "dirsrv@%s" % self.serverid], stderr=subprocess.STDOUT)
/usr/lib64/python3.8/subprocess.py:411: in check_output
return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

input = None, capture_output = False, timeout = None, check = True
popenargs = (['systemctl', 'start', 'dirsrv@master1'],)
kwargs = {'stderr': -2, 'stdout': -1}
process = <subprocess.Popen object at 0x7f4d2c79b340>
stdout = b'Job for dirsrv@master1.service failed because the control process exited with error code.\nSee "systemctl status dirsrv@master1.service" and "journalctl -xe" for details.\n'
stderr = None, retcode = 1

def run(*popenargs,
input=None, capture_output=False, timeout=None, check=False, **kwargs):
"""Run command with arguments and return a CompletedProcess instance.

The returned instance will have attributes args, returncode, stdout and
stderr. By default, stdout and stderr are not captured, and those attributes
will be None. Pass stdout=PIPE and/or stderr=PIPE in order to capture them.

If check is True and the exit code was non-zero, it raises a
CalledProcessError. The CalledProcessError object will have the return code
in the returncode attribute, and output & stderr attributes if those streams
were captured.

If timeout is given, and the process takes too long, a TimeoutExpired
exception will be raised.

There is an optional argument "input", allowing you to
pass bytes or a string to the subprocess's stdin. If you use this argument
you may not also use the Popen constructor's "stdin" argument, as
it will be used internally.

By default, all communication is in bytes, and therefore any "input" should
be bytes, and the stdout and stderr will be bytes. If in text mode, any
"input" should be a string, and stdout and stderr will be strings decoded
according to locale encoding, or by "encoding" if set. Text mode is
triggered by setting any of text, encoding, errors or universal_newlines.

The other arguments are the same as for the Popen constructor.
"""
if input is not None:
if kwargs.get('stdin') is not None:
raise ValueError('stdin and input arguments may not both be used.')
kwargs['stdin'] = PIPE

if capture_output:
if kwargs.get('stdout') is not None or kwargs.get('stderr') is not None:
raise ValueError('stdout and stderr arguments may not be used '
'with capture_output.')
kwargs['stdout'] = PIPE
kwargs['stderr'] = PIPE

with Popen(*popenargs, **kwargs) as process:
try:
stdout, stderr = process.communicate(input, timeout=timeout)
except TimeoutExpired as exc:
process.kill()
if _mswindows:
# Windows accumulates the output in a single blocking
# read() call run on child threads, with the timeout
# being done in a join() on those threads. communicate()
# _after_ kill() is required to collect that and add it
# to the exception.
exc.stdout, exc.stderr = process.communicate()
else:
# POSIX _communicate already populated the output so
# far into the TimeoutExpired exception.
process.wait()
raise
except: # Including KeyboardInterrupt, communicate handled that.
process.kill()
# We don't call process.wait() as .__exit__ does that for us.
raise
retcode = process.poll()
if check and retcode:
> raise CalledProcessError(retcode, process.args,
output=stdout, stderr=stderr)
E subprocess.CalledProcessError: Command '['systemctl', 'start', 'dirsrv@master1']' returned non-zero exit status 1.

/usr/lib64/python3.8/subprocess.py:512: CalledProcessError
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39001, 'ldap-secureport': 63701, 'server-id': 'master1', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39201, 'ldap-secureport': 63901, 'server-id': 'consumer1', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:148 Creating replication topology. INFO  lib389.topologies:topologies.py:175 Joining consumer consumer1 from master1 ... INFO  lib389.replica:replica.py:2101 SUCCESS: bootstrap to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39201 completed INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39201 is was created INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39201 is NOT working (expect 0db0f081-5568-4877-90ac-0374032dbe9c / got description=None) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39201 is working INFO  lib389.replica:replica.py:2285 SUCCESS: joined consumer from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39201 INFO  lib389.topologies:topologies.py:180 Ensuring consumer consumer1 from master1 ... INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39201 already exists
Failed tickets/ticket47871_test.py::test_ticket47871_1 0.00
topology_m1c1 = <lib389.topologies.TopologyMain object at 0x7f4d2d423a30>

def test_ticket47871_1(topology_m1c1):
'''
ADD entries and check they are all in the retrocl
'''
# add dummy entries
for cpt in range(MAX_OTHERS):
name = "%s%d" % (OTHER_NAME, cpt)
> topology_m1c1.ms["master1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
'objectclass': "top person".split(),
'sn': name,
'cn': name})))

tickets/ticket47871_test.py:66:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:176: in inner
return f(ent.dn, ent.toTupleList(), *args[2:])
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:439: in add_s
return self.add_ext_s(dn,modlist,None,None)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:178: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:425: in add_ext_s
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all=1,timeout=self.timeout)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:764: in result3
resp_type, resp_data, resp_msgid, decoded_resp_ctrls, retoid, retval = self.result4(
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:774: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:340: in _ldap_call
reraise(exc_type, exc_value, exc_traceback)
/usr/local/lib64/python3.8/site-packages/ldap/compat.py:46: in reraise
raise exc_value
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d2d462520>
func = <built-in method result4 of LDAP object at 0x7f4d2d3168a0>
args = (42, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
exc_type = None, exc_value = None, exc_traceback = None

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E ldap.SERVER_DOWN: {'result': -1, 'desc': "Can't contact LDAP server", 'ctrls': []}

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:324: SERVER_DOWN
Failed tickets/ticket47871_test.py::test_ticket47871_2 6.01
topology_m1c1 = <lib389.topologies.TopologyMain object at 0x7f4d2d423a30>

def test_ticket47871_2(topology_m1c1):
'''
Wait until only the last entry remains
'''
MAX_TRIES = 10
TRY_NO = 1
while TRY_NO <= MAX_TRIES:
time.sleep(6) # at least 1 trimming occurred
> ents = topology_m1c1.ms["master1"].search_s(RETROCL_SUFFIX, ldap.SCOPE_ONELEVEL, "(objectclass=*)")

tickets/ticket47871_test.py:91:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:870: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:863: in search_ext_s
msgid = self.search_ext(base,scope,filterstr,attrlist,attrsonly,serverctrls,clientctrls,timeout,sizelimit)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:853: in search_ext
return self._ldap_call(
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:340: in _ldap_call
reraise(exc_type, exc_value, exc_traceback)
/usr/local/lib64/python3.8/site-packages/ldap/compat.py:46: in reraise
raise exc_value
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d2d462520>
func = <built-in method search_ext of LDAP object at 0x7f4d2d3168a0>
args = ('cn=changelog', 1, '(objectclass=*)', None, 0, None, ...), kwargs = {}
diagnostic_message_success = None, exc_type = None, exc_value = None
exc_traceback = None

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E ldap.SERVER_DOWN: {'result': -1, 'desc': "Can't contact LDAP server", 'ctrls': []}

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:324: SERVER_DOWN
Failed tickets/ticket47931_test.py::test_ticket47931 0.04
topology_st = <lib389.topologies.TopologyMain object at 0x7f4d2d739a00>

def test_ticket47931(topology_st):
"""Test Retro Changelog and MemberOf deadlock fix.
Verification steps:
- Enable retro cl and memberOf.
- Create two backends: A & B.
- Configure retrocl scoping for backend A.
- Configure memberOf plugin for uniquemember
- Create group in backend A.
- In parallel, add members to the group on A, and make modifications
to entries in backend B.
- Make sure the server does not hang during the updates to both
backends.

"""

# Enable dynamic plugins to make plugin configuration easier
try:
topology_st.standalone.modify_s(DN_CONFIG,
[(ldap.MOD_REPLACE,
'nsslapd-dynamic-plugins',
b'on')])
except ldap.LDAPError as e:
log.error('Failed to enable dynamic plugins! ' + e.args[0]['desc'])
assert False

# Enable the plugins
topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
> topology_st.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)

tickets/ticket47931_test.py:80:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/local/lib/python3.8/site-packages/lib389/plugins.py:2105: in enable
plugin.enable()
/usr/local/lib/python3.8/site-packages/lib389/plugins.py:58: in enable
self.set('nsslapd-pluginEnabled', 'on')
/usr/local/lib/python3.8/site-packages/lib389/_mapped_object.py:446: in set
return self._instance.modify_ext_s(self._dn, [(action, key, value)],
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:613: in modify_ext_s
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all=1,timeout=self.timeout)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:764: in result3
resp_type, resp_data, resp_msgid, decoded_resp_ctrls, retoid, retval = self.result4(
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:774: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:340: in _ldap_call
reraise(exc_type, exc_value, exc_traceback)
/usr/local/lib64/python3.8/site-packages/ldap/compat.py:46: in reraise
raise exc_value
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d2d739820>
func = <built-in method result4 of LDAP object at 0x7f4d2d0aa8d0>
args = (7, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
exc_type = None, exc_value = None, exc_traceback = None

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E ldap.UNWILLING_TO_PERFORM: {'msgtype': 103, 'msgid': 7, 'result': 53, 'desc': 'Server is unwilling to perform', 'ctrls': [], 'info': 'Failed to start plugin "Retro Changelog Plugin". See errors log.'}

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:324: UNWILLING_TO_PERFORM
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
Failed tickets/ticket47988_test.py::test_ticket47988_init 6.21
topology_m2 = <lib389.topologies.TopologyMain object at 0x7f4d2c74c730>

def test_ticket47988_init(topology_m2):
"""
It adds
- Objectclass with MAY 'member'
- an entry ('bind_entry') with which we bind to test the 'SELFDN' operation
It deletes the anonymous aci

"""

_header(topology_m2, 'test_ticket47988_init')

# enable acl error logging
mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', ensure_bytes(str(8192)))] # REPL
topology_m2.ms["master1"].modify_s(DN_CONFIG, mod)
topology_m2.ms["master2"].modify_s(DN_CONFIG, mod)

mod = [(ldap.MOD_REPLACE, 'nsslapd-accesslog-level', ensure_bytes(str(260)))] # Internal op
topology_m2.ms["master1"].modify_s(DN_CONFIG, mod)
topology_m2.ms["master2"].modify_s(DN_CONFIG, mod)

# add dummy entries
for cpt in range(MAX_OTHERS):
name = "%s%d" % (OTHER_NAME, cpt)
topology_m2.ms["master1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
'objectclass': "top person".split(),
'sn': name,
'cn': name})))

# check that entry 0 is replicated before
loop = 0
entryDN = "cn=%s0,%s" % (OTHER_NAME, SUFFIX)
while loop <= 10:
try:
ent = topology_m2.ms["master2"].getEntry(entryDN, ldap.SCOPE_BASE, "(objectclass=*)", ['telephonenumber'])
break
except ldap.NO_SUCH_OBJECT:
time.sleep(1)
loop += 1
assert (loop <= 10)

topology_m2.ms["master1"].stop(timeout=10)
topology_m2.ms["master2"].stop(timeout=10)

# install the specific schema M1: ipa3.3, M2: ipa4.1
schema_file = os.path.join(topology_m2.ms["master1"].getDir(__file__, DATA_DIR), "ticket47988/schema_ipa3.3.tar.gz")
_install_schema(topology_m2.ms["master1"], schema_file)
schema_file = os.path.join(topology_m2.ms["master1"].getDir(__file__, DATA_DIR), "ticket47988/schema_ipa4.1.tar.gz")
_install_schema(topology_m2.ms["master2"], schema_file)

> topology_m2.ms["master1"].start(timeout=10)

/export/tests/tickets/ticket47988_test.py:157:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:1135: in start
subprocess.check_output(["systemctl", "start", "dirsrv@%s" % self.serverid], stderr=subprocess.STDOUT)
/usr/lib64/python3.8/subprocess.py:411: in check_output
return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

input = None, capture_output = False, timeout = None, check = True
popenargs = (['systemctl', 'start', 'dirsrv@master1'],)
kwargs = {'stderr': -2, 'stdout': -1}
process = <subprocess.Popen object at 0x7f4d2c735670>
stdout = b'Job for dirsrv@master1.service failed because the control process exited with error code.\nSee "systemctl status dirsrv@master1.service" and "journalctl -xe" for details.\n'
stderr = None, retcode = 1

def run(*popenargs,
input=None, capture_output=False, timeout=None, check=False, **kwargs):
"""Run command with arguments and return a CompletedProcess instance.

The returned instance will have attributes args, returncode, stdout and
stderr. By default, stdout and stderr are not captured, and those attributes
will be None. Pass stdout=PIPE and/or stderr=PIPE in order to capture them.

If check is True and the exit code was non-zero, it raises a
CalledProcessError. The CalledProcessError object will have the return code
in the returncode attribute, and output & stderr attributes if those streams
were captured.

If timeout is given, and the process takes too long, a TimeoutExpired
exception will be raised.

There is an optional argument "input", allowing you to
pass bytes or a string to the subprocess's stdin. If you use this argument
you may not also use the Popen constructor's "stdin" argument, as
it will be used internally.

By default, all communication is in bytes, and therefore any "input" should
be bytes, and the stdout and stderr will be bytes. If in text mode, any
"input" should be a string, and stdout and stderr will be strings decoded
according to locale encoding, or by "encoding" if set. Text mode is
triggered by setting any of text, encoding, errors or universal_newlines.

The other arguments are the same as for the Popen constructor.
"""
if input is not None:
if kwargs.get('stdin') is not None:
raise ValueError('stdin and input arguments may not both be used.')
kwargs['stdin'] = PIPE

if capture_output:
if kwargs.get('stdout') is not None or kwargs.get('stderr') is not None:
raise ValueError('stdout and stderr arguments may not be used '
'with capture_output.')
kwargs['stdout'] = PIPE
kwargs['stderr'] = PIPE

with Popen(*popenargs, **kwargs) as process:
try:
stdout, stderr = process.communicate(input, timeout=timeout)
except TimeoutExpired as exc:
process.kill()
if _mswindows:
# Windows accumulates the output in a single blocking
# read() call run on child threads, with the timeout
# being done in a join() on those threads. communicate()
# _after_ kill() is required to collect that and add it
# to the exception.
exc.stdout, exc.stderr = process.communicate()
else:
# POSIX _communicate already populated the output so
# far into the TimeoutExpired exception.
process.wait()
raise
except: # Including KeyboardInterrupt, communicate handled that.
process.kill()
# We don't call process.wait() as .__exit__ does that for us.
raise
retcode = process.poll()
if check and retcode:
> raise CalledProcessError(retcode, process.args,
output=stdout, stderr=stderr)
E subprocess.CalledProcessError: Command '['systemctl', 'start', 'dirsrv@master1']' returned non-zero exit status 1.

/usr/lib64/python3.8/subprocess.py:512: CalledProcessError
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39001, 'ldap-secureport': 63701, 'server-id': 'master1', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39002, 'ldap-secureport': 63702, 'server-id': 'master2', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:148 Creating replication topology. INFO  lib389.topologies:topologies.py:162 Joining master master2 to master1 ... INFO  lib389.replica:replica.py:2101 SUCCESS: bootstrap to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 completed INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is was created INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is was created INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is NOT working (expect 6bdc3cd7-6836-44a6-a1e5-9c4c3ba952b8 / got description=None) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is working INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is NOT working (expect ba420fab-83de-4516-8da1-df8b31113a2a / got description=6bdc3cd7-6836-44a6-a1e5-9c4c3ba952b8) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to 
ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is working INFO  lib389.replica:replica.py:2170 SUCCESS: joined master from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 INFO  lib389.topologies:topologies.py:170 Ensuring master master1 to master2 ... INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 already exists INFO  lib389.topologies:topologies.py:170 Ensuring master master2 to master1 ... INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 already exists
-------------------------------Captured log call--------------------------------
INFO  lib389:ticket47988_test.py:64 ############################################### INFO  lib389:ticket47988_test.py:65 ####### INFO  lib389:ticket47988_test.py:66 ####### test_ticket47988_init INFO  lib389:ticket47988_test.py:67 ####### INFO  lib389:ticket47988_test.py:68 ################################################### INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/02common.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/50ns-admin.ldif INFO  lib389:ticket47988_test.py:98 replace /etc/dirsrv/slapd-master1/schema/99user.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/60nss-ldap.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/60autofs.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/50ns-web.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/60samba.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/10dna-plugin.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/05rfc4523.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/60basev2.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/10automember-plugin.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/05rfc2927.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/10mep-plugin.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/60ipadns.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/10rfc2307.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/50ns-mail.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/05rfc4524.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/60trust.ldif INFO  lib389:ticket47988_test.py:102 add 
/etc/dirsrv/slapd-master1/schema/60ipaconfig.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/50ns-directory.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/60eduperson.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/60mozilla.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/65ipasudo.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/60rfc3712.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/60rfc2739.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/50ns-value.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/60acctpolicy.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/01core389.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/60sabayon.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/60pam-plugin.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/00core.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/25java-object.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/60sudo.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/70ipaotp.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/60pureftpd.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/61kerberos-ipav3.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/60kerberos.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/60basev3.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/06inetorgperson.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/30ns-common.ldif INFO  lib389:ticket47988_test.py:102 add 
/etc/dirsrv/slapd-master1/schema/28pilot.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/20subscriber.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/50ns-certificate.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master1/schema/60posix-winsync-plugin.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/02common.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/50ns-admin.ldif INFO  lib389:ticket47988_test.py:98 replace /etc/dirsrv/slapd-master2/schema/99user.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/60nss-ldap.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/60autofs.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/50ns-web.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/60samba.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/10dna-plugin.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/05rfc4523.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/60basev2.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/10automember-plugin.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/05rfc2927.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/10mep-plugin.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/60ipadns.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/10rfc2307.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/50ns-mail.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/05rfc4524.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/60trust.ldif INFO  lib389:ticket47988_test.py:102 add 
/etc/dirsrv/slapd-master2/schema/60ipaconfig.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/50ns-directory.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/60eduperson.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/60mozilla.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/65ipasudo.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/60rfc3712.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/60rfc2739.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/50ns-value.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/60acctpolicy.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/01core389.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/60sabayon.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/60pam-plugin.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/00core.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/25java-object.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/60sudo.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/70ipaotp.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/60pureftpd.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/61kerberos-ipav3.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/60kerberos.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/60basev3.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/06inetorgperson.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/30ns-common.ldif INFO  lib389:ticket47988_test.py:102 add 
/etc/dirsrv/slapd-master2/schema/28pilot.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/20subscriber.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/50ns-certificate.ldif INFO  lib389:ticket47988_test.py:102 add /etc/dirsrv/slapd-master2/schema/60posix-winsync-plugin.ldif
Failed tickets/ticket47988_test.py::test_ticket47988_1 0.00
topology_m2 = <lib389.topologies.TopologyMain object at 0x7f4d2c74c730>

def test_ticket47988_1(topology_m2):
'''
Check that replication is working and pause replication M2->M1
'''
_header(topology_m2, 'test_ticket47988_1')

topology_m2.ms["master1"].log.debug("\n\nCheck that replication is working and pause replication M2->M1\n")
> _do_update_entry(supplier=topology_m2.ms["master2"], consumer=topology_m2.ms["master1"], attempts=5)

/export/tests/tickets/ticket47988_test.py:234:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/export/tests/tickets/ticket47988_test.py:184: in _do_update_entry
supplier.modify_s(entryDN, mod)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:640: in modify_s
return self.modify_ext_s(dn,modlist,None,None)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:613: in modify_ext_s
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all=1,timeout=self.timeout)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:764: in result3
resp_type, resp_data, resp_msgid, decoded_resp_ctrls, retoid, retval = self.result4(
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:774: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:340: in _ldap_call
reraise(exc_type, exc_value, exc_traceback)
/usr/local/lib64/python3.8/site-packages/ldap/compat.py:46: in reraise
raise exc_value
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d2c7657f0>
func = <built-in method result4 of LDAP object at 0x7f4d2c75fd20>
args = (26, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
exc_type = None, exc_value = None, exc_traceback = None

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E ldap.SERVER_DOWN: {'result': -1, 'desc': "Can't contact LDAP server", 'ctrls': []}

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:324: SERVER_DOWN
-------------------------------Captured log call--------------------------------
INFO  lib389:ticket47988_test.py:64 ############################################### INFO  lib389:ticket47988_test.py:65 ####### INFO  lib389:ticket47988_test.py:66 ####### test_ticket47988_1 INFO  lib389:ticket47988_test.py:67 ####### INFO  lib389:ticket47988_test.py:68 ###################################################
Failed tickets/ticket47988_test.py::test_ticket47988_2 0.00
topology_m2 = <lib389.topologies.TopologyMain object at 0x7f4d2c74c730>

def test_ticket47988_2(topology_m2):
'''
Update M1 schema and trigger update M1->M2
So M1 should learn new/extended definitions that are in M2 schema
'''
_header(topology_m2, 'test_ticket47988_2')

topology_m2.ms["master1"].log.debug("\n\nUpdate M1 schema and an entry on M1\n")
> master1_schema_csn = topology_m2.ms["master1"].schema.get_schema_csn()

/export/tests/tickets/ticket47988_test.py:246:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/local/lib/python3.8/site-packages/lib389/schema.py:604: in get_schema_csn
ents = self.conn.search_s(DN_SCHEMA, ldap.SCOPE_BASE,
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:870: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:864: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:148: in inner
objtype, data = f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:756: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:760: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:764: in result3
resp_type, resp_data, resp_msgid, decoded_resp_ctrls, retoid, retval = self.result4(
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:774: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:340: in _ldap_call
reraise(exc_type, exc_value, exc_traceback)
/usr/local/lib64/python3.8/site-packages/ldap/compat.py:46: in reraise
raise exc_value
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d2c74cb50>
func = <built-in method result4 of LDAP object at 0x7f4d2c765420>
args = (62, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
exc_type = None, exc_value = None, exc_traceback = None

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E ldap.SERVER_DOWN: {'result': -1, 'desc': "Can't contact LDAP server", 'ctrls': []}

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:324: SERVER_DOWN
-------------------------------Captured log call--------------------------------
INFO  lib389:ticket47988_test.py:64 ############################################### INFO  lib389:ticket47988_test.py:65 ####### INFO  lib389:ticket47988_test.py:66 ####### test_ticket47988_2 INFO  lib389:ticket47988_test.py:67 ####### INFO  lib389:ticket47988_test.py:68 ###################################################
Failed tickets/ticket47988_test.py::test_ticket47988_3 0.00
topology_m2 = <lib389.topologies.TopologyMain object at 0x7f4d2c74c730>

def test_ticket47988_3(topology_m2):
'''
Resume replication M2->M1 and check replication is still working
'''
_header(topology_m2, 'test_ticket47988_3')

> _resume_M2_to_M1(topology_m2)

/export/tests/tickets/ticket47988_test.py:283:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/export/tests/tickets/ticket47988_test.py:222: in _resume_M2_to_M1
ents = topology_m2.ms["master2"].agreement.list(suffix=SUFFIX)
/usr/local/lib/python3.8/site-packages/lib389/agreement.py:905: in list
replica_entries = self.conn.replica.list(suffix)
/usr/local/lib/python3.8/site-packages/lib389/replica.py:178: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:870: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:863: in search_ext_s
msgid = self.search_ext(base,scope,filterstr,attrlist,attrsonly,serverctrls,clientctrls,timeout,sizelimit)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:853: in search_ext
return self._ldap_call(
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:340: in _ldap_call
reraise(exc_type, exc_value, exc_traceback)
/usr/local/lib64/python3.8/site-packages/ldap/compat.py:46: in reraise
raise exc_value
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d2c7657f0>
func = <built-in method search_ext of LDAP object at 0x7f4d2c75fd20>
args = ('cn=mapping tree,cn=config', 2, '(&(objectclass=nsds5Replica)(nsDS5ReplicaRoot=dc=example,dc=com))', None, 0, None, ...)
kwargs = {}, diagnostic_message_success = None, exc_type = None
exc_value = None, exc_traceback = None

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E ldap.SERVER_DOWN: {'result': -1, 'desc': "Can't contact LDAP server", 'ctrls': []}

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:324: SERVER_DOWN
-------------------------------Captured log call--------------------------------
INFO  lib389:ticket47988_test.py:64 ############################################### INFO  lib389:ticket47988_test.py:65 ####### INFO  lib389:ticket47988_test.py:66 ####### test_ticket47988_3 INFO  lib389:ticket47988_test.py:67 ####### INFO  lib389:ticket47988_test.py:68 ################################################### INFO  lib389:ticket47988_test.py:221 ######################### resume RA M2->M1 ######################
Failed tickets/ticket47988_test.py::test_ticket47988_4 0.01
topology_m2 = <lib389.topologies.TopologyMain object at 0x7f4d2c74c730>

def test_ticket47988_4(topology_m2):
'''
Check schemaCSN is identical on both server
And save the nsschemaCSN to later check they do not change unexpectedly
'''
_header(topology_m2, 'test_ticket47988_4')

> master1_schema_csn = topology_m2.ms["master1"].schema.get_schema_csn()

/export/tests/tickets/ticket47988_test.py:295:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/local/lib/python3.8/site-packages/lib389/schema.py:604: in get_schema_csn
ents = self.conn.search_s(DN_SCHEMA, ldap.SCOPE_BASE,
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:870: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:863: in search_ext_s
msgid = self.search_ext(base,scope,filterstr,attrlist,attrsonly,serverctrls,clientctrls,timeout,sizelimit)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:853: in search_ext
return self._ldap_call(
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:340: in _ldap_call
reraise(exc_type, exc_value, exc_traceback)
/usr/local/lib64/python3.8/site-packages/ldap/compat.py:46: in reraise
raise exc_value
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d2c74cb50>
func = <built-in method search_ext of LDAP object at 0x7f4d2c765420>
args = ('cn=schema', 0, 'objectclass=*', ['nsSchemaCSN'], 0, None, ...)
kwargs = {}, diagnostic_message_success = None, exc_type = None
exc_value = None, exc_traceback = None

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E ldap.SERVER_DOWN: {'result': -1, 'desc': "Can't contact LDAP server", 'ctrls': []}

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:324: SERVER_DOWN
-------------------------------Captured log call--------------------------------
INFO  lib389:ticket47988_test.py:64 ############################################### INFO  lib389:ticket47988_test.py:65 ####### INFO  lib389:ticket47988_test.py:66 ####### test_ticket47988_4 INFO  lib389:ticket47988_test.py:67 ####### INFO  lib389:ticket47988_test.py:68 ###################################################
Failed tickets/ticket47988_test.py::test_ticket47988_5 0.00
topology_m2 = <lib389.topologies.TopologyMain object at 0x7f4d2c74c730>

def test_ticket47988_5(topology_m2):
'''
Check schemaCSN do not change unexpectedly
'''
_header(topology_m2, 'test_ticket47988_5')

> _do_update_entry(supplier=topology_m2.ms["master1"], consumer=topology_m2.ms["master2"], attempts=5)

/export/tests/tickets/ticket47988_test.py:313:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/export/tests/tickets/ticket47988_test.py:184: in _do_update_entry
supplier.modify_s(entryDN, mod)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:640: in modify_s
return self.modify_ext_s(dn,modlist,None,None)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:612: in modify_ext_s
msgid = self.modify_ext(dn,modlist,serverctrls,clientctrls)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:609: in modify_ext
return self._ldap_call(self._l.modify_ext,dn,modlist,RequestControlTuples(serverctrls),RequestControlTuples(clientctrls))
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:340: in _ldap_call
reraise(exc_type, exc_value, exc_traceback)
/usr/local/lib64/python3.8/site-packages/ldap/compat.py:46: in reraise
raise exc_value
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d2c74cb50>
func = <built-in method modify_ext of LDAP object at 0x7f4d2c765420>
args = ('cn=other_entry0,dc=example,dc=com', [(2, 'telephonenumber', b'134')], None, None)
kwargs = {}, diagnostic_message_success = None, exc_type = None
exc_value = None, exc_traceback = None

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E ldap.SERVER_DOWN: {'result': -1, 'desc': "Can't contact LDAP server", 'ctrls': []}

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:324: SERVER_DOWN
-------------------------------Captured log call--------------------------------
INFO  lib389:ticket47988_test.py:64 ############################################### INFO  lib389:ticket47988_test.py:65 ####### INFO  lib389:ticket47988_test.py:66 ####### test_ticket47988_5 INFO  lib389:ticket47988_test.py:67 ####### INFO  lib389:ticket47988_test.py:68 ###################################################
Failed tickets/ticket47988_test.py::test_ticket47988_6 0.00
topology_m2 = <lib389.topologies.TopologyMain object at 0x7f4d2c74c730>

def test_ticket47988_6(topology_m2):
'''
Update M1 schema and trigger update M2->M1
So M2 should learn new/extended definitions that are in M1 schema
'''

_header(topology_m2, 'test_ticket47988_6')

topology_m2.ms["master1"].log.debug("\n\nUpdate M1 schema and an entry on M1\n")
> master1_schema_csn = topology_m2.ms["master1"].schema.get_schema_csn()

/export/tests/tickets/ticket47988_test.py:336:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/local/lib/python3.8/site-packages/lib389/schema.py:604: in get_schema_csn
ents = self.conn.search_s(DN_SCHEMA, ldap.SCOPE_BASE,
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:870: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:863: in search_ext_s
msgid = self.search_ext(base,scope,filterstr,attrlist,attrsonly,serverctrls,clientctrls,timeout,sizelimit)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:853: in search_ext
return self._ldap_call(
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:340: in _ldap_call
reraise(exc_type, exc_value, exc_traceback)
/usr/local/lib64/python3.8/site-packages/ldap/compat.py:46: in reraise
raise exc_value
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d2c74cb50>
func = <built-in method search_ext of LDAP object at 0x7f4d2c765420>
args = ('cn=schema', 0, 'objectclass=*', ['nsSchemaCSN'], 0, None, ...)
kwargs = {}, diagnostic_message_success = None, exc_type = None
exc_value = None, exc_traceback = None

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E ldap.SERVER_DOWN: {'result': -1, 'desc': "Can't contact LDAP server", 'ctrls': []}

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:324: SERVER_DOWN
-------------------------------Captured log call--------------------------------
INFO  lib389:ticket47988_test.py:64 ############################################### INFO  lib389:ticket47988_test.py:65 ####### INFO  lib389:ticket47988_test.py:66 ####### test_ticket47988_6 INFO  lib389:ticket47988_test.py:67 ####### INFO  lib389:ticket47988_test.py:68 ###################################################
Failed tickets/ticket48005_test.py::test_ticket48005_usn 4.85
topology_st = <lib389.topologies.TopologyMain object at 0x7f4d2c30ca60>

def test_ticket48005_usn(topology_st):
'''
Enable entryusn
Delete all user entries.
Run USN tombstone cleanup task
Shutdown the server
Check if a core file was generated or not
If no core was found, this test case was successful.
'''
log.info("Ticket 48005 usn test...")
topology_st.standalone.plugins.enable(name=PLUGIN_USN)

topology_st.standalone.restart(timeout=10)

try:
> entries = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "(objectclass=inetorgperson)")

/export/tests/tickets/ticket48005_test.py:283:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:870: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:864: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:148: in inner
objtype, data = f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:756: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:760: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:764: in result3
resp_type, resp_data, resp_msgid, decoded_resp_ctrls, retoid, retval = self.result4(
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:774: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:340: in _ldap_call
reraise(exc_type, exc_value, exc_traceback)
/usr/local/lib64/python3.8/site-packages/ldap/compat.py:46: in reraise
raise exc_value
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d2c30c280>
func = <built-in method result4 of LDAP object at 0x7f4d26f06ba0>
args = (3, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
exc_type = None, exc_value = None, exc_traceback = None

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E ldap.NO_SUCH_OBJECT: {'msgtype': 101, 'msgid': 3, 'result': 32, 'desc': 'No such object', 'ctrls': []}

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:324: NO_SUCH_OBJECT
-------------------------------Captured log call--------------------------------
INFO  tests.tickets.ticket48005_test:ticket48005_test.py:277 Ticket 48005 usn test...
Failed tickets/ticket48013_test.py::test_ticket48013 0.03
topology_st = <lib389.topologies.TopologyMain object at 0x7f4d2c756d90>

def test_ticket48013(topology_st):
'''
Content Synchonization: Test that invalid cookies are caught
'''

cookies = ('#', '##', 'a#a#a', 'a#a#1')

# Enable dynamic plugins
try:
topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'on')])
except ldap.LDAPError as e:
log.error('Failed to enable dynamic plugin! {}'.format(e.args[0]['desc']))
assert False

# Enable retro changelog
> topology_st.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)

/export/tests/tickets/ticket48013_test.py:58:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/local/lib/python3.8/site-packages/lib389/plugins.py:2105: in enable
plugin.enable()
/usr/local/lib/python3.8/site-packages/lib389/plugins.py:58: in enable
self.set('nsslapd-pluginEnabled', 'on')
/usr/local/lib/python3.8/site-packages/lib389/_mapped_object.py:446: in set
return self._instance.modify_ext_s(self._dn, [(action, key, value)],
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:613: in modify_ext_s
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all=1,timeout=self.timeout)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:764: in result3
resp_type, resp_data, resp_msgid, decoded_resp_ctrls, retoid, retval = self.result4(
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:774: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:340: in _ldap_call
reraise(exc_type, exc_value, exc_traceback)
/usr/local/lib64/python3.8/site-packages/ldap/compat.py:46: in reraise
raise exc_value
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d2c7569d0>
func = <built-in method result4 of LDAP object at 0x7f4d2c4f7cf0>
args = (5, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
exc_type = None, exc_value = None, exc_traceback = None

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E ldap.UNWILLING_TO_PERFORM: {'msgtype': 103, 'msgid': 5, 'result': 53, 'desc': 'Server is unwilling to perform', 'ctrls': [], 'info': 'Failed to start plugin "Retro Changelog Plugin". See errors log.'}

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:324: UNWILLING_TO_PERFORM
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
Failed tickets/ticket48194_test.py::test_run_1 7.12
topology_st = <lib389.topologies.TopologyMain object at 0x7f4d2c74cdc0>

def test_run_1(topology_st):
"""
Check nsSSL3Ciphers: +all
All ciphers are enabled except null.
Note: default allowWeakCipher (i.e., off) for +all
"""
_header(topology_st, 'Test Case 2 - Check the ciphers availability for "+all" with default allowWeakCiphers')

topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'64')])
# Make sure allowWeakCipher is not set.
topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'allowWeakCipher', None)])

log.info("\n######################### Restarting the server ######################\n")
topology_st.standalone.stop(timeout=10)
os.system('mv %s %s.48194_0' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(2)
topology_st.standalone.start(timeout=120)

> connectWithOpenssl(topology_st, 'DES-CBC3-SHA', False)

/export/tests/tickets/ticket48194_test.py:158:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_st = <lib389.topologies.TopologyMain object at 0x7f4d2c74cdc0>
cipher = 'DES-CBC3-SHA', expect = False

def connectWithOpenssl(topology_st, cipher, expect):
"""
Connect with the given cipher
Condition:
If expect is True, the handshake should be successful.
If expect is False, the handshake should be refused with
access log: "Cannot communicate securely with peer:
no common encryption algorithm(s)."
"""
log.info("Testing %s -- expect to handshake %s", cipher, "successfully" if expect else "failed")

myurl = 'localhost:%s' % LDAPSPORT
cmdline = ['/usr/bin/openssl', 's_client', '-connect', myurl, '-cipher', cipher]

strcmdline = " ".join(cmdline)
log.info("Running cmdline: %s", strcmdline)

try:
proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
except ValueError:
log.info("%s failed: %s", cmdline, ValueError)
proc.kill()

while True:
l = proc.stdout.readline()
if l == b"":
break
if b'Cipher is' in l:
log.info("Found: %s", l)
if expect:
if b'(NONE)' in l:
assert False
else:
proc.stdin.close()
assert True
else:
if b'(NONE)' in l:
assert True
else:
proc.stdin.close()
> assert False
E assert False

/export/tests/tickets/ticket48194_test.py:117: AssertionError
-------------------------------Captured log call--------------------------------
INFO  lib389:ticket48194_test.py:40 ############################################### INFO  lib389:ticket48194_test.py:41 ####### Test Case 2 - Check the ciphers availability for "+all" with default allowWeakCiphers INFO  lib389:ticket48194_test.py:42 ############################################### INFO  lib389.utils:ticket48194_test.py:151 ######################### Restarting the server ###################### INFO  lib389.utils:ticket48194_test.py:86 Testing DES-CBC3-SHA -- expect to handshake failed INFO  lib389.utils:ticket48194_test.py:92 Running cmdline: /usr/bin/openssl s_client -connect localhost:63601 -cipher DES-CBC3-SHA INFO  lib389.utils:ticket48194_test.py:105 Found: b'New, TLSv1.3, Cipher is TLS_AES_128_GCM_SHA256\n'
Failed tickets/ticket48194_test.py::test_run_2 6.55
topology_st = <lib389.topologies.TopologyMain object at 0x7f4d2c74cdc0>

def test_run_2(topology_st):
"""
Check nsSSL3Ciphers: +rsa_aes_128_sha,+rsa_aes_256_sha
rsa_aes_128_sha, tls_rsa_aes_128_sha, rsa_aes_256_sha, tls_rsa_aes_256_sha are enabled.
default allowWeakCipher
"""
_header(topology_st,
'Test Case 3 - Check the ciphers availability for "+rsa_aes_128_sha,+rsa_aes_256_sha" with default allowWeakCipher')

topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
topology_st.standalone.modify_s(ENCRYPTION_DN,
[(ldap.MOD_REPLACE, 'nsSSL3Ciphers', b'+rsa_aes_128_sha,+rsa_aes_256_sha')])

log.info("\n######################### Restarting the server ######################\n")
topology_st.standalone.stop(timeout=10)
os.system('mv %s %s.48194_1' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(2)
topology_st.standalone.start(timeout=120)

connectWithOpenssl(topology_st, 'DES-CBC3-SHA', False)
connectWithOpenssl(topology_st, 'AES256-SHA256', False)
> connectWithOpenssl(topology_st, 'AES128-SHA', True)

/export/tests/tickets/ticket48194_test.py:184:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_st = <lib389.topologies.TopologyMain object at 0x7f4d2c74cdc0>
cipher = 'AES128-SHA', expect = True

def connectWithOpenssl(topology_st, cipher, expect):
"""
Connect with the given cipher
Condition:
If expect is True, the handshake should be successful.
If expect is False, the handshake should be refused with
access log: "Cannot communicate securely with peer:
no common encryption algorithm(s)."
"""
log.info("Testing %s -- expect to handshake %s", cipher, "successfully" if expect else "failed")

myurl = 'localhost:%s' % LDAPSPORT
cmdline = ['/usr/bin/openssl', 's_client', '-connect', myurl, '-cipher', cipher]

strcmdline = " ".join(cmdline)
log.info("Running cmdline: %s", strcmdline)

try:
proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
except ValueError:
log.info("%s failed: %s", cmdline, ValueError)
proc.kill()

while True:
l = proc.stdout.readline()
if l == b"":
break
if b'Cipher is' in l:
log.info("Found: %s", l)
if expect:
if b'(NONE)' in l:
> assert False
E assert False

/export/tests/tickets/ticket48194_test.py:108: AssertionError
-------------------------------Captured log call--------------------------------
INFO  lib389:ticket48194_test.py:40 ############################################### INFO  lib389:ticket48194_test.py:41 ####### Test Case 3 - Check the ciphers availability for "+rsa_aes_128_sha,+rsa_aes_256_sha" with default allowWeakCipher INFO  lib389:ticket48194_test.py:42 ############################################### INFO  lib389.utils:ticket48194_test.py:175 ######################### Restarting the server ###################### INFO  lib389.utils:ticket48194_test.py:86 Testing DES-CBC3-SHA -- expect to handshake failed INFO  lib389.utils:ticket48194_test.py:92 Running cmdline: /usr/bin/openssl s_client -connect localhost:63601 -cipher DES-CBC3-SHA INFO  lib389.utils:ticket48194_test.py:105 Found: b'New, (NONE), Cipher is (NONE)\n' INFO  lib389.utils:ticket48194_test.py:86 Testing AES256-SHA256 -- expect to handshake failed INFO  lib389.utils:ticket48194_test.py:92 Running cmdline: /usr/bin/openssl s_client -connect localhost:63601 -cipher AES256-SHA256 INFO  lib389.utils:ticket48194_test.py:105 Found: b'New, (NONE), Cipher is (NONE)\n' INFO  lib389.utils:ticket48194_test.py:86 Testing AES128-SHA -- expect to handshake successfully INFO  lib389.utils:ticket48194_test.py:92 Running cmdline: /usr/bin/openssl s_client -connect localhost:63601 -cipher AES128-SHA INFO  lib389.utils:ticket48194_test.py:105 Found: b'New, (NONE), Cipher is (NONE)\n'
Failed tickets/ticket48194_test.py::test_run_4 7.16
topology_st = <lib389.topologies.TopologyMain object at 0x7f4d2c74cdc0>

def test_run_4(topology_st):
"""
Check no nsSSL3Ciphers
Default ciphers are enabled.
default allowWeakCipher
"""
_header(topology_st, 'Test Case 5 - Check no nsSSL3Ciphers (-all) with default allowWeakCipher')

topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'nsSSL3Ciphers', b'-all')])

log.info("\n######################### Restarting the server ######################\n")
topology_st.standalone.stop(timeout=10)
os.system('mv %s %s.48194_3' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(2)
topology_st.standalone.start(timeout=120)

> connectWithOpenssl(topology_st, 'DES-CBC3-SHA', False)

/export/tests/tickets/ticket48194_test.py:228:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_st = <lib389.topologies.TopologyMain object at 0x7f4d2c74cdc0>
cipher = 'DES-CBC3-SHA', expect = False

def connectWithOpenssl(topology_st, cipher, expect):
"""
Connect with the given cipher
Condition:
If expect is True, the handshake should be successful.
If expect is False, the handshake should be refused with
access log: "Cannot communicate securely with peer:
no common encryption algorithm(s)."
"""
log.info("Testing %s -- expect to handshake %s", cipher, "successfully" if expect else "failed")

myurl = 'localhost:%s' % LDAPSPORT
cmdline = ['/usr/bin/openssl', 's_client', '-connect', myurl, '-cipher', cipher]

strcmdline = " ".join(cmdline)
log.info("Running cmdline: %s", strcmdline)

try:
proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
except ValueError:
log.info("%s failed: %s", cmdline, ValueError)
proc.kill()

while True:
l = proc.stdout.readline()
if l == b"":
break
if b'Cipher is' in l:
log.info("Found: %s", l)
if expect:
if b'(NONE)' in l:
assert False
else:
proc.stdin.close()
assert True
else:
if b'(NONE)' in l:
assert True
else:
proc.stdin.close()
> assert False
E assert False

/export/tests/tickets/ticket48194_test.py:117: AssertionError
-------------------------------Captured log call--------------------------------
INFO  lib389:ticket48194_test.py:40 ############################################### INFO  lib389:ticket48194_test.py:41 ####### Test Case 5 - Check no nsSSL3Ciphers (-all) with default allowWeakCipher INFO  lib389:ticket48194_test.py:42 ############################################### INFO  lib389.utils:ticket48194_test.py:221 ######################### Restarting the server ###################### INFO  lib389.utils:ticket48194_test.py:86 Testing DES-CBC3-SHA -- expect to handshake failed INFO  lib389.utils:ticket48194_test.py:92 Running cmdline: /usr/bin/openssl s_client -connect localhost:63601 -cipher DES-CBC3-SHA INFO  lib389.utils:ticket48194_test.py:105 Found: b'New, TLSv1.3, Cipher is TLS_AES_128_GCM_SHA256\n'
Failed tickets/ticket48194_test.py::test_run_5 7.00
topology_st = <lib389.topologies.TopologyMain object at 0x7f4d2c74cdc0>

def test_run_5(topology_st):
"""
Check nsSSL3Ciphers: default
Default ciphers are enabled.
default allowWeakCipher
"""
_header(topology_st, 'Test Case 6 - Check default nsSSL3Ciphers (default setting) with default allowWeakCipher')

topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', b'default')])

log.info("\n######################### Restarting the server ######################\n")
topology_st.standalone.stop(timeout=10)
os.system('mv %s %s.48194_4' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(2)
topology_st.standalone.start(timeout=120)

> connectWithOpenssl(topology_st, 'DES-CBC3-SHA', False)

/export/tests/tickets/ticket48194_test.py:250:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_st = <lib389.topologies.TopologyMain object at 0x7f4d2c74cdc0>
cipher = 'DES-CBC3-SHA', expect = False

def connectWithOpenssl(topology_st, cipher, expect):
"""
Connect with the given cipher
Condition:
If expect is True, the handshake should be successful.
If expect is False, the handshake should be refused with
access log: "Cannot communicate securely with peer:
no common encryption algorithm(s)."
"""
log.info("Testing %s -- expect to handshake %s", cipher, "successfully" if expect else "failed")

myurl = 'localhost:%s' % LDAPSPORT
cmdline = ['/usr/bin/openssl', 's_client', '-connect', myurl, '-cipher', cipher]

strcmdline = " ".join(cmdline)
log.info("Running cmdline: %s", strcmdline)

try:
proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
except ValueError:
log.info("%s failed: %s", cmdline, ValueError)
proc.kill()

while True:
l = proc.stdout.readline()
if l == b"":
break
if b'Cipher is' in l:
log.info("Found: %s", l)
if expect:
if b'(NONE)' in l:
assert False
else:
proc.stdin.close()
assert True
else:
if b'(NONE)' in l:
assert True
else:
proc.stdin.close()
> assert False
E assert False

/export/tests/tickets/ticket48194_test.py:117: AssertionError
-------------------------------Captured log call--------------------------------
INFO  lib389:ticket48194_test.py:40 ############################################### INFO  lib389:ticket48194_test.py:41 ####### Test Case 6 - Check default nsSSL3Ciphers (default setting) with default allowWeakCipher INFO  lib389:ticket48194_test.py:42 ############################################### INFO  lib389.utils:ticket48194_test.py:243 ######################### Restarting the server ###################### INFO  lib389.utils:ticket48194_test.py:86 Testing DES-CBC3-SHA -- expect to handshake failed INFO  lib389.utils:ticket48194_test.py:92 Running cmdline: /usr/bin/openssl s_client -connect localhost:63601 -cipher DES-CBC3-SHA INFO  lib389.utils:ticket48194_test.py:105 Found: b'New, TLSv1.3, Cipher is TLS_AES_128_GCM_SHA256\n'
Failed tickets/ticket48194_test.py::test_run_6 6.79
topology_st = <lib389.topologies.TopologyMain object at 0x7f4d2c74cdc0>

def test_run_6(topology_st):
"""
Check nsSSL3Ciphers: +all,-TLS_RSA_WITH_AES_256_CBC_SHA256
All ciphers are disabled.
default allowWeakCipher
"""
_header(topology_st,
'Test Case 7 - Check nsSSL3Ciphers: +all,-TLS_RSA_WITH_AES_256_CBC_SHA256 with default allowWeakCipher')

topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
topology_st.standalone.modify_s(ENCRYPTION_DN,
[(ldap.MOD_REPLACE, 'nsSSL3Ciphers', b'+all,-TLS_RSA_WITH_AES_256_CBC_SHA256')])

log.info("\n######################### Restarting the server ######################\n")
topology_st.standalone.stop(timeout=10)
os.system('mv %s %s.48194_5' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(2)
topology_st.standalone.start(timeout=120)

> connectWithOpenssl(topology_st, 'DES-CBC3-SHA', False)

/export/tests/tickets/ticket48194_test.py:274:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_st = <lib389.topologies.TopologyMain object at 0x7f4d2c74cdc0>
cipher = 'DES-CBC3-SHA', expect = False

def connectWithOpenssl(topology_st, cipher, expect):
"""
Connect with the given cipher
Condition:
If expect is True, the handshake should be successful.
If expect is False, the handshake should be refused with
access log: "Cannot communicate securely with peer:
no common encryption algorithm(s)."
"""
log.info("Testing %s -- expect to handshake %s", cipher, "successfully" if expect else "failed")

myurl = 'localhost:%s' % LDAPSPORT
cmdline = ['/usr/bin/openssl', 's_client', '-connect', myurl, '-cipher', cipher]

strcmdline = " ".join(cmdline)
log.info("Running cmdline: %s", strcmdline)

try:
proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
except ValueError:
log.info("%s failed: %s", cmdline, ValueError)
proc.kill()

while True:
l = proc.stdout.readline()
if l == b"":
break
if b'Cipher is' in l:
log.info("Found: %s", l)
if expect:
if b'(NONE)' in l:
assert False
else:
proc.stdin.close()
assert True
else:
if b'(NONE)' in l:
assert True
else:
proc.stdin.close()
> assert False
E assert False

/export/tests/tickets/ticket48194_test.py:117: AssertionError
-------------------------------Captured log call--------------------------------
INFO  lib389:ticket48194_test.py:40 ############################################### INFO  lib389:ticket48194_test.py:41 ####### Test Case 7 - Check nsSSL3Ciphers: +all,-TLS_RSA_WITH_AES_256_CBC_SHA256 with default allowWeakCipher INFO  lib389:ticket48194_test.py:42 ############################################### INFO  lib389.utils:ticket48194_test.py:267 ######################### Restarting the server ###################### INFO  lib389.utils:ticket48194_test.py:86 Testing DES-CBC3-SHA -- expect to handshake failed INFO  lib389.utils:ticket48194_test.py:92 Running cmdline: /usr/bin/openssl s_client -connect localhost:63601 -cipher DES-CBC3-SHA INFO  lib389.utils:ticket48194_test.py:105 Found: b'New, TLSv1.3, Cipher is TLS_AES_128_GCM_SHA256\n'
Failed tickets/ticket48194_test.py::test_run_8 6.82
topology_st = <lib389.topologies.TopologyMain object at 0x7f4d2c74cdc0>

def test_run_8(topology_st):
"""
Check nsSSL3Ciphers: default + allowWeakCipher: off
Strong Default ciphers are enabled.
"""
_header(topology_st, 'Test Case 9 - Check default nsSSL3Ciphers (default setting + allowWeakCipher: off)')

topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', b'default'),
(ldap.MOD_REPLACE, 'allowWeakCipher', b'off')])

log.info("\n######################### Restarting the server ######################\n")
topology_st.standalone.stop(timeout=10)
os.system('mv %s %s.48194_7' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(2)
topology_st.standalone.start(timeout=120)

> connectWithOpenssl(topology_st, 'DES-CBC3-SHA', False)

/export/tests/tickets/ticket48194_test.py:297:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_st = <lib389.topologies.TopologyMain object at 0x7f4d2c74cdc0>
cipher = 'DES-CBC3-SHA', expect = False

def connectWithOpenssl(topology_st, cipher, expect):
"""
Connect with the given cipher
Condition:
If expect is True, the handshake should be successful.
If expect is False, the handshake should be refused with
access log: "Cannot communicate securely with peer:
no common encryption algorithm(s)."
"""
log.info("Testing %s -- expect to handshake %s", cipher, "successfully" if expect else "failed")

myurl = 'localhost:%s' % LDAPSPORT
cmdline = ['/usr/bin/openssl', 's_client', '-connect', myurl, '-cipher', cipher]

strcmdline = " ".join(cmdline)
log.info("Running cmdline: %s", strcmdline)

try:
proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
except ValueError:
log.info("%s failed: %s", cmdline, ValueError)
proc.kill()

while True:
l = proc.stdout.readline()
if l == b"":
break
if b'Cipher is' in l:
log.info("Found: %s", l)
if expect:
if b'(NONE)' in l:
assert False
else:
proc.stdin.close()
assert True
else:
if b'(NONE)' in l:
assert True
else:
proc.stdin.close()
> assert False
E assert False

/export/tests/tickets/ticket48194_test.py:117: AssertionError
-------------------------------Captured log call--------------------------------
INFO  lib389:ticket48194_test.py:40 ############################################### INFO  lib389:ticket48194_test.py:41 ####### Test Case 9 - Check default nsSSL3Ciphers (default setting + allowWeakCipher: off) INFO  lib389:ticket48194_test.py:42 ############################################### INFO  lib389.utils:ticket48194_test.py:290 ######################### Restarting the server ###################### INFO  lib389.utils:ticket48194_test.py:86 Testing DES-CBC3-SHA -- expect to handshake failed INFO  lib389.utils:ticket48194_test.py:92 Running cmdline: /usr/bin/openssl s_client -connect localhost:63601 -cipher DES-CBC3-SHA INFO  lib389.utils:ticket48194_test.py:105 Found: b'New, TLSv1.3, Cipher is TLS_AES_128_GCM_SHA256\n'
Failed tickets/ticket48266_test.py::test_ticket48266_count_csn_evaluation 0.13
topology_m2 = <lib389.topologies.TopologyMain object at 0x7f4d2c03e610>
entries = None

def test_ticket48266_count_csn_evaluation(topology_m2, entries):
ents = topology_m2.ms["master1"].agreement.list(suffix=SUFFIX)
assert len(ents) == 1
> first_csn = _get_first_not_replicated_csn(topology_m2)

/export/tests/tickets/ticket48266_test.py:176:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_m2 = <lib389.topologies.TopologyMain object at 0x7f4d2c03e610>

def _get_first_not_replicated_csn(topology_m2):
name = "cn=%s2,%s" % (NEW_ACCOUNT, SUFFIX)

# read the first CSN that will not be replicated
mod = [(ldap.MOD_REPLACE, 'telephonenumber', ensure_bytes('123456'))]
topology_m2.ms["master1"].modify_s(name, mod)
msgid = topology_m2.ms["master1"].search_ext(name, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi'])
rtype, rdata, rmsgid = topology_m2.ms["master1"].result2(msgid)
attrs = None
for dn, raw_attrs in rdata:
topology_m2.ms["master1"].log.info("dn: %s" % dn)
if 'nscpentrywsi' in raw_attrs:
attrs = raw_attrs['nscpentrywsi']
assert attrs
for attr in attrs:
if ensure_str(attr.lower()).startswith('telephonenumber'):
break
assert attr

log.info("############# %s " % name)
# now retrieve the CSN of the operation we are looking for
csn = None
found_ops = topology_m2.ms['master1'].ds_access_log.match(".*MOD dn=\"%s\".*" % name)
assert(len(found_ops) > 0)
found_op = topology_m2.ms['master1'].ds_access_log.parse_line(found_ops[-1])
log.info(found_op)

# Now look for the related CSN
found_csns = topology_m2.ms['master1'].ds_access_log.match(".*conn=%s op=%s RESULT.*" % (found_op['conn'], found_op['op']))
assert(len(found_csns) > 0)
found_csn = topology_m2.ms['master1'].ds_access_log.parse_line(found_csns[-1])
log.info(found_csn)
> return found_csn['csn']
E KeyError: 'csn'

/export/tests/tickets/ticket48266_test.py:147: KeyError
-------------------------------Captured log call--------------------------------
INFO  lib389:ticket48266_test.py:125 dn: cn=new_account2,dc=example,dc=com INFO  tests.tickets.ticket48266_test:ticket48266_test.py:134 ############# cn=new_account2,dc=example,dc=com INFO  tests.tickets.ticket48266_test:ticket48266_test.py:140 {'action': 'MOD', 'timestamp': '[22/Oct/2020:22:23:05.855615483 -0400]', 'conn': '1', 'op': '12', 'rem': 'dn="cn=new_account2,dc=example,dc=com"', 'datetime': datetime.datetime(2020, 9, 22, 22, 0, 0, 855615, tzinfo=tzoffset(None, -14400))} INFO  tests.tickets.ticket48266_test:ticket48266_test.py:146 {'action': 'RESULT', 'timestamp': '[22/Oct/2020:22:23:05.919015636 -0400]', 'conn': '1', 'op': '12', 'rem': 'err=0 tag=103 nentries=0 wtime=0.000173770 optime=0.063418243 etime=0.063584251 csn=5f923e89000100010000', 'datetime': datetime.datetime(2020, 9, 22, 22, 0, 0, 919015, tzinfo=tzoffset(None, -14400))}
Failed tickets/ticket48325_test.py::test_ticket48325 0.01
topology_m1h1c1 = <lib389.topologies.TopologyMain object at 0x7f4d2c11c790>

def test_ticket48325(topology_m1h1c1):
"""
Test that the RUV element order is correctly maintained when promoting
a hub or consumer.
"""

#
# Promote consumer to master
#
C1 = topology_m1h1c1.cs["consumer1"]
M1 = topology_m1h1c1.ms["master1"]
H1 = topology_m1h1c1.hs["hub1"]
repl = ReplicationManager(DEFAULT_SUFFIX)
> repl._ensure_changelog(C1)

/export/tests/tickets/ticket48325_test.py:53:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/local/lib/python3.8/site-packages/lib389/replica.py:1945: in _ensure_changelog
cl.create(properties={
/usr/local/lib/python3.8/site-packages/lib389/_mapped_object.py:971: in create
return self._create(rdn, properties, basedn, ensure=False)
/usr/local/lib/python3.8/site-packages/lib389/_mapped_object.py:946: in _create
self._instance.add_ext_s(e, serverctrls=self._server_controls, clientctrls=self._client_controls, escapehatch='i am sure')
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:176: in inner
return f(ent.dn, ent.toTupleList(), *args[2:])
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:425: in add_ext_s
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all=1,timeout=self.timeout)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:764: in result3
resp_type, resp_data, resp_msgid, decoded_resp_ctrls, retoid, retval = self.result4(
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:774: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:340: in _ldap_call
reraise(exc_type, exc_value, exc_traceback)
/usr/local/lib64/python3.8/site-packages/ldap/compat.py:46: in reraise
raise exc_value
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d2c11c520>
func = <built-in method result4 of LDAP object at 0x7f4d2c0f8d80>
args = (15, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
exc_type = None, exc_value = None, exc_traceback = None

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E ldap.UNWILLING_TO_PERFORM: {'msgtype': 105, 'msgid': 15, 'result': 53, 'desc': 'Server is unwilling to perform', 'ctrls': [], 'info': 'Changelog configuration is part of the backend configuration'}

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:324: UNWILLING_TO_PERFORM
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39001, 'ldap-secureport': 63701, 'server-id': 'master1', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39101, 'ldap-secureport': 63801, 'server-id': 'hub1', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39201, 'ldap-secureport': 63901, 'server-id': 'consumer1', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:515 Creating replication topology. INFO  lib389.replica:replica.py:2101 SUCCESS: bootstrap to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39101 completed INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39101 is was created INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39101 is NOT working (expect f61e3c1a-adc7-4da5-9cc9-dd02d2d7408a / got description=None) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39101 is working INFO  lib389.replica:replica.py:2228 SUCCESS: joined consumer from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39101 INFO  lib389.replica:replica.py:2101 SUCCESS: bootstrap to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39201 completed INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39101 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39201 is was created INFO  lib389.replica:replica.py:2285 SUCCESS: 
joined consumer from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39101 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39201 INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39201 is NOT working (expect 50ef0e28-728d-46ee-a36f-41fb53df524c / got description=f61e3c1a-adc7-4da5-9cc9-dd02d2d7408a) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39201 is working
Failed tickets/ticket48637_test.py::test_ticket48637 4.69
topology_st = <lib389.topologies.TopologyMain object at 0x7f4d2c1e6fa0>

def test_ticket48637(topology_st):
"""Test for entry cache corruption

This requires automember and managed entry plugins to be configured.

Then remove the group that automember would use to trigger a failure when
adding a new entry. Automember fails, and then managed entry also fails.

Make sure a base search on the entry returns error 32
"""

if DEBUGGING:
# Add debugging steps(if any)...
pass

#
# Add our setup entries
#
try:
topology_st.standalone.add_s(Entry((PEOPLE_OU, {
'objectclass': 'top organizationalunit'.split(),
'ou': 'people'})))
except ldap.ALREADY_EXISTS:
pass
except ldap.LDAPError as e:
log.fatal('Failed to add people ou: ' + str(e))
assert False

try:
topology_st.standalone.add_s(Entry((GROUP_OU, {
'objectclass': 'top organizationalunit'.split(),
'ou': 'groups'})))
except ldap.ALREADY_EXISTS:
pass
except ldap.LDAPError as e:
log.fatal('Failed to add groups ou: ' + str(e))
assert False

try:
topology_st.standalone.add_s(Entry((MEP_OU, {
'objectclass': 'top extensibleObject'.split(),
'ou': 'mep'})))
except ldap.LDAPError as e:
log.fatal('Failed to add MEP ou: ' + str(e))
assert False

try:
topology_st.standalone.add_s(Entry((MEP_TEMPLATE, {
'objectclass': 'top mepTemplateEntry'.split(),
'cn': 'mep template',
'mepRDNAttr': 'cn',
'mepStaticAttr': 'objectclass: groupofuniquenames',
'mepMappedAttr': 'cn: $uid'})))
except ldap.LDAPError as e:
log.fatal('Failed to add MEP ou: ' + str(e))
assert False

#
# Configure automember
#
try:
topology_st.standalone.add_s(Entry((AUTO_DN, {
'cn': 'All Users',
'objectclass': ['top', 'autoMemberDefinition'],
'autoMemberScope': 'dc=example,dc=com',
'autoMemberFilter': 'objectclass=person',
'autoMemberDefaultGroup': GROUP_DN,
'autoMemberGroupingAttr': 'uniquemember:dn'})))
except ldap.LDAPError as e:
log.fatal('Failed to configure automember plugin : ' + str(e))
assert False

#
# Configure managed entry plugin
#
try:
topology_st.standalone.add_s(Entry((MEP_DN, {
'cn': 'MEP Definition',
'objectclass': ['top', 'extensibleObject'],
'originScope': 'ou=people,dc=example,dc=com',
'originFilter': 'objectclass=person',
'managedBase': 'ou=groups,dc=example,dc=com',
'managedTemplate': MEP_TEMPLATE})))
except ldap.LDAPError as e:
log.fatal('Failed to configure managed entry plugin : ' + str(e))
assert False

#
# Restart DS
#
topology_st.standalone.restart(timeout=30)

#
# Add entry that should fail since the automember group does not exist
#
try:
topology_st.standalone.add_s(Entry((USER_DN, {
'uid': 'test',
'objectclass': ['top', 'person', 'extensibleObject'],
'sn': 'test',
'cn': 'test'})))
except ldap.LDAPError as e:
pass

#
# Search for the entry - it should not be returned
#
try:
entry = topology_st.standalone.search_s(USER_DN, ldap.SCOPE_SUBTREE,
'objectclass=*')
if entry:
log.fatal('Entry was incorrectly returned')
> assert False
E assert False

/export/tests/tickets/ticket48637_test.py:139: AssertionError
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
-------------------------------Captured log call--------------------------------
CRITICAL tests.tickets.ticket48637_test:ticket48637_test.py:138 Entry was incorrectly returned
Failed tickets/ticket48759_test.py::test_ticket48759 0.63
topology_st = <lib389.topologies.TopologyMain object at 0x7f4d2c3346a0>

def test_ticket48759(topology_st):
"""
The fix for ticket 48759 has to prevent plugin calls for tombstone purging

The test uses the memberof and retrocl plugins to verify this.
In tombstone purging without the fix the mmeberof plugin is called,
if the tombstone entry is a group,
it modifies the user entries for the group
and if retrocl is enabled this mod is written to the retrocl

The test sequence is:
- enable replication
- enable memberof and retro cl plugin
- add user entries
- add a group and add the users as members
- verify memberof is set to users
- delete the group
- verify memberof is removed from users
- add group again
- verify memberof is set to users
- get number of changes in retro cl for one user
- configure tombstone purging
- wait for purge interval to pass
- add a dummy entry to increase maxcsn
- wait for purge interval to pass two times
- get number of changes in retro cl for user again
- assert there was no additional change
"""

log.info('Testing Ticket 48759 - no plugin calls for tombstone purging')

#
# Setup Replication
#
log.info('Setting up replication...')
repl = ReplicationManager(DEFAULT_SUFFIX)
repl.create_first_master(topology_st.standalone)
#
# enable dynamic plugins, memberof and retro cl plugin
#
log.info('Enable plugins...')
try:
topology_st.standalone.config.set('nsslapd-dynamic-plugins', 'on')
except ldap.LDAPError as e:
ldap.error('Failed to enable dynamic plugins! ' + e.args[0]['desc'])
assert False

topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
> topology_st.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)

/export/tests/tickets/ticket48759_test.py:125:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/local/lib/python3.8/site-packages/lib389/plugins.py:2105: in enable
plugin.enable()
/usr/local/lib/python3.8/site-packages/lib389/plugins.py:58: in enable
self.set('nsslapd-pluginEnabled', 'on')
/usr/local/lib/python3.8/site-packages/lib389/_mapped_object.py:446: in set
return self._instance.modify_ext_s(self._dn, [(action, key, value)],
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:613: in modify_ext_s
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all=1,timeout=self.timeout)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:764: in result3
resp_type, resp_data, resp_msgid, decoded_resp_ctrls, retoid, retval = self.result4(
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:774: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:340: in _ldap_call
reraise(exc_type, exc_value, exc_traceback)
/usr/local/lib64/python3.8/site-packages/ldap/compat.py:46: in reraise
raise exc_value
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d2c393ac0>
func = <built-in method result4 of LDAP object at 0x7f4d2c2bc6f0>
args = (19, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
exc_type = None, exc_value = None, exc_traceback = None

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E ldap.UNWILLING_TO_PERFORM: {'msgtype': 103, 'msgid': 19, 'result': 53, 'desc': 'Server is unwilling to perform', 'ctrls': [], 'info': 'Failed to start plugin "Retro Changelog Plugin". See errors log.'}

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:324: UNWILLING_TO_PERFORM
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
Failed tickets/ticket48784_test.py::test_ticket48784 36.07
Fixture "add_entry" called directly. Fixtures are not meant to be called directly,
but are created automatically when test functions request them as parameters.
See https://docs.pytest.org/en/latest/fixture.html for more information about fixtures, and
https://docs.pytest.org/en/latest/deprecations.html#calling-fixtures-directly about how to update your code.
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39001, 'ldap-secureport': 63701, 'server-id': 'master1', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39002, 'ldap-secureport': 63702, 'server-id': 'master2', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:148 Creating replication topology. INFO  lib389.topologies:topologies.py:162 Joining master master2 to master1 ... INFO  lib389.replica:replica.py:2101 SUCCESS: bootstrap to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 completed INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is was created INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is was created INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is NOT working (expect 99ad0410-1189-43d7-89a2-abb377019a91 / got description=None) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is working INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is NOT working (expect c2e81060-1b4e-4de0-849e-f9f91b3d2b70 / got description=99ad0410-1189-43d7-89a2-abb377019a91) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to 
ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is working INFO  lib389.replica:replica.py:2170 SUCCESS: joined master from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 INFO  lib389.topologies:topologies.py:170 Ensuring master master1 to master2 ... INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 already exists INFO  lib389.topologies:topologies.py:170 Ensuring master master2 to master1 ... INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 already exists
-------------------------------Captured log call--------------------------------
INFO  tests.tickets.ticket48784_test:ticket48784_test.py:90 Ticket 48784 - Allow usage of OpenLDAP libraries that don't use NSS for crypto INFO  tests.tickets.ticket48784_test:ticket48784_test.py:50 ######################### Configure SSL/TLS agreements ###################### INFO  tests.tickets.ticket48784_test:ticket48784_test.py:51 ######################## master1 <-- startTLS -> master2 ##################### INFO  tests.tickets.ticket48784_test:ticket48784_test.py:53 ##### Update the agreement of master1 INFO  tests.tickets.ticket48784_test:ticket48784_test.py:58 ##### Update the agreement of master2 INFO  tests.tickets.ticket48784_test:ticket48784_test.py:68 ######################### Configure SSL/TLS agreements Done ######################
Failed tickets/ticket48798_test.py::test_ticket48798 13.20
topology_st = <lib389.topologies.TopologyMain object at 0x7f4d2c7e6970>

def test_ticket48798(topology_st):
"""
Test DH param sizes offered by DS.

"""
topology_st.standalone.enable_tls()

# Confirm that we have a connection, and that it has DH

# Open a socket to the port.
# Check the security settings.
> size = check_socket_dh_param_size(topology_st.standalone.host, topology_st.standalone.sslport)

/export/tests/tickets/ticket48798_test.py:46:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/export/tests/tickets/ticket48798_test.py:23: in check_socket_dh_param_size
output = check_output(cmd, shell=True)
/usr/lib64/python3.8/subprocess.py:411: in check_output
return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

input = None, capture_output = False, timeout = None, check = True
popenargs = ('echo quit | openssl s_client -connect ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:63601 -msg -cipher DH | grep -A 1 ServerKeyExchange',)
kwargs = {'shell': True, 'stdout': -1}
process = <subprocess.Popen object at 0x7f4d2d0cca90>, stdout = b''
stderr = None, retcode = 1

def run(*popenargs,
input=None, capture_output=False, timeout=None, check=False, **kwargs):
"""Run command with arguments and return a CompletedProcess instance.

The returned instance will have attributes args, returncode, stdout and
stderr. By default, stdout and stderr are not captured, and those attributes
will be None. Pass stdout=PIPE and/or stderr=PIPE in order to capture them.

If check is True and the exit code was non-zero, it raises a
CalledProcessError. The CalledProcessError object will have the return code
in the returncode attribute, and output & stderr attributes if those streams
were captured.

If timeout is given, and the process takes too long, a TimeoutExpired
exception will be raised.

There is an optional argument "input", allowing you to
pass bytes or a string to the subprocess's stdin. If you use this argument
you may not also use the Popen constructor's "stdin" argument, as
it will be used internally.

By default, all communication is in bytes, and therefore any "input" should
be bytes, and the stdout and stderr will be bytes. If in text mode, any
"input" should be a string, and stdout and stderr will be strings decoded
according to locale encoding, or by "encoding" if set. Text mode is
triggered by setting any of text, encoding, errors or universal_newlines.

The other arguments are the same as for the Popen constructor.
"""
if input is not None:
if kwargs.get('stdin') is not None:
raise ValueError('stdin and input arguments may not both be used.')
kwargs['stdin'] = PIPE

if capture_output:
if kwargs.get('stdout') is not None or kwargs.get('stderr') is not None:
raise ValueError('stdout and stderr arguments may not be used '
'with capture_output.')
kwargs['stdout'] = PIPE
kwargs['stderr'] = PIPE

with Popen(*popenargs, **kwargs) as process:
try:
stdout, stderr = process.communicate(input, timeout=timeout)
except TimeoutExpired as exc:
process.kill()
if _mswindows:
# Windows accumulates the output in a single blocking
# read() call run on child threads, with the timeout
# being done in a join() on those threads. communicate()
# _after_ kill() is required to collect that and add it
# to the exception.
exc.stdout, exc.stderr = process.communicate()
else:
# POSIX _communicate already populated the output so
# far into the TimeoutExpired exception.
process.wait()
raise
except: # Including KeyboardInterrupt, communicate handled that.
process.kill()
# We don't call process.wait() as .__exit__ does that for us.
raise
retcode = process.poll()
if check and retcode:
> raise CalledProcessError(retcode, process.args,
output=stdout, stderr=stderr)
E subprocess.CalledProcessError: Command 'echo quit | openssl s_client -connect ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:63601 -msg -cipher DH | grep -A 1 ServerKeyExchange' returned non-zero exit status 1.

/usr/lib64/python3.8/subprocess.py:512: CalledProcessError
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
------------------------------Captured stderr call------------------------------
depth=1 C = AU, ST = Queensland, L = 389ds, O = testing, CN = ssca.389ds.example.com verify return:1 depth=0 C = AU, ST = Queensland, L = 389ds, O = testing, GN = c31934c0-1b34-4a34-bff2-9bca65c80419, CN = ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com verify error:num=7:certificate signature failure verify return:1 depth=0 C = AU, ST = Queensland, L = 389ds, O = testing, GN = c31934c0-1b34-4a34-bff2-9bca65c80419, CN = ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com verify return:1 DONE
Failed tickets/ticket48961_test.py::test_ticket48961_storagescheme 0.01
topology_st = <lib389.topologies.TopologyMain object at 0x7f4d2c0ddb20>

def test_ticket48961_storagescheme(topology_st):
"""
Test deleting of the storage scheme.
"""

default = topology_st.standalone.config.get_attr_val('passwordStorageScheme')
# Change it
topology_st.standalone.config.set('passwordStorageScheme', 'CLEAR')
# Now delete it
> topology_st.standalone.config.remove('passwordStorageScheme', None)

/export/tests/tickets/ticket48961_test.py:28:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/local/lib/python3.8/site-packages/lib389/_mapped_object.py:316: in remove
self.set(key, value, action=ldap.MOD_DELETE)
/usr/local/lib/python3.8/site-packages/lib389/_mapped_object.py:446: in set
return self._instance.modify_ext_s(self._dn, [(action, key, value)],
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:613: in modify_ext_s
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all=1,timeout=self.timeout)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:764: in result3
resp_type, resp_data, resp_msgid, decoded_resp_ctrls, retoid, retval = self.result4(
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:774: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:340: in _ldap_call
reraise(exc_type, exc_value, exc_traceback)
/usr/local/lib64/python3.8/site-packages/ldap/compat.py:46: in reraise
raise exc_value
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d2c0ddaf0>
func = <built-in method result4 of LDAP object at 0x7f4d2c092840>
args = (5, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
exc_type = None, exc_value = None, exc_traceback = None

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E ldap.OPERATIONS_ERROR: {'msgtype': 103, 'msgid': 5, 'result': 1, 'desc': 'Operations error', 'ctrls': [], 'info': 'passwordStorageScheme: deleting the value is not allowed.'}

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:324: OPERATIONS_ERROR
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
Failed tickets/ticket48961_test.py::test_ticket48961_deleteall 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x7f4d2c0ddb20>

def test_ticket48961_deleteall(topology_st):
"""
Test that we can delete all valid attrs, and that a few are rejected.
"""
attr_to_test = {
'nsslapd-listenhost': 'localhost',
'nsslapd-securelistenhost': 'localhost',
'nsslapd-allowed-sasl-mechanisms': 'GSSAPI',
'nsslapd-svrtab': 'Some bogus data', # This one could reset?
}
attr_to_fail = {
# These are the values that should always be dn dse.ldif too
'nsslapd-localuser': 'dirsrv',
'nsslapd-defaultnamingcontext': 'dc=example,dc=com', # Can't delete
'nsslapd-accesslog': '/opt/dirsrv/var/log/dirsrv/slapd-standalone/access',
'nsslapd-auditlog': '/opt/dirsrv/var/log/dirsrv/slapd-standalone/audit',
'nsslapd-errorlog': '/opt/dirsrv/var/log/dirsrv/slapd-standalone/errors',
'nsslapd-tmpdir': '/tmp',
'nsslapd-rundir': '/opt/dirsrv/var/run/dirsrv',
'nsslapd-bakdir': '/opt/dirsrv/var/lib/dirsrv/slapd-standalone/bak',
'nsslapd-certdir': '/opt/dirsrv/etc/dirsrv/slapd-standalone',
'nsslapd-instancedir': '/opt/dirsrv/lib/dirsrv/slapd-standalone',
'nsslapd-ldifdir': '/opt/dirsrv/var/lib/dirsrv/slapd-standalone/ldif',
'nsslapd-lockdir': '/opt/dirsrv/var/lock/dirsrv/slapd-standalone',
'nsslapd-schemadir': '/opt/dirsrv/etc/dirsrv/slapd-standalone/schema',
'nsslapd-workingdir': '/opt/dirsrv/var/log/dirsrv/slapd-standalone',
'nsslapd-localhost': 'localhost.localdomain',
# These can't be reset, but might be in dse.ldif. Probably in libglobs.
'nsslapd-certmap-basedn': 'cn=certmap,cn=config',
'nsslapd-port': '38931', # Can't delete
'nsslapd-secureport': '636', # Can't delete
'nsslapd-conntablesize': '1048576',
'nsslapd-rootpw': '{SSHA512}...',
# These are hardcoded server magic.
'nsslapd-hash-filters': 'off', # Can't delete
'nsslapd-requiresrestart': 'cn=config:nsslapd-port', # Can't change
'nsslapd-plugin': 'cn=case ignore string syntax,cn=plugins,cn=config', # Can't change
'nsslapd-privatenamespaces': 'cn=schema', # Can't change
'nsslapd-allowed-to-delete-attrs': 'None', # Can't delete
'nsslapd-accesslog-list': 'List!', # Can't delete
'nsslapd-auditfaillog-list': 'List!',
'nsslapd-auditlog-list': 'List!',
'nsslapd-errorlog-list': 'List!',
'nsslapd-config': 'cn=config',
'nsslapd-versionstring': '389-Directory/1.3.6.0',
'objectclass': '',
'cn': '',
# These are the odd values
'nsslapd-backendconfig': 'cn=config,cn=userRoot,cn=ldbm database,cn=plugins,cn=config', # Doesn't exist?
'nsslapd-betype': 'ldbm database', # Doesn't exist?
'nsslapd-connection-buffer': 1, # Has an ldap problem
'nsslapd-malloc-mmap-threshold': '-10', # Defunct anyway
'nsslapd-malloc-mxfast': '-10',
'nsslapd-malloc-trim-threshold': '-10',
'nsslapd-referralmode': '',
'nsslapd-saslpath': '',
'passwordadmindn': '',
}

> config_entry = topology_st.standalone.config.raw_entry()

/export/tests/tickets/ticket48961_test.py:101:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.config.Config object at 0x7f4d2c092820>, name = 'raw_entry'

def __getattr__(self, name):
"""This enables a bit of magic to allow us to wrap any function ending with
_json to it's form without json, then transformed. It means your function
*must* return it's values as a dict of:

{ attr : [val, val, ...], attr : [], ... }
to be supported.
"""

if (name.endswith('_json')):
int_name = name.replace('_json', '')
pfunc = partial(self._jsonify, getattr(self, int_name))
return pfunc
else:
> raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
E AttributeError: 'Config' object has no attribute 'raw_entry'

/usr/local/lib/python3.8/site-packages/lib389/_mapped_object.py:199: AttributeError
Failed tickets/ticket48973_test.py::test_ticket48973_homeDirectory_indexing 9.79
topology = <tests.tickets.ticket48973_test.TopologyStandalone object at 0x7f4d2c2e7a00>

def test_ticket48973_homeDirectory_indexing(topology):
"""
Check that homedirectory is indexed with syntax (ces)
- triggers index
- no failure on index
- do a search indexed with exact value (ces) and no default_mr_indexer_create warning
- do a search indexed with uppercase value (ces) and no default_mr_indexer_create warning
"""
entry_ext = 1

try:
ent = topology.standalone.getEntry(HOMEDIRECTORY_INDEX, ldap.SCOPE_BASE)
except ldap.NO_SUCH_OBJECT:
topology.standalone.add_s(Entry((HOMEDIRECTORY_INDEX, {
'objectclass': "top nsIndex".split(),
'cn': HOMEDIRECTORY_CN,
'nsSystemIndex': 'false',
'nsIndexType': 'eq'})))

args = {TASK_WAIT: True}
topology.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args)

log.info("Check indexing succeeded with no specified matching rule")
assert not _find_first_indexing_failure(topology, "unknown or invalid matching rule")
assert not _find_first_indexing_failure(topology, "default_mr_indexer_create: warning")
assert not _find_first_indexing_failure(topology, "default_mr_indexer_create - Plugin .* does not handle")

_check_entry(topology, filterHead="homeDirectory", filterValueUpper=False, entry_ext=entry_ext,found=True, indexed=True)

> _check_entry(topology, filterHead="homeDirectory:caseExactIA5Match:", filterValueUpper=False, entry_ext=entry_ext, found=True, indexed=False)

/export/tests/tickets/ticket48973_test.py:251:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology = <tests.tickets.ticket48973_test.TopologyStandalone object at 0x7f4d2c2e7a00>
filterHead = 'homeDirectory:caseExactIA5Match:', filterValueUpper = False
entry_ext = 1, found = True, indexed = False

def _check_entry(topology, filterHead=None, filterValueUpper=False, entry_ext=None, found=False, indexed=False):
# Search with CES with exact value -> find an entry + indexed
if filterValueUpper:
homehead = HOMEHEAD.upper()
else:
homehead = HOMEHEAD
searchedHome = "%s%d" % (homehead, entry_ext)
Filter = "(%s=%s)" % (filterHead, searchedHome)
log.info("Search %s" % Filter)
ents = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, Filter)
if found:
assert len(ents) == 1
assert ents[0].hasAttr('homedirectory')
valueHome = ensure_bytes("%s%d" % (HOMEHEAD, entry_ext))
assert valueHome in ents[0].getValues('homedirectory')
else:
assert len(ents) == 0

result = _find_next_notes(topology, Filter)
log.info("result=%s" % result)
if indexed:
assert not "notes=U" in result
else:
> assert "notes=U" in result
E AssertionError: assert 'notes=U' in '[22/Oct/2020:22:38:37.895819701 -0400] conn=1 op=2 RESULT err=0 tag=101 nentries=1 wtime=0.000229420 optime=0.005544109 etime=0.005766439\n'

/export/tests/tickets/ticket48973_test.py:188: AssertionError
-------------------------------Captured log call--------------------------------
INFO  lib389:tasks.py:798 Index task index_attrs_10222020_223832 completed successfully INFO  tests.tickets.ticket48973_test:ticket48973_test.py:244 Check indexing succeeded with no specified matching rule INFO  tests.tickets.ticket48973_test:ticket48973_test.py:173 Search (homeDirectory=/home/xyz_1) INFO  tests.tickets.ticket48973_test:ticket48973_test.py:184 result=[22/Oct/2020:22:38:34.030316529 -0400] conn=1 op=10 RESULT err=0 tag=101 nentries=1 wtime=0.000232439 optime=0.000373701 etime=0.000602887 INFO  tests.tickets.ticket48973_test:ticket48973_test.py:173 Search (homeDirectory:caseExactIA5Match:=/home/xyz_1) INFO  tests.tickets.ticket48973_test:ticket48973_test.py:184 result=[22/Oct/2020:22:38:37.895819701 -0400] conn=1 op=2 RESULT err=0 tag=101 nentries=1 wtime=0.000229420 optime=0.005544109 etime=0.005766439
Failed tickets/ticket49073_test.py::test_ticket49073 8.04
topology_m2 = <lib389.topologies.TopologyMain object at 0x7f4d2c4ce250>

def test_ticket49073(topology_m2):
"""Write your replication test here.

To access each DirSrv instance use: topology_m2.ms["master1"], topology_m2.ms["master2"],
..., topology_m2.hub1, ..., topology_m2.consumer1,...

Also, if you need any testcase initialization,
please, write additional fixture for that(include finalizer).
"""
topology_m2.ms["master1"].plugins.enable(name=PLUGIN_MEMBER_OF)
topology_m2.ms["master1"].restart(timeout=10)
topology_m2.ms["master2"].plugins.enable(name=PLUGIN_MEMBER_OF)
topology_m2.ms["master2"].restart(timeout=10)

# Configure fractional to prevent total init to send memberof
ents = topology_m2.ms["master1"].agreement.list(suffix=SUFFIX)
assert len(ents) == 1
log.info('update %s to add nsDS5ReplicatedAttributeListTotal' % ents[0].dn)
> topology_m2.ms["master1"].modify_s(ents[0].dn,
[(ldap.MOD_REPLACE,
'nsDS5ReplicatedAttributeListTotal',
'(objectclass=*) $ EXCLUDE '),
(ldap.MOD_REPLACE,
'nsDS5ReplicatedAttributeList',
'(objectclass=*) $ EXCLUDE memberOf')])

/export/tests/tickets/ticket49073_test.py:97:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:640: in modify_s
return self.modify_ext_s(dn,modlist,None,None)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:612: in modify_ext_s
msgid = self.modify_ext(dn,modlist,serverctrls,clientctrls)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:609: in modify_ext
return self._ldap_call(self._l.modify_ext,dn,modlist,RequestControlTuples(serverctrls),RequestControlTuples(clientctrls))
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d2c4ce460>
func = <built-in method modify_ext of LDAP object at 0x7f4d2c0907b0>
args = ('cn=002,cn=replica,cn=dc\\3Dexample\\2Cdc\\3Dcom,cn=mapping tree,cn=config', [(2, 'nsDS5ReplicatedAttributeListTotal', '(objectclass=*) $ EXCLUDE '), (2, 'nsDS5ReplicatedAttributeList', '(objectclass=*) $ EXCLUDE memberOf')], None, None)
kwargs = {}, diagnostic_message_success = None

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E TypeError: ('Tuple_to_LDAPMod(): expected a byte string in the list', '(')

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:324: TypeError
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39001, 'ldap-secureport': 63701, 'server-id': 'master1', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39002, 'ldap-secureport': 63702, 'server-id': 'master2', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:148 Creating replication topology. INFO  lib389.topologies:topologies.py:162 Joining master master2 to master1 ... INFO  lib389.replica:replica.py:2101 SUCCESS: bootstrap to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 completed INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is was created INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is was created INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is NOT working (expect 10c7f103-5109-4ecf-ba5b-832b00c24278 / got description=None) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is working INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is NOT working (expect 51862dcc-8cd7-415d-9e8e-2bbbffbb2965 / got description=10c7f103-5109-4ecf-ba5b-832b00c24278) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to 
ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is working INFO  lib389.replica:replica.py:2170 SUCCESS: joined master from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 INFO  lib389.topologies:topologies.py:170 Ensuring master master1 to master2 ... INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 already exists INFO  lib389.topologies:topologies.py:170 Ensuring master master2 to master1 ... INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 already exists
-------------------------------Captured log call--------------------------------
INFO  tests.tickets.ticket49073_test:ticket49073_test.py:96 update cn=002,cn=replica,cn=dc\3Dexample\2Cdc\3Dcom,cn=mapping tree,cn=config to add nsDS5ReplicatedAttributeListTotal
Failed tickets/ticket49192_test.py::test_ticket49192 0.00
topo = <lib389.topologies.TopologyMain object at 0x7f4d2d094100>

def test_ticket49192(topo):
"""Trigger deadlock when removing suffix
"""

#
# Create a second suffix/backend
#
log.info('Creating second backend...')
> topo.standalone.backends.create(None, properties={
BACKEND_NAME: "Second_Backend",
'suffix': "o=hang.com",
})

/export/tests/tickets/ticket49192_test.py:35:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/local/lib/python3.8/site-packages/lib389/_mapped_object.py:1169: in create
return co.create(rdn, properties, self._basedn)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.backend.Backend object at 0x7f4d2d09cfd0>, dn = None
properties = {'name': 'Second_Backend', 'suffix': 'o=hang.com'}
basedn = 'cn=ldbm database,cn=plugins,cn=config'

def create(self, dn=None, properties=None, basedn=DN_LDBM):
"""Add a new backend entry, create mapping tree,
and, if requested, sample entries

:param dn: DN of the new entry
:type dn: str
:param properties: Attributes and parameters for the new entry
:type properties: dict
:param basedn: Base DN of the new entry
:type basedn: str

:returns: DSLdapObject of the created entry
"""

sample_entries = False
parent_suffix = False

# normalize suffix (remove spaces between comps)
if dn is not None:
dn_comps = ldap.dn.explode_dn(dn.lower())
dn = ",".join(dn_comps)

if properties is not None:
> suffix_dn = properties['nsslapd-suffix'].lower()
E KeyError: 'nsslapd-suffix'

/usr/local/lib/python3.8/site-packages/lib389/backend.py:590: KeyError
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
-------------------------------Captured log call--------------------------------
INFO  tests.tickets.ticket49192_test:ticket49192_test.py:34 Creating second backend...
Failed tickets/ticket49287_test.py::test_ticket49287 12.15
self = <lib389.mappingTree.MappingTreeLegacy object at 0x7f4d2c48db50>
suffix = 'dc=test,dc=com', bename = 'test', parent = None

def create(self, suffix=None, bename=None, parent=None):
'''
Create a mapping tree entry (under "cn=mapping tree,cn=config"),
for the 'suffix' and that is stored in 'bename' backend.
'bename' backend must exist before creating the mapping tree entry.

If a 'parent' is provided that means that we are creating a
sub-suffix mapping tree.

@param suffix - suffix mapped by this mapping tree entry. It will
be the common name ('cn') of the entry
@param benamebase - backend common name (e.g. 'userRoot')
@param parent - if provided is a parent suffix of 'suffix'

@return DN of the mapping tree entry

@raise ldap.NO_SUCH_OBJECT - if the backend entry or parent mapping
tree does not exist
ValueError - if missing a parameter,

'''
# Check suffix is provided
if not suffix:
raise ValueError("suffix is mandatory")
else:
nsuffix = normalizeDN(suffix)

# Check backend name is provided
if not bename:
raise ValueError("backend name is mandatory")

# Check that if the parent suffix is provided then
# it exists a mapping tree for it
if parent:
nparent = normalizeDN(parent)
filt = suffixfilt(parent)
try:
entry = self.conn.getEntry(DN_MAPPING_TREE, ldap.SCOPE_SUBTREE,
filt)
pass
except NoSuchEntryError:
raise ValueError("parent suffix has no mapping tree")
else:
nparent = ""

# Check if suffix exists, return
filt = suffixfilt(suffix)
try:
entry = self.conn.getEntry(DN_MAPPING_TREE, ldap.SCOPE_SUBTREE,
filt)
return entry
except ldap.NO_SUCH_OBJECT:
entry = None

#
# Now start the real work
#

# fix me when we can actually used escaped DNs
dn = ','.join(('cn="%s"' % nsuffix, DN_MAPPING_TREE))
entry = Entry(dn)
entry.update({
'objectclass': ['top', 'extensibleObject', MT_OBJECTCLASS_VALUE],
'nsslapd-state': 'backend',
# the value in the dn has to be DN escaped
# internal code will add the quoted value - unquoted value is
# useful for searching.
MT_PROPNAME_TO_ATTRNAME[MT_SUFFIX]: nsuffix,
MT_PROPNAME_TO_ATTRNAME[MT_BACKEND]: bename
})

# possibly add the parent
if parent:
entry.setValues(MT_PROPNAME_TO_ATTRNAME[MT_PARENT_SUFFIX], nparent)

try:
self.log.debug("Creating entry: %s", entry.dn)
self.log.info("Entry %r", entry)
> self.conn.add_s(entry)

/usr/local/lib/python3.8/site-packages/lib389/mappingTree.py:155:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = (dn: cn="dc=test,dc=com",cn=mapping tree,cn=config
cn: dc=test,dc=com
nsslapd-backend: test
nsslapd-state: backend
objectclass: top
objectclass: extensibleObject
objectclass: nsMappingTree

,)
kwargs = {}
c_stack = [FrameInfo(frame=<frame at 0x7f4d2c40a840, file '/usr/local/lib/python3.8/site-packages/lib389/__init__.py', line 176,...neno=187, function='_multicall', code_context=[' res = hook_impl.function(*args)\n'], index=0), ...]
frame = FrameInfo(frame=<frame at 0x55a9416a2b60, file '/usr/local/lib/python3.8/site-packages/lib389/mappingTree.py', line 15.../lib389/mappingTree.py', lineno=155, function='create', code_context=[' self.conn.add_s(entry)\n'], index=0)
ent = dn: cn="dc=test,dc=com",cn=mapping tree,cn=config
cn: dc=test,dc=com
nsslapd-backend: test
nsslapd-state: backend
objectclass: top
objectclass: extensibleObject
objectclass: nsMappingTree



def inner(*args, **kwargs):
if name in [
'add_s',
'bind_s',
'delete_s',
'modify_s',
'modrdn_s',
'rename_s',
'sasl_interactive_bind_s',
'search_s',
'search_ext_s',
'simple_bind_s',
'unbind_s',
'getEntry',
] and not ('escapehatch' in kwargs and kwargs['escapehatch'] == 'i am sure'):
c_stack = inspect.stack()
frame = c_stack[1]

warnings.warn(DeprecationWarning("Use of raw ldap function %s. This will be removed in a future release. "
"Found in: %s:%s" % (name, frame.filename, frame.lineno)))
# Later, we will add a sleep here to make it even more painful.
# Finally, it will raise an exception.
elif 'escapehatch' in kwargs:
kwargs.pop('escapehatch')

if name == 'result':
objtype, data = f(*args, **kwargs)
# data is either a 2-tuple or a list of 2-tuples
# print data
if data:
if isinstance(data, tuple):
return objtype, Entry(data)
elif isinstance(data, list):
# AD sends back these search references
# if objtype == ldap.RES_SEARCH_RESULT and \
# isinstance(data[-1],tuple) and \
# not data[-1][0]:
# print "Received search reference: "
# pprint.pprint(data[-1][1])
# data.pop() # remove the last non-entry element

return objtype, [Entry(x) for x in data]
else:
raise TypeError("unknown data type %s returned by result" %
type(data))
else:
return objtype, data
elif name.startswith('add'):
# the first arg is self
# the second and third arg are the dn and the data to send
# We need to convert the Entry into the format used by
# python-ldap
ent = args[0]
if isinstance(ent, Entry):
> return f(ent.dn, ent.toTupleList(), *args[2:])

/usr/local/lib/python3.8/site-packages/lib389/__init__.py:176:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d2c478340>
dn = 'cn="dc=test,dc=com",cn=mapping tree,cn=config'
modlist = [('objectclass', [b'top', b'extensibleObject', b'nsMappingTree']), ('nsslapd-state', [b'backend']), ('cn', [b'dc=test,dc=com']), ('nsslapd-backend', [b'test'])]

def add_s(self,dn,modlist):
> return self.add_ext_s(dn,modlist,None,None)

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:439:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('cn="dc=test,dc=com",cn=mapping tree,cn=config', [('objectclass', [b'top', b'extensibleObject', b'nsMappingTree']), ('nsslapd-state', [b'backend']), ('cn', [b'dc=test,dc=com']), ('nsslapd-backend', [b'test'])], None, None)
kwargs = {}, ent = 'cn="dc=test,dc=com",cn=mapping tree,cn=config'

def inner(*args, **kwargs):
if name in [
'add_s',
'bind_s',
'delete_s',
'modify_s',
'modrdn_s',
'rename_s',
'sasl_interactive_bind_s',
'search_s',
'search_ext_s',
'simple_bind_s',
'unbind_s',
'getEntry',
] and not ('escapehatch' in kwargs and kwargs['escapehatch'] == 'i am sure'):
c_stack = inspect.stack()
frame = c_stack[1]

warnings.warn(DeprecationWarning("Use of raw ldap function %s. This will be removed in a future release. "
"Found in: %s:%s" % (name, frame.filename, frame.lineno)))
# Later, we will add a sleep here to make it even more painful.
# Finally, it will raise an exception.
elif 'escapehatch' in kwargs:
kwargs.pop('escapehatch')

if name == 'result':
objtype, data = f(*args, **kwargs)
# data is either a 2-tuple or a list of 2-tuples
# print data
if data:
if isinstance(data, tuple):
return objtype, Entry(data)
elif isinstance(data, list):
# AD sends back these search references
# if objtype == ldap.RES_SEARCH_RESULT and \
# isinstance(data[-1],tuple) and \
# not data[-1][0]:
# print "Received search reference: "
# pprint.pprint(data[-1][1])
# data.pop() # remove the last non-entry element

return objtype, [Entry(x) for x in data]
else:
raise TypeError("unknown data type %s returned by result" %
type(data))
else:
return objtype, data
elif name.startswith('add'):
# the first arg is self
# the second and third arg are the dn and the data to send
# We need to convert the Entry into the format used by
# python-ldap
ent = args[0]
if isinstance(ent, Entry):
return f(ent.dn, ent.toTupleList(), *args[2:])
else:
> return f(*args, **kwargs)

/usr/local/lib/python3.8/site-packages/lib389/__init__.py:178:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d2c478340>
dn = 'cn="dc=test,dc=com",cn=mapping tree,cn=config'
modlist = [('objectclass', [b'top', b'extensibleObject', b'nsMappingTree']), ('nsslapd-state', [b'backend']), ('cn', [b'dc=test,dc=com']), ('nsslapd-backend', [b'test'])]
serverctrls = None, clientctrls = None

def add_ext_s(self,dn,modlist,serverctrls=None,clientctrls=None):
msgid = self.add_ext(dn,modlist,serverctrls,clientctrls)
> resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all=1,timeout=self.timeout)

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:425:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = (4,), kwargs = {'all': 1, 'timeout': -1}

def inner(*args, **kwargs):
if name in [
'add_s',
'bind_s',
'delete_s',
'modify_s',
'modrdn_s',
'rename_s',
'sasl_interactive_bind_s',
'search_s',
'search_ext_s',
'simple_bind_s',
'unbind_s',
'getEntry',
] and not ('escapehatch' in kwargs and kwargs['escapehatch'] == 'i am sure'):
c_stack = inspect.stack()
frame = c_stack[1]

warnings.warn(DeprecationWarning("Use of raw ldap function %s. This will be removed in a future release. "
"Found in: %s:%s" % (name, frame.filename, frame.lineno)))
# Later, we will add a sleep here to make it even more painful.
# Finally, it will raise an exception.
elif 'escapehatch' in kwargs:
kwargs.pop('escapehatch')

if name == 'result':
objtype, data = f(*args, **kwargs)
# data is either a 2-tuple or a list of 2-tuples
# print data
if data:
if isinstance(data, tuple):
return objtype, Entry(data)
elif isinstance(data, list):
# AD sends back these search references
# if objtype == ldap.RES_SEARCH_RESULT and \
# isinstance(data[-1],tuple) and \
# not data[-1][0]:
# print "Received search reference: "
# pprint.pprint(data[-1][1])
# data.pop() # remove the last non-entry element

return objtype, [Entry(x) for x in data]
else:
raise TypeError("unknown data type %s returned by result" %
type(data))
else:
return objtype, data
elif name.startswith('add'):
# the first arg is self
# the second and third arg are the dn and the data to send
# We need to convert the Entry into the format used by
# python-ldap
ent = args[0]
if isinstance(ent, Entry):
return f(ent.dn, ent.toTupleList(), *args[2:])
else:
return f(*args, **kwargs)
else:
> return f(*args, **kwargs)

/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d2c478340>, msgid = 4, all = 1
timeout = -1, resp_ctrl_classes = None

def result3(self,msgid=ldap.RES_ANY,all=1,timeout=None,resp_ctrl_classes=None):
> resp_type, resp_data, resp_msgid, decoded_resp_ctrls, retoid, retval = self.result4(
msgid,all,timeout,
add_ctrls=0,add_intermediates=0,add_extop=0,
resp_ctrl_classes=resp_ctrl_classes
)

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:764:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = (4, 1, -1)
kwargs = {'add_ctrls': 0, 'add_extop': 0, 'add_intermediates': 0, 'resp_ctrl_classes': None}

def inner(*args, **kwargs):
if name in [
'add_s',
'bind_s',
'delete_s',
'modify_s',
'modrdn_s',
'rename_s',
'sasl_interactive_bind_s',
'search_s',
'search_ext_s',
'simple_bind_s',
'unbind_s',
'getEntry',
] and not ('escapehatch' in kwargs and kwargs['escapehatch'] == 'i am sure'):
c_stack = inspect.stack()
frame = c_stack[1]

warnings.warn(DeprecationWarning("Use of raw ldap function %s. This will be removed in a future release. "
"Found in: %s:%s" % (name, frame.filename, frame.lineno)))
# Later, we will add a sleep here to make it even more painful.
# Finally, it will raise an exception.
elif 'escapehatch' in kwargs:
kwargs.pop('escapehatch')

if name == 'result':
objtype, data = f(*args, **kwargs)
# data is either a 2-tuple or a list of 2-tuples
# print data
if data:
if isinstance(data, tuple):
return objtype, Entry(data)
elif isinstance(data, list):
# AD sends back these search references
# if objtype == ldap.RES_SEARCH_RESULT and \
# isinstance(data[-1],tuple) and \
# not data[-1][0]:
# print "Received search reference: "
# pprint.pprint(data[-1][1])
# data.pop() # remove the last non-entry element

return objtype, [Entry(x) for x in data]
else:
raise TypeError("unknown data type %s returned by result" %
type(data))
else:
return objtype, data
elif name.startswith('add'):
# the first arg is self
# the second and third arg are the dn and the data to send
# We need to convert the Entry into the format used by
# python-ldap
ent = args[0]
if isinstance(ent, Entry):
return f(ent.dn, ent.toTupleList(), *args[2:])
else:
return f(*args, **kwargs)
else:
> return f(*args, **kwargs)

/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d2c478340>, msgid = 4, all = 1
timeout = -1, add_ctrls = 0, add_intermediates = 0, add_extop = 0
resp_ctrl_classes = None

def result4(self,msgid=ldap.RES_ANY,all=1,timeout=None,add_ctrls=0,add_intermediates=0,add_extop=0,resp_ctrl_classes=None):
if timeout is None:
timeout = self.timeout
> ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:774:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = (<built-in method result4 of LDAP object at 0x7f4d2c47c4e0>, 4, 1, -1, 0, 0, ...)
kwargs = {}

def inner(*args, **kwargs):
if name in [
'add_s',
'bind_s',
'delete_s',
'modify_s',
'modrdn_s',
'rename_s',
'sasl_interactive_bind_s',
'search_s',
'search_ext_s',
'simple_bind_s',
'unbind_s',
'getEntry',
] and not ('escapehatch' in kwargs and kwargs['escapehatch'] == 'i am sure'):
c_stack = inspect.stack()
frame = c_stack[1]

warnings.warn(DeprecationWarning("Use of raw ldap function %s. This will be removed in a future release. "
"Found in: %s:%s" % (name, frame.filename, frame.lineno)))
# Later, we will add a sleep here to make it even more painful.
# Finally, it will raise an exception.
elif 'escapehatch' in kwargs:
kwargs.pop('escapehatch')

if name == 'result':
objtype, data = f(*args, **kwargs)
# data is either a 2-tuple or a list of 2-tuples
# print data
if data:
if isinstance(data, tuple):
return objtype, Entry(data)
elif isinstance(data, list):
# AD sends back these search references
# if objtype == ldap.RES_SEARCH_RESULT and \
# isinstance(data[-1],tuple) and \
# not data[-1][0]:
# print "Received search reference: "
# pprint.pprint(data[-1][1])
# data.pop() # remove the last non-entry element

return objtype, [Entry(x) for x in data]
else:
raise TypeError("unknown data type %s returned by result" %
type(data))
else:
return objtype, data
elif name.startswith('add'):
# the first arg is self
# the second and third arg are the dn and the data to send
# We need to convert the Entry into the format used by
# python-ldap
ent = args[0]
if isinstance(ent, Entry):
return f(ent.dn, ent.toTupleList(), *args[2:])
else:
return f(*args, **kwargs)
else:
> return f(*args, **kwargs)

/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d2c478340>
func = <built-in method result4 of LDAP object at 0x7f4d2c47c4e0>
args = (4, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
exc_type = None, exc_value = None, exc_traceback = None

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
result = func(*args,**kwargs)
if __debug__ and self._trace_level>=2:
if func.__name__!="unbind_ext":
diagnostic_message_success = self._l.get_option(ldap.OPT_DIAGNOSTIC_MESSAGE)
finally:
self._ldap_object_lock.release()
except LDAPError as e:
exc_type,exc_value,exc_traceback = sys.exc_info()
try:
if 'info' not in e.args[0] and 'errno' in e.args[0]:
e.args[0]['info'] = strerror(e.args[0]['errno'])
except IndexError:
pass
if __debug__ and self._trace_level>=2:
self._trace_file.write('=> LDAPError - %s: %s\n' % (e.__class__.__name__,str(e)))
try:
> reraise(exc_type, exc_value, exc_traceback)

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:340:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

exc_type = <class 'ldap.UNWILLING_TO_PERFORM'>
exc_value = UNWILLING_TO_PERFORM({'msgtype': 105, 'msgid': 4, 'result': 53, 'desc': 'Server is unwilling to perform', 'ctrls': []})
exc_traceback = <traceback object at 0x7f4d2c48a340>

def reraise(exc_type, exc_value, exc_traceback):
"""Re-raise an exception given information from sys.exc_info()

Note that unlike six.reraise, this does not support replacing the
traceback. All arguments must come from a single sys.exc_info() call.
"""
# In Python 3, all exception info is contained in one object.
> raise exc_value

/usr/local/lib64/python3.8/site-packages/ldap/compat.py:46:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d2c478340>
func = <built-in method result4 of LDAP object at 0x7f4d2c47c4e0>
args = (4, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
exc_type = None, exc_value = None, exc_traceback = None

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E ldap.UNWILLING_TO_PERFORM: {'msgtype': 105, 'msgid': 4, 'result': 53, 'desc': 'Server is unwilling to perform', 'ctrls': []}

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:324: UNWILLING_TO_PERFORM

During handling of the above exception, another exception occurred:

topology_m2 = <lib389.topologies.TopologyMain object at 0x7f4d2c4720d0>

def test_ticket49287(topology_m2):
"""
test case for memberof and conflict entries

"""

# return
M1 = topology_m2.ms["master1"]
M2 = topology_m2.ms["master2"]

config_memberof(M1)
config_memberof(M2)

_enable_spec_logging(M1)
_enable_spec_logging(M2)

_disable_nunc_stans(M1)
_disable_nunc_stans(M2)

M1.restart(timeout=10)
M2.restart(timeout=10)

testbase = 'dc=test,dc=com'
bename = 'test'
> create_backend(M1, M2, testbase, bename)

/export/tests/tickets/ticket49287_test.py:282:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/export/tests/tickets/ticket49287_test.py:204: in create_backend
s1.mappingtree.create(beSuffix, beName)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.mappingTree.MappingTreeLegacy object at 0x7f4d2c48db50>
suffix = 'dc=test,dc=com', bename = 'test', parent = None

def create(self, suffix=None, bename=None, parent=None):
'''
Create a mapping tree entry (under "cn=mapping tree,cn=config"),
for the 'suffix' and that is stored in 'bename' backend.
'bename' backend must exist before creating the mapping tree entry.

If a 'parent' is provided that means that we are creating a
sub-suffix mapping tree.

@param suffix - suffix mapped by this mapping tree entry. It will
be the common name ('cn') of the entry
@param benamebase - backend common name (e.g. 'userRoot')
@param parent - if provided is a parent suffix of 'suffix'

@return DN of the mapping tree entry

@raise ldap.NO_SUCH_OBJECT - if the backend entry or parent mapping
tree does not exist
ValueError - if missing a parameter,

'''
# Check suffix is provided
if not suffix:
raise ValueError("suffix is mandatory")
else:
nsuffix = normalizeDN(suffix)

# Check backend name is provided
if not bename:
raise ValueError("backend name is mandatory")

# Check that if the parent suffix is provided then
# it exists a mapping tree for it
if parent:
nparent = normalizeDN(parent)
filt = suffixfilt(parent)
try:
entry = self.conn.getEntry(DN_MAPPING_TREE, ldap.SCOPE_SUBTREE,
filt)
pass
except NoSuchEntryError:
raise ValueError("parent suffix has no mapping tree")
else:
nparent = ""

# Check if suffix exists, return
filt = suffixfilt(suffix)
try:
entry = self.conn.getEntry(DN_MAPPING_TREE, ldap.SCOPE_SUBTREE,
filt)
return entry
except ldap.NO_SUCH_OBJECT:
entry = None

#
# Now start the real work
#

# fix me when we can actually used escaped DNs
dn = ','.join(('cn="%s"' % nsuffix, DN_MAPPING_TREE))
entry = Entry(dn)
entry.update({
'objectclass': ['top', 'extensibleObject', MT_OBJECTCLASS_VALUE],
'nsslapd-state': 'backend',
# the value in the dn has to be DN escaped
# internal code will add the quoted value - unquoted value is
# useful for searching.
MT_PROPNAME_TO_ATTRNAME[MT_SUFFIX]: nsuffix,
MT_PROPNAME_TO_ATTRNAME[MT_BACKEND]: bename
})

# possibly add the parent
if parent:
entry.setValues(MT_PROPNAME_TO_ATTRNAME[MT_PARENT_SUFFIX], nparent)

try:
self.log.debug("Creating entry: %s", entry.dn)
self.log.info("Entry %r", entry)
self.conn.add_s(entry)
except ldap.LDAPError as e:
> raise ldap.LDAPError("Error adding suffix entry " + dn, e)
E ldap.LDAPError: ('Error adding suffix entry cn="dc=test,dc=com",cn=mapping tree,cn=config', UNWILLING_TO_PERFORM({'msgtype': 105, 'msgid': 4, 'result': 53, 'desc': 'Server is unwilling to perform', 'ctrls': []}))

/usr/local/lib/python3.8/site-packages/lib389/mappingTree.py:157: LDAPError
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39001, 'ldap-secureport': 63701, 'server-id': 'master1', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39002, 'ldap-secureport': 63702, 'server-id': 'master2', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:148 Creating replication topology. INFO  lib389.topologies:topologies.py:162 Joining master master2 to master1 ... INFO  lib389.replica:replica.py:2101 SUCCESS: bootstrap to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 completed INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is was created INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is was created INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is NOT working (expect ff1ffd9d-3e78-4d4e-acdf-05ead5f5c19a / got description=None) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is working INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is NOT working (expect afa9fe7e-8d29-4657-821c-d165f18b17ef / got description=ff1ffd9d-3e78-4d4e-acdf-05ead5f5c19a) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to 
ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is working INFO  lib389.replica:replica.py:2170 SUCCESS: joined master from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 INFO  lib389.topologies:topologies.py:170 Ensuring master master1 to master2 ... INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 already exists INFO  lib389.topologies:topologies.py:170 Ensuring master master2 to master1 ... INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 already exists
-------------------------------Captured log call--------------------------------
INFO  tests.tickets.ticket49287_test:ticket49287_test.py:77 update cn=002,cn=replica,cn=dc\3Dexample\2Cdc\3Dcom,cn=mapping tree,cn=config to add nsDS5ReplicatedAttributeListTotal INFO  tests.tickets.ticket49287_test:ticket49287_test.py:77 update cn=001,cn=replica,cn=dc\3Dexample\2Cdc\3Dcom,cn=mapping tree,cn=config to add nsDS5ReplicatedAttributeListTotal INFO  lib389:mappingTree.py:154 Entry dn: cn="dc=test,dc=com",cn=mapping tree,cn=config cn: dc=test,dc=com nsslapd-backend: test nsslapd-state: backend objectclass: top objectclass: extensibleObject objectclass: nsMappingTree
Failed tickets/ticket49303_test.py::test_ticket49303 17.20
topo = <lib389.topologies.TopologyMain object at 0x7f4d2c4d4c40>

def test_ticket49303(topo):
"""
Test the nsTLSAllowClientRenegotiation setting.
"""
sslport = SECUREPORT_STANDALONE1

log.info("Ticket 49303 - Allow disabling of SSL renegotiation")

# No value set, defaults to reneg allowed
enable_ssl(topo.standalone, sslport)
> assert try_reneg(HOST_STANDALONE1, sslport) is True
E AssertionError: assert False is True
E + where False = try_reneg('LOCALHOST', 63601)

/export/tests/tickets/ticket49303_test.py:88: AssertionError
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
-------------------------------Captured log call--------------------------------
INFO  tests.tickets.ticket49303_test:ticket49303_test.py:84 Ticket 49303 - Allow disabling of SSL renegotiation
Failed tickets/ticket49412_test.py::test_ticket49412 0.00
topo = <lib389.topologies.TopologyMain object at 0x7f4d2d078520>

def test_ticket49412(topo):
"""Specify a test case purpose or name here

:id: 4c7681ff-0511-4256-9589-bdcad84c13e6
:setup: Fill in set up configuration here
:steps:
1. Fill in test case steps here
2. And indent them like this (RST format requirement)
:expectedresults:
1. Fill in the result that is expected
2. For each test step
"""

M1 = topo.ms["master1"]

# wrong call with invalid value (should be str(60)
# that create replace with NULL value
# it should fail with UNWILLING_TO_PERFORM
try:
> M1.modify_s(CHANGELOG, [(ldap.MOD_REPLACE, MAXAGE_ATTR, 60),
(ldap.MOD_REPLACE, TRIMINTERVAL, 10)])

/export/tests/tickets/ticket49412_test.py:44:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:640: in modify_s
return self.modify_ext_s(dn,modlist,None,None)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:613: in modify_ext_s
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all=1,timeout=self.timeout)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:764: in result3
resp_type, resp_data, resp_msgid, decoded_resp_ctrls, retoid, retval = self.result4(
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:774: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:340: in _ldap_call
reraise(exc_type, exc_value, exc_traceback)
/usr/local/lib64/python3.8/site-packages/ldap/compat.py:46: in reraise
raise exc_value
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d2d09d6a0>
func = <built-in method result4 of LDAP object at 0x7f4d2d08fde0>
args = (39, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
exc_type = None, exc_value = None, exc_traceback = None

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E ldap.NO_SUCH_OBJECT: {'msgtype': 103, 'msgid': 39, 'result': 32, 'desc': 'No such object', 'ctrls': []}

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:324: NO_SUCH_OBJECT
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39001, 'ldap-secureport': 63701, 'server-id': 'master1', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39201, 'ldap-secureport': 63901, 'server-id': 'consumer1', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:148 Creating replication topology. INFO  lib389.topologies:topologies.py:175 Joining consumer consumer1 from master1 ... INFO  lib389.replica:replica.py:2101 SUCCESS: bootstrap to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39201 completed INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39201 is was created INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39201 is NOT working (expect dcf897a1-47d8-490b-a8df-247cbaaff6f7 / got description=None) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39201 is working INFO  lib389.replica:replica.py:2285 SUCCESS: joined consumer from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39201 INFO  lib389.topologies:topologies.py:180 Ensuring consumer consumer1 from master1 ... INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39201 already exists
Failed tickets/ticket49463_test.py::test_ticket_49463 186.19
topo = <lib389.topologies.TopologyMain object at 0x7f4d26efa6d0>

def test_ticket_49463(topo):
"""Specify a test case purpose or name here

:id: 2a68e8be-387d-4ac7-9452-1439e8483c13
:setup: Fill in set up configuration here
:steps:
1. Enable fractional replication
2. Enable replication logging
3. Check that replication is working fine
4. Generate skipped updates to create keep alive entries
5. Remove M3 from the topology
6. issue cleanAllRuv FORCE that will run on M1 then propagated M2 and M4
7. Check that Number DEL keep alive '3' is <= 1
8. Check M1 is the originator of cleanAllRuv and M2/M4 the propagated ones
9. Check replication M1,M2 and M4 can recover
10. Remove M4 from the topology
11. Issue cleanAllRuv not force while M2 is stopped (that hangs the cleanAllRuv)
12. Check that nsds5ReplicaCleanRUV is correctly encoded on M1 (last value: 1)
13. Check that nsds5ReplicaCleanRUV encoding survives M1 restart
14. Check that nsds5ReplicaCleanRUV encoding is valid on M2 (last value: 0)
15. Check that (for M4 cleanAllRUV) M1 is Originator and M2 propagation
:expectedresults:
1. No report of failure when the RUV is updated
"""

# Step 1 - Configure fractional (skip telephonenumber) replication
M1 = topo.ms["master1"]
M2 = topo.ms["master2"]
M3 = topo.ms["master3"]
M4 = topo.ms["master4"]
repl = ReplicationManager(DEFAULT_SUFFIX)
fractional_server_to_replica(M1, M2)
fractional_server_to_replica(M1, M3)
fractional_server_to_replica(M1, M4)

fractional_server_to_replica(M2, M1)
fractional_server_to_replica(M2, M3)
fractional_server_to_replica(M2, M4)

fractional_server_to_replica(M3, M1)
fractional_server_to_replica(M3, M2)
fractional_server_to_replica(M3, M4)

fractional_server_to_replica(M4, M1)
fractional_server_to_replica(M4, M2)
fractional_server_to_replica(M4, M3)

# Step 2 - enable internal op logging and replication debug
for i in (M1, M2, M3, M4):
i.config.loglevel(vals=[256 + 4], service='access')
i.config.loglevel(vals=[LOG_REPLICA, LOG_DEFAULT], service='error')

# Step 3 - Check that replication is working fine
add_user(M1, 11, desc="add to M1")
add_user(M2, 21, desc="add to M2")
add_user(M3, 31, desc="add to M3")
add_user(M4, 41, desc="add to M4")

for i in (M1, M2, M3, M4):
for j in (M1, M2, M3, M4):
if i == j:
continue
repl.wait_for_replication(i, j)

# Step 4 - Generate skipped updates to create keep alive entries
for i in (M1, M2, M3, M4):
cn = '%s_%d' % (USER_CN, 11)
dn = 'uid=%s,ou=People,%s' % (cn, SUFFIX)
users = UserAccount(i, dn)
for j in range(110):
users.set('telephoneNumber', str(j))

# Step 5 - Remove M3 from the topology
M3.stop()
M1.agreement.delete(suffix=SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
M2.agreement.delete(suffix=SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
M4.agreement.delete(suffix=SUFFIX, consumer_host=M3.host, consumer_port=M3.port)

# Step 6 - Then issue cleanAllRuv FORCE that will run on M1, M2 and M4
M1.tasks.cleanAllRUV(suffix=SUFFIX, replicaid='3',
force=True, args={TASK_WAIT: True})

# Step 7 - Count the number of received DEL of the keep alive 3
for i in (M1, M2, M4):
i.restart()
regex = re.compile(".*DEL dn=.cn=repl keep alive 3.*")
for i in (M1, M2, M4):
count = count_pattern_accesslog(M1, regex)
log.debug("count on %s = %d" % (i, count))

# check that DEL is replicated once (If DEL is kept in the fix)
# check that DEL is not replicated (if DEL is finally no longer done in the fix)
assert ((count == 1) or (count == 0))

# Step 8 - Check that M1 is Originator of cleanAllRuv and M2, M4 propagation
regex = re.compile(".*Original task deletes Keep alive entry .3.*")
assert pattern_errorlog(M1, regex)

regex = re.compile(".*Propagated task does not delete Keep alive entry .3.*")
assert pattern_errorlog(M2, regex)
assert pattern_errorlog(M4, regex)

# Step 9 - Check replication M1,M2 and M4 can recover
add_user(M1, 12, desc="add to M1")
add_user(M2, 22, desc="add to M2")
for i in (M1, M2, M4):
for j in (M1, M2, M4):
if i == j:
continue
repl.wait_for_replication(i, j)

# Step 10 - Remove M4 from the topology
M4.stop()
M1.agreement.delete(suffix=SUFFIX, consumer_host=M4.host, consumer_port=M4.port)
M2.agreement.delete(suffix=SUFFIX, consumer_host=M4.host, consumer_port=M4.port)

# Step 11 - Issue cleanAllRuv not force while M2 is stopped (that hangs the cleanAllRuv)
M2.stop()
M1.tasks.cleanAllRUV(suffix=SUFFIX, replicaid='4',
force=False, args={TASK_WAIT: False})

# Step 12
# CleanAllRuv is hanging waiting for M2 to restart
# Check that nsds5ReplicaCleanRUV is correctly encoded on M1
replicas = Replicas(M1)
replica = replicas.list()[0]
time.sleep(0.5)
replica.present('nsds5ReplicaCleanRUV')
log.info("M1: nsds5ReplicaCleanRUV=%s" % replica.get_attr_val_utf8('nsds5replicacleanruv'))
regex = re.compile("^4:.*:no:1$")
> assert regex.match(replica.get_attr_val_utf8('nsds5replicacleanruv'))
E AssertionError: assert None
E + where None = <built-in method match of re.Pattern object at 0x7f4d2d226e30>('4:no:1:dc=example,dc=com')
E + where <built-in method match of re.Pattern object at 0x7f4d2d226e30> = re.compile('^4:.*:no:1$').match
E + and '4:no:1:dc=example,dc=com' = <bound method DSLdapObject.get_attr_val_utf8 of <lib389.replica.Replica object at 0x7f4d26eed8b0>>('nsds5replicacleanruv')
E + where <bound method DSLdapObject.get_attr_val_utf8 of <lib389.replica.Replica object at 0x7f4d26eed8b0>> = <lib389.replica.Replica object at 0x7f4d26eed8b0>.get_attr_val_utf8

/export/tests/tickets/ticket49463_test.py:188: AssertionError
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39001, 'ldap-secureport': 63701, 'server-id': 'master1', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39002, 'ldap-secureport': 63702, 'server-id': 'master2', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39003, 'ldap-secureport': 63703, 'server-id': 'master3', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39004, 'ldap-secureport': 63704, 'server-id': 'master4', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:148 Creating replication topology. INFO  lib389.topologies:topologies.py:162 Joining master master2 to master1 ... INFO  lib389.replica:replica.py:2101 SUCCESS: bootstrap to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 completed INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is was created INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is was created INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is NOT working (expect 6b22b4be-cb5c-487a-a5be-9cb19fded884 / got description=None) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is working INFO  lib389.replica:replica.py:2515 Retry: Replication from 
ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is NOT working (expect c70616db-a4ec-468f-af1a-4de30d59e357 / got description=6b22b4be-cb5c-487a-a5be-9cb19fded884) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is working INFO  lib389.replica:replica.py:2170 SUCCESS: joined master from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 INFO  lib389.topologies:topologies.py:162 Joining master master3 to master1 ... INFO  lib389.replica:replica.py:2101 SUCCESS: bootstrap to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 completed INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 is was created INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is was created INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 is NOT working (expect 07bffa42-357c-45df-94d9-21e8178b72fe / got description=c70616db-a4ec-468f-af1a-4de30d59e357) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 is working INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is NOT working (expect 
00d56d90-f40c-482a-8ddb-b871d6ea2eac / got description=07bffa42-357c-45df-94d9-21e8178b72fe) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is working INFO  lib389.replica:replica.py:2170 SUCCESS: joined master from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 INFO  lib389.topologies:topologies.py:162 Joining master master4 to master1 ... INFO  lib389.replica:replica.py:2101 SUCCESS: bootstrap to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 completed INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 is was created INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is was created INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 is NOT working (expect b5602740-743a-422a-b6bb-690e9f4f4d76 / got description=00d56d90-f40c-482a-8ddb-b871d6ea2eac) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 is working INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is NOT working (expect e9edc404-ab2e-4ed6-8447-b86239eaef45 / got description=b5602740-743a-422a-b6bb-690e9f4f4d76) INFO  lib389.replica:replica.py:2515 Retry: Replication from 
ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is NOT working (expect e9edc404-ab2e-4ed6-8447-b86239eaef45 / got description=b5602740-743a-422a-b6bb-690e9f4f4d76) INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is NOT working (expect e9edc404-ab2e-4ed6-8447-b86239eaef45 / got description=b5602740-743a-422a-b6bb-690e9f4f4d76) INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is NOT working (expect e9edc404-ab2e-4ed6-8447-b86239eaef45 / got description=b5602740-743a-422a-b6bb-690e9f4f4d76) INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is NOT working (expect e9edc404-ab2e-4ed6-8447-b86239eaef45 / got description=b5602740-743a-422a-b6bb-690e9f4f4d76) INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is NOT working (expect e9edc404-ab2e-4ed6-8447-b86239eaef45 / got description=b5602740-743a-422a-b6bb-690e9f4f4d76) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is working INFO  lib389.replica:replica.py:2170 SUCCESS: joined master from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 INFO  lib389.topologies:topologies.py:170 Ensuring master master1 to master2 ... 
INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 already exists INFO  lib389.topologies:topologies.py:170 Ensuring master master1 to master3 ... INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 already exists INFO  lib389.topologies:topologies.py:170 Ensuring master master1 to master4 ... INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 already exists INFO  lib389.topologies:topologies.py:170 Ensuring master master2 to master1 ... INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 already exists INFO  lib389.topologies:topologies.py:170 Ensuring master master2 to master3 ... INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 is was created INFO  lib389.topologies:topologies.py:170 Ensuring master master2 to master4 ... INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 is was created INFO  lib389.topologies:topologies.py:170 Ensuring master master3 to master1 ... INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 already exists INFO  lib389.topologies:topologies.py:170 Ensuring master master3 to master2 ... 
INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is was created INFO  lib389.topologies:topologies.py:170 Ensuring master master3 to master4 ... INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 is was created INFO  lib389.topologies:topologies.py:170 Ensuring master master4 to master1 ... INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 already exists INFO  lib389.topologies:topologies.py:170 Ensuring master master4 to master2 ... INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is was created INFO  lib389.topologies:topologies.py:170 Ensuring master master4 to master3 ... INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 is was created
-------------------------------Captured log call--------------------------------
INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 already exists INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 already exists INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 already exists INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 already exists INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 already exists INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 already exists INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 already exists INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 already exists INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 already exists INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 to 
ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 already exists INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 already exists INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 already exists INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is NOT working (expect da9d27af-e189-4d63-b06b-d0db30c38089 / got description=e9edc404-ab2e-4ed6-8447-b86239eaef45) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is working INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 is NOT working (expect 6c9fa08e-cafe-4d7b-9629-1c9cc71662f6 / got description=da9d27af-e189-4d63-b06b-d0db30c38089) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 is working INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 is NOT working (expect 3a51c1a9-f9af-45eb-869e-5f69a2739a1a / got description=6c9fa08e-cafe-4d7b-9629-1c9cc71662f6) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 is working 
INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is NOT working (expect db5399a9-b5a8-434b-8f04-ae9cd2ebbc03 / got description=3a51c1a9-f9af-45eb-869e-5f69a2739a1a) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is working INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 is NOT working (expect 2995f132-e9ee-454f-b4c3-70bfc9e0bb36 / got description=db5399a9-b5a8-434b-8f04-ae9cd2ebbc03) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 is working INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 is NOT working (expect 8afbe4e7-411f-465d-b077-09579b8bcf7e / got description=2995f132-e9ee-454f-b4c3-70bfc9e0bb36) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 is working INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is NOT working (expect 0658a8f6-fe1f-48bf-8cbf-beb5c24d135f / got description=8afbe4e7-411f-465d-b077-09579b8bcf7e) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 to 
ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is working INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is NOT working (expect 92cb4094-8bae-4f8a-a0de-8c561234ff04 / got description=8afbe4e7-411f-465d-b077-09579b8bcf7e) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is working INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 is NOT working (expect 1dc784ae-82c7-403f-b151-fb15bd8a1063 / got description=92cb4094-8bae-4f8a-a0de-8c561234ff04) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 is working INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is NOT working (expect 39c11e26-111c-4296-b09d-d87b4bf4cd44 / got description=1dc784ae-82c7-403f-b151-fb15bd8a1063) INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is NOT working (expect 39c11e26-111c-4296-b09d-d87b4bf4cd44 / got description=1dc784ae-82c7-403f-b151-fb15bd8a1063) INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is NOT working (expect 39c11e26-111c-4296-b09d-d87b4bf4cd44 / got description=1dc784ae-82c7-403f-b151-fb15bd8a1063) INFO 
 lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is NOT working (expect 39c11e26-111c-4296-b09d-d87b4bf4cd44 / got description=1dc784ae-82c7-403f-b151-fb15bd8a1063) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is working INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is NOT working (expect 2d66666f-5867-41c8-80fa-b5c3aa776410 / got description=39c11e26-111c-4296-b09d-d87b4bf4cd44) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is working INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 is NOT working (expect 0fd6e4bb-19a8-4761-9c72-d0f5fd6474d6 / got description=2d66666f-5867-41c8-80fa-b5c3aa776410) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 is working INFO  lib389:agreement.py:1095 Agreement (cn=003,cn=replica,cn=dc\3Dexample\2Cdc\3Dcom,cn=mapping tree,cn=config) was successfully removed INFO  lib389:agreement.py:1095 Agreement (cn=003,cn=replica,cn=dc\3Dexample\2Cdc\3Dcom,cn=mapping tree,cn=config) was successfully removed INFO  lib389:agreement.py:1095 Agreement (cn=003,cn=replica,cn=dc\3Dexample\2Cdc\3Dcom,cn=mapping tree,cn=config) was successfully removed INFO  lib389:tasks.py:1400 cleanAllRUV task (task-10222020_225351) 
completed successfully INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is NOT working (expect f6fc8e9e-236e-421f-8d12-3e4d3081c017 / got description=0fd6e4bb-19a8-4761-9c72-d0f5fd6474d6) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is working INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 is NOT working (expect deeaa052-9781-466e-871a-3ca7e794ccc9 / got description=f6fc8e9e-236e-421f-8d12-3e4d3081c017) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 is working INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is NOT working (expect ea6e1439-fbcd-4b88-ba35-d4d46c7c280e / got description=deeaa052-9781-466e-871a-3ca7e794ccc9) INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is NOT working (expect ea6e1439-fbcd-4b88-ba35-d4d46c7c280e / got description=deeaa052-9781-466e-871a-3ca7e794ccc9) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is working INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to 
ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 is NOT working (expect d6f78fb5-97a6-4ea1-be67-04db490f7968 / got description=ea6e1439-fbcd-4b88-ba35-d4d46c7c280e) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 is working INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is NOT working (expect f2ef3297-8e00-482e-a277-9b8c188588d8 / got description=d6f78fb5-97a6-4ea1-be67-04db490f7968) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is working INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is NOT working (expect 3b41e7e6-bbb4-47a8-a03c-306fa59f0e68 / got description=f2ef3297-8e00-482e-a277-9b8c188588d8) INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is NOT working (expect 3b41e7e6-bbb4-47a8-a03c-306fa59f0e68 / got description=f2ef3297-8e00-482e-a277-9b8c188588d8) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39004 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is working INFO  lib389:agreement.py:1095 Agreement (cn=004,cn=replica,cn=dc\3Dexample\2Cdc\3Dcom,cn=mapping tree,cn=config) was successfully removed INFO  lib389:agreement.py:1095 Agreement (cn=004,cn=replica,cn=dc\3Dexample\2Cdc\3Dcom,cn=mapping tree,cn=config) was successfully removed INFO  lib389:tasks.py:1400 
cleanAllRUV task (task-10222020_225441) completed successfully INFO  lib389.utils:ticket49463_test.py:186 M1: nsds5ReplicaCleanRUV=4:no:1:dc=example,dc=com
Failed tickets/ticket50232_test.py::test_ticket50232_normal 0.66
topology_st = <lib389.topologies.TopologyMain object at 0x7f4d26ac6e20>

def test_ticket50232_normal(topology_st):
"""
The fix for ticket 50232


The test sequence is:
- create suffix
- add suffix entry and some child entries
- "normally" done after populating suffix: enable replication
- get RUV and database generation
- export -r
- import
- get RUV and database generation
- assert database generation has not changed
"""

log.info('Testing Ticket 50232 - export creates non-importable ldif file, normal creation order')

topology_st.standalone.backend.create(NORMAL_SUFFIX, {BACKEND_NAME: NORMAL_BACKEND_NAME})
topology_st.standalone.mappingtree.create(NORMAL_SUFFIX, bename=NORMAL_BACKEND_NAME, parent=None)

_populate_suffix(topology_st.standalone, NORMAL_BACKEND_NAME)

repl = ReplicationManager(DEFAULT_SUFFIX)
> repl._ensure_changelog(topology_st.standalone)

/export/tests/tickets/ticket50232_test.py:113:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/local/lib/python3.8/site-packages/lib389/replica.py:1945: in _ensure_changelog
cl.create(properties={
/usr/local/lib/python3.8/site-packages/lib389/_mapped_object.py:971: in create
return self._create(rdn, properties, basedn, ensure=False)
/usr/local/lib/python3.8/site-packages/lib389/_mapped_object.py:946: in _create
self._instance.add_ext_s(e, serverctrls=self._server_controls, clientctrls=self._client_controls, escapehatch='i am sure')
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:176: in inner
return f(ent.dn, ent.toTupleList(), *args[2:])
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:425: in add_ext_s
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all=1,timeout=self.timeout)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:764: in result3
resp_type, resp_data, resp_msgid, decoded_resp_ctrls, retoid, retval = self.result4(
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:774: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:340: in _ldap_call
reraise(exc_type, exc_value, exc_traceback)
/usr/local/lib64/python3.8/site-packages/ldap/compat.py:46: in reraise
raise exc_value
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d26ac6520>
func = <built-in method result4 of LDAP object at 0x7f4d26abf990>
args = (13, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
exc_type = None, exc_value = None, exc_traceback = None

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E ldap.UNWILLING_TO_PERFORM: {'msgtype': 105, 'msgid': 13, 'result': 53, 'desc': 'Server is unwilling to perform', 'ctrls': [], 'info': 'Changelog configuration is part of the backend configuration'}

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:324: UNWILLING_TO_PERFORM
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
-------------------------------Captured log call--------------------------------
INFO  lib389:backend.py:80 List backend with suffix=o=normal INFO  lib389:backend.py:290 Creating a local backend INFO  lib389:backend.py:76 List backend cn=normal,cn=ldbm database,cn=plugins,cn=config INFO  lib389:__init__.py:1772 Found entry dn: cn=normal,cn=ldbm database,cn=plugins,cn=config cn: normal nsslapd-cachememsize: 512000 nsslapd-cachesize: -1 nsslapd-directory: /var/lib/dirsrv/slapd-standalone1/db/normal nsslapd-dncachememsize: 16777216 nsslapd-readonly: off nsslapd-require-index: off nsslapd-require-internalop-index: off nsslapd-suffix: o=normal objectClass: top objectClass: extensibleObject objectClass: nsBackendInstance INFO  lib389:mappingTree.py:154 Entry dn: cn="o=normal",cn=mapping tree,cn=config cn: o=normal nsslapd-backend: normal nsslapd-state: backend objectclass: top objectclass: extensibleObject objectclass: nsMappingTree INFO  lib389:__init__.py:1772 Found entry dn: cn=o\3Dnormal,cn=mapping tree,cn=config cn: o=normal nsslapd-backend: normal nsslapd-state: backend objectClass: top objectClass: extensibleObject objectClass: nsMappingTree
Failed tickets/ticket50232_test.py::test_ticket50232_reverse 0.08
topology_st = <lib389.topologies.TopologyMain object at 0x7f4d26ac6e20>

def test_ticket50232_reverse(topology_st):
"""
The fix for ticket 50232


The test sequence is:
- create suffix
- enable replication before suffix entry is added
- add suffix entry and some child entries
- get RUV and database generation
- export -r
- import
- get RUV and database generation
- assert database generation has not changed
"""

log.info('Testing Ticket 50232 - export creates not importable ldif file, normal creation order')

#
# Setup Replication
#
log.info('Setting up replication...')
repl = ReplicationManager(DEFAULT_SUFFIX)
# repl.create_first_master(topology_st.standalone)
#
# enable dynamic plugins, memberof and retro cl plugin
#
topology_st.standalone.backend.create(REVERSE_SUFFIX, {BACKEND_NAME: REVERSE_BACKEND_NAME})
topology_st.standalone.mappingtree.create(REVERSE_SUFFIX, bename=REVERSE_BACKEND_NAME, parent=None)

> _enable_replica(topology_st.standalone, REVERSE_SUFFIX)

/export/tests/tickets/ticket50232_test.py:155:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/export/tests/tickets/ticket50232_test.py:35: in _enable_replica
repl._ensure_changelog(instance)
/usr/local/lib/python3.8/site-packages/lib389/replica.py:1945: in _ensure_changelog
cl.create(properties={
/usr/local/lib/python3.8/site-packages/lib389/_mapped_object.py:971: in create
return self._create(rdn, properties, basedn, ensure=False)
/usr/local/lib/python3.8/site-packages/lib389/_mapped_object.py:946: in _create
self._instance.add_ext_s(e, serverctrls=self._server_controls, clientctrls=self._client_controls, escapehatch='i am sure')
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:176: in inner
return f(ent.dn, ent.toTupleList(), *args[2:])
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:425: in add_ext_s
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all=1,timeout=self.timeout)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:764: in result3
resp_type, resp_data, resp_msgid, decoded_resp_ctrls, retoid, retval = self.result4(
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:774: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:340: in _ldap_call
reraise(exc_type, exc_value, exc_traceback)
/usr/local/lib64/python3.8/site-packages/ldap/compat.py:46: in reraise
raise exc_value
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d26ac6520>
func = <built-in method result4 of LDAP object at 0x7f4d26abf990>
args = (22, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
exc_type = None, exc_value = None, exc_traceback = None

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E ldap.UNWILLING_TO_PERFORM: {'msgtype': 105, 'msgid': 22, 'result': 53, 'desc': 'Server is unwilling to perform', 'ctrls': [], 'info': 'Changelog configuration is part of the backend configuration'}

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:324: UNWILLING_TO_PERFORM
-------------------------------Captured log call--------------------------------
INFO  lib389:backend.py:80 List backend with suffix=o=reverse INFO  lib389:backend.py:290 Creating a local backend INFO  lib389:backend.py:76 List backend cn=reverse,cn=ldbm database,cn=plugins,cn=config INFO  lib389:__init__.py:1772 Found entry dn: cn=reverse,cn=ldbm database,cn=plugins,cn=config cn: reverse nsslapd-cachememsize: 512000 nsslapd-cachesize: -1 nsslapd-directory: /var/lib/dirsrv/slapd-standalone1/db/reverse nsslapd-dncachememsize: 16777216 nsslapd-readonly: off nsslapd-require-index: off nsslapd-require-internalop-index: off nsslapd-suffix: o=reverse objectClass: top objectClass: extensibleObject objectClass: nsBackendInstance INFO  lib389:mappingTree.py:154 Entry dn: cn="o=reverse",cn=mapping tree,cn=config cn: o=reverse nsslapd-backend: reverse nsslapd-state: backend objectclass: top objectclass: extensibleObject objectclass: nsMappingTree INFO  lib389:__init__.py:1772 Found entry dn: cn=o\3Dreverse,cn=mapping tree,cn=config cn: o=reverse nsslapd-backend: reverse nsslapd-state: backend objectClass: top objectClass: extensibleObject objectClass: nsMappingTree
XFailed suites/acl/syntax_test.py::test_aci_invalid_syntax_fail[test_targattrfilters_18] 0.01
topo = <lib389.topologies.TopologyMain object at 0x7f4d307300d0>
real_value = '(target = ldap:///cn=Jeff Vedder,ou=Product Development,dc=example,dc=com)(targetattr=*)(version 3.0; acl "Name of th...3123123123123123123123123123123123123123123123123123123123123123123123123123123123123123123123123123123123123123123";)'

@pytest.mark.xfail(reason='https://bugzilla.redhat.com/show_bug.cgi?id=1691473')
@pytest.mark.parametrize("real_value", [a[1] for a in FAILED],
ids=[a[0] for a in FAILED])
def test_aci_invalid_syntax_fail(topo, real_value):
"""

Try to set wrong ACI syntax.

:id: 83c40784-fff5-49c8-9535-7064c9c19e7e
:parametrized: yes
:setup: Standalone Instance
:steps:
1. Create ACI
2. Try to setup the ACI with Instance
:expectedresults:
1. It should pass
2. It should not pass
"""
domain = Domain(topo.standalone, DEFAULT_SUFFIX)
with pytest.raises(ldap.INVALID_SYNTAX):
> domain.add("aci", real_value)
E Failed: DID NOT RAISE <class 'ldap.INVALID_SYNTAX'>

suites/acl/syntax_test.py:215: Failed
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
XFailed suites/acl/syntax_test.py::test_aci_invalid_syntax_fail[test_targattrfilters_20] 0.01
topo = <lib389.topologies.TopologyMain object at 0x7f4d307300d0>
real_value = '(target = ldap:///cn=Jeff Vedder,ou=Product Development,dc=example,dc=com)(targetattr=*)(version 3.0; acl "Name of the ACI"; deny(write)userdns="ldap:///anyone";)'

@pytest.mark.xfail(reason='https://bugzilla.redhat.com/show_bug.cgi?id=1691473')
@pytest.mark.parametrize("real_value", [a[1] for a in FAILED],
ids=[a[0] for a in FAILED])
def test_aci_invalid_syntax_fail(topo, real_value):
"""

Try to set wrong ACI syntax.

:id: 83c40784-fff5-49c8-9535-7064c9c19e7e
:parametrized: yes
:setup: Standalone Instance
:steps:
1. Create ACI
2. Try to setup the ACI with Instance
:expectedresults:
1. It should pass
2. It should not pass
"""
domain = Domain(topo.standalone, DEFAULT_SUFFIX)
with pytest.raises(ldap.INVALID_SYNTAX):
> domain.add("aci", real_value)
E Failed: DID NOT RAISE <class 'ldap.INVALID_SYNTAX'>

suites/acl/syntax_test.py:215: Failed
XFailed suites/acl/syntax_test.py::test_aci_invalid_syntax_fail[test_bind_rule_set_with_more_than_three] 0.01
topo = <lib389.topologies.TopologyMain object at 0x7f4d307300d0>
real_value = '(target = ldap:///dc=example,dc=com)(targetattr=*)(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:////////anyone";)'

@pytest.mark.xfail(reason='https://bugzilla.redhat.com/show_bug.cgi?id=1691473')
@pytest.mark.parametrize("real_value", [a[1] for a in FAILED],
ids=[a[0] for a in FAILED])
def test_aci_invalid_syntax_fail(topo, real_value):
"""

Try to set wrong ACI syntax.

:id: 83c40784-fff5-49c8-9535-7064c9c19e7e
:parametrized: yes
:setup: Standalone Instance
:steps:
1. Create ACI
2. Try to setup the ACI with Instance
:expectedresults:
1. It should pass
2. It should not pass
"""
domain = Domain(topo.standalone, DEFAULT_SUFFIX)
with pytest.raises(ldap.INVALID_SYNTAX):
> domain.add("aci", real_value)
E Failed: DID NOT RAISE <class 'ldap.INVALID_SYNTAX'>

suites/acl/syntax_test.py:215: Failed
XFailed suites/acl/userattr_test.py::test_mod_see_also_positive[(LEVEL_3, CHILDREN)] 0.07
topo = <lib389.topologies.TopologyMain object at 0x7f4d318c5610>
_add_user = None, user = 'uid=Grandparent,ou=Inheritance,dc=example,dc=com'
entry = 'ou=CHILDREN,ou=PARENTS,ou=GRANDPARENTS,ou=ANCESTORS,ou=Inheritance,dc=example,dc=com'

@pytest.mark.parametrize("user,entry", [
(CAN, ROLEDNACCESS),
(CAN, USERDNACCESS),
(CAN, GROUPDNACCESS),
(CAN, LDAPURLACCESS),
(CAN, ATTRNAMEACCESS),
(LEVEL_0, OU_2),
(LEVEL_1, ANCESTORS),
(LEVEL_2, GRANDPARENTS),
(LEVEL_4, OU_2),
(LEVEL_4, ANCESTORS),
(LEVEL_4, GRANDPARENTS),
(LEVEL_4, PARENTS),
(LEVEL_4, CHILDREN),
pytest.param(LEVEL_3, CHILDREN, marks=pytest.mark.xfail(reason="May be some bug")),
], ids=[
"(CAN,ROLEDNACCESS)",
"(CAN,USERDNACCESS)",
"(CAN,GROUPDNACCESS)",
"(CAN,LDAPURLACCESS)",
"(CAN,ATTRNAMEACCESS)",
"(LEVEL_0, OU_2)",
"(LEVEL_1,ANCESTORS)",
"(LEVEL_2,GRANDPARENTS)",
"(LEVEL_4,OU_2)",
"(LEVEL_4, ANCESTORS)",
"(LEVEL_4,GRANDPARENTS)",
"(LEVEL_4,PARENTS)",
"(LEVEL_4,CHILDREN)",
"(LEVEL_3, CHILDREN)"
])
def test_mod_see_also_positive(topo, _add_user, user, entry):
"""
Try to set seeAlso on entry with binding specific user, it will success
as per the ACI.

:id: 65745426-7a01-11e8-8ac2-8c16451d917b
:parametrized: yes
:setup: Standalone Instance
:steps:
1. Add test entry
2. Add ACI
3. User should follow ACI role
:expectedresults:
1. Entry should be added
2. Operation should succeed
3. Operation should succeed
"""
conn = UserAccount(topo.standalone, user).bind(PW_DM)
> UserAccount(conn, entry).replace('seeAlso', 'cn=1')

suites/acl/userattr_test.py:216:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/local/lib/python3.8/site-packages/lib389/_mapped_object.py:280: in replace
self.set(key, value, action=ldap.MOD_REPLACE)
/usr/local/lib/python3.8/site-packages/lib389/_mapped_object.py:446: in set
return self._instance.modify_ext_s(self._dn, [(action, key, value)],
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:613: in modify_ext_s
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all=1,timeout=self.timeout)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:764: in result3
resp_type, resp_data, resp_msgid, decoded_resp_ctrls, retoid, retval = self.result4(
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:774: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/usr/local/lib/python3.8/site-packages/lib389/__init__.py:180: in inner
return f(*args, **kwargs)
/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:340: in _ldap_call
reraise(exc_type, exc_value, exc_traceback)
/usr/local/lib64/python3.8/site-packages/ldap/compat.py:46: in reraise
raise exc_value
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f4d304d8e20>
func = <built-in method result4 of LDAP object at 0x7f4d304e7390>
args = (5, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
exc_type = None, exc_value = None, exc_traceback = None

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E ldap.INSUFFICIENT_ACCESS: {'msgtype': 103, 'msgid': 5, 'result': 50, 'desc': 'Insufficient access', 'ctrls': [], 'info': "Insufficient 'write' privilege to the 'seeAlso' attribute of entry 'ou=children,ou=parents,ou=grandparents,ou=ancestors,ou=inheritance,dc=example,dc=com'.\n"}

/usr/local/lib64/python3.8/site-packages/ldap/ldapobject.py:324: INSUFFICIENT_ACCESS
XFailed suites/config/config_test.py::test_defaultnamingcontext_1 0.38
topo = <lib389.topologies.TopologyMain object at 0x7f4d303cf460>

@pytest.mark.xfail(reason="This may fail due to bug 1610234")
def test_defaultnamingcontext_1(topo):
"""This test case should be part of function test_defaultnamingcontext
Please move it back after we have a fix for bug 1610234
"""
log.info("Remove the original suffix which is currently nsslapd-defaultnamingcontext"
"and check nsslapd-defaultnamingcontext become empty.")

""" Please remove these declarations after moving the test
to function test_defaultnamingcontext
"""
backends = Backends(topo.standalone)
test_db2 = 'test2_db'
test_suffix2 = 'dc=test2,dc=com'
b2 = backends.create(properties={'cn': test_db2,
'nsslapd-suffix': test_suffix2})
b2.delete()
> assert topo.standalone.config.get_attr_val_utf8('nsslapd-defaultnamingcontext') == ' '
E AssertionError: assert 'dc=example,dc=com' == ' '
E Strings contain only whitespace, escaping them using repr()
E - ' '
E + 'dc=example,dc=com'

suites/config/config_test.py:280: AssertionError
-------------------------------Captured log call--------------------------------
INFO  tests.suites.config.config_test:config_test.py:268 Remove the original suffix which is currently nsslapd-defaultnamingcontextand check nsslapd-defaultnamingcontext become empty.
XFailed suites/export/export_test.py::test_dbtasks_db2ldif_with_non_accessible_ldif_file_path_output 3.52
topo = <lib389.topologies.TopologyMain object at 0x7f4d2ecc2ca0>

@pytest.mark.bz1860291
@pytest.mark.xfail(reason="bug 1860291")
@pytest.mark.skipif(ds_is_older("1.3.10", "1.4.2"), reason="Not implemented")
def test_dbtasks_db2ldif_with_non_accessible_ldif_file_path_output(topo):
"""Export with db2ldif, giving a ldif file path which can't be accessed by the user (dirsrv by default)

:id: fcc63387-e650-40a7-b643-baa68c190037
:setup: Standalone Instance - entries imported in the db
:steps:
1. Stop the server
2. Launch db2ldif with a non accessible ldif file path
3. check the error reported in the command output
:expected results:
1. Operation successful
2. Operation properly fails
3. A clear error message is reported as output of the cli
"""
export_ldif = '/tmp/nonexistent/export.ldif'

log.info("Stopping the instance...")
topo.standalone.stop()

log.info("Performing an offline export to a non accessible ldif file path - should fail and output a clear error message")
expected_output="No such file or directory"
> run_db2ldif_and_clear_logs(topo, topo.standalone, DEFAULT_BENAME, export_ldif, expected_output)

suites/export/export_test.py:150:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology = <lib389.topologies.TopologyMain object at 0x7f4d2ecc2ca0>
instance = <lib389.DirSrv object at 0x7f4d2ecc25b0>, backend = 'userRoot'
ldif = '/tmp/nonexistent/export.ldif', output_msg = 'No such file or directory'
encrypt = False, repl = False

def run_db2ldif_and_clear_logs(topology, instance, backend, ldif, output_msg, encrypt=False, repl=False):
args = FakeArgs()
args.instance = instance.serverid
args.backend = backend
args.encrypted = encrypt
args.replication = repl
args.ldif = ldif

dbtasks_db2ldif(instance, topology.logcap.log, args)

log.info('checking output msg')
if not topology.logcap.contains(output_msg):
log.error('The output message is not the expected one')
> assert False
E assert False

suites/export/export_test.py:36: AssertionError
------------------------------Captured stderr call------------------------------
ldiffile: /tmp/nonexistent/export.ldif
-------------------------------Captured log call--------------------------------
INFO  lib389.utils:export_test.py:145 Stopping the instance... INFO  lib389.utils:export_test.py:148 Performing an offline export to a non accessible ldif file path - should fail and output a clear error message CRITICAL LogCapture:dbtasks.py:40 db2ldif failed INFO  lib389.utils:export_test.py:33 checking output msg ERROR  lib389.utils:export_test.py:35 The output message is not the expected one
XFailed suites/replication/conflict_resolve_test.py::TestTwoMasters::test_memberof_groups 0.00
self = <tests.suites.replication.conflict_resolve_test.TestTwoMasters object at 0x7f4d2d50a940>
topology_m2 = <lib389.topologies.TopologyMain object at 0x7f4d2d4f7790>
base_m2 = <lib389.idm.nscontainer.nsContainer object at 0x7f4d2d206250>

def test_memberof_groups(self, topology_m2, base_m2):
"""Check that conflict properly resolved for operations
with memberOf and groups

:id: 77f09b18-03d1-45da-940b-1ad2c2908eb3
:setup: Two master replication, test container for entries, enable plugin logging,
audit log, error log for replica and access log for internal
:steps:
1. Enable memberOf plugin
2. Add 30 users to m1 and wait for replication to happen
3. Pause replication
4. Create a group on m1 and m2
5. Create a group on m1 and m2, delete from m1
6. Create a group on m1, delete from m1, and create on m2,
7. Create a group on m2 and m1, delete from m1
8. Create two different groups on m2
9. Resume replication
10. Check that the entries on both masters are the same and replication is working
:expectedresults:
1. It should pass
2. It should pass
3. It should pass
4. It should pass
5. It should pass
6. It should pass
7. It should pass
8. It should pass
9. It should pass
10. It should pass
"""

> pytest.xfail("Issue 49591 - work in progress")
E _pytest.outcomes.XFailed: Issue 49591 - work in progress

suites/replication/conflict_resolve_test.py:402: XFailed
XFailed suites/replication/conflict_resolve_test.py::TestTwoMasters::test_managed_entries 0.00
self = <tests.suites.replication.conflict_resolve_test.TestTwoMasters object at 0x7f4d2d1f35b0>
topology_m2 = <lib389.topologies.TopologyMain object at 0x7f4d2d4f7790>

def test_managed_entries(self, topology_m2):
"""Check that conflict properly resolved for operations
with managed entries

:id: 77f09b18-03d1-45da-940b-1ad2c2908eb4
:setup: Two master replication, test container for entries, enable plugin logging,
audit log, error log for replica and access log for internal
:steps:
1. Create ou=managed_users and ou=managed_groups under test container
2. Configure managed entries plugin and add a template to test container
3. Add a user to m1 and wait for replication to happen
4. Pause replication
5. Create a user on m1 and m2 with a same group ID on both master
6. Create a user on m1 and m2 with a different group ID on both master
7. Resume replication
8. Check that the entries on both masters are the same and replication is working
:expectedresults:
1. It should pass
2. It should pass
3. It should pass
4. It should pass
5. It should pass
6. It should pass
7. It should pass
8. It should pass
"""

> pytest.xfail("Issue 49591 - work in progress")
E _pytest.outcomes.XFailed: Issue 49591 - work in progress

suites/replication/conflict_resolve_test.py:493: XFailed
XFailed suites/replication/conflict_resolve_test.py::TestTwoMasters::test_nested_entries_with_children 0.00
self = <tests.suites.replication.conflict_resolve_test.TestTwoMasters object at 0x7f4d2d1fa130>
topology_m2 = <lib389.topologies.TopologyMain object at 0x7f4d2d4f7790>
base_m2 = <lib389.idm.nscontainer.nsContainer object at 0x7f4d2d1fa370>

def test_nested_entries_with_children(self, topology_m2, base_m2):
"""Check that conflict properly resolved for operations
with nested entries with children

:id: 77f09b18-03d1-45da-940b-1ad2c2908eb5
:setup: Two master replication, test container for entries, enable plugin logging,
audit log, error log for replica and access log for internal
:steps:
1. Add 15 containers to m1 and wait for replication to happen
2. Pause replication
3. Create parent-child on master2 and master1
4. Create parent-child on master1 and master2
5. Create parent-child on master1 and master2 different child rdn
6. Create parent-child on master1 and delete parent on master2
7. Create parent on master1, delete it and parent-child on master2, delete them
8. Create parent on master1, delete it and parent-two children on master2
9. Create parent-two children on master1 and parent-child on master2, delete them
10. Create three subsets inside existing container entry, applying only part of changes on m2
11. Create more combinations of the subset with parent-child on m1 and parent on m2
12. Delete container on m1, modify user1 on m1, create parent on m2 and modify user2 on m2
13. Resume replication
14. Check that the entries on both masters are the same and replication is working
:expectedresults:
1. It should pass
2. It should pass
3. It should pass
4. It should pass
5. It should pass
6. It should pass
7. It should pass
8. It should pass
9. It should pass
10. It should pass
11. It should pass
12. It should pass
13. It should pass
14. It should pass
"""

> pytest.xfail("Issue 49591 - work in progress")
E _pytest.outcomes.XFailed: Issue 49591 - work in progress

suites/replication/conflict_resolve_test.py:584: XFailed
XFailed suites/replication/conflict_resolve_test.py::TestThreeMasters::test_nested_entries 0.00
self = <tests.suites.replication.conflict_resolve_test.TestThreeMasters object at 0x7f4d2d211e80>
topology_m3 = <lib389.topologies.TopologyMain object at 0x7f4d2d467700>
base_m3 = <lib389.idm.nscontainer.nsContainer object at 0x7f4d2d4f1f10>

def test_nested_entries(self, topology_m3, base_m3):
"""Check that conflict properly resolved for operations
with nested entries with children

:id: 77f09b18-03d1-45da-940b-1ad2c2908eb6
:setup: Three master replication, test container for entries, enable plugin logging,
audit log, error log for replica and access log for internal
:steps:
1. Add 15 containers to m1 and wait for replication to happen
2. Pause replication
3. Create two child entries under each of two entries
4. Create three child entries under each of three entries
5. Create two parents on m1 and m2, then on m1 - create a child and delete one parent,
on m2 - delete one parent and create a child
6. Test a few more parent-child combinations with three instances
7. Resume replication
8. Check that the entries on both masters are the same and replication is working
:expectedresults:
1. It should pass
2. It should pass
3. It should pass
4. It should pass
5. It should pass
6. It should pass
7. It should pass
8. It should pass
"""

> pytest.xfail("Issue 49591 - work in progress")
E _pytest.outcomes.XFailed: Issue 49591 - work in progress

suites/replication/conflict_resolve_test.py:968: XFailed
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39001, 'ldap-secureport': 63701, 'server-id': 'master1', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39002, 'ldap-secureport': 63702, 'server-id': 'master2', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39003, 'ldap-secureport': 63703, 'server-id': 'master3', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:148 Creating replication topology. INFO  lib389.topologies:topologies.py:162 Joining master master2 to master1 ... INFO  lib389.replica:replica.py:2101 SUCCESS: bootstrap to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 completed INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is was created INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is was created INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is NOT working (expect dfb47986-2609-44f9-91e7-37ff17b3aa07 / got description=None) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is working INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is NOT working (expect 1715e2c2-b5e1-44cf-8879-c6dc542a7463 / got 
description=dfb47986-2609-44f9-91e7-37ff17b3aa07) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is working INFO  lib389.replica:replica.py:2170 SUCCESS: joined master from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 INFO  lib389.topologies:topologies.py:162 Joining master master3 to master1 ... INFO  lib389.replica:replica.py:2101 SUCCESS: bootstrap to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 completed INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 is was created INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is was created INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 is NOT working (expect c81cf148-7b90-4878-82b9-6b18535b9846 / got description=1715e2c2-b5e1-44cf-8879-c6dc542a7463) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 is working INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is NOT working (expect ead71b5c-30f6-4c33-8514-278e9e03a5db / got description=c81cf148-7b90-4878-82b9-6b18535b9846) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 to 
ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is working INFO  lib389.replica:replica.py:2170 SUCCESS: joined master from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 INFO  lib389.topologies:topologies.py:170 Ensuring master master1 to master2 ... INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 already exists INFO  lib389.topologies:topologies.py:170 Ensuring master master1 to master3 ... INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 already exists INFO  lib389.topologies:topologies.py:170 Ensuring master master2 to master1 ... INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 already exists INFO  lib389.topologies:topologies.py:170 Ensuring master master2 to master3 ... INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 is was created INFO  lib389.topologies:topologies.py:170 Ensuring master master3 to master1 ... INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 already exists INFO  lib389.topologies:topologies.py:170 Ensuring master master3 to master2 ... INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39003 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is was created
XFailed suites/replication/replica_config_test.py::test_agmt_num_add[nsds5ReplicaPort-0-65535-9999999999999999999999999999999999999999999999999999999999999999999-invalid-389] 0.07
topo = <lib389.topologies.TopologyMain object at 0x7f4d2d1c6a30>
attr = 'nsds5ReplicaPort', too_small = '0', too_big = '65535'
overflow = '9999999999999999999999999999999999999999999999999999999999999999999'
notnum = 'invalid', valid = '389'

@pytest.mark.xfail(reason="Agreement validation currently does not work.")
@pytest.mark.parametrize("attr, too_small, too_big, overflow, notnum, valid", agmt_attrs)
def test_agmt_num_add(topo, attr, too_small, too_big, overflow, notnum, valid):
"""Test all the number values you can set for a replica config entry

:id: a8b47d4a-a089-4d70-8070-e6181209bf94
:parametrized: yes
:setup: standalone instance
:steps:
1. Use a value that is too small
2. Use a value that is too big
3. Use a value that overflows the int
4. Use a value with character value (not a number)
5. Use a valid value
:expectedresults:
1. Add is rejected
2. Add is rejected
3. Add is rejected
4. Add is rejected
5. Add is allowed
"""

agmt_reset(topo)
replica = replica_setup(topo)

agmts = Agreements(topo.standalone, basedn=replica.dn)

# Test too small
perform_invalid_create(agmts, agmt_dict, attr, too_small)
# Test too big
> perform_invalid_create(agmts, agmt_dict, attr, too_big)

suites/replication/replica_config_test.py:217:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

many = <lib389.agreement.Agreements object at 0x7f4d2d432640>
properties = {'cn': 'test_agreement', 'nsDS5ReplicaBindDN': 'uid=tester', 'nsDS5ReplicaBindMethod': 'SIMPLE', 'nsDS5ReplicaHost': 'localhost.localdomain', ...}
attr = 'nsds5ReplicaPort', value = '65535'

def perform_invalid_create(many, properties, attr, value):
my_properties = copy.deepcopy(properties)
my_properties[attr] = value
with pytest.raises(ldap.LDAPError) as ei:
> many.create(properties=my_properties)
E Failed: DID NOT RAISE <class 'ldap.LDAPError'>

suites/replication/replica_config_test.py:108: Failed
XFailed suites/replication/replica_config_test.py::test_agmt_num_modify[nsds5ReplicaPort-0-65535-9999999999999999999999999999999999999999999999999999999999999999999-invalid-389] 0.18
topo = <lib389.topologies.TopologyMain object at 0x7f4d2d1c6a30>
attr = 'nsds5ReplicaPort', too_small = '0', too_big = '65535'
overflow = '9999999999999999999999999999999999999999999999999999999999999999999'
notnum = 'invalid', valid = '389'

@pytest.mark.xfail(reason="Agreement validation currently does not work.")
@pytest.mark.parametrize("attr, too_small, too_big, overflow, notnum, valid", agmt_attrs)
def test_agmt_num_modify(topo, attr, too_small, too_big, overflow, notnum, valid):
"""Test all the number values you can set for a replica config entry

:id: a8b47d4a-a089-4d70-8070-e6181209bf95
:parametrized: yes
:setup: standalone instance
:steps:
1. Replace a value that is too small
2. Replace a value that is too big
3. Replace a value that overflows the int
4. Replace a value with character value (not a number)
5. Replace a value with a valid value
:expectedresults:
1. Value is rejected
2. Value is rejected
3. Value is rejected
4. Value is rejected
5. Value is allowed
"""

agmt = agmt_setup(topo)

# Value too small
> perform_invalid_modify(agmt, attr, too_small)

suites/replication/replica_config_test.py:253:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

o = <lib389.agreement.Agreement object at 0x7f4d2d3a9d00>
attr = 'nsds5ReplicaPort', value = '0'

def perform_invalid_modify(o, attr, value):
with pytest.raises(ldap.LDAPError) as ei:
> o.replace(attr, value)
E Failed: DID NOT RAISE <class 'ldap.LDAPError'>

suites/replication/replica_config_test.py:113: Failed
XFailed suites/replication/replica_config_test.py::test_agmt_num_modify[nsds5ReplicaTimeout--1-9223372036854775807-9999999999999999999999999999999999999999999999999999999999999999999-invalid-6] 0.21
topo = <lib389.topologies.TopologyMain object at 0x7f4d2d1c6a30>
attr = 'nsds5ReplicaTimeout', too_small = '-1', too_big = '9223372036854775807'
overflow = '9999999999999999999999999999999999999999999999999999999999999999999'
notnum = 'invalid', valid = '6'

@pytest.mark.xfail(reason="Agreement validation currently does not work.")
@pytest.mark.parametrize("attr, too_small, too_big, overflow, notnum, valid", agmt_attrs)
def test_agmt_num_modify(topo, attr, too_small, too_big, overflow, notnum, valid):
"""Test all the number values you can set for a replica config entry

:id: a8b47d4a-a089-4d70-8070-e6181209bf95
:parametrized: yes
:setup: standalone instance
:steps:
1. Replace a value that is too small
2. Replace a value that is too big
3. Replace a value that overflows the int
4. Replace a value with character value (not a number)
5. Replace a value with a valid value
:expectedresults:
1. Value is rejected
2. Value is rejected
3. Value is rejected
4. Value is rejected
5. Value is allowed
"""

agmt = agmt_setup(topo)

# Value too small
perform_invalid_modify(agmt, attr, too_small)
# Value too big
> perform_invalid_modify(agmt, attr, too_big)

suites/replication/replica_config_test.py:255:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

o = <lib389.agreement.Agreement object at 0x7f4d2d2110a0>
attr = 'nsds5ReplicaTimeout', value = '9223372036854775807'

def perform_invalid_modify(o, attr, value):
with pytest.raises(ldap.LDAPError) as ei:
> o.replace(attr, value)
E Failed: DID NOT RAISE <class 'ldap.LDAPError'>

suites/replication/replica_config_test.py:113: Failed
XFailed suites/replication/replica_config_test.py::test_agmt_num_modify[nsds5ReplicaBusyWaitTime--1-9223372036854775807-9999999999999999999999999999999999999999999999999999999999999999999-invalid-6] 0.19
topo = <lib389.topologies.TopologyMain object at 0x7f4d2d1c6a30>
attr = 'nsds5ReplicaBusyWaitTime', too_small = '-1'
too_big = '9223372036854775807'
overflow = '9999999999999999999999999999999999999999999999999999999999999999999'
notnum = 'invalid', valid = '6'

@pytest.mark.xfail(reason="Agreement validation currently does not work.")
@pytest.mark.parametrize("attr, too_small, too_big, overflow, notnum, valid", agmt_attrs)
def test_agmt_num_modify(topo, attr, too_small, too_big, overflow, notnum, valid):
"""Test all the number values you can set for a replica config entry

:id: a8b47d4a-a089-4d70-8070-e6181209bf95
:parametrized: yes
:setup: standalone instance
:steps:
1. Replace a value that is too small
2. Replace a value that is too big
3. Replace a value that overflows the int
4. Replace a value with character value (not a number)
5. Replace a value with a valid value
:expectedresults:
1. Value is rejected
2. Value is rejected
3. Value is rejected
4. Value is rejected
5. Value is allowed
"""

agmt = agmt_setup(topo)

# Value too small
perform_invalid_modify(agmt, attr, too_small)
# Value too big
> perform_invalid_modify(agmt, attr, too_big)

suites/replication/replica_config_test.py:255:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

o = <lib389.agreement.Agreement object at 0x7f4d2d4f1f10>
attr = 'nsds5ReplicaBusyWaitTime', value = '9223372036854775807'

def perform_invalid_modify(o, attr, value):
with pytest.raises(ldap.LDAPError) as ei:
> o.replace(attr, value)
E Failed: DID NOT RAISE <class 'ldap.LDAPError'>

suites/replication/replica_config_test.py:113: Failed
XFailed suites/replication/replica_config_test.py::test_agmt_num_modify[nsds5ReplicaSessionPauseTime--1-9223372036854775807-9999999999999999999999999999999999999999999999999999999999999999999-invalid-6] 0.19
topo = <lib389.topologies.TopologyMain object at 0x7f4d2d1c6a30>
attr = 'nsds5ReplicaSessionPauseTime', too_small = '-1'
too_big = '9223372036854775807'
overflow = '9999999999999999999999999999999999999999999999999999999999999999999'
notnum = 'invalid', valid = '6'

@pytest.mark.xfail(reason="Agreement validation currently does not work.")
@pytest.mark.parametrize("attr, too_small, too_big, overflow, notnum, valid", agmt_attrs)
def test_agmt_num_modify(topo, attr, too_small, too_big, overflow, notnum, valid):
"""Test all the number values you can set for a replica config entry

:id: a8b47d4a-a089-4d70-8070-e6181209bf95
:parametrized: yes
:setup: standalone instance
:steps:
1. Replace a value that is too small
2. Replace a value that is too big
3. Replace a value that overflows the int
4. Replace a value with character value (not a number)
5. Replace a value with a valid value
:expectedresults:
1. Value is rejected
2. Value is rejected
3. Value is rejected
4. Value is rejected
5. Value is allowed
"""

agmt = agmt_setup(topo)

# Value too small
perform_invalid_modify(agmt, attr, too_small)
# Value too big
> perform_invalid_modify(agmt, attr, too_big)

suites/replication/replica_config_test.py:255:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

o = <lib389.agreement.Agreement object at 0x7f4d2d2a42b0>
attr = 'nsds5ReplicaSessionPauseTime', value = '9223372036854775807'

def perform_invalid_modify(o, attr, value):
with pytest.raises(ldap.LDAPError) as ei:
> o.replace(attr, value)
E Failed: DID NOT RAISE <class 'ldap.LDAPError'>

suites/replication/replica_config_test.py:113: Failed
XFailed suites/replication/replica_config_test.py::test_agmt_num_modify[nsds5ReplicaFlowControlWindow--1-9223372036854775807-9999999999999999999999999999999999999999999999999999999999999999999-invalid-6] 0.19
topo = <lib389.topologies.TopologyMain object at 0x7f4d2d1c6a30>
attr = 'nsds5ReplicaFlowControlWindow', too_small = '-1'
too_big = '9223372036854775807'
overflow = '9999999999999999999999999999999999999999999999999999999999999999999'
notnum = 'invalid', valid = '6'

@pytest.mark.xfail(reason="Agreement validation currently does not work.")
@pytest.mark.parametrize("attr, too_small, too_big, overflow, notnum, valid", agmt_attrs)
def test_agmt_num_modify(topo, attr, too_small, too_big, overflow, notnum, valid):
"""Test all the number values you can set for a replica config entry

:id: a8b47d4a-a089-4d70-8070-e6181209bf95
:parametrized: yes
:setup: standalone instance
:steps:
1. Replace a value that is too small
2. Replace a value that is too big
3. Replace a value that overflows the int
4. Replace a value with character value (not a number)
5. Replace a value with a valid value
:expectedresults:
1. Value is rejected
2. Value is rejected
3. Value is rejected
4. Value is rejected
5. Value is allowed
"""

agmt = agmt_setup(topo)

# Value too small
perform_invalid_modify(agmt, attr, too_small)
# Value too big
> perform_invalid_modify(agmt, attr, too_big)

suites/replication/replica_config_test.py:255:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

o = <lib389.agreement.Agreement object at 0x7f4d2d577190>
attr = 'nsds5ReplicaFlowControlWindow', value = '9223372036854775807'

def perform_invalid_modify(o, attr, value):
with pytest.raises(ldap.LDAPError) as ei:
> o.replace(attr, value)
E Failed: DID NOT RAISE <class 'ldap.LDAPError'>

suites/replication/replica_config_test.py:113: Failed
XFailed suites/replication/replica_config_test.py::test_agmt_num_modify[nsds5ReplicaFlowControlPause--1-9223372036854775807-9999999999999999999999999999999999999999999999999999999999999999999-invalid-6] 0.20
topo = <lib389.topologies.TopologyMain object at 0x7f4d2d1c6a30>
attr = 'nsds5ReplicaFlowControlPause', too_small = '-1'
too_big = '9223372036854775807'
overflow = '9999999999999999999999999999999999999999999999999999999999999999999'
notnum = 'invalid', valid = '6'

@pytest.mark.xfail(reason="Agreement validation currently does not work.")
@pytest.mark.parametrize("attr, too_small, too_big, overflow, notnum, valid", agmt_attrs)
def test_agmt_num_modify(topo, attr, too_small, too_big, overflow, notnum, valid):
"""Test all the number values you can set for a replica config entry

:id: a8b47d4a-a089-4d70-8070-e6181209bf95
:parametrized: yes
:setup: standalone instance
:steps:
1. Replace a value that is too small
2. Replace a value that is too big
3. Replace a value that overflows the int
4. Replace a value with character value (not a number)
5. Replace a value with a valid value
:expectedresults:
1. Value is rejected
2. Value is rejected
3. Value is rejected
4. Value is rejected
5. Value is allowed
"""

agmt = agmt_setup(topo)

# Value too small
perform_invalid_modify(agmt, attr, too_small)
# Value too big
> perform_invalid_modify(agmt, attr, too_big)

suites/replication/replica_config_test.py:255:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

o = <lib389.agreement.Agreement object at 0x7f4d2d2a9940>
attr = 'nsds5ReplicaFlowControlPause', value = '9223372036854775807'

def perform_invalid_modify(o, attr, value):
with pytest.raises(ldap.LDAPError) as ei:
> o.replace(attr, value)
E Failed: DID NOT RAISE <class 'ldap.LDAPError'>

suites/replication/replica_config_test.py:113: Failed
XFailed suites/replication/replica_config_test.py::test_agmt_num_modify[nsds5ReplicaProtocolTimeout--1-9223372036854775807-9999999999999999999999999999999999999999999999999999999999999999999-invalid-6] 0.36
topo = <lib389.topologies.TopologyMain object at 0x7f4d2d1c6a30>
attr = 'nsds5ReplicaProtocolTimeout', too_small = '-1'
too_big = '9223372036854775807'
overflow = '9999999999999999999999999999999999999999999999999999999999999999999'
notnum = 'invalid', valid = '6'

@pytest.mark.xfail(reason="Agreement validation currently does not work.")
@pytest.mark.parametrize("attr, too_small, too_big, overflow, notnum, valid", agmt_attrs)
def test_agmt_num_modify(topo, attr, too_small, too_big, overflow, notnum, valid):
"""Test all the number values you can set for a replica config entry

:id: a8b47d4a-a089-4d70-8070-e6181209bf95
:parametrized: yes
:setup: standalone instance
:steps:
1. Replace a value that is too small
2. Replace a value that is too big
3. Replace a value that overflows the int
4. Replace a value with character value (not a number)
5. Replace a value with a valid value
:expectedresults:
1. Value is rejected
2. Value is rejected
3. Value is rejected
4. Value is rejected
5. Value is allowed
"""

agmt = agmt_setup(topo)

# Value too small
perform_invalid_modify(agmt, attr, too_small)
# Value too big
> perform_invalid_modify(agmt, attr, too_big)

suites/replication/replica_config_test.py:255:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

o = <lib389.agreement.Agreement object at 0x7f4d2d5a3910>
attr = 'nsds5ReplicaProtocolTimeout', value = '9223372036854775807'

def perform_invalid_modify(o, attr, value):
with pytest.raises(ldap.LDAPError) as ei:
> o.replace(attr, value)
E Failed: DID NOT RAISE <class 'ldap.LDAPError'>

suites/replication/replica_config_test.py:113: Failed
XFailed suites/replication/ruvstore_test.py::test_memoryruv_sync_with_databaseruv 0.20
topo = <lib389.topologies.TopologyMain object at 0x7f4d312df250>

@pytest.mark.xfail(reason="No method to safely access DB ruv currently exists online.")
def test_memoryruv_sync_with_databaseruv(topo):
"""Check if memory ruv and database ruv are synced

:id: 5f38ac5f-6353-460d-bf60-49cafffda5b3
:setup: Replication with two masters.
:steps: 1. Add user to server and compare memory ruv and database ruv.
2. Modify description of user and compare memory ruv and database ruv.
3. Modrdn of user and compare memory ruv and database ruv.
4. Delete user and compare memory ruv and database ruv.
:expectedresults:
1. For add user, the memory ruv and database ruv should be the same.
2. For modify operation, the memory ruv and database ruv should be the same.
3. For modrdn operation, the memory ruv and database ruv should be the same.
4. For delete operation, the memory ruv and database ruv should be the same.
"""

log.info('Adding user: {} to master1'.format(TEST_ENTRY_NAME))
users = UserAccounts(topo.ms['master1'], DEFAULT_SUFFIX)
tuser = users.create(properties=USER_PROPERTIES)
> _compare_memoryruv_and_databaseruv(topo, 'add')

suites/replication/ruvstore_test.py:139:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topo = <lib389.topologies.TopologyMain object at 0x7f4d312df250>
operation_type = 'add'

def _compare_memoryruv_and_databaseruv(topo, operation_type):
"""Compare the memoryruv and databaseruv for ldap operations"""

log.info('Checking memory ruv for ldap: {} operation'.format(operation_type))
replicas = Replicas(topo.ms['master1'])
replica = replicas.list()[0]
memory_ruv = replica.get_attr_val_utf8('nsds50ruv')

log.info('Checking database ruv for ldap: {} operation'.format(operation_type))
> entry = replicas.get_ruv_entry(DEFAULT_SUFFIX)
E AttributeError: 'Replicas' object has no attribute 'get_ruv_entry'

suites/replication/ruvstore_test.py:81: AttributeError
-------------------------------Captured log call--------------------------------
INFO  tests.suites.replication.ruvstore_test:ruvstore_test.py:136 Adding user: rep2lusr to master1 INFO  tests.suites.replication.ruvstore_test:ruvstore_test.py:75 Checking memory ruv for ldap: add operation INFO  tests.suites.replication.ruvstore_test:ruvstore_test.py:80 Checking database ruv for ldap: add operation
XPassed suites/acl/syntax_test.py::test_aci_invalid_syntax_fail[test_Use_double_equal_instead_of_equal_in_the_targetattr] 0.18
No log output captured.
XPassed suites/acl/syntax_test.py::test_aci_invalid_syntax_fail[test_Use_double_equal_instead_of_equal_in_the_targetfilter] 0.03
No log output captured.
XPassed suites/replication/replica_config_test.py::test_agmt_num_add[nsds5ReplicaTimeout--1-9223372036854775807-9999999999999999999999999999999999999999999999999999999999999999999-invalid-6] 0.23
No log output captured.
XPassed suites/replication/replica_config_test.py::test_agmt_num_add[nsds5ReplicaBusyWaitTime--1-9223372036854775807-9999999999999999999999999999999999999999999999999999999999999999999-invalid-6] 0.19
No log output captured.
XPassed suites/replication/replica_config_test.py::test_agmt_num_add[nsds5ReplicaSessionPauseTime--1-9223372036854775807-9999999999999999999999999999999999999999999999999999999999999999999-invalid-6] 0.22
No log output captured.
XPassed suites/replication/replica_config_test.py::test_agmt_num_add[nsds5ReplicaFlowControlWindow--1-9223372036854775807-9999999999999999999999999999999999999999999999999999999999999999999-invalid-6] 0.21
No log output captured.
XPassed suites/replication/replica_config_test.py::test_agmt_num_add[nsds5ReplicaFlowControlPause--1-9223372036854775807-9999999999999999999999999999999999999999999999999999999999999999999-invalid-6] 0.20
No log output captured.
XPassed suites/replication/replica_config_test.py::test_agmt_num_add[nsds5ReplicaProtocolTimeout--1-9223372036854775807-9999999999999999999999999999999999999999999999999999999999999999999-invalid-6] 0.23
No log output captured.
Skipped suites/auth_token/basic_auth_test.py::test_ldap_auth_token_config::setup 0.00
('suites/auth_token/basic_auth_test.py', 28, 'Skipped: Auth tokens are not available in older versions')
Skipped suites/auth_token/basic_auth_test.py::test_ldap_auth_token_nsuser::setup 0.00
('suites/auth_token/basic_auth_test.py', 75, 'Skipped: Auth tokens are not available in older versions')
Skipped suites/auth_token/basic_auth_test.py::test_ldap_auth_token_disabled::setup 0.00
('suites/auth_token/basic_auth_test.py', 144, 'Skipped: Auth tokens are not available in older versions')
Skipped suites/auth_token/basic_auth_test.py::test_ldap_auth_token_directory_manager::setup 0.00
('suites/auth_token/basic_auth_test.py', 194, 'Skipped: Auth tokens are not available in older versions')
Skipped suites/auth_token/basic_auth_test.py::test_ldap_auth_token_anonymous::setup 0.00
('suites/auth_token/basic_auth_test.py', 217, 'Skipped: Auth tokens are not available in older versions')
Skipped suites/config/regression_test.py::test_set_cachememsize_to_custom_value::setup 0.00
('suites/config/regression_test.py', 34, 'Skipped: available memory is too low')
Skipped suites/entryuuid/basic_test.py::test_entryuuid_indexed_import_and_search::setup 0.00
('suites/entryuuid/basic_test.py', 73, 'Skipped: Entryuuid is not available in older versions')
Skipped suites/entryuuid/basic_test.py::test_entryuuid_unindexed_import_and_search::setup 0.00
('suites/entryuuid/basic_test.py', 113, 'Skipped: Entryuuid is not available in older versions')
Skipped suites/entryuuid/basic_test.py::test_entryuuid_generation_on_add::setup 0.00
('suites/entryuuid/basic_test.py', 155, 'Skipped: Entryuuid is not available in older versions')
Skipped suites/entryuuid/basic_test.py::test_entryuuid_fixup_task::setup 0.00
('suites/entryuuid/basic_test.py', 179, 'Skipped: Entryuuid is not available in older versions')
Skipped suites/healthcheck/health_config_test.py::test_healthcheck_logging_format_should_be_revised::setup 0.00
('suites/healthcheck/health_config_test.py', 91, 'Skipped: These tests need to use python installer')
Skipped suites/healthcheck/health_config_test.py::test_healthcheck_RI_plugin_is_misconfigured::setup 0.00
('suites/healthcheck/health_config_test.py', 134, 'Skipped: These tests need to use python installer')
Skipped suites/healthcheck/health_config_test.py::test_healthcheck_RI_plugin_missing_indexes::setup 0.00
('suites/healthcheck/health_config_test.py', 183, 'Skipped: These tests need to use python installer')
Skipped suites/healthcheck/health_config_test.py::test_healthcheck_virtual_attr_incorrectly_indexed::setup 0.00
('suites/healthcheck/health_config_test.py', 235, 'Skipped: These tests need to use python installer')
Skipped suites/healthcheck/health_config_test.py::test_healthcheck_low_disk_space::setup 0.00
('suites/healthcheck/health_config_test.py', 295, 'Skipped: These tests need to use python installer')
Skipped suites/healthcheck/health_config_test.py::test_healthcheck_notes_unindexed_search::setup 0.00
('suites/healthcheck/health_config_test.py', 340, 'Skipped: These tests need to use python installer')
Skipped suites/healthcheck/health_config_test.py::test_healthcheck_notes_unknown_attribute::setup 0.00
('suites/healthcheck/health_config_test.py', 389, 'Skipped: These tests need to use python installer')
Skipped suites/healthcheck/health_repl_test.py::test_healthcheck_replication_replica_not_reachable::setup 0.00
('suites/healthcheck/health_repl_test.py', 80, 'Skipped: These tests need to use python installer')
Skipped suites/healthcheck/health_repl_test.py::test_healthcheck_changelog_trimming_not_configured::setup 0.00
('suites/healthcheck/health_repl_test.py', 132, 'Skipped: These tests need to use python installer')
Skipped suites/healthcheck/health_repl_test.py::test_healthcheck_replication_presence_of_conflict_entries::setup 0.00
('suites/healthcheck/health_repl_test.py', 179, 'Skipped: These tests need to use python installer')
Skipped suites/healthcheck/health_repl_test.py::test_healthcheck_replication_out_of_sync_broken::setup 0.00
('suites/healthcheck/health_repl_test.py', 224, 'Skipped: These tests need to use python installer')
Skipped suites/healthcheck/health_security_test.py::test_healthcheck_insecure_pwd_hash_configured::setup 0.00
('suites/healthcheck/health_security_test.py', 86, 'Skipped: These tests can only be run with python installer and disabled ASAN')
Skipped suites/healthcheck/health_security_test.py::test_healthcheck_min_allowed_tls_version_too_low::setup 0.00
('suites/healthcheck/health_security_test.py', 135, 'Skipped: These tests can only be run with python installer and disabled ASAN')
Skipped suites/healthcheck/health_security_test.py::test_healthcheck_resolvconf_bad_file_perm::setup 0.00
('suites/healthcheck/health_security_test.py', 194, 'Skipped: These tests can only be run with python installer and disabled ASAN')
Skipped suites/healthcheck/health_security_test.py::test_healthcheck_pwdfile_bad_file_perm::setup 0.00
('suites/healthcheck/health_security_test.py', 237, 'Skipped: These tests can only be run with python installer and disabled ASAN')
Skipped suites/healthcheck/health_security_test.py::test_healthcheck_certif_expiring_within_30d::setup 0.00
('suites/healthcheck/health_security_test.py', 281, 'Skipped: These tests can only be run with python installer and disabled ASAN')
Skipped suites/healthcheck/health_security_test.py::test_healthcheck_certif_expired::setup 0.00
('suites/healthcheck/health_security_test.py', 319, 'Skipped: These tests can only be run with python installer and disabled ASAN')
Skipped suites/healthcheck/health_sync_test.py::test_healthcheck_replication_out_of_sync_not_broken::setup 0.00
('suites/healthcheck/health_sync_test.py', 70, 'Skipped: These tests need to use python installer')
Skipped suites/healthcheck/healthcheck_test.py::test_healthcheck_disabled_suffix::setup 0.00
('suites/healthcheck/healthcheck_test.py', 75, 'Skipped: These tests need to use python installer')
Skipped suites/healthcheck/healthcheck_test.py::test_healthcheck_standalone::setup 0.00
('suites/healthcheck/healthcheck_test.py', 103, 'Skipped: These tests need to use python installer')
Skipped suites/healthcheck/healthcheck_test.py::test_healthcheck_list_checks::setup 0.00
('suites/healthcheck/healthcheck_test.py', 127, 'Skipped: These tests need to use python installer')
Skipped suites/healthcheck/healthcheck_test.py::test_healthcheck_list_errors::setup 0.00
('suites/healthcheck/healthcheck_test.py', 167, 'Skipped: These tests need to use python installer')
Skipped suites/healthcheck/healthcheck_test.py::test_healthcheck_check_option::setup 0.00
('suites/healthcheck/healthcheck_test.py', 216, 'Skipped: These tests need to use python installer')
Skipped suites/healthcheck/healthcheck_test.py::test_healthcheck_standalone_tls::setup 0.00
('suites/healthcheck/healthcheck_test.py', 261, 'Skipped: These tests need to use python installer')
Skipped suites/healthcheck/healthcheck_test.py::test_healthcheck_replication::setup 0.00
('suites/healthcheck/healthcheck_test.py', 288, 'Skipped: These tests need to use python installer')
Skipped suites/healthcheck/healthcheck_test.py::test_healthcheck_replication_tls::setup 0.00
('suites/healthcheck/healthcheck_test.py', 324, 'Skipped: These tests need to use python installer')
Skipped suites/healthcheck/healthcheck_test.py::test_healthcheck_backend_missing_mapping_tree::setup 0.00
('suites/healthcheck/healthcheck_test.py', 361, 'Skipped: These tests need to use python installer')
Skipped suites/healthcheck/healthcheck_test.py::test_healthcheck_unable_to_query_backend::setup 0.00
('suites/healthcheck/healthcheck_test.py', 412, 'Skipped: These tests need to use python installer')
Skipped suites/healthcheck/healthcheck_test.py::test_healthcheck_database_not_initialized::setup 0.00
('suites/healthcheck/healthcheck_test.py', 462, 'Skipped: These tests need to use python installer')
Skipped suites/memory_leaks/MMR_double_free_test.py::test_MMR_double_free::setup 0.00
('suites/memory_leaks/MMR_double_free_test.py', 67, "Skipped: Don't run if ASAN is not enabled")
Skipped suites/memory_leaks/range_search_test.py::test_range_search::setup 0.00
('suites/memory_leaks/range_search_test.py', 24, "Skipped: Don't run if ASAN is not enabled")
Skipped suites/migration/export_data_test.py::test_export_data_from_source_host::setup 0.00
('suites/migration/export_data_test.py', 24, 'Skipped: This test is meant to execute in specific test environment')
Skipped suites/migration/import_data_test.py::test_import_data_to_target_host::setup 0.00
('suites/migration/import_data_test.py', 24, 'Skipped: This test is meant to execute in specific test environment')
Skipped suites/replication/changelog_test.py::test_cldump_files_removed::setup 0.00
('suites/replication/changelog_test.py', 235, 'Skipped: does not work for prefix builds')
Skipped suites/replication/changelog_test.py::test_changelog_compactdbinterval::setup 0.00
('suites/replication/changelog_test.py', 630, 'Skipped: changelog compaction is done by the backend itself, with id2entry as well, nsslapd-changelogcompactdb-interval is no longer supported')
Skipped suites/rewriters/adfilter_test.py::test_adfilter_objectSid::setup 0.00
('suites/rewriters/adfilter_test.py', 90, 'Skipped: It is missing samba python bindings')
Skipped tickets/ticket47462_test.py::test_ticket47462::setup 0.00
('tickets/ticket47462_test.py', 39, 'Skipped: Upgrade scripts are supported only on versions < 1.4.x')
Skipped tickets/ticket47815_test.py::test_ticket47815::setup 0.00
('tickets/ticket47815_test.py', 26, 'Skipped: Not implemented, or invalid by nsMemberOf')
Skipped tickets/ticket49121_test.py::test_ticket49121::setup 0.00
('tickets/ticket49121_test.py', 32, "Skipped: Don't run if ASAN is not enabled")
Passed suites/acl/acivattr_test.py::test_positive[(ENG_USER, ENG_MANAGER, REAL_EQ_ACI)] 0.04
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
Passed suites/acl/acivattr_test.py::test_positive[(ENG_USER, ENG_MANAGER, REAL_PRES_ACI)] 0.04
No log output captured.
Passed suites/acl/acivattr_test.py::test_positive[(ENG_USER, ENG_MANAGER, REAL_SUB_ACI)] 0.05
No log output captured.
Passed suites/acl/acivattr_test.py::test_positive[(ENG_USER, ENG_MANAGER, ROLE_PRES_ACI)] 0.04
No log output captured.
Passed suites/acl/acivattr_test.py::test_positive[(ENG_USER, ENG_MANAGER, ROLE_SUB_ACI)] 0.04
No log output captured.
Passed suites/acl/acivattr_test.py::test_positive[(ENG_USER, ENG_MANAGER, COS_EQ_ACI)] 0.05
No log output captured.
Passed suites/acl/acivattr_test.py::test_positive[(ENG_USER, ENG_MANAGER, COS_PRES_ACI)] 0.04
No log output captured.
Passed suites/acl/acivattr_test.py::test_positive[(ENG_USER, ENG_MANAGER, COS_SUB_ACI)] 0.05
No log output captured.
Passed suites/acl/acivattr_test.py::test_positive[(ENG_USER, ENG_MANAGER, LDAPURL_ACI)] 0.05
No log output captured.
Passed suites/acl/acivattr_test.py::test_negative[(ENG_USER, SALES_MANAGER, REAL_EQ_ACI)] 0.32
No log output captured.
Passed suites/acl/acivattr_test.py::test_negative[(ENG_USER, SALES_OU, REAL_PRES_ACI)] 0.05
No log output captured.
Passed suites/acl/acivattr_test.py::test_negative[(ENG_USER, SALES_MANAGER, REAL_SUB_ACI)] 0.05
No log output captured.
Passed suites/acl/acivattr_test.py::test_negative[(ENG_USER, SALES_MANAGER, ROLE_EQ_ACI)] 0.05
No log output captured.
Passed suites/acl/acivattr_test.py::test_negative[(ENG_USER, SALES_MANAGER, ROLE_PRES_ACI)] 0.05
No log output captured.
Passed suites/acl/acivattr_test.py::test_negative[(ENG_USER, SALES_MANAGER, ROLE_SUB_ACI)] 0.05
No log output captured.
Passed suites/acl/acivattr_test.py::test_negative[(ENG_USER, SALES_MANAGER, COS_EQ_ACI)] 0.06
No log output captured.
Passed suites/acl/acivattr_test.py::test_negative[(ENG_USER, SALES_MANAGER, COS_PRES_ACI)] 0.05
No log output captured.
Passed suites/acl/acivattr_test.py::test_negative[(ENG_USER, SALES_MANAGER, COS_SUB_ACI)] 0.05
No log output captured.
Passed suites/acl/acivattr_test.py::test_negative[(SALES_UESER, SALES_MANAGER, LDAPURL_ACI)] 0.12
No log output captured.
Passed suites/acl/acivattr_test.py::test_negative[(ENG_USER, ENG_MANAGER, ROLE_EQ_ACI)] 0.06
No log output captured.
Passed suites/acl/acl_deny_test.py::test_multi_deny_aci 11.92
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389:acl_deny_test.py:39 Add uid=tuser1,ou=People,dc=example,dc=com INFO  lib389:acl_deny_test.py:50 Add uid=tuser,ou=People,dc=example,dc=com
-------------------------------Captured log call--------------------------------
INFO  lib389:acl_deny_test.py:82 Pass 1 INFO  lib389:acl_deny_test.py:85 Testing two searches behave the same... INFO  lib389:acl_deny_test.py:128 Testing search does not return any entries... INFO  lib389:acl_deny_test.py:82 Pass 2 INFO  lib389:acl_deny_test.py:85 Testing two searches behave the same... INFO  lib389:acl_deny_test.py:128 Testing search does not return any entries... INFO  lib389:acl_deny_test.py:192 Test PASSED
Passed suites/acl/acl_test.py::test_aci_attr_subtype_targetattr[lang-ja] 0.01
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39001, 'ldap-secureport': 63701, 'server-id': 'master1', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39002, 'ldap-secureport': 63702, 'server-id': 'master2', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:148 Creating replication topology. INFO  lib389.topologies:topologies.py:162 Joining master master2 to master1 ... INFO  lib389.replica:replica.py:2101 SUCCESS: bootstrap to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 completed INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is was created INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is was created INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is NOT working (expect ebd85cf0-6595-4b05-aa21-4e302217b5bf / got description=None) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is working INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is NOT working (expect eecd5b04-ece3-4499-b40a-9b20117faa1c / got description=ebd85cf0-6595-4b05-aa21-4e302217b5bf) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to 
ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is working INFO  lib389.replica:replica.py:2170 SUCCESS: joined master from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 INFO  lib389.topologies:topologies.py:170 Ensuring master master1 to master2 ... INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 already exists INFO  lib389.topologies:topologies.py:170 Ensuring master master2 to master1 ... INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 already exists INFO  tests.suites.acl.acl_test:acl_test.py:77 ========Executing test with 'lang-ja' subtype======== INFO  tests.suites.acl.acl_test:acl_test.py:78 Add a target attribute INFO  tests.suites.acl.acl_test:acl_test.py:81 Add a user attribute INFO  tests.suites.acl.acl_test:acl_test.py:89 Add an ACI with attribute subtype
-------------------------------Captured log call--------------------------------
INFO  tests.suites.acl.acl_test:acl_test.py:119 Search for the added attribute INFO  tests.suites.acl.acl_test:acl_test.py:126 The added attribute was found
Passed suites/acl/acl_test.py::test_aci_attr_subtype_targetattr[binary] 0.01
-------------------------------Captured log setup-------------------------------
INFO  tests.suites.acl.acl_test:acl_test.py:77 ========Executing test with 'binary' subtype======== INFO  tests.suites.acl.acl_test:acl_test.py:78 Add a target attribute INFO  tests.suites.acl.acl_test:acl_test.py:81 Add a user attribute INFO  tests.suites.acl.acl_test:acl_test.py:89 Add an ACI with attribute subtype
-------------------------------Captured log call--------------------------------
INFO  tests.suites.acl.acl_test:acl_test.py:119 Search for the added attribute INFO  tests.suites.acl.acl_test:acl_test.py:126 The added attribute was found
Passed suites/acl/acl_test.py::test_aci_attr_subtype_targetattr[phonetic] 0.00
-------------------------------Captured log setup-------------------------------
INFO  tests.suites.acl.acl_test:acl_test.py:77 ========Executing test with 'phonetic' subtype======== INFO  tests.suites.acl.acl_test:acl_test.py:78 Add a target attribute INFO  tests.suites.acl.acl_test:acl_test.py:81 Add a user attribute INFO  tests.suites.acl.acl_test:acl_test.py:89 Add an ACI with attribute subtype
-------------------------------Captured log call--------------------------------
INFO  tests.suites.acl.acl_test:acl_test.py:119 Search for the added attribute INFO  tests.suites.acl.acl_test:acl_test.py:126 The added attribute was found
Passed suites/acl/acl_test.py::test_mode_default_add_deny 0.03
-------------------------------Captured log setup-------------------------------
INFO  lib389:acl_test.py:234 ######## INITIALIZATION ######## INFO  lib389:acl_test.py:237 Add uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:247 Add cn=staged user,dc=example,dc=com INFO  lib389:acl_test.py:251 Add cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:255 Add cn=excepts,cn=accounts,dc=example,dc=com
-------------------------------Captured log call--------------------------------
INFO  lib389:acl_test.py:287 ######## mode moddn_aci : ADD (should fail) ######## INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:295 Try to add cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:304 Exception (expected): INSUFFICIENT_ACCESS
Passed suites/acl/acl_test.py::test_mode_default_delete_deny 0.02
-------------------------------Captured log call--------------------------------
INFO  lib389:acl_test.py:322 ######## DELETE (should fail) ######## INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:329 Try to delete cn=staged user,dc=example,dc=com INFO  lib389:acl_test.py:334 Exception (expected): INSUFFICIENT_ACCESS
Passed suites/acl/acl_test.py::test_moddn_staging_prod[0-cn=staged user,dc=example,dc=com-cn=accounts,dc=example,dc=com-False] 0.35
-------------------------------Captured log call--------------------------------
INFO  lib389:acl_test.py:369 ######## MOVE staging -> Prod (0) ######## INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:381 Try to MODDN uid=new_account0,cn=staged user,dc=example,dc=com -> uid=new_account0,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:388 Exception (expected): INSUFFICIENT_ACCESS INFO  lib389:acl_test.py:392 ######## MOVE to and from equality filter ######## INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:399 Try to MODDN uid=new_account0,cn=staged user,dc=example,dc=com -> uid=new_account0,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com
Passed suites/acl/acl_test.py::test_moddn_staging_prod[1-cn=staged user,dc=example,dc=com-cn=accounts,dc=example,dc=com-False] 0.34
-------------------------------Captured log call--------------------------------
INFO  lib389:acl_test.py:369 ######## MOVE staging -> Prod (1) ######## INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:381 Try to MODDN uid=new_account1,cn=staged user,dc=example,dc=com -> uid=new_account1,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:388 Exception (expected): INSUFFICIENT_ACCESS INFO  lib389:acl_test.py:392 ######## MOVE to and from equality filter ######## INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:399 Try to MODDN uid=new_account1,cn=staged user,dc=example,dc=com -> uid=new_account1,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com
Passed suites/acl/acl_test.py::test_moddn_staging_prod[2-cn=staged user,dc=example,dc=com-cn=bad*,dc=example,dc=com-True] 0.33
-------------------------------Captured log call--------------------------------
INFO  lib389:acl_test.py:369 ######## MOVE staging -> Prod (2) ######## INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:381 Try to MODDN uid=new_account2,cn=staged user,dc=example,dc=com -> uid=new_account2,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:388 Exception (expected): INSUFFICIENT_ACCESS INFO  lib389:acl_test.py:392 ######## MOVE to and from equality filter ######## INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:399 Try to MODDN uid=new_account2,cn=staged user,dc=example,dc=com -> uid=new_account2,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:402 Exception (expected): INSUFFICIENT_ACCESS INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com
Passed suites/acl/acl_test.py::test_moddn_staging_prod[3-cn=st*,dc=example,dc=com-cn=accounts,dc=example,dc=com-False] 0.46
-------------------------------Captured log call--------------------------------
INFO  lib389:acl_test.py:369 ######## MOVE staging -> Prod (3) ######## INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:381 Try to MODDN uid=new_account3,cn=staged user,dc=example,dc=com -> uid=new_account3,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:388 Exception (expected): INSUFFICIENT_ACCESS INFO  lib389:acl_test.py:392 ######## MOVE to and from equality filter ######## INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:399 Try to MODDN uid=new_account3,cn=staged user,dc=example,dc=com -> uid=new_account3,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com
Passed suites/acl/acl_test.py::test_moddn_staging_prod[4-cn=bad*,dc=example,dc=com-cn=accounts,dc=example,dc=com-True] 0.33
-------------------------------Captured log call--------------------------------
INFO  lib389:acl_test.py:369 ######## MOVE staging -> Prod (4) ######## INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:381 Try to MODDN uid=new_account4,cn=staged user,dc=example,dc=com -> uid=new_account4,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:388 Exception (expected): INSUFFICIENT_ACCESS INFO  lib389:acl_test.py:392 ######## MOVE to and from equality filter ######## INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:399 Try to MODDN uid=new_account4,cn=staged user,dc=example,dc=com -> uid=new_account4,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:402 Exception (expected): INSUFFICIENT_ACCESS INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com
Passed suites/acl/acl_test.py::test_moddn_staging_prod[5-cn=st*,dc=example,dc=com-cn=ac*,dc=example,dc=com-False] 0.33
-------------------------------Captured log call--------------------------------
INFO  lib389:acl_test.py:369 ######## MOVE staging -> Prod (5) ######## INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:381 Try to MODDN uid=new_account5,cn=staged user,dc=example,dc=com -> uid=new_account5,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:388 Exception (expected): INSUFFICIENT_ACCESS INFO  lib389:acl_test.py:392 ######## MOVE to and from equality filter ######## INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:399 Try to MODDN uid=new_account5,cn=staged user,dc=example,dc=com -> uid=new_account5,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com
Passed suites/acl/acl_test.py::test_moddn_staging_prod[6-None-cn=ac*,dc=example,dc=com-False] 0.32
-------------------------------Captured log call--------------------------------
INFO  lib389:acl_test.py:369 ######## MOVE staging -> Prod (6) ######## INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:381 Try to MODDN uid=new_account6,cn=staged user,dc=example,dc=com -> uid=new_account6,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:388 Exception (expected): INSUFFICIENT_ACCESS INFO  lib389:acl_test.py:392 ######## MOVE to and from equality filter ######## INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:399 Try to MODDN uid=new_account6,cn=staged user,dc=example,dc=com -> uid=new_account6,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com
Passed suites/acl/acl_test.py::test_moddn_staging_prod[7-cn=st*,dc=example,dc=com-None-False] 0.37
-------------------------------Captured log call--------------------------------
INFO  lib389:acl_test.py:369 ######## MOVE staging -> Prod (7) ######## INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:381 Try to MODDN uid=new_account7,cn=staged user,dc=example,dc=com -> uid=new_account7,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:388 Exception (expected): INSUFFICIENT_ACCESS INFO  lib389:acl_test.py:392 ######## MOVE to and from equality filter ######## INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:399 Try to MODDN uid=new_account7,cn=staged user,dc=example,dc=com -> uid=new_account7,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com
Passed suites/acl/acl_test.py::test_moddn_staging_prod[8-None-None-False] 0.33
-------------------------------Captured log call--------------------------------
INFO  lib389:acl_test.py:369 ######## MOVE staging -> Prod (8) ######## INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:381 Try to MODDN uid=new_account8,cn=staged user,dc=example,dc=com -> uid=new_account8,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:388 Exception (expected): INSUFFICIENT_ACCESS INFO  lib389:acl_test.py:392 ######## MOVE to and from equality filter ######## INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:399 Try to MODDN uid=new_account8,cn=staged user,dc=example,dc=com -> uid=new_account8,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com
Passed suites/acl/acl_test.py::test_moddn_staging_prod_9 1.39
-------------------------------Captured log call--------------------------------
INFO  lib389:acl_test.py:446 ######## MOVE staging -> Prod (9) ######## INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:459 Try to MODDN uid=new_account9,cn=staged user,dc=example,dc=com -> uid=new_account9,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:466 Exception (expected): INSUFFICIENT_ACCESS INFO  lib389:acl_test.py:472 Disable the moddn right INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:477 ######## MOVE to and from equality filter ######## INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:485 Try to MODDN uid=new_account9,cn=staged user,dc=example,dc=com -> uid=new_account9,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:492 Exception (expected): INSUFFICIENT_ACCESS INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:514 Try to MODDN uid=new_account9,cn=staged user,dc=example,dc=com -> uid=new_account9,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:525 Enable the moddn right INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:529 ######## MOVE staging -> Prod (10) ######## INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:541 Try to MODDN uid=new_account10,cn=staged user,dc=example,dc=com -> uid=new_account10,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:548 Exception (expected): INSUFFICIENT_ACCESS INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:140 Bind as 
uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:565 Try to MODDN uid=new_account10,cn=staged user,dc=example,dc=com -> uid=new_account10,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:572 Exception (expected): INSUFFICIENT_ACCESS INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:581 ######## MOVE to and from equality filter ######## INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:587 Try to MODDN uid=new_account10,cn=staged user,dc=example,dc=com -> uid=new_account10,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com
Passed suites/acl/acl_test.py::test_moddn_prod_staging 0.61
-------------------------------Captured log call--------------------------------
INFO  lib389:acl_test.py:616 ######## MOVE staging -> Prod (11) ######## INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:629 Try to MODDN uid=new_account11,cn=staged user,dc=example,dc=com -> uid=new_account11,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:636 Exception (expected): INSUFFICIENT_ACCESS INFO  lib389:acl_test.py:640 ######## MOVE to and from equality filter ######## INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:646 Try to MODDN uid=new_account11,cn=staged user,dc=example,dc=com -> uid=new_account11,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:661 Try to move back MODDN uid=new_account11,cn=accounts,dc=example,dc=com -> uid=new_account11,cn=staged user,dc=example,dc=com INFO  lib389:acl_test.py:668 Exception (expected): INSUFFICIENT_ACCESS INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com
Passed suites/acl/acl_test.py::test_check_repl_M2_to_M1 1.11
-------------------------------Captured log call--------------------------------
INFO  lib389:acl_test.py:698 Bind as cn=Directory Manager (M2) INFO  lib389:acl_test.py:718 Update (M2) uid=new_account12,cn=staged user,dc=example,dc=com (description) INFO  lib389:acl_test.py:731 Update uid=new_account12,cn=staged user,dc=example,dc=com (description) replicated on M1
Passed suites/acl/acl_test.py::test_moddn_staging_prod_except 0.34
-------------------------------Captured log call--------------------------------
INFO  lib389:acl_test.py:756 ######## MOVE staging -> Prod (13) ######## INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:768 Try to MODDN uid=new_account13,cn=staged user,dc=example,dc=com -> uid=new_account13,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:775 Exception (expected): INSUFFICIENT_ACCESS INFO  lib389:acl_test.py:779 ######## MOVE to and from equality filter ######## INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:161 Add a DENY aci under cn=excepts,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:786 Try to MODDN uid=new_account13,cn=staged user,dc=example,dc=com -> uid=new_account13,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:792 ######## MOVE staging -> Prod/Except (14) ######## INFO  lib389:acl_test.py:798 Try to MODDN uid=new_account14,cn=staged user,dc=example,dc=com -> uid=new_account14,cn=excepts,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:805 Exception (expected): INSUFFICIENT_ACCESS INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:161 Add a DENY aci under cn=excepts,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com
Passed suites/acl/acl_test.py::test_mode_default_ger_no_moddn 0.00
-------------------------------Captured log call--------------------------------
INFO  lib389:acl_test.py:832 ######## mode moddn_aci : GER no moddn ######## INFO  lib389:acl_test.py:843 dn: cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:843 dn: cn=excepts,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:843 dn: uid=new_account0,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:843 dn: uid=new_account1,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:843 dn: uid=new_account3,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:843 dn: uid=new_account5,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:843 dn: uid=new_account6,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:843 dn: uid=new_account7,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:843 dn: uid=new_account8,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:843 dn: uid=new_account9,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:843 dn: uid=new_account10,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:843 dn: uid=new_account11,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:843 dn: uid=new_account13,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:846 ######## entryLevelRights: b'v'
Passed suites/acl/acl_test.py::test_mode_default_ger_with_moddn 0.29
-------------------------------Captured log call--------------------------------
INFO  lib389:acl_test.py:870 ######## mode moddn_aci: GER with moddn ######## INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:888 dn: cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:888 dn: cn=excepts,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:888 dn: uid=new_account0,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:888 dn: uid=new_account1,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:888 dn: uid=new_account3,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:888 dn: uid=new_account5,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:888 dn: uid=new_account6,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:888 dn: uid=new_account7,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:888 dn: uid=new_account8,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:888 dn: uid=new_account9,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:888 dn: uid=new_account10,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:888 dn: uid=new_account11,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:888 dn: uid=new_account13,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:891 ######## entryLevelRights: b'vn' INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com
Passed suites/acl/acl_test.py::test_mode_legacy_ger_no_moddn1 0.12
-------------------------------Captured log call--------------------------------
INFO  lib389:acl_test.py:921 ######## Disable the moddn aci mod ######## INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:925 ######## mode legacy 1: GER no moddn ######## INFO  lib389:acl_test.py:935 dn: cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:935 dn: cn=excepts,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:935 dn: uid=new_account0,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:935 dn: uid=new_account1,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:935 dn: uid=new_account3,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:935 dn: uid=new_account5,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:935 dn: uid=new_account6,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:935 dn: uid=new_account7,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:935 dn: uid=new_account8,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:935 dn: uid=new_account9,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:935 dn: uid=new_account10,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:935 dn: uid=new_account11,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:935 dn: uid=new_account13,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:938 ######## entryLevelRights: b'v'
Passed suites/acl/acl_test.py::test_mode_legacy_ger_no_moddn2 0.19
-------------------------------Captured log call--------------------------------
INFO  lib389:acl_test.py:964 ######## Disable the moddn aci mod ######## INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:968 ######## mode legacy 2: GER no moddn ######## INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:985 dn: cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:985 dn: cn=excepts,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:985 dn: uid=new_account0,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:985 dn: uid=new_account1,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:985 dn: uid=new_account3,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:985 dn: uid=new_account5,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:985 dn: uid=new_account6,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:985 dn: uid=new_account7,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:985 dn: uid=new_account8,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:985 dn: uid=new_account9,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:985 dn: uid=new_account10,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:985 dn: uid=new_account11,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:985 dn: uid=new_account13,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:988 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com
Passed suites/acl/acl_test.py::test_mode_legacy_ger_with_moddn 0.26
-------------------------------Captured log call--------------------------------
INFO  lib389:acl_test.py:1024 ######## Disable the moddn aci mod ######## INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:1028 ######## mode legacy : GER with moddn ######## INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager INFO  lib389:acl_test.py:140 Bind as uid=bind_entry,dc=example,dc=com INFO  lib389:acl_test.py:1050 dn: cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:1050 dn: cn=excepts,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:1050 dn: uid=new_account0,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:1050 dn: uid=new_account1,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:1050 dn: uid=new_account3,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:1050 dn: uid=new_account5,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:1050 dn: uid=new_account6,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:1050 dn: uid=new_account7,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:1050 dn: uid=new_account8,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:1050 dn: uid=new_account9,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:1050 dn: uid=new_account10,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:1050 dn: uid=new_account11,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:1050 dn: uid=new_account13,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:1053 ######## entryLevelRights: b'vn' INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager
Passed suites/acl/acl_test.py::test_rdn_write_get_ger 0.01
-------------------------------Captured log setup-------------------------------
INFO  lib389:acl_test.py:1064 ######## Add entry tuser ########
-------------------------------Captured log call--------------------------------
INFO  lib389:acl_test.py:1088 ######## GER rights for anonymous ######## INFO  lib389:acl_test.py:1098 dn: dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: cn=Directory Administrators,dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: ou=Groups,dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: ou=People,dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: ou=Special Users,dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: cn=Accounting Managers,ou=Groups,dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: cn=HR Managers,ou=Groups,dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: cn=QA Managers,ou=Groups,dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: cn=PD Managers,ou=Groups,dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: cn=replication_managers,dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: ou=Services,dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: cn=ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:63701,ou=Services,dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: cn=ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:63702,ou=Services,dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: uid=bind_entry,dc=example,dc=com INFO  
lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: cn=staged user,dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: cn=excepts,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: uid=new_account0,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: uid=new_account1,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: uid=new_account2,cn=staged user,dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: uid=new_account3,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: uid=new_account4,cn=staged user,dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: uid=new_account5,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: uid=new_account6,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: uid=new_account7,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: uid=new_account8,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: uid=new_account9,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: uid=new_account10,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:1100 
######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: uid=new_account11,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: uid=new_account12,cn=staged user,dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: uid=new_account13,cn=accounts,dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: uid=new_account14,cn=staged user,dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: uid=new_account15,cn=staged user,dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: uid=new_account16,cn=staged user,dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: uid=new_account17,cn=staged user,dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: uid=new_account18,cn=staged user,dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: uid=new_account19,cn=staged user,dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v' INFO  lib389:acl_test.py:1098 dn: cn=tuser,dc=example,dc=com INFO  lib389:acl_test.py:1100 ######## entryLevelRights: b'v'
Passed suites/acl/acl_test.py::test_rdn_write_modrdn_anonymous 0.03
-------------------------------Captured log call--------------------------------
INFO  lib389:acl_test.py:1127 dn: INFO  lib389:acl_test.py:1129 ######## 'objectClass': [b'top'] INFO  lib389:acl_test.py:1129 ######## 'defaultnamingcontext': [b'dc=example,dc=com'] INFO  lib389:acl_test.py:1129 ######## 'dataversion': [b'020201022231755'] INFO  lib389:acl_test.py:1129 ######## 'netscapemdsuffix': [b'cn=ldap://dc=ci-vm-10-0-139-92,dc=hosted,dc=upshift,dc=rdu2,dc=redhat,dc=com:39001'] INFO  lib389:acl_test.py:1134 Exception (expected): INSUFFICIENT_ACCESS INFO  lib389:acl_test.py:1141 The entry was not renamed (expected) INFO  lib389:acl_test.py:134 Bind as cn=Directory Manager
Passed suites/acl/deladd_test.py::test_allow_delete_access_to_groupdn 0.07
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
Passed suites/acl/deladd_test.py::test_allow_add_access_to_anyone 0.05
No log output captured.
Passed suites/acl/deladd_test.py::test_allow_delete_access_to_anyone 0.04
No log output captured.
Passed suites/acl/deladd_test.py::test_allow_delete_access_not_to_userdn 0.06
No log output captured.
Passed suites/acl/deladd_test.py::test_allow_delete_access_not_to_group 0.06
No log output captured.
Passed suites/acl/deladd_test.py::test_allow_add_access_to_parent 0.05
No log output captured.
Passed suites/acl/deladd_test.py::test_allow_delete_access_to_parent 0.05
No log output captured.
Passed suites/acl/deladd_test.py::test_allow_delete_access_to_dynamic_group 0.05
No log output captured.
Passed suites/acl/deladd_test.py::test_allow_delete_access_to_dynamic_group_uid 0.06
No log output captured.
Passed suites/acl/deladd_test.py::test_allow_delete_access_not_to_dynamic_group 0.06
No log output captured.
Passed suites/acl/enhanced_aci_modrnd_test.py::test_enhanced_aci_modrnd 0.02
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created. INFO  tests.suites.acl.enhanced_aci_modrnd_test:enhanced_aci_modrnd_test.py:32 Add a container: ou=test_ou_1,dc=example,dc=com INFO  tests.suites.acl.enhanced_aci_modrnd_test:enhanced_aci_modrnd_test.py:39 Add a container: ou=test_ou_2,dc=example,dc=com INFO  tests.suites.acl.enhanced_aci_modrnd_test:enhanced_aci_modrnd_test.py:46 Add a user: cn=test_user,ou=test_ou_1,dc=example,dc=com INFO  tests.suites.acl.enhanced_aci_modrnd_test:enhanced_aci_modrnd_test.py:60 Add an ACI 'allow (all)' by cn=test_user,ou=test_ou_1,dc=example,dc=com to the ou=test_ou_1,dc=example,dc=com INFO  tests.suites.acl.enhanced_aci_modrnd_test:enhanced_aci_modrnd_test.py:64 Add an ACI 'allow (all)' by cn=test_user,ou=test_ou_1,dc=example,dc=com to the ou=test_ou_2,dc=example,dc=com
-------------------------------Captured log call--------------------------------
INFO  tests.suites.acl.enhanced_aci_modrnd_test:enhanced_aci_modrnd_test.py:95 Bind as cn=test_user,ou=test_ou_1,dc=example,dc=com INFO  tests.suites.acl.enhanced_aci_modrnd_test:enhanced_aci_modrnd_test.py:99 User MODRDN operation from ou=test_ou_1,dc=example,dc=com to ou=test_ou_2,dc=example,dc=com INFO  tests.suites.acl.enhanced_aci_modrnd_test:enhanced_aci_modrnd_test.py:105 Check there is no user in ou=test_ou_1,dc=example,dc=com INFO  tests.suites.acl.enhanced_aci_modrnd_test:enhanced_aci_modrnd_test.py:111 Check there is our user in ou=test_ou_2,dc=example,dc=com
Passed suites/acl/globalgroup_part2_test.py::test_undefined_in_group_eval_five 0.07
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
Passed suites/acl/globalgroup_part2_test.py::test_undefined_in_group_eval_six 0.06
No log output captured.
Passed suites/acl/globalgroup_part2_test.py::test_undefined_in_group_eval_seven 0.03
No log output captured.
Passed suites/acl/globalgroup_part2_test.py::test_undefined_in_group_eval_eight 0.03
No log output captured.
Passed suites/acl/globalgroup_part2_test.py::test_undefined_in_group_eval_nine 0.03
No log output captured.
Passed suites/acl/globalgroup_part2_test.py::test_undefined_in_group_eval_ten 0.05
No log output captured.
Passed suites/acl/globalgroup_part2_test.py::test_undefined_in_group_eval_eleven 0.04
No log output captured.
Passed suites/acl/globalgroup_part2_test.py::test_undefined_in_group_eval_twelve 0.03
No log output captured.
Passed suites/acl/globalgroup_part2_test.py::test_undefined_in_group_eval_fourteen 0.07
No log output captured.
Passed suites/acl/globalgroup_part2_test.py::test_undefined_in_group_eval_fifteen 0.04
No log output captured.
Passed suites/acl/globalgroup_part2_test.py::test_undefined_in_group_eval_sixteen 0.03
No log output captured.
Passed suites/acl/globalgroup_part2_test.py::test_undefined_in_group_eval_seventeen 0.03
No log output captured.
Passed suites/acl/globalgroup_part2_test.py::test_undefined_in_group_eval_eighteen 0.03
No log output captured.
Passed suites/acl/globalgroup_test.py::test_caching_changes 0.05
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
Passed suites/acl/globalgroup_test.py::test_deny_group_member_all_rights_to_user 0.07
No log output captured.
Passed suites/acl/globalgroup_test.py::test_deny_group_member_all_rights_to_group_members 0.03
No log output captured.
Passed suites/acl/globalgroup_test.py::test_deeply_nested_groups_aci_denial 0.05
No log output captured.
Passed suites/acl/globalgroup_test.py::test_deeply_nested_groups_aci_denial_two 0.02
No log output captured.
Passed suites/acl/globalgroup_test.py::test_deeply_nested_groups_aci_allow 0.02
No log output captured.
Passed suites/acl/globalgroup_test.py::test_deeply_nested_groups_aci_allow_two 0.03
No log output captured.
Passed suites/acl/globalgroup_test.py::test_undefined_in_group_eval 0.03
No log output captured.
Passed suites/acl/globalgroup_test.py::test_undefined_in_group_eval_two 0.02
No log output captured.
Passed suites/acl/globalgroup_test.py::test_undefined_in_group_eval_three 0.02
No log output captured.
Passed suites/acl/globalgroup_test.py::test_undefined_in_group_eval_four 0.06
No log output captured.
Passed suites/acl/keywords_part2_test.py::test_access_from_certain_network_only_ip 4.12
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
Passed suites/acl/keywords_part2_test.py::test_connectin_from_an_unauthorized_network 0.13
No log output captured.
Passed suites/acl/keywords_part2_test.py::test_ip_keyword_test_noip_cannot 0.09
No log output captured.
Passed suites/acl/keywords_part2_test.py::test_user_can_access_the_data_at_any_time 0.08
No log output captured.
Passed suites/acl/keywords_part2_test.py::test_user_can_access_the_data_only_in_the_morning 0.10
No log output captured.
Passed suites/acl/keywords_part2_test.py::test_user_can_access_the_data_only_in_the_afternoon 0.07
No log output captured.
Passed suites/acl/keywords_part2_test.py::test_timeofday_keyword 1.14
No log output captured.
Passed suites/acl/keywords_part2_test.py::test_dayofweek_keyword_test_everyday_can_access 0.08
No log output captured.
Passed suites/acl/keywords_part2_test.py::test_dayofweek_keyword_today_can_access 0.07
No log output captured.
Passed suites/acl/keywords_part2_test.py::test_user_cannot_access_the_data_at_all 0.09
No log output captured.
Passed suites/acl/keywords_test.py::test_user_binds_with_a_password_and_can_access_the_data 0.06
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
Passed suites/acl/keywords_test.py::test_user_binds_with_a_bad_password_and_cannot_access_the_data 0.01
No log output captured.
Passed suites/acl/keywords_test.py::test_anonymous_user_cannot_access_the_data 0.04
No log output captured.
Passed suites/acl/keywords_test.py::test_authenticated_but_has_no_rigth_on_the_data 0.53
No log output captured.
Passed suites/acl/keywords_test.py::test_the_bind_client_is_accessing_the_directory 0.01
No log output captured.
Passed suites/acl/keywords_test.py::test_users_binds_with_a_password_and_can_access_the_data 0.02
No log output captured.
Passed suites/acl/keywords_test.py::test_user_binds_without_any_password_and_cannot_access_the_data 0.02
No log output captured.
Passed suites/acl/keywords_test.py::test_user_can_access_the_data_when_connecting_from_any_machine 0.04
No log output captured.
Passed suites/acl/keywords_test.py::test_user_can_access_the_data_when_connecting_from_internal_ds_network_only 0.04
No log output captured.
Passed suites/acl/keywords_test.py::test_user_can_access_the_data_when_connecting_from_some_network_only 0.04
No log output captured.
Passed suites/acl/keywords_test.py::test_from_an_unauthorized_network 0.04
No log output captured.
Passed suites/acl/keywords_test.py::test_user_cannot_access_the_data_when_connecting_from_an_unauthorized_network_2 0.07
No log output captured.
Passed suites/acl/keywords_test.py::test_user_cannot_access_the_data_if_not_from_a_certain_domain 0.06
No log output captured.
Passed suites/acl/keywords_test.py::test_dnsalias_keyword_test_nodns_cannot 0.31
No log output captured.
Passed suites/acl/keywords_test.py::test_user_can_access_from_ipv4_or_ipv6_address[127.0.0.1] 0.04
No log output captured.
Passed suites/acl/keywords_test.py::test_user_can_access_from_ipv4_or_ipv6_address[[::1]] 0.02
No log output captured.
Passed suites/acl/misc_test.py::test_accept_aci_in_addition_to_acl 0.38
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
Passed suites/acl/misc_test.py::test_more_then_40_acl_will_crash_slapd 0.28
-------------------------------Captured log setup-------------------------------
INFO  lib389:misc_test.py:65 Exception (expected): ALREADY_EXISTS
Passed suites/acl/misc_test.py::test_search_access_should_not_include_read_access 0.01
-------------------------------Captured log setup-------------------------------
INFO  lib389:misc_test.py:65 Exception (expected): ALREADY_EXISTS
Passed suites/acl/misc_test.py::test_only_allow_some_targetattr 0.05
-------------------------------Captured log setup-------------------------------
INFO  lib389:misc_test.py:65 Exception (expected): ALREADY_EXISTS
Passed suites/acl/misc_test.py::test_only_allow_some_targetattr_two 0.32
-------------------------------Captured log setup-------------------------------
INFO  lib389:misc_test.py:65 Exception (expected): ALREADY_EXISTS
Passed suites/acl/misc_test.py::test_memberurl_needs_to_be_normalized 0.12
-------------------------------Captured log setup-------------------------------
INFO  lib389:misc_test.py:65 Exception (expected): ALREADY_EXISTS
Passed suites/acl/misc_test.py::test_greater_than_200_acls_can_be_created 4.44
-------------------------------Captured log setup-------------------------------
INFO  lib389:misc_test.py:65 Exception (expected): ALREADY_EXISTS
Passed suites/acl/misc_test.py::test_server_bahaves_properly_with_very_long_attribute_names 0.06
-------------------------------Captured log setup-------------------------------
INFO  lib389:misc_test.py:65 Exception (expected): ALREADY_EXISTS
Passed suites/acl/misc_test.py::test_do_bind_as_201_distinct_users 150.04
-------------------------------Captured log setup-------------------------------
INFO  lib389:misc_test.py:65 Exception (expected): ALREADY_EXISTS
Passed suites/acl/modify_test.py::test_allow_write_access_to_targetattr_with_a_single_attribute 0.09
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
Passed suites/acl/modify_test.py::test_allow_write_access_to_targetattr_with_multiple_attibutes 0.06
No log output captured.
Passed suites/acl/modify_test.py::test_allow_write_access_to_userdn_all 0.11
No log output captured.
Passed suites/acl/modify_test.py::test_allow_write_access_to_userdn_with_wildcards_in_dn 0.05
No log output captured.
Passed suites/acl/modify_test.py::test_allow_write_access_to_userdn_with_multiple_dns 0.43
No log output captured.
Passed suites/acl/modify_test.py::test_allow_write_access_to_target_with_wildcards 0.19
No log output captured.
Passed suites/acl/modify_test.py::test_allow_write_access_to_userdnattr 0.09
No log output captured.
Passed suites/acl/modify_test.py::test_allow_selfwrite_access_to_anyone 0.07
No log output captured.
Passed suites/acl/modify_test.py::test_uniquemember_should_also_be_the_owner 0.21
No log output captured.
Passed suites/acl/modify_test.py::test_aci_with_both_allow_and_deny 0.15
No log output captured.
Passed suites/acl/modify_test.py::test_allow_owner_to_modify_entry 0.09
No log output captured.
Passed suites/acl/modrdn_test.py::test_allow_write_privilege_to_anyone 0.53
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
Passed suites/acl/modrdn_test.py::test_allow_write_privilege_to_dynamic_group_with_scope_set_to_base_in_ldap_url 0.02
No log output captured.
Passed suites/acl/modrdn_test.py::test_write_access_to_naming_atributes 0.04
No log output captured.
Passed suites/acl/modrdn_test.py::test_write_access_to_naming_atributes_two 0.09
No log output captured.
Passed suites/acl/modrdn_test.py::test_access_aci_list_contains_any_deny_rule 0.19
No log output captured.
Passed suites/acl/modrdn_test.py::test_renaming_target_entry 0.06
No log output captured.
Passed suites/acl/repeated_ldap_add_test.py::test_repeated_ldap_add 32.64
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
------------------------------Captured stdout call------------------------------
uid=buser123,ou=BOU,dc=example,dc=com inactivated.
-------------------------------Captured log call--------------------------------
INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:184 Testing Bug 1347760 - Information disclosure via repeated use of LDAP ADD operation, etc. INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:186 Disabling accesslog logbuffering INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:189 Bind as {cn=Directory Manager,password} INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:192 Adding ou=BOU a bind user belongs to. INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:197 Adding a bind user. INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:204 Adding a test user. INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:211 Deleting aci in dc=example,dc=com. INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:214 While binding as DM, acquire an access log path and instance dir INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:220 Bind case 1. the bind user has no rights to read the entry itself, bind should be successful. INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:221 Bind as {uid=buser123,ou=BOU,dc=example,dc=com,buser123} who has no access rights. INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:229 Access log path: /var/log/dirsrv/slapd-standalone1/access INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:231 Bind case 2-1. the bind user does not exist, bind should fail with error INVALID_CREDENTIALS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:233 Bind as {uid=bogus,dc=example,dc=com,bogus} who does not exist. 
INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:237 Exception (expected): INVALID_CREDENTIALS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:238 Desc Invalid credentials INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:246 Cause found - [22/Oct/2020:19:22:35.223791750 -0400] conn=1 op=11 RESULT err=49 tag=97 nentries=0 wtime=0.000151951 optime=0.005243173 etime=0.005393883 - No such entry INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:249 Bind case 2-2. the bind user's suffix does not exist, bind should fail with error INVALID_CREDENTIALS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:251 Bind as {uid=bogus,ou=people,dc=bogus,bogus} who does not exist. INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:260 Cause found - [22/Oct/2020:19:22:36.232668582 -0400] conn=1 op=12 RESULT err=49 tag=97 nentries=0 wtime=0.000151571 optime=0.004258615 etime=0.004399570 - No suffix for bind dn found INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:263 Bind case 2-3. the bind user's password is wrong, bind should fail with error INVALID_CREDENTIALS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:265 Bind as {uid=buser123,ou=BOU,dc=example,dc=com,bogus} who does not exist. INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:269 Exception (expected): INVALID_CREDENTIALS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:270 Desc Invalid credentials INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:278 Cause found - [22/Oct/2020:19:22:37.266596070 -0400] conn=1 op=13 RESULT err=49 tag=97 nentries=0 wtime=0.000145266 optime=0.030556824 etime=0.030695064 - Invalid credentials INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:281 Adding aci for uid=buser123,ou=BOU,dc=example,dc=com to ou=BOU,dc=example,dc=com. 
INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:283 aci: (targetattr="*")(version 3.0; acl "buser123"; allow(all) userdn = "ldap:///uid=buser123,ou=BOU,dc=example,dc=com";) INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:284 Bind as {cn=Directory Manager,password} INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:289 Bind case 3. the bind user has the right to read the entry itself, bind should be successful. INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:290 Bind as {uid=buser123,ou=BOU,dc=example,dc=com,buser123} which should be ok. INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:293 The following operations are against the subtree the bind user uid=buser123,ou=BOU,dc=example,dc=com has no rights. INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:297 Search case 1. the bind user has no rights to read the search entry, it should return no search results with <class 'ldap.SUCCESS'> INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:108 Searching existing entry uid=tuser0,ou=people,dc=example,dc=com, which should be ok. INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:141 Search should return none INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:148 PASSED INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:303 Search case 2-1. the search entry does not exist, the search should return no search results with SUCCESS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:108 Searching non-existing entry uid=bogus,dc=example,dc=com, which should be ok. INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:141 Search should return none INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:148 PASSED INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:309 Search case 2-2. 
the search entry does not exist, the search should return no search results with SUCCESS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:108 Searching non-existing entry uid=bogus,ou=people,dc=example,dc=com, which should be ok. INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:141 Search should return none INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:148 PASSED INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:316 Add case 1. the bind user has no rights AND the adding entry exists, it should fail with INSUFFICIENT_ACCESS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:108 Adding existing entry uid=tuser0,ou=people,dc=example,dc=com, which should fail with INSUFFICIENT_ACCESS. INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:131 Exception (expected): INSUFFICIENT_ACCESS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:132 Desc Insufficient access INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:148 PASSED INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:322 Add case 2-1. the bind user has no rights AND the adding entry does not exist, it should fail with INSUFFICIENT_ACCESS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:108 Adding non-existing entry uid=bogus,dc=example,dc=com, which should fail with INSUFFICIENT_ACCESS. INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:131 Exception (expected): INSUFFICIENT_ACCESS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:132 Desc Insufficient access INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:148 PASSED INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:328 Add case 2-2. 
the bind user has no rights AND the adding entry does not exist, it should fail with INSUFFICIENT_ACCESS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:108 Adding non-existing entry uid=bogus,ou=people,dc=example,dc=com, which should fail with INSUFFICIENT_ACCESS. INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:131 Exception (expected): INSUFFICIENT_ACCESS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:132 Desc Insufficient access INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:148 PASSED INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:335 Modify case 1. the bind user has no rights AND the modifying entry exists, it should fail with INSUFFICIENT_ACCESS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:108 Modifying existing entry uid=tuser0,ou=people,dc=example,dc=com, which should fail with INSUFFICIENT_ACCESS. INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:131 Exception (expected): INSUFFICIENT_ACCESS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:132 Desc Insufficient access INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:148 PASSED INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:341 Modify case 2-1. the bind user has no rights AND the modifying entry does not exist, it should fail with INSUFFICIENT_ACCESS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:108 Modifying non-existing entry uid=bogus,dc=example,dc=com, which should fail with INSUFFICIENT_ACCESS. 
INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:131 Exception (expected): INSUFFICIENT_ACCESS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:132 Desc Insufficient access INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:148 PASSED INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:347 Modify case 2-2. the bind user has no rights AND the modifying entry does not exist, it should fail with INSUFFICIENT_ACCESS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:108 Modifying non-existing entry uid=bogus,ou=people,dc=example,dc=com, which should fail with INSUFFICIENT_ACCESS. INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:131 Exception (expected): INSUFFICIENT_ACCESS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:132 Desc Insufficient access INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:148 PASSED INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:354 Modrdn case 1. the bind user has no rights AND the renaming entry exists, it should fail with INSUFFICIENT_ACCESS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:108 Renaming existing entry uid=tuser0,ou=people,dc=example,dc=com, which should fail with INSUFFICIENT_ACCESS. INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:131 Exception (expected): INSUFFICIENT_ACCESS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:132 Desc Insufficient access INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:148 PASSED INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:360 Modrdn case 2-1. 
the bind user has no rights AND the renaming entry does not exist, it should fail with INSUFFICIENT_ACCESS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:108 Renaming non-existing entry uid=bogus,dc=example,dc=com, which should fail with INSUFFICIENT_ACCESS. INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:131 Exception (expected): INSUFFICIENT_ACCESS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:132 Desc Insufficient access INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:148 PASSED INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:366 Modrdn case 2-2. the bind user has no rights AND the renaming entry does not exist, it should fail with INSUFFICIENT_ACCESS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:108 Renaming non-existing entry uid=bogus,ou=people,dc=example,dc=com, which should fail with INSUFFICIENT_ACCESS. INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:131 Exception (expected): INSUFFICIENT_ACCESS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:132 Desc Insufficient access INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:148 PASSED INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:372 Modrdn case 3. the bind user has no rights AND the node moving an entry to exists, it should fail with INSUFFICIENT_ACCESS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:108 Moving to existing superior ou=groups,dc=example,dc=com, which should fail with INSUFFICIENT_ACCESS. 
INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:131 Exception (expected): INSUFFICIENT_ACCESS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:132 Desc Insufficient access INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:148 PASSED INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:378 Modrdn case 4-1. the bind user has no rights AND the node moving an entry to does not, it should fail with INSUFFICIENT_ACCESS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:108 Moving to non-existing superior ou=OU,dc=example,dc=com, which should fail with INSUFFICIENT_ACCESS. INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:131 Exception (expected): INSUFFICIENT_ACCESS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:132 Desc Insufficient access INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:148 PASSED INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:384 Modrdn case 4-2. the bind user has no rights AND the node moving an entry to does not, it should fail with INSUFFICIENT_ACCESS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:108 Moving to non-existing superior ou=OU,dc=example,dc=com, which should fail with INSUFFICIENT_ACCESS. INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:131 Exception (expected): INSUFFICIENT_ACCESS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:132 Desc Insufficient access INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:148 PASSED INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:391 Delete case 1. 
the bind user has no rights AND the deleting entry exists, it should fail with INSUFFICIENT_ACCESS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:108 Deleting existing entry uid=tuser0,ou=people,dc=example,dc=com, which should fail with INSUFFICIENT_ACCESS. INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:131 Exception (expected): INSUFFICIENT_ACCESS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:132 Desc Insufficient access INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:148 PASSED INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:397 Delete case 2-1. the bind user has no rights AND the deleting entry does not exist, it should fail with INSUFFICIENT_ACCESS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:108 Deleting non-existing entry uid=bogus,dc=example,dc=com, which should fail with INSUFFICIENT_ACCESS. INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:131 Exception (expected): INSUFFICIENT_ACCESS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:132 Desc Insufficient access INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:148 PASSED INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:403 Delete case 2-2. the bind user has no rights AND the deleting entry does not exist, it should fail with INSUFFICIENT_ACCESS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:108 Deleting non-existing entry uid=bogus,ou=people,dc=example,dc=com, which should fail with INSUFFICIENT_ACCESS. 
INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:131 Exception (expected): INSUFFICIENT_ACCESS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:132 Desc Insufficient access INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:148 PASSED INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:407 EXTRA: Check no regressions INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:408 Adding aci for uid=buser123,ou=BOU,dc=example,dc=com to dc=example,dc=com. INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:410 Bind as {cn=Directory Manager,password} INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:415 Bind as {uid=buser123,ou=BOU,dc=example,dc=com,buser123}. INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:425 Search case. the search entry does not exist, the search should fail with NO_SUCH_OBJECT INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:108 Searching non-existing entry uid=bogus,ou=people,dc=example,dc=com, which should fail with NO_SUCH_OBJECT. INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:131 Exception (expected): NO_SUCH_OBJECT INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:132 Desc No such object INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:148 PASSED INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:431 Add case. the adding entry already exists, it should fail with ALREADY_EXISTS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:108 Adding existing entry uid=tuser0,ou=people,dc=example,dc=com, which should fail with ALREADY_EXISTS. 
INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:131 Exception (expected): ALREADY_EXISTS INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:132 Desc Already exists INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:148 PASSED INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:436 Modify case. the modifying entry does not exist, it should fail with NO_SUCH_OBJECT INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:108 Modifying non-existing entry uid=bogus,dc=example,dc=com, which should fail with NO_SUCH_OBJECT. INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:131 Exception (expected): NO_SUCH_OBJECT INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:132 Desc No such object INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:148 PASSED INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:441 Modrdn case 1. the renaming entry does not exist, it should fail with NO_SUCH_OBJECT INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:108 Renaming non-existing entry uid=bogus,dc=example,dc=com, which should fail with NO_SUCH_OBJECT. INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:131 Exception (expected): NO_SUCH_OBJECT INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:132 Desc No such object INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:148 PASSED INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:446 Modrdn case 2. the node moving an entry to does not, it should fail with NO_SUCH_OBJECT INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:108 Moving to non-existing superior ou=OU,dc=example,dc=com, which should fail with NO_SUCH_OBJECT. 
INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:131 Exception (expected): NO_SUCH_OBJECT INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:132 Desc No such object INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:148 PASSED INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:451 Delete case. the deleting entry does not exist, it should fail with NO_SUCH_OBJECT INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:108 Deleting non-existing entry uid=bogus,dc=example,dc=com, which should fail with NO_SUCH_OBJECT. INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:131 Exception (expected): NO_SUCH_OBJECT INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:132 Desc No such object INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:148 PASSED INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:454 Inactivate uid=buser123,ou=BOU,dc=example,dc=com INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:461 ['/usr/sbin/ns-inactivate.pl', '-Z', 'standalone1', '-D', 'cn=Directory Manager', '-w', 'password', '-I', 'uid=buser123,ou=BOU,dc=example,dc=com'] INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:465 Bind as {uid=buser123,ou=BOU,dc=example,dc=com,buser123} which should fail with UNWILLING_TO_PERFORM. INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:469 Exception (expected): UNWILLING_TO_PERFORM INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:470 Desc Server is unwilling to perform INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:473 Bind as {uid=buser123,ou=BOU,dc=example,dc=com,bogus} which should fail with UNWILLING_TO_PERFORM. 
INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:477 Exception (expected): UNWILLING_TO_PERFORM INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:478 Desc Server is unwilling to perform INFO  tests.suites.acl.repeated_ldap_add_test:repeated_ldap_add_test.py:481 SUCCESS
Passed suites/acl/roledn_test.py::test_mod_seealso_positive[(STEVE_ROLE, NESTED_ROLE_TESTER)] 0.05
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
Passed suites/acl/roledn_test.py::test_mod_seealso_positive[(HARRY_ROLE, NESTED_ROLE_TESTER)] 0.04
No log output captured.
Passed suites/acl/roledn_test.py::test_mod_seealso_positive[(MARY_ROLE, NOT_RULE_ACCESS)] 0.04
No log output captured.
Passed suites/acl/roledn_test.py::test_mod_seealso_positive[(STEVE_ROLE, OR_RULE_ACCESS)] 0.04
No log output captured.
Passed suites/acl/roledn_test.py::test_mod_seealso_positive[(HARRY_ROLE, OR_RULE_ACCESS)] 0.04
No log output captured.
Passed suites/acl/roledn_test.py::test_mod_seealso_positive[(STEVE_ROLE, ALL_ACCESS)] 0.05
No log output captured.
Passed suites/acl/roledn_test.py::test_mod_seealso_positive[(HARRY_ROLE, ALL_ACCESS)] 0.04
No log output captured.
Passed suites/acl/roledn_test.py::test_mod_seealso_positive[(MARY_ROLE, ALL_ACCESS)] 0.06
No log output captured.
Passed suites/acl/roledn_test.py::test_mod_seealso_negative[(MARY_ROLE, NESTED_ROLE_TESTER)] 0.04
No log output captured.
Passed suites/acl/roledn_test.py::test_mod_seealso_negative[(STEVE_ROLE, NOT_RULE_ACCESS)] 0.05
No log output captured.
Passed suites/acl/roledn_test.py::test_mod_seealso_negative[(HARRY_ROLE, NOT_RULE_ACCESS)] 0.05
No log output captured.
Passed suites/acl/roledn_test.py::test_mod_seealso_negative[(MARY_ROLE , OR_RULE_ACCESS)] 0.06
No log output captured.
Passed suites/acl/roledn_test.py::test_mod_anonseealso_positive[NOT_RULE_ACCESS] 0.01
No log output captured.
Passed suites/acl/roledn_test.py::test_mod_anonseealso_positive[ALL_ACCESS] 0.01
No log output captured.
Passed suites/acl/roledn_test.py::test_mod_anonseealso_negaive[NESTED_ROLE_TESTER] 0.02
No log output captured.
Passed suites/acl/roledn_test.py::test_mod_anonseealso_negaive[OR_RULE_ACCESS] 0.03
No log output captured.
Passed suites/acl/search_real_part2_test.py::test_deny_all_access_with__target_set_on_non_leaf 1.00
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
Passed suites/acl/search_real_part2_test.py::test_deny_all_access_with__target_set_on_wildcard_non_leaf 0.91
No log output captured.
Passed suites/acl/search_real_part2_test.py::test_deny_all_access_with__target_set_on_wildcard_leaf 1.16
No log output captured.
Passed suites/acl/search_real_part2_test.py::test_deny_all_access_with_targetfilter_using_equality_search 0.84
No log output captured.
Passed suites/acl/search_real_part2_test.py::test_deny_all_access_with_targetfilter_using_equality_search_two 0.90
No log output captured.
Passed suites/acl/search_real_part2_test.py::test_deny_all_access_with_targetfilter_using_substring_search 0.68
No log output captured.
Passed suites/acl/search_real_part2_test.py::test_deny_all_access_with_targetfilter_using_substring_search_two 1.11
No log output captured.
Passed suites/acl/search_real_part2_test.py::test_deny_all_access_with_targetfilter_using_boolean_or_of_two_equality_search 0.18
No log output captured.
Passed suites/acl/search_real_part2_test.py::test_deny_all_access_to__userdn_two 0.79
No log output captured.
Passed suites/acl/search_real_part2_test.py::test_deny_all_access_with_userdn 0.99
No log output captured.
Passed suites/acl/search_real_part2_test.py::test_deny_all_access_with_targetfilter_using_presence_search 0.14
No log output captured.
Passed suites/acl/search_real_part3_test.py::test_deny_search_access_to_userdn_with_ldap_url 0.92
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
Passed suites/acl/search_real_part3_test.py::test_deny_search_access_to_userdn_with_ldap_url_two 0.72
No log output captured.
Passed suites/acl/search_real_part3_test.py::test_deny_search_access_to_userdn_with_ldap_url_matching_all_users 0.96
No log output captured.
Passed suites/acl/search_real_part3_test.py::test_deny_read_access_to_a_dynamic_group 0.64
No log output captured.
Passed suites/acl/search_real_part3_test.py::test_deny_read_access_to_dynamic_group_with_host_port_set_on_ldap_url 0.49
No log output captured.
Passed suites/acl/search_real_part3_test.py::test_deny_read_access_to_dynamic_group_with_scope_set_to_one_in_ldap_url 0.49
No log output captured.
Passed suites/acl/search_real_part3_test.py::test_deny_read_access_to_dynamic_group_two 0.87
No log output captured.
Passed suites/acl/search_real_part3_test.py::test_deny_access_to_group_should_deny_access_to_all_uniquemember 0.75
No log output captured.
Passed suites/acl/search_real_part3_test.py::test_entry_with_lots_100_attributes 8.95
No log output captured.
Passed suites/acl/search_real_part3_test.py::test_groupdnattr_value_is_another_group 0.16
No log output captured.
Passed suites/acl/search_real_test.py::test_deny_all_access_with_target_set 0.94
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
Passed suites/acl/search_real_test.py::test_deny_all_access_to_a_target_with_wild_card 0.87
No log output captured.
Passed suites/acl/search_real_test.py::test_deny_all_access_without_a_target_set 1.14
No log output captured.
Passed suites/acl/search_real_test.py::test_deny_read_search_and_compare_access_with_target_and_targetattr_set 0.97
No log output captured.
Passed suites/acl/search_real_test.py::test_deny_read_access_to_multiple_groupdns 1.16
No log output captured.
Passed suites/acl/search_real_test.py::test_deny_all_access_to_userdnattr 0.81
No log output captured.
Passed suites/acl/search_real_test.py::test_deny_all_access_with__target_set 1.07
No log output captured.
Passed suites/acl/search_real_test.py::test_deny_all_access_with__targetattr_set 1.57
No log output captured.
Passed suites/acl/search_real_test.py::test_deny_all_access_with_targetattr_set 0.86
No log output captured.
Passed suites/acl/selfdn_permissions_test.py::test_selfdn_permission_add 0.51
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389:selfdn_permissions_test.py:58 Add OCticket47653 that allows 'member' attribute INFO  lib389:selfdn_permissions_test.py:63 Add cn=bind_entry, dc=example,dc=com
-------------------------------Captured log call--------------------------------
INFO  lib389:selfdn_permissions_test.py:106 ######################### ADD ###################### INFO  lib389:selfdn_permissions_test.py:109 Bind as cn=bind_entry, dc=example,dc=com INFO  lib389:selfdn_permissions_test.py:139 Try to add Add cn=test_entry, dc=example,dc=com (aci is missing): dn: cn=test_entry, dc=example,dc=com cn: test_entry member: cn=bind_entry, dc=example,dc=com objectclass: top objectclass: person objectclass: OCticket47653 postalAddress: here postalCode: 1234 sn: test_entry INFO  lib389:selfdn_permissions_test.py:143 Exception (expected): INSUFFICIENT_ACCESS INFO  lib389:selfdn_permissions_test.py:147 Bind as cn=Directory Manager and add the ADD SELFDN aci INFO  lib389:selfdn_permissions_test.py:159 Bind as cn=bind_entry, dc=example,dc=com INFO  lib389:selfdn_permissions_test.py:164 Try to add Add cn=test_entry, dc=example,dc=com (member is missing) INFO  lib389:selfdn_permissions_test.py:172 Exception (expected): INSUFFICIENT_ACCESS INFO  lib389:selfdn_permissions_test.py:178 Try to add Add cn=test_entry, dc=example,dc=com (with several member values) INFO  lib389:selfdn_permissions_test.py:181 Exception (expected): INSUFFICIENT_ACCESS INFO  lib389:selfdn_permissions_test.py:184 Try to add Add cn=test_entry, dc=example,dc=com should be successful
Passed suites/acl/selfdn_permissions_test.py::test_selfdn_permission_search 0.35
-------------------------------Captured log call--------------------------------
INFO  lib389:selfdn_permissions_test.py:205 ######################### SEARCH ###################### INFO  lib389:selfdn_permissions_test.py:207 Bind as cn=bind_entry, dc=example,dc=com INFO  lib389:selfdn_permissions_test.py:211 Try to search cn=test_entry, dc=example,dc=com (aci is missing) INFO  lib389:selfdn_permissions_test.py:216 Bind as cn=Directory Manager and add the READ/SEARCH SELFDN aci INFO  lib389:selfdn_permissions_test.py:229 Bind as cn=bind_entry, dc=example,dc=com INFO  lib389:selfdn_permissions_test.py:233 Try to search cn=test_entry, dc=example,dc=com should be successful
Passed suites/acl/selfdn_permissions_test.py::test_selfdn_permission_modify 0.54
-------------------------------Captured log call--------------------------------
INFO  lib389:selfdn_permissions_test.py:256 Bind as cn=bind_entry, dc=example,dc=com INFO  lib389:selfdn_permissions_test.py:259 ######################### MODIFY ###################### INFO  lib389:selfdn_permissions_test.py:263 Try to modify cn=test_entry, dc=example,dc=com (aci is missing) INFO  lib389:selfdn_permissions_test.py:267 Exception (expected): INSUFFICIENT_ACCESS INFO  lib389:selfdn_permissions_test.py:271 Bind as cn=Directory Manager and add the WRITE SELFDN aci INFO  lib389:selfdn_permissions_test.py:284 Bind as cn=bind_entry, dc=example,dc=com INFO  lib389:selfdn_permissions_test.py:288 Try to modify cn=test_entry, dc=example,dc=com. It should succeed
Passed suites/acl/selfdn_permissions_test.py::test_selfdn_permission_delete 0.32
-------------------------------Captured log call--------------------------------
INFO  lib389:selfdn_permissions_test.py:314 ######################### DELETE ###################### INFO  lib389:selfdn_permissions_test.py:317 Bind as cn=bind_entry, dc=example,dc=com INFO  lib389:selfdn_permissions_test.py:322 Try to delete cn=test_entry, dc=example,dc=com (aci is missing) INFO  lib389:selfdn_permissions_test.py:325 Exception (expected): INSUFFICIENT_ACCESS INFO  lib389:selfdn_permissions_test.py:329 Bind as cn=Directory Manager and add the READ/SEARCH SELFDN aci INFO  lib389:selfdn_permissions_test.py:341 Bind as cn=bind_entry, dc=example,dc=com INFO  lib389:selfdn_permissions_test.py:345 Try to delete cn=test_entry, dc=example,dc=com should be successful
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_targattrfilters_1] 0.03
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_targattrfilters_2] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_targattrfilters_3] 0.03
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_targattrfilters_4] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_targattrfilters_5] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_targattrfilters_6] 0.07
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_targattrfilters_7] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_targattrfilters_8] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_targattrfilters_9] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_targattrfilters_10] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_targattrfilters_11] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_targattrfilters_12] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_targattrfilters_13] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_targattrfilters_14] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_targattrfilters_15] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_targattrfilters_16] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_targattrfilters_17] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_targattrfilters_19] 0.03
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_targattrfilters_21] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_targattrfilters_22] 0.04
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_targattrfilters_23] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_Missing_acl_mispel] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_Missing_acl_string] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_Wrong_version_string] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_Missing_version_string] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_Authenticate_statement] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_Multiple_targets] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_Target_set_to_self] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_target_set_with_ldap_instead_of_ldap] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_target_set_with_more_than_three] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_target_set_with_less_than_three] 0.03
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_bind_rule_set_with_less_than_three] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_Use_semicolon_instead_of_comma_in_permission] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_Use_double_equal_instead_of_equal_in_the_target] 0.03
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_use_double_equal_instead_of_equal_in_user_and_group_access] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_donot_cote_the_name_of_the_aci] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_extra_parentheses_case_1] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_extra_parentheses_case_2] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_extra_parentheses_case_3] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_no_semicolon_at_the_end_of_the_aci] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_a_character_different_of_a_semicolon_at_the_end_of_the_aci] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_bad_filter] 0.03
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_Use_double_equal_instead_of_equal_in_the_targattrfilters] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_aci_invalid_syntax[test_Use_double_equal_instead_of_equal_inside_the_targattrfilters] 0.02
No log output captured.
Passed suites/acl/syntax_test.py::test_target_set_above_the_entry_test 0.02
No log output captured.
Passed suites/acl/userattr_test.py::test_mod_see_also_positive[(CAN,ROLEDNACCESS)] 0.06
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
Passed suites/acl/userattr_test.py::test_mod_see_also_positive[(CAN,USERDNACCESS)] 0.02
No log output captured.
Passed suites/acl/userattr_test.py::test_mod_see_also_positive[(CAN,GROUPDNACCESS)] 0.01
No log output captured.
Passed suites/acl/userattr_test.py::test_mod_see_also_positive[(CAN,LDAPURLACCESS)] 0.02
No log output captured.
Passed suites/acl/userattr_test.py::test_mod_see_also_positive[(CAN,ATTRNAMEACCESS)] 0.02
No log output captured.
Passed suites/acl/userattr_test.py::test_mod_see_also_positive[(LEVEL_0, OU_2)] 0.06
No log output captured.
Passed suites/acl/userattr_test.py::test_mod_see_also_positive[(LEVEL_1,ANCESTORS)] 0.04
No log output captured.
Passed suites/acl/userattr_test.py::test_mod_see_also_positive[(LEVEL_2,GRANDPARENTS)] 0.10
No log output captured.
Passed suites/acl/userattr_test.py::test_mod_see_also_positive[(LEVEL_4,OU_2)] 0.07
No log output captured.
Passed suites/acl/userattr_test.py::test_mod_see_also_positive[(LEVEL_4, ANCESTORS)] 0.27
No log output captured.
Passed suites/acl/userattr_test.py::test_mod_see_also_positive[(LEVEL_4,GRANDPARENTS)] 0.02
No log output captured.
Passed suites/acl/userattr_test.py::test_mod_see_also_positive[(LEVEL_4,PARENTS)] 0.02
No log output captured.
Passed suites/acl/userattr_test.py::test_mod_see_also_positive[(LEVEL_4,CHILDREN)] 0.02
No log output captured.
Passed suites/acl/userattr_test.py::test_mod_see_also_negative[(CANNOT,ROLEDNACCESS)] 0.06
No log output captured.
Passed suites/acl/userattr_test.py::test_mod_see_also_negative[(CANNOT,USERDNACCESS)] 0.03
No log output captured.
Passed suites/acl/userattr_test.py::test_mod_see_also_negative[(CANNOT,GROUPDNACCESS)] 0.03
No log output captured.
Passed suites/acl/userattr_test.py::test_mod_see_also_negative[(CANNOT,LDAPURLACCESS)] 0.28
No log output captured.
Passed suites/acl/userattr_test.py::test_mod_see_also_negative[(CANNOT,ATTRNAMEACCESS)] 0.03
No log output captured.
Passed suites/acl/userattr_test.py::test_mod_see_also_negative[(LEVEL_0, ANCESTORS)] 0.06
No log output captured.
Passed suites/acl/userattr_test.py::test_mod_see_also_negative[(LEVEL_0,GRANDPARENTS)] 0.03
No log output captured.
Passed suites/acl/userattr_test.py::test_mod_see_also_negative[(LEVEL_0,PARENTS)] 0.03
No log output captured.
Passed suites/acl/userattr_test.py::test_mod_see_also_negative[(LEVEL_0,CHILDREN)] 0.03
No log output captured.
Passed suites/acl/userattr_test.py::test_mod_see_also_negative[(LEVEL_2,PARENTS)] 0.06
No log output captured.
Passed suites/acl/userattr_test.py::test_mod_see_also_negative[(LEVEL_4,GRANDSONS)] 0.05
No log output captured.
Passed suites/acl/userattr_test.py::test_last_three[uid=Ananda Borah,ou=Accounting,dc=example,dc=com-uid=USERDNACCESS,ou=Accounting,dc=example,dc=com] 0.07
No log output captured.
Passed suites/acl/userattr_test.py::test_last_three[uid=Ananda Borah,ou=Accounting,dc=example,dc=com-uid=ROLEDNACCESS,ou=Accounting,dc=example,dc=com] 0.03
No log output captured.
Passed suites/acl/userattr_test.py::test_last_three[uid=Ananda Borah,ou=Accounting,dc=example,dc=com-uid=GROUPDNACCESS,ou=Accounting,dc=example,dc=com] 0.04
No log output captured.
Passed suites/acl/valueacl_part2_test.py::test_we_can_search_as_expected 0.01
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
Passed suites/acl/valueacl_part2_test.py::test_we_can_mod_title_as_expected 0.06
No log output captured.
Passed suites/acl/valueacl_part2_test.py::test_modify_with_multiple_filters 0.04
No log output captured.
Passed suites/acl/valueacl_part2_test.py::test_denied_by_multiple_filters 0.07
No log output captured.
Passed suites/acl/valueacl_part2_test.py::test_allowed_add_one_attribute 0.04
No log output captured.
Passed suites/acl/valueacl_part2_test.py::test_cannot_add_an_entry_with_attribute_values_we_are_not_allowed_add 0.08
No log output captured.
Passed suites/acl/valueacl_part2_test.py::test_on_modrdn 0.05
No log output captured.
Passed suites/acl/valueacl_part2_test.py::test_on_modrdn_allow 0.05
No log output captured.
Passed suites/acl/valueacl_part2_test.py::test_targattrfilters_keyword 0.10
No log output captured.
Passed suites/acl/valueacl_test.py::test_delete_an_attribute_value_we_are_not_allowed_to_delete 0.07
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
Passed suites/acl/valueacl_test.py::test_donot_allow_write_access_to_title_if_value_is_not_architect 0.08
No log output captured.
Passed suites/acl/valueacl_test.py::test_delete_an_attribute_value_we_are_allowed_to_delete 0.05
No log output captured.
Passed suites/acl/valueacl_test.py::test_delete_an_attribute_value_we_are_not_allowed_to_deleted 0.07
No log output captured.
Passed suites/acl/valueacl_test.py::test_allow_modify_replace 0.08
No log output captured.
Passed suites/acl/valueacl_test.py::test_allow_modify_delete 0.11
No log output captured.
Passed suites/acl/valueacl_test.py::test_replace_an_attribute_if_we_lack 0.08
No log output captured.
Passed suites/acl/valueacl_test.py::test_remove_an_attribute_if_we_have_del_rights_to_all_attr_value 0.28
No log output captured.
Passed suites/acl/valueacl_test.py::test_remove_an_attribute_if_we_donot_have_del_rights_to_all_attr_value 0.07
No log output captured.
Passed suites/acl/valueacl_test.py::test_remove_an_attribute_if_we_have_del_rights_to_all_attr_values 0.05
No log output captured.
Passed suites/acl/valueacl_test.py::test_cantnot_delete_an_entry_with_attribute_values_we_are_not_allowed_delete 0.08
No log output captured.
Passed suites/acl/valueacl_test.py::test_we_can_add_and_delete_an_entry_with_attribute_values_we_are_allowed_add_and_delete 0.06
No log output captured.
Passed suites/acl/valueacl_test.py::test_allow_title 0.07
No log output captured.
Passed suites/acl/valueacl_test.py::test_allow_to_modify 0.06
No log output captured.
Passed suites/acl/valueacl_test.py::test_selfwrite_does_not_confer_write_on_a_targattrfilters_atribute 0.06
No log output captured.
Passed suites/acl/valueacl_test.py::test_selfwrite_continues_to_give_rights_to_attr_in_targetattr_list 0.06
No log output captured.
Passed suites/acl/valueacl_test.py::test_add_an_attribute_value_we_are_allowed_to_add_with_ldapanyone 0.06
No log output captured.
Passed suites/acl/valueacl_test.py::test_hierarchy 0.07
No log output captured.
Passed suites/acl/valueacl_test.py::test_targattrfilters_and_search_permissions_and_that_ldapmodify_works_as_expected 0.04
No log output captured.
Passed suites/acl/valueacl_test.py::test_targattrfilters_and_search_permissions_and_that_ldapmodify_works_as_expected_two 0.01
No log output captured.
Passed suites/attr_encryption/attr_encryption_test.py::test_basic 6.05
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created. INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:35 Enable TLS for attribute encryption INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:38 Enables attribute encryption INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:42 Enables attribute encryption for employeeNumber and telephoneNumber INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:46 Add a test user with encrypted attributes
-------------------------------Captured log call--------------------------------
INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:81 Restart the server INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:87 Extracting values of cn from the list of objects in encrypt_attrs INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:88 And appending the cn values in a list INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:93 Check employeenumber encryption is enabled INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:96 Check telephoneNumber encryption is enabled INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:99 Check that encrypted attribute is present for user i.e. telephonenumber
Passed suites/attr_encryption/attr_encryption_test.py::test_export_import_ciphertext 13.18
------------------------------Captured stderr call------------------------------
ldiffile: /var/lib/dirsrv/slapd-standalone1/ldif/export_ciphertext.ldif
-------------------------------Captured log call--------------------------------
INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:127 Export data as ciphertext INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:138 Check that the attribute is present in the exported file INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:139 Check that the encrypted value of attribute is not present in the exported file INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:145 Delete the test user entry with encrypted data INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:148 Import data as ciphertext, which was exported previously INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:159 Check that the data with encrypted attribute is imported properly
Passed suites/attr_encryption/attr_encryption_test.py::test_export_import_plaintext 16.22
------------------------------Captured stderr call------------------------------
[22/Oct/2020:19:25:59.958723608 -0400] - WARN - Security Initialization - /tmp is not a private namespace. pem files not exported there [22/Oct/2020:19:25:59.964205764 -0400] - INFO - slapd_extract_cert - CA CERT NAME: Self-Signed-CA [22/Oct/2020:19:25:59.967367813 -0400] - ERR - slapd_extract_cert - Unable to open "/tmp/slapd-standalone1/Self-Signed-CA.pem" for writing (-5950, 2). [22/Oct/2020:19:25:59.971018675 -0400] - WARN - Security Initialization - SSL alert: Sending pin request to SVRCore. You may need to run systemd-tty-ask-password-agent to provide the password. [22/Oct/2020:19:26:00.177931359 -0400] - INFO - slapd_extract_cert - SERVER CERT NAME: Server-Cert [22/Oct/2020:19:26:00.182066435 -0400] - WARN - Security Initialization - /tmp is not a private namespace. pem files not exported there [22/Oct/2020:19:26:00.185318905 -0400] - WARN - Security Initialization - /tmp is not a private namespace. pem files not exported there ldiffile: /var/lib/dirsrv/slapd-standalone1/ldif/export_plaintext.ldif [22/Oct/2020:19:26:07.114291445 -0400] - WARN - Security Initialization - /tmp is not a private namespace. pem files not exported there [22/Oct/2020:19:26:07.121273636 -0400] - INFO - slapd_extract_cert - CA CERT NAME: Self-Signed-CA [22/Oct/2020:19:26:07.124949229 -0400] - ERR - slapd_extract_cert - Unable to open "/tmp/slapd-standalone1/Self-Signed-CA.pem" for writing (-5950, 2). [22/Oct/2020:19:26:07.129265056 -0400] - WARN - Security Initialization - SSL alert: Sending pin request to SVRCore. You may need to run systemd-tty-ask-password-agent to provide the password. [22/Oct/2020:19:26:07.393503832 -0400] - INFO - slapd_extract_cert - SERVER CERT NAME: Server-Cert [22/Oct/2020:19:26:07.397389959 -0400] - WARN - Security Initialization - /tmp is not a private namespace. pem files not exported there [22/Oct/2020:19:26:07.401873272 -0400] - WARN - Security Initialization - /tmp is not a private namespace. pem files not exported there
-------------------------------Captured log call--------------------------------
INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:189 Export data as plain text INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:200 Check that the attribute is present in the exported file INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:201 Check that the plain text value of the encrypted attribute is present in the exported file INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:205 Delete the test user entry with encrypted data INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:208 Import data as plain text, which was exported previously INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:219 Check that the attribute is imported properly
Passed suites/attr_encryption/attr_encryption_test.py::test_attr_encryption_unindexed 5.93
------------------------------Captured stderr call------------------------------
ldiffile: /var/lib/dirsrv/slapd-standalone1/ldif/emp_num_ciphertext.ldif
-------------------------------Captured log call--------------------------------
INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:242 Export data as cipher text INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:253 Check that the attribute is present in the exported file INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:254 Check that the encrypted value of attribute is not present in the exported file
Passed suites/attr_encryption/attr_encryption_test.py::test_attr_encryption_multiple_backends 9.38
------------------------------Captured stderr call------------------------------
ldiffile: /var/lib/dirsrv/slapd-standalone1/ldif/export_db1.ldif ldiffile: /var/lib/dirsrv/slapd-standalone1/ldif/export_db2.ldif
-------------------------------Captured log call--------------------------------
INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:287 Add two test backends INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:307 Enables attribute encryption for telephoneNumber in test_backend1 INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:312 Enables attribute encryption for employeeNumber in test_backend2 INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:317 Add a test user with encrypted attributes in both backends INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:326 Export data as ciphertext from both backends INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:343 Check that the attribute is present in the exported file in db1 INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:344 Check that the encrypted value of attribute is not present in the exported file in db1 INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:350 Check that the attribute is present in the exported file in db2 INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:351 Check that the encrypted value of attribute is not present in the exported file in db2 INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:357 Delete test backends
Passed suites/attr_encryption/attr_encryption_test.py::test_attr_encryption_backends 9.08
------------------------------Captured stderr call------------------------------
ldiffile: /var/lib/dirsrv/slapd-standalone1/ldif/export_db1.ldif ldiffile: /var/lib/dirsrv/slapd-standalone1/ldif/export_db2.ldif
-------------------------------Captured log call--------------------------------
INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:386 Add two test backends INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:405 Enables attribute encryption for telephoneNumber in test_backend1 INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:410 Add a test user with telephoneNumber in both backends INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:419 Export data as ciphertext from both backends INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:436 Check that the attribute is present in the exported file in db1 INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:437 Check that the encrypted value of attribute is not present in the exported file in db1 INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:443 Check that the attribute is present in the exported file in db2 INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:444 Check that the value of attribute is also present in the exported file in db2 INFO  tests.suites.attr_encryption.attr_encryption_test:attr_encryption_test.py:450 Delete test backends
Passed suites/automember_plugin/automember_mod_test.py::test_mods 10.16
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
-------------------------------Captured log call--------------------------------
INFO  tests.suites.automember_plugin.automember_mod_test:automember_mod_test.py:135 Test PASSED
Passed suites/automember_plugin/automember_test.py::test_automemberscope 0.00
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
Passed suites/automember_plugin/automember_test.py::test_automemberfilter 0.12
No log output captured.
Passed suites/automember_plugin/automember_test.py::test_adduser 0.49
No log output captured.
Passed suites/automember_plugin/automember_test.py::test_delete_default_group 4.08
No log output captured.
Passed suites/automember_plugin/automember_test.py::test_no_default_group 4.79
No log output captured.
Passed suites/automember_plugin/automember_test.py::test_delete_target_group 5.00
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_disable_the_plug_in 0.02
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39001, 'ldap-secureport': 63701, 'server-id': 'master1', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:148 Creating replication topology.
Passed suites/automember_plugin/basic_test.py::test_custom_config_area 0.28
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_ability_to_control_behavior_of_modifiers_name 9.29
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_posixaccount_objectclass_automemberdefaultgroup 0.05
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_duplicated_member_attributes_added_when_the_entry_is_re_created 0.32
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_multi_valued_automemberdefaultgroup_for_hostgroups 0.06
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_plugin_creates_member_attributes_of_the_automemberdefaultgroup 0.06
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_multi_valued_automemberdefaultgroup_with_uniquemember 8.41
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_invalid_automembergroupingattr_member 0.05
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_valid_and_invalid_automembergroupingattr 0.13
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_add_regular_expressions_for_user_groups_and_check_for_member_attribute_after_adding_users 0.12
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_matching_gid_role_inclusive_regular_expression[autoMembers_22-5288-5289-Contractor-5291-5292-Contractors] 0.05
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_matching_gid_role_inclusive_regular_expression[autoMembers_21-1161-1162-Contractor-1162-1163-Contractors] 0.06
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_matching_gid_role_inclusive_regular_expression[autoMembers_20-1188-1189-CEO-1191-1192-Contractors] 0.06
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_matching_gid_role_inclusive_regular_expression[autoMembers_15-9288-9289-Manager-9291-9292-Managers] 0.05
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_matching_gid_role_inclusive_regular_expression[autoMembers_14-561-562-Manager-562-563-Managers] 0.05
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_matching_gid_role_inclusive_regular_expression[autoMembers_13-9788-9789-VPEngg-9392-9393-Managers] 0.08
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_gid_and_role_inclusive_exclusive_regular_expression[autoMembers_26-5788-5789-Intern-Contractors-SuffDef1-5] 0.32
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_gid_and_role_inclusive_exclusive_regular_expression[autoMembers_25-9788-9789-Employee-Contractors-Managers-1] 0.04
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_gid_and_role_inclusive_exclusive_regular_expression[autoMembers_24-1110-1111-Employee-Contractors-SuffDef1-5] 0.06
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_gid_and_role_inclusive_exclusive_regular_expression[autoMembers_23-2788-2789-Contractor-Contractors-SuffDef1-5] 0.05
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_gid_and_role_inclusive_exclusive_regular_expression[autoMembers_19-5788-5789-HRManager-Managers-SuffDef1-5] 0.09
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_gid_and_role_inclusive_exclusive_regular_expression[autoMembers_18-6788-6789-Junior-Managers-SuffDef1-5] 0.07
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_gid_and_role_inclusive_exclusive_regular_expression[autoMembers_17-562-563-Junior-Managers-SuffDef1-5] 0.07
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_gid_and_role_inclusive_exclusive_regular_expression[autoMembers_16-6788-6789-Manager-Managers-SuffDef1-5] 0.06
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_managers_contractors_exclusive_regex_rules_member_uid[autoMembers_32-555-720-Employee-SubDef1-SubDef3] 0.08
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_managers_contractors_exclusive_regex_rules_member_uid[autoMembers_31-515-200-Junior-SubDef1-SubDef5] 0.07
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_managers_contractors_exclusive_regex_rules_member_uid[autoMembers_30-999-400-Supervisor-SubDef1-SubDef2] 0.07
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_managers_contractors_exclusive_regex_rules_member_uid[autoMembers_28-555-3663-ContractHR-Contractors,cn=subsuffGroups-Managers,cn=subsuffGroups] 0.07
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_managers_inclusive_regex_rule[autoMembers_27-595-690-ContractHR-Managers-Contractors] 0.07
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_managers_inclusive_regex_rule[autoMembers_29-8195-2753-Employee-Contractors-Managers] 0.05
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_managers_inclusive_regex_rule[autoMembers_33-545-3333-Supervisor-Contractors-Managers] 0.07
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_managers_inclusive_regex_rule[autoMembers_34-8195-693-Temporary-Managers-Contractors] 0.07
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_reject_invalid_config_and_we_donot_deadlock_the_server 8.02
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_automemtask_re_build_task 10.41
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_automemtask_export_task 9.37
-------------------------------Captured log call--------------------------------
INFO  lib389:tasks.py:1039 Automember Export Updates task (task-10222020_192835) completed successfully
Passed suites/automember_plugin/basic_test.py::test_automemtask_mapping 2.16
-------------------------------Captured log call--------------------------------
INFO  lib389:tasks.py:1087 Automember Map Updates task (task-10222020_192838) completed successfully
Passed suites/automember_plugin/basic_test.py::test_automemtask_re_build 8.05
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_automemtask_export 12.00
-------------------------------Captured log call--------------------------------
INFO  lib389:tasks.py:1039 Automember Export Updates task (task-10222020_192857) completed successfully
Passed suites/automember_plugin/basic_test.py::test_automemtask_run_re_build 20.62
No log output captured.
Passed suites/automember_plugin/basic_test.py::test_automemtask_run_export 14.69
-------------------------------Captured log call--------------------------------
INFO  lib389:tasks.py:1039 Automember Export Updates task (task-10222020_192935) completed successfully
Passed suites/automember_plugin/configuration_test.py::test_configuration 4.58
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
Passed suites/backups/backup_test.py::test_missing_backend 4.95
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
Passed suites/basic/basic_test.py::test_basic_ops 0.19
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
Passed suites/basic/basic_test.py::test_basic_import_export 49.81
------------------------------Captured stderr call------------------------------
ldiffile: /var/lib/dirsrv/slapd-standalone1/ldif/export.ldif
Passed suites/basic/basic_test.py::test_basic_backup 9.68
-------------------------------Captured log call--------------------------------
INFO  lib389:tasks.py:619 Backup task backup_10222020_193100 completed successfully INFO  lib389:tasks.py:673 Restore task restore_10222020_193102 completed successfully
Passed suites/basic/basic_test.py::test_basic_db2index 5.92
------------------------------Captured stderr call------------------------------
[22/Oct/2020:19:31:12.946154643 -0400] - INFO - ldbm_instance_config_cachememsize_set - force a minimal value 512000 [22/Oct/2020:19:31:12.954451568 -0400] - INFO - check_and_set_import_cache - pagesize: 4096, available bytes 7579443200, process usage 22761472 [22/Oct/2020:19:31:12.957748675 -0400] - INFO - check_and_set_import_cache - Import allocates 2960720KB import cache. [22/Oct/2020:19:31:12.962161834 -0400] - INFO - bdb_copy_directory - Backing up file 0 (/var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/userRoot/givenName.db) [22/Oct/2020:19:31:12.964983088 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone1/db/userRoot/givenName.db to /var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/userRoot/givenName.db [22/Oct/2020:19:31:12.968143318 -0400] - INFO - bdb_copy_directory - Backing up file 1 (/var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/userRoot/aci.db) [22/Oct/2020:19:31:12.972686431 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone1/db/userRoot/aci.db to /var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/userRoot/aci.db [22/Oct/2020:19:31:12.975877265 -0400] - INFO - bdb_copy_directory - Backing up file 2 (/var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/userRoot/sn.db) [22/Oct/2020:19:31:12.979373938 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone1/db/userRoot/sn.db to /var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/userRoot/sn.db [22/Oct/2020:19:31:12.984420604 -0400] - INFO - bdb_copy_directory - Backing up file 3 (/var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/userRoot/numsubordinates.db) [22/Oct/2020:19:31:12.988267085 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone1/db/userRoot/numsubordinates.db to 
/var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/userRoot/numsubordinates.db [22/Oct/2020:19:31:12.991854716 -0400] - INFO - bdb_copy_directory - Backing up file 4 (/var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/userRoot/entryusn.db) [22/Oct/2020:19:31:12.995744641 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone1/db/userRoot/entryusn.db to /var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/userRoot/entryusn.db [22/Oct/2020:19:31:12.998864731 -0400] - INFO - bdb_copy_directory - Backing up file 5 (/var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/userRoot/nsuniqueid.db) [22/Oct/2020:19:31:13.001729133 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone1/db/userRoot/nsuniqueid.db to /var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/userRoot/nsuniqueid.db [22/Oct/2020:19:31:13.004762864 -0400] - INFO - bdb_copy_directory - Backing up file 6 (/var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/userRoot/ancestorid.db) [22/Oct/2020:19:31:13.007755183 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone1/db/userRoot/ancestorid.db to /var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/userRoot/ancestorid.db [22/Oct/2020:19:31:13.010824177 -0400] - INFO - bdb_copy_directory - Backing up file 7 (/var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/userRoot/parentid.db) [22/Oct/2020:19:31:13.013975026 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone1/db/userRoot/parentid.db to /var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/userRoot/parentid.db [22/Oct/2020:19:31:13.022275963 -0400] - INFO - bdb_copy_directory - Backing up file 8 (/var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/userRoot/mail.db) [22/Oct/2020:19:31:13.025772993 -0400] - INFO - 
dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone1/db/userRoot/mail.db to /var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/userRoot/mail.db [22/Oct/2020:19:31:13.029238479 -0400] - INFO - bdb_copy_directory - Backing up file 9 (/var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/userRoot/telephoneNumber.db) [22/Oct/2020:19:31:13.032371017 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone1/db/userRoot/telephoneNumber.db to /var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/userRoot/telephoneNumber.db [22/Oct/2020:19:31:13.035889710 -0400] - INFO - bdb_copy_directory - Backing up file 10 (/var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/userRoot/cn.db) [22/Oct/2020:19:31:13.039793194 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone1/db/userRoot/cn.db to /var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/userRoot/cn.db [22/Oct/2020:19:31:13.043468935 -0400] - INFO - bdb_copy_directory - Backing up file 11 (/var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/userRoot/DBVERSION) [22/Oct/2020:19:31:13.049956924 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone1/db/userRoot/DBVERSION to /var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/userRoot/DBVERSION [22/Oct/2020:19:31:13.053056904 -0400] - INFO - bdb_copy_directory - Backing up file 12 (/var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/userRoot/uid.db) [22/Oct/2020:19:31:13.056196313 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone1/db/userRoot/uid.db to /var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/userRoot/uid.db [22/Oct/2020:19:31:13.059432529 -0400] - INFO - bdb_copy_directory - Backing up file 13 (/var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/userRoot/entryrdn.db) 
[22/Oct/2020:19:31:13.066433216 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone1/db/userRoot/entryrdn.db to /var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/userRoot/entryrdn.db [22/Oct/2020:19:31:13.069576870 -0400] - INFO - bdb_copy_directory - Backing up file 14 (/var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/userRoot/uniquemember.db) [22/Oct/2020:19:31:13.072995847 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone1/db/userRoot/uniquemember.db to /var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/userRoot/uniquemember.db [22/Oct/2020:19:31:13.076613815 -0400] - INFO - bdb_copy_directory - Backing up file 15 (/var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/userRoot/id2entry.db) [22/Oct/2020:19:31:13.080530769 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone1/db/userRoot/id2entry.db to /var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/userRoot/id2entry.db [22/Oct/2020:19:31:13.084382887 -0400] - INFO - bdb_copy_directory - Backing up file 16 (/var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/userRoot/objectclass.db) [22/Oct/2020:19:31:13.087470352 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone1/db/userRoot/objectclass.db to /var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/userRoot/objectclass.db [22/Oct/2020:19:31:13.092591744 -0400] - INFO - upgradedb_core - userRoot: Start upgradedb. 
[22/Oct/2020:19:31:13.096671936 -0400] - INFO - bdb_instance_start - Import is running with nsslapd-db-private-import-mem on; No other process is allowed to access the database [22/Oct/2020:19:31:13.099491127 -0400] - INFO - check_and_set_import_cache - pagesize: 4096, available bytes 7578746880, process usage 23715840 [22/Oct/2020:19:31:13.102237236 -0400] - INFO - check_and_set_import_cache - Import allocates 2960448KB import cache. [22/Oct/2020:19:31:13.333068105 -0400] - INFO - bdb_import_main - reindex userRoot: Index buffering enabled with bucket size 100 [22/Oct/2020:19:31:14.039302679 -0400] - INFO - import_monitor_threads - reindex userRoot: Workers finished; cleaning up... [22/Oct/2020:19:31:14.243459571 -0400] - INFO - import_monitor_threads - reindex userRoot: Workers cleaned up. [22/Oct/2020:19:31:14.247276454 -0400] - INFO - bdb_import_main - reindex userRoot: Cleaning up producer thread... [22/Oct/2020:19:31:14.251010875 -0400] - INFO - bdb_import_main - reindex userRoot: Indexing complete. Post-processing... [22/Oct/2020:19:31:14.253860576 -0400] - INFO - bdb_import_main - reindex userRoot: Generating numsubordinates (this may take several minutes to complete)... [22/Oct/2020:19:31:14.257191878 -0400] - INFO - bdb_import_main - reindex userRoot: Generating numSubordinates complete. [22/Oct/2020:19:31:14.261028357 -0400] - INFO - bdb_get_nonleaf_ids - reindex userRoot: Gathering ancestorid non-leaf IDs... [22/Oct/2020:19:31:14.263929330 -0400] - INFO - bdb_get_nonleaf_ids - reindex userRoot: Finished gathering ancestorid non-leaf IDs. [22/Oct/2020:19:31:14.267572208 -0400] - INFO - ldbm_get_nonleaf_ids - reindex userRoot: Starting sort of ancestorid non-leaf IDs... [22/Oct/2020:19:31:14.271390712 -0400] - INFO - ldbm_get_nonleaf_ids - reindex userRoot: Finished sort of ancestorid non-leaf IDs. [22/Oct/2020:19:31:14.279271653 -0400] - INFO - bdb_ancestorid_new_idl_create_index - reindex userRoot: Creating ancestorid index (new idl)... 
[22/Oct/2020:19:31:14.286850708 -0400] - INFO - bdb_ancestorid_new_idl_create_index - reindex userRoot: Created ancestorid index (new idl). [22/Oct/2020:19:31:14.304858199 -0400] - INFO - bdb_import_main - reindex userRoot: Flushing caches... [22/Oct/2020:19:31:14.308075300 -0400] - INFO - bdb_import_main - reindex userRoot: Closing files... [22/Oct/2020:19:31:14.410116269 -0400] - INFO - bdb_import_main - reindex userRoot: Reindexing complete. Processed 160 entries in 1 seconds. (160.00 entries/sec) [22/Oct/2020:19:31:14.415441322 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone1/db/log.0000000001 to /var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/log.0000000001 [22/Oct/2020:19:31:14.432723670 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone1/db/DBVERSION to /var/lib/dirsrv/slapd-standalone1/bak/reindex_2020-10-22T19:31:12.796202/DBVERSION [22/Oct/2020:19:31:14.436203817 -0400] - INFO - bdb_pre_close - All database threads now stopped [22/Oct/2020:19:31:14.621138863 -0400] - INFO - slapd_exemode_db2index - Backend Instance: userRoot [22/Oct/2020:19:31:14.631016521 -0400] - INFO - ldbm_instance_config_cachememsize_set - force a minimal value 512000 [22/Oct/2020:19:31:14.638217685 -0400] - INFO - bdb_instance_start - Import is running with nsslapd-db-private-import-mem on; No other process is allowed to access the database [22/Oct/2020:19:31:14.641544714 -0400] - INFO - check_and_set_import_cache - pagesize: 4096, available bytes 7584354304, process usage 23003136 [22/Oct/2020:19:31:14.644589847 -0400] - INFO - check_and_set_import_cache - Import allocates 2962638KB import cache. 
[22/Oct/2020:19:31:14.786118348 -0400] - INFO - bdb_db2index - userRoot: Indexing attribute: uid [22/Oct/2020:19:31:14.789733668 -0400] - ERR - libdb - BDB1566 txn_checkpoint interface requires an environment configured for the transaction subsystem [22/Oct/2020:19:31:14.793075539 -0400] - ERR - bdb_force_checkpoint - Checkpoint FAILED, error Invalid argument (22) [22/Oct/2020:19:31:14.804848189 -0400] - INFO - bdb_db2index - userRoot: Finished indexing. [22/Oct/2020:19:31:14.829789000 -0400] - INFO - bdb_pre_close - All database threads now stopped
Passed suites/basic/basic_test.py::test_basic_acl 0.51
No log output captured.
Passed suites/basic/basic_test.py::test_basic_searches 0.09
No log output captured.
Passed suites/basic/basic_test.py::test_search_req_attrs[attrs0-cn-False] 0.00
No log output captured.
Passed suites/basic/basic_test.py::test_search_req_attrs[attrs1-cn-True] 0.00
No log output captured.
Passed suites/basic/basic_test.py::test_search_req_attrs[attrs2-nsUniqueId-True] 0.00
No log output captured.
Passed suites/basic/basic_test.py::test_search_req_attrs[attrs3-cn-True] 0.00
No log output captured.
Passed suites/basic/basic_test.py::test_search_req_attrs[attrs4-cn-True] 0.00
No log output captured.
Passed suites/basic/basic_test.py::test_basic_referrals 3.35
No log output captured.
Passed suites/basic/basic_test.py::test_basic_systemctl 12.35
No log output captured.
Passed suites/basic/basic_test.py::test_basic_ldapagent 5.03
No log output captured.
Passed suites/basic/basic_test.py::test_basic_dse_survives_kill9 11.50
No log output captured.
Passed suites/basic/basic_test.py::test_def_rootdse_attr[namingContexts] 0.02
No log output captured.
Passed suites/basic/basic_test.py::test_def_rootdse_attr[supportedLDAPVersion] 0.02
No log output captured.
Passed suites/basic/basic_test.py::test_def_rootdse_attr[supportedControl] 0.02
No log output captured.
Passed suites/basic/basic_test.py::test_def_rootdse_attr[supportedExtension] 0.02
No log output captured.
Passed suites/basic/basic_test.py::test_def_rootdse_attr[supportedSASLMechanisms] 0.02
No log output captured.
Passed suites/basic/basic_test.py::test_def_rootdse_attr[vendorName] 0.02
No log output captured.
Passed suites/basic/basic_test.py::test_def_rootdse_attr[vendorVersion] 0.02
No log output captured.
Passed suites/basic/basic_test.py::test_mod_def_rootdse_attr[namingContexts] 0.25
No log output captured.
Passed suites/basic/basic_test.py::test_mod_def_rootdse_attr[supportedLDAPVersion] 0.00
No log output captured.
Passed suites/basic/basic_test.py::test_mod_def_rootdse_attr[supportedControl] 0.00
No log output captured.
Passed suites/basic/basic_test.py::test_mod_def_rootdse_attr[supportedExtension] 0.00
No log output captured.
Passed suites/basic/basic_test.py::test_mod_def_rootdse_attr[supportedSASLMechanisms] 0.00
No log output captured.
Passed suites/basic/basic_test.py::test_mod_def_rootdse_attr[vendorName] 0.00
No log output captured.
Passed suites/basic/basic_test.py::test_mod_def_rootdse_attr[vendorVersion] 0.00
No log output captured.
Passed suites/basic/basic_test.py::test_basic_anonymous_search 0.03
No log output captured.
Passed suites/basic/basic_test.py::test_search_original_type 0.02
No log output captured.
Passed suites/basic/basic_test.py::test_search_ou 0.01
No log output captured.
Passed suites/basic/basic_test.py::test_connection_buffer_size 0.02
No log output captured.
Passed suites/basic/basic_test.py::test_critical_msg_on_empty_range_idl 6.80
No log output captured.
Passed suites/basic/basic_test.py::test_ldbm_modification_audit_log 13.41
No log output captured.
Passed suites/basic/basic_test.py::test_dscreate 15.77
------------------------------Captured stdout call------------------------------
Starting installation... Completed installation for test_dscreate
Passed suites/basic/basic_test.py::test_dscreate_ldapi 0.00
-----------------------------Captured stdout setup------------------------------
Starting installation... Completed installation for test-longname-deadbeef-deadbeef-deadbeef-deadbeef-deadbeef
-------------------------------Captured log call--------------------------------
DEBUG  RootDSE:_mapped_object.py:635 get_attr_vals('supportedControl')
Passed suites/basic/basic_test.py::test_dscreate_multiple_dashes_name 12.65
-----------------------------Captured stdout setup------------------------------
Starting installation... Completed installation for test-longname-deadbeef-deadbeef-deadbeef-deadbeef-deadbeef
Passed suites/basic/basic_test.py::test_dscreate_with_different_rdn[c=uk] 16.70
------------------------------Captured stdout call------------------------------
Starting installation... Completed installation for test_different_rdn
Passed suites/basic/basic_test.py::test_dscreate_with_different_rdn[cn=test_user] 14.92
-----------------------------Captured stdout setup------------------------------
Removing instance ... Completed instance removal
------------------------------Captured stdout call------------------------------
Starting installation... Completed installation for test_different_rdn
Passed suites/basic/basic_test.py::test_dscreate_with_different_rdn[dc=example,dc=com] 15.52
-----------------------------Captured stdout setup------------------------------
Removing instance ... Completed instance removal
------------------------------Captured stdout call------------------------------
Starting installation... Completed installation for test_different_rdn
Passed suites/basic/basic_test.py::test_dscreate_with_different_rdn[o=south] 15.02
-----------------------------Captured stdout setup------------------------------
Removing instance ... Completed instance removal
------------------------------Captured stdout call------------------------------
Starting installation... Completed installation for test_different_rdn
Passed suites/basic/basic_test.py::test_dscreate_with_different_rdn[ou=sales] 15.91
-----------------------------Captured stdout setup------------------------------
Removing instance ... Completed instance removal
------------------------------Captured stdout call------------------------------
Starting installation... Completed installation for test_different_rdn
Passed suites/basic/basic_test.py::test_dscreate_with_different_rdn[wrong=some_value] 14.02
-----------------------------Captured stdout setup------------------------------
Removing instance ... Completed instance removal
------------------------------Captured stdout call------------------------------
Starting installation... Error: Instance creation failed! Suffix RDN 'wrong' in 'wrong=some_value' is not supported. Supported RDN's are: 'c', 'cn', 'dc', 'o', and 'ou'
-------------------------------Captured log call--------------------------------
CRITICAL tests.suites.basic.basic_test:basic_test.py:1474 dscreate failed! Error (1) None
Passed suites/betxns/betxn_test.py::test_betxt_7bit 5.35
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
-------------------------------Captured log call--------------------------------
INFO  tests.suites.betxns.betxn_test:betxn_test.py:52 Running test_betxt_7bit... INFO  tests.suites.betxns.betxn_test:betxn_test.py:78 test_betxt_7bit: PASSED
Passed suites/betxns/betxn_test.py::test_betxn_attr_uniqueness 3.96
-------------------------------Captured log call--------------------------------
INFO  tests.suites.betxns.betxn_test:betxn_test.py:133 test_betxn_attr_uniqueness: PASSED
Passed suites/betxns/betxn_test.py::test_betxn_memberof 4.71
-------------------------------Captured log call--------------------------------
INFO  tests.suites.betxns.betxn_test:betxn_test.py:179 test_betxn_memberof: PASSED
Passed suites/betxns/betxn_test.py::test_betxn_modrdn_memberof_cache_corruption 4.77
-------------------------------Captured log call--------------------------------
INFO  tests.suites.betxns.betxn_test:betxn_test.py:233 test_betxn_modrdn_memberof: PASSED
Passed suites/betxns/betxn_test.py::test_ri_and_mep_cache_corruption 0.33
-------------------------------Captured log call--------------------------------
INFO  tests.suites.betxns.betxn_test:betxn_test.py:357 Test PASSED
Passed suites/clu/clu_test.py::test_clu_pwdhash 0.04
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
-------------------------------Captured log call--------------------------------
INFO  tests.suites.clu.clu_test:clu_test.py:40 Running test_clu_pwdhash... INFO  tests.suites.clu.clu_test:clu_test.py:54 pwdhash generated: {SSHA}v0cP7X9DRrwnAToNig3DM3wh1YYMSChu5lBJpQ== INFO  tests.suites.clu.clu_test:clu_test.py:55 test_clu_pwdhash: PASSED
Passed suites/clu/clu_test.py::test_clu_pwdhash_mod 0.06
-------------------------------Captured log call--------------------------------
INFO  tests.suites.clu.clu_test:clu_test.py:78 Running test_clu_pwdhash_mod... INFO  tests.suites.clu.clu_test:clu_test.py:87 pwdhash generated: {SSHA256}STBzvOUu1bcnqokgoCL2IvEyPj0G1h9kDKw6dcWO+2fnZzuoK7myLA== INFO  tests.suites.clu.clu_test:clu_test.py:88 returned the hashed string using the algorithm set in nsslapd-rootpwstoragescheme
Passed suites/clu/dbgen_test.py::test_dsconf_dbgen_users 6.45
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
-------------------------------Captured log call--------------------------------
INFO  tests.suites.clu.dbgen_test:dbgen_test.py:119 Run ldifgen to create users ldif INFO  tests.suites.clu.dbgen_test:dbgen.py:56 Generating LDIF with the following options: INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - suffix=dc=example,dc=com INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - parent=ou=people,dc=example,dc=com INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - number=1000 INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - rdn-cn=False INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - generic=True INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - start-idx=50 INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - localize=False INFO  tests.suites.clu.dbgen_test:dbgen.py:62 - ldif-file=/var/lib/dirsrv/slapd-standalone1/ldif/created.ldif INFO  tests.suites.clu.dbgen_test:dbgen.py:63 Writing LDIF ... INFO  tests.suites.clu.dbgen_test:dbgen.py:196 Successfully created LDIF file: /var/lib/dirsrv/slapd-standalone1/ldif/created.ldif INFO  tests.suites.clu.dbgen_test:dbgen_test.py:122 Check if file exists INFO  tests.suites.clu.dbgen_test:dbgen_test.py:67 Check if content is present in output INFO  tests.suites.clu.dbgen_test:dbgen_test.py:71 Reset log file for next test INFO  tests.suites.clu.dbgen_test:dbgen_test.py:127 Get number of accounts before import INFO  tests.suites.clu.dbgen_test:dbgen_test.py:48 Stopping the server and running offline import... INFO  tests.suites.clu.dbgen_test:dbgen_test.py:133 Check that accounts are imported
Passed suites/clu/dbgen_test.py::test_dsconf_dbgen_groups 33.13
------------------------------Captured stderr call------------------------------
ldap_add: Already exists (68) ldap_add: Already exists (68) ldap_add: Already exists (68)
-------------------------------Captured log call--------------------------------
INFO  tests.suites.clu.dbgen_test:dbgen_test.py:183 Run ldifgen to create group ldif INFO  tests.suites.clu.dbgen_test:dbgen.py:56 Generating LDIF with the following options: INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - NAME=myGroup INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - parent=ou=groups,dc=example,dc=com INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - suffix=dc=example,dc=com INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - number=1 INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - num-members=1000 INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - create-members=True INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - member-attr=uniquemember INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - member-parent=ou=people,dc=example,dc=com INFO  tests.suites.clu.dbgen_test:dbgen.py:62 - ldif-file=/var/lib/dirsrv/slapd-standalone1/ldif/created.ldif INFO  tests.suites.clu.dbgen_test:dbgen.py:63 Writing LDIF ... INFO  tests.suites.clu.dbgen_test:dbgen.py:250 Successfully created LDIF file: /var/lib/dirsrv/slapd-standalone1/ldif/created.ldif INFO  tests.suites.clu.dbgen_test:dbgen_test.py:186 Check if file exists INFO  tests.suites.clu.dbgen_test:dbgen_test.py:67 Check if content is present in output INFO  tests.suites.clu.dbgen_test:dbgen_test.py:71 Reset log file for next test INFO  tests.suites.clu.dbgen_test:dbgen_test.py:191 Get number of accounts before import INFO  tests.suites.clu.dbgen_test:dbgen_test.py:57 Add entries from ldif file with ldapmodify INFO  tests.suites.clu.dbgen_test:dbgen_test.py:200 Check that accounts are imported INFO  tests.suites.clu.dbgen_test:dbgen_test.py:203 Check that group is imported
Passed suites/clu/dbgen_test.py::test_dsconf_dbgen_cos_classic 0.13
-------------------------------Captured log call--------------------------------
INFO  tests.suites.clu.dbgen_test:dbgen_test.py:256 Run ldifgen to create COS definition ldif INFO  tests.suites.clu.dbgen_test:dbgen.py:56 Generating LDIF with the following options: INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - type=classic INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - NAME=My_Postal_Def INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - parent=ou=cos definitions,dc=example,dc=com INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - create-parent=True INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - cos-specifier=businessCategory INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - cos-attr=['postalcode', 'telephonenumber'] INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - cos-template=cn=sales,cn=classicCoS,dc=example,dc=com INFO  tests.suites.clu.dbgen_test:dbgen.py:62 - ldif-file=/var/lib/dirsrv/slapd-standalone1/ldif/created.ldif INFO  tests.suites.clu.dbgen_test:dbgen.py:63 Writing LDIF ... INFO  tests.suites.clu.dbgen_test:dbgen.py:304 Successfully created LDIF file: /var/lib/dirsrv/slapd-standalone1/ldif/created.ldif INFO  tests.suites.clu.dbgen_test:dbgen_test.py:259 Check if file exists INFO  tests.suites.clu.dbgen_test:dbgen_test.py:67 Check if content is present in output INFO  tests.suites.clu.dbgen_test:dbgen_test.py:71 Reset log file for next test INFO  tests.suites.clu.dbgen_test:dbgen_test.py:57 Add entries from ldif file with ldapmodify INFO  tests.suites.clu.dbgen_test:dbgen_test.py:267 Check that COS definition is imported
Passed suites/clu/dbgen_test.py::test_dsconf_dbgen_cos_pointer 0.03
-------------------------------Captured log call--------------------------------
INFO  tests.suites.clu.dbgen_test:dbgen_test.py:322 Run ldifgen to create COS definition ldif INFO  tests.suites.clu.dbgen_test:dbgen.py:56 Generating LDIF with the following options: INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - type=pointer INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - NAME=My_Postal_Def_pointer INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - parent=ou=cos pointer definitions,dc=example,dc=com INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - create-parent=True INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - cos-attr=['postalcode', 'telephonenumber'] INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - cos-template=cn=sales,cn=pointerCoS,dc=example,dc=com INFO  tests.suites.clu.dbgen_test:dbgen.py:62 - ldif-file=/var/lib/dirsrv/slapd-standalone1/ldif/created.ldif INFO  tests.suites.clu.dbgen_test:dbgen.py:63 Writing LDIF ... INFO  tests.suites.clu.dbgen_test:dbgen.py:304 Successfully created LDIF file: /var/lib/dirsrv/slapd-standalone1/ldif/created.ldif INFO  tests.suites.clu.dbgen_test:dbgen_test.py:325 Check if file exists INFO  tests.suites.clu.dbgen_test:dbgen_test.py:67 Check if content is present in output INFO  tests.suites.clu.dbgen_test:dbgen_test.py:71 Reset log file for next test INFO  tests.suites.clu.dbgen_test:dbgen_test.py:57 Add entries from ldif file with ldapmodify INFO  tests.suites.clu.dbgen_test:dbgen_test.py:333 Check that COS definition is imported
Passed suites/clu/dbgen_test.py::test_dsconf_dbgen_cos_indirect 0.03
-------------------------------Captured log call--------------------------------
INFO  tests.suites.clu.dbgen_test:dbgen_test.py:387 Run ldifgen to create COS definition ldif INFO  tests.suites.clu.dbgen_test:dbgen.py:56 Generating LDIF with the following options: INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - type=indirect INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - NAME=My_Postal_Def_indirect INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - parent=ou=cos indirect definitions,dc=example,dc=com INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - create-parent=True INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - cos-specifier=businessCategory INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - cos-attr=['postalcode', 'telephonenumber'] INFO  tests.suites.clu.dbgen_test:dbgen.py:62 - ldif-file=/var/lib/dirsrv/slapd-standalone1/ldif/created.ldif INFO  tests.suites.clu.dbgen_test:dbgen.py:63 Writing LDIF ... INFO  tests.suites.clu.dbgen_test:dbgen.py:304 Successfully created LDIF file: /var/lib/dirsrv/slapd-standalone1/ldif/created.ldif INFO  tests.suites.clu.dbgen_test:dbgen_test.py:390 Check if file exists INFO  tests.suites.clu.dbgen_test:dbgen_test.py:67 Check if content is present in output INFO  tests.suites.clu.dbgen_test:dbgen_test.py:71 Reset log file for next test INFO  tests.suites.clu.dbgen_test:dbgen_test.py:57 Add entries from ldif file with ldapmodify INFO  tests.suites.clu.dbgen_test:dbgen_test.py:398 Check that COS definition is imported
Passed suites/clu/dbgen_test.py::test_dsconf_dbgen_cos_template 0.03
-------------------------------Captured log call--------------------------------
INFO  tests.suites.clu.dbgen_test:dbgen_test.py:449 Run ldifgen to create COS template ldif INFO  tests.suites.clu.dbgen_test:dbgen.py:56 Generating LDIF with the following options: INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - NAME=My_Template INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - parent=ou=cos templates,dc=example,dc=com INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - create-parent=True INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - cos-priority=1 INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - cos-attr-val=postalcode:12345 INFO  tests.suites.clu.dbgen_test:dbgen.py:62 - ldif-file=/var/lib/dirsrv/slapd-standalone1/ldif/created.ldif INFO  tests.suites.clu.dbgen_test:dbgen.py:63 Writing LDIF ... INFO  tests.suites.clu.dbgen_test:dbgen.py:341 Successfully created LDIF file: /var/lib/dirsrv/slapd-standalone1/ldif/created.ldif INFO  tests.suites.clu.dbgen_test:dbgen_test.py:452 Check if file exists INFO  tests.suites.clu.dbgen_test:dbgen_test.py:67 Check if content is present in output INFO  tests.suites.clu.dbgen_test:dbgen_test.py:71 Reset log file for next test INFO  tests.suites.clu.dbgen_test:dbgen_test.py:57 Add entries from ldif file with ldapmodify INFO  tests.suites.clu.dbgen_test:dbgen_test.py:460 Check that COS template is imported
Passed suites/clu/dbgen_test.py::test_dsconf_dbgen_managed_role 0.13
-------------------------------Captured log call--------------------------------
INFO  tests.suites.clu.dbgen_test:dbgen_test.py:511 Run ldifgen to create managed role ldif INFO  tests.suites.clu.dbgen_test:dbgen.py:56 Generating LDIF with the following options: INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - NAME=My_Managed_Role INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - parent=ou=managed roles,dc=example,dc=com INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - create-parent=True INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - type=managed INFO  tests.suites.clu.dbgen_test:dbgen.py:62 - ldif-file=/var/lib/dirsrv/slapd-standalone1/ldif/created.ldif INFO  tests.suites.clu.dbgen_test:dbgen.py:63 Writing LDIF ... INFO  tests.suites.clu.dbgen_test:dbgen.py:391 Successfully created LDIF file: /var/lib/dirsrv/slapd-standalone1/ldif/created.ldif INFO  tests.suites.clu.dbgen_test:dbgen_test.py:514 Check if file exists INFO  tests.suites.clu.dbgen_test:dbgen_test.py:67 Check if content is present in output INFO  tests.suites.clu.dbgen_test:dbgen_test.py:71 Reset log file for next test INFO  tests.suites.clu.dbgen_test:dbgen_test.py:57 Add entries from ldif file with ldapmodify INFO  tests.suites.clu.dbgen_test:dbgen_test.py:522 Check that managed role is imported
Passed suites/clu/dbgen_test.py::test_dsconf_dbgen_filtered_role 0.03
-------------------------------Captured log call--------------------------------
INFO  tests.suites.clu.dbgen_test:dbgen_test.py:571 Run ldifgen to create filtered role ldif INFO  tests.suites.clu.dbgen_test:dbgen.py:56 Generating LDIF with the following options: INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - NAME=My_Filtered_Role INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - parent=ou=filtered roles,dc=example,dc=com INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - create-parent=True INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - type=filtered INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - filter="objectclass=posixAccount" INFO  tests.suites.clu.dbgen_test:dbgen.py:62 - ldif-file=/var/lib/dirsrv/slapd-standalone1/ldif/created.ldif INFO  tests.suites.clu.dbgen_test:dbgen.py:63 Writing LDIF ... INFO  tests.suites.clu.dbgen_test:dbgen.py:391 Successfully created LDIF file: /var/lib/dirsrv/slapd-standalone1/ldif/created.ldif INFO  tests.suites.clu.dbgen_test:dbgen_test.py:574 Check if file exists INFO  tests.suites.clu.dbgen_test:dbgen_test.py:67 Check if content is present in output INFO  tests.suites.clu.dbgen_test:dbgen_test.py:71 Reset log file for next test INFO  tests.suites.clu.dbgen_test:dbgen_test.py:57 Add entries from ldif file with ldapmodify INFO  tests.suites.clu.dbgen_test:dbgen_test.py:582 Check that filtered role is imported
Passed suites/clu/dbgen_test.py::test_dsconf_dbgen_nested_role 0.03
-------------------------------Captured log call--------------------------------
INFO  tests.suites.clu.dbgen_test:dbgen_test.py:632 Run ldifgen to create nested role ldif INFO  tests.suites.clu.dbgen_test:dbgen.py:56 Generating LDIF with the following options: INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - NAME=My_Nested_Role INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - parent=ou=nested roles,dc=example,dc=com INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - create-parent=True INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - type=nested INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - role-dn=['cn=some_role,ou=roles,dc=example,dc=com'] INFO  tests.suites.clu.dbgen_test:dbgen.py:62 - ldif-file=/var/lib/dirsrv/slapd-standalone1/ldif/created.ldif INFO  tests.suites.clu.dbgen_test:dbgen.py:63 Writing LDIF ... INFO  tests.suites.clu.dbgen_test:dbgen.py:391 Successfully created LDIF file: /var/lib/dirsrv/slapd-standalone1/ldif/created.ldif INFO  tests.suites.clu.dbgen_test:dbgen_test.py:635 Check if file exists INFO  tests.suites.clu.dbgen_test:dbgen_test.py:67 Check if content is present in output INFO  tests.suites.clu.dbgen_test:dbgen_test.py:71 Reset log file for next test INFO  tests.suites.clu.dbgen_test:dbgen_test.py:57 Add entries from ldif file with ldapmodify INFO  tests.suites.clu.dbgen_test:dbgen_test.py:643 Check that nested role is imported
Passed suites/clu/dbgen_test.py::test_dsconf_dbgen_mod_ldif_mixed 39.87
------------------------------Captured stderr call------------------------------
ldap_modify: Operation not allowed on RDN (67) ldap_modify: Operation not allowed on RDN (67) ldap_modify: Operation not allowed on RDN (67) ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) 
additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid 
DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN 
ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldap_rename: Invalid DN syntax (34) additional info: invalid RDN ldapmodify: extra lines at end (line 43453, entry "uid=user0999,dc=example,dc=com")
-------------------------------Captured log call--------------------------------
INFO  tests.suites.clu.dbgen_test:dbgen_test.py:702 Run ldifgen to create modification ldif INFO  tests.suites.clu.dbgen_test:dbgen.py:56 Generating LDIF with the following options: INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - parent=dc=example,dc=com INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - create-users=True INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - delete-users=True INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - create-parent=False INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - num-users=1000 INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - add-users=100 INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - del-users=999 INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - modrdn-users=100 INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - mod-users=10 INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - mod-attrs=['cn', 'uid', 'sn'] INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - randomize=False INFO  tests.suites.clu.dbgen_test:dbgen.py:62 - ldif-file=/var/lib/dirsrv/slapd-standalone1/ldif/created.ldif INFO  tests.suites.clu.dbgen_test:dbgen.py:63 Writing LDIF ... INFO  tests.suites.clu.dbgen_test:dbgen.py:467 Successfully created LDIF file: /var/lib/dirsrv/slapd-standalone1/ldif/created.ldif INFO  tests.suites.clu.dbgen_test:dbgen_test.py:705 Check if file exists INFO  tests.suites.clu.dbgen_test:dbgen_test.py:67 Check if content is present in output INFO  tests.suites.clu.dbgen_test:dbgen_test.py:71 Reset log file for next test INFO  tests.suites.clu.dbgen_test:dbgen_test.py:710 Get number of accounts before import INFO  tests.suites.clu.dbgen_test:dbgen_test.py:57 Add entries from ldif file with ldapmodify INFO  tests.suites.clu.dbgen_test:dbgen_test.py:719 Check that some accounts are imported
Passed suites/clu/dbgen_test.py::test_dsconf_dbgen_nested_ldif 24.90
------------------------------Captured stderr call------------------------------
ldap_add: Already exists (68)
-------------------------------Captured log call--------------------------------
INFO  tests.suites.clu.dbgen_test:dbgen_test.py:759 Run ldifgen to create nested ldif INFO  tests.suites.clu.dbgen_test:dbgen.py:56 Generating LDIF with the following options: INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - suffix=dc=example,dc=com INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - node-limit=100 INFO  tests.suites.clu.dbgen_test:dbgen.py:61 - num-users=600 INFO  tests.suites.clu.dbgen_test:dbgen.py:62 - ldif-file=/var/lib/dirsrv/slapd-standalone1/ldif/created.ldif INFO  tests.suites.clu.dbgen_test:dbgen.py:63 Writing LDIF ... INFO  tests.suites.clu.dbgen_test:dbgen.py:500 Successfully created nested LDIF file (/var/lib/dirsrv/slapd-standalone1/ldif/created.ldif) containing 6 nodes/subtrees INFO  tests.suites.clu.dbgen_test:dbgen_test.py:762 Check if file exists INFO  tests.suites.clu.dbgen_test:dbgen_test.py:67 Check if content is present in output INFO  tests.suites.clu.dbgen_test:dbgen_test.py:71 Reset log file for next test INFO  tests.suites.clu.dbgen_test:dbgen_test.py:767 Get number of accounts before import INFO  tests.suites.clu.dbgen_test:dbgen_test.py:57 Add entries from ldif file with ldapmodify INFO  tests.suites.clu.dbgen_test:dbgen_test.py:779 Check that accounts are imported
Passed suites/clu/dbmon_test.py::test_dsconf_dbmon 0.32
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
-------------------------------Captured log call--------------------------------
INFO  tests.suites.clu.dbmon_test:dbmon_test.py:164 Sanity check for syntax INFO  LogCapture:monitor.py:247 DB Monitor Report: 2020-10-22 19:37:37 INFO  LogCapture:monitor.py:248 -------------------------------------------------------- INFO  LogCapture:monitor.py:249 Database Cache: INFO  LogCapture:monitor.py:250 - Cache Hit Ratio: 70% INFO  LogCapture:monitor.py:251 - Free Space: 487.05 MB INFO  LogCapture:monitor.py:252 - Free Percentage: 100.0% INFO  LogCapture:monitor.py:253 - RO Page Drops: 0 INFO  LogCapture:monitor.py:254 - Pages In: 8 INFO  LogCapture:monitor.py:255 - Pages Out: 0 INFO  LogCapture:monitor.py:256 INFO  LogCapture:monitor.py:257 Normalized DN Cache: INFO  LogCapture:monitor.py:258 - Cache Hit Ratio: 78% INFO  LogCapture:monitor.py:259 - Free Space: 19.99 MB INFO  LogCapture:monitor.py:260 - Free Percentage: 100.0% INFO  LogCapture:monitor.py:261 - DN Count: 54 INFO  LogCapture:monitor.py:262 - Evictions: 0 INFO  LogCapture:monitor.py:263 INFO  LogCapture:monitor.py:264 Backends: INFO  LogCapture:monitor.py:266 - dc=example,dc=com (userRoot): INFO  LogCapture:monitor.py:267 - Entry Cache Hit Ratio: 57% INFO  LogCapture:monitor.py:268 - Entry Cache Count: 2 INFO  LogCapture:monitor.py:269 - Entry Cache Free Space: 1.31 GB INFO  LogCapture:monitor.py:270 - Entry Cache Free Percentage: 100.0% INFO  LogCapture:monitor.py:271 - Entry Cache Average Size: 3.78 KB INFO  LogCapture:monitor.py:272 - DN Cache Hit Ratio: 0% INFO  LogCapture:monitor.py:273 - DN Cache Count: 2 INFO  LogCapture:monitor.py:274 - DN Cache Free Space: 192.0 MB INFO  LogCapture:monitor.py:275 - DN Cache Free Percentage: 100.0% INFO  LogCapture:monitor.py:276 - DN Cache Average Size: 63.0 B INFO  LogCapture:monitor.py:286 INFO  tests.suites.clu.dbmon_test:dbmon_test.py:133 Clear the log INFO  tests.suites.clu.dbmon_test:dbmon_test.py:171 Sanity check for --indexes output INFO  LogCapture:monitor.py:247 DB Monitor Report: 2020-10-22 19:37:37 INFO  LogCapture:monitor.py:248 
-------------------------------------------------------- INFO  LogCapture:monitor.py:249 Database Cache: INFO  LogCapture:monitor.py:250 - Cache Hit Ratio: 70% INFO  LogCapture:monitor.py:251 - Free Space: 487.05 MB INFO  LogCapture:monitor.py:252 - Free Percentage: 100.0% INFO  LogCapture:monitor.py:253 - RO Page Drops: 0 INFO  LogCapture:monitor.py:254 - Pages In: 8 INFO  LogCapture:monitor.py:255 - Pages Out: 0 INFO  LogCapture:monitor.py:256 INFO  LogCapture:monitor.py:257 Normalized DN Cache: INFO  LogCapture:monitor.py:258 - Cache Hit Ratio: 78% INFO  LogCapture:monitor.py:259 - Free Space: 19.99 MB INFO  LogCapture:monitor.py:260 - Free Percentage: 100.0% INFO  LogCapture:monitor.py:261 - DN Count: 54 INFO  LogCapture:monitor.py:262 - Evictions: 0 INFO  LogCapture:monitor.py:263 INFO  LogCapture:monitor.py:264 Backends: INFO  LogCapture:monitor.py:266 - dc=example,dc=com (userRoot): INFO  LogCapture:monitor.py:267 - Entry Cache Hit Ratio: 57% INFO  LogCapture:monitor.py:268 - Entry Cache Count: 2 INFO  LogCapture:monitor.py:269 - Entry Cache Free Space: 1.31 GB INFO  LogCapture:monitor.py:270 - Entry Cache Free Percentage: 100.0% INFO  LogCapture:monitor.py:271 - Entry Cache Average Size: 3.78 KB INFO  LogCapture:monitor.py:272 - DN Cache Hit Ratio: 0% INFO  LogCapture:monitor.py:273 - DN Cache Count: 2 INFO  LogCapture:monitor.py:274 - DN Cache Free Space: 192.0 MB INFO  LogCapture:monitor.py:275 - DN Cache Free Percentage: 100.0% INFO  LogCapture:monitor.py:276 - DN Cache Average Size: 63.0 B INFO  LogCapture:monitor.py:278 - Indexes: INFO  LogCapture:monitor.py:280 - Index: aci.db INFO  LogCapture:monitor.py:281 - Cache Hit: 1 INFO  LogCapture:monitor.py:282 - Cache Miss: 2 INFO  LogCapture:monitor.py:283 - Page In: 2 INFO  LogCapture:monitor.py:284 - Page Out: 0 INFO  LogCapture:monitor.py:285 INFO  LogCapture:monitor.py:280 - Index: entryrdn.db INFO  LogCapture:monitor.py:281 - Cache Hit: 5 INFO  LogCapture:monitor.py:282 - Cache Miss: 2 INFO  
LogCapture:monitor.py:283 - Page In: 2 INFO  LogCapture:monitor.py:284 - Page Out: 0 INFO  LogCapture:monitor.py:285 INFO  LogCapture:monitor.py:280 - Index: id2entry.db INFO  LogCapture:monitor.py:281 - Cache Hit: 2 INFO  LogCapture:monitor.py:282 - Cache Miss: 2 INFO  LogCapture:monitor.py:283 - Page In: 2 INFO  LogCapture:monitor.py:284 - Page Out: 0 INFO  LogCapture:monitor.py:285 INFO  LogCapture:monitor.py:280 - Index: objectclass.db INFO  LogCapture:monitor.py:281 - Cache Hit: 11 INFO  LogCapture:monitor.py:282 - Cache Miss: 2 INFO  LogCapture:monitor.py:283 - Page In: 2 INFO  LogCapture:monitor.py:284 - Page Out: 0 INFO  LogCapture:monitor.py:285 INFO  LogCapture:monitor.py:286 INFO  tests.suites.clu.dbmon_test:dbmon_test.py:133 Clear the log INFO  tests.suites.clu.dbmon_test:dbmon_test.py:179 Sanity check for --json output INFO  LogCapture:monitor.py:245 { "date": "2020-10-22 19:37:37", "dbcache": { "hit_ratio": "70", "free": "487.05 MB", "free_percentage": "100.0", "roevicts": "0", "pagein": "8", "pageout": "0" }, "ndncache": { "hit_ratio": "78", "free": "19.99 MB", "free_percentage": "100.0", "count": "54", "evictions": "0" }, "backends": { "userRoot": { "suffix": "dc=example,dc=com", "entry_cache_count": "2", "entry_cache_free": "1.31 GB", "entry_cache_free_percentage": "100.0", "entry_cache_size": "3.78 KB", "entry_cache_hit_ratio": "57", "dn_cache_count": "2", "dn_cache_free": "192.0 MB", "dn_cache_free_percentage": "100.0", "dn_cache_size": "63.0 B", "dn_cache_hit_ratio": "0", "indexes": [ { "name": "aci.db", "cachehit": "1", "cachemiss": "2", "pagein": "2", "pageout": "0" }, { "name": "entryrdn.db", "cachehit": "5", "cachemiss": "2", "pagein": "2", "pageout": "0" }, { "name": "id2entry.db", "cachehit": "2", "cachemiss": "2", "pagein": "2", "pageout": "0" }, { "name": "objectclass.db", "cachehit": "11", "cachemiss": "2", "pagein": "2", "pageout": "0" } ] } } } INFO  tests.suites.clu.dbmon_test:dbmon_test.py:133 Clear the log
Passed suites/clu/dbverify_test.py::test_dsctl_dbverify 2.50
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
------------------------------Captured stderr call------------------------------
[22/Oct/2020:19:37:46.654206280 -0400] - INFO - ldbm_instance_config_cachememsize_set - force a minimal value 512000
-------------------------------Captured log call--------------------------------
INFO  tests.suites.clu.dbverify_test:dbverify_test.py:63 Run dbverify INFO  tests.suites.clu.dbverify_test:dbtasks.py:88 dbverify successful INFO  tests.suites.clu.dbverify_test:dbverify_test.py:67 Check dbverify was successful
Passed suites/clu/dsidm_config_test.py::test_dsidm_config_sssd 4.79
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
------------------------------Captured stdout call------------------------------
# # sssd.conf # Generated by 389 Directory Server - dsidm # # For more details see man sssd.conf and man sssd-ldap # Be sure to review the content of this file to ensure it is secure and correct # in your environment. [domain/ldap] # Uncomment this for more verbose logging. # debug_level=3 # Cache hashes of user authentication for offline auth. cache_credentials = True id_provider = ldap auth_provider = ldap access_provider = ldap chpass_provider = ldap ldap_schema = rfc2307 ldap_search_base = dc=example,dc=com ldap_uri = ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:38901 # If you have DNS SRV records, you can use the following instead. This derives # from your ldap_search_base. # ldap_uri = _srv_ ldap_tls_reqcert = demand # To use cacert dir, place *.crt files in this path then run: # /usr/bin/openssl rehash /etc/openldap/certs # or (for older versions of openssl) # /usr/bin/c_rehash /etc/openldap/certs ldap_tls_cacertdir = /etc/openldap/certs # Path to the cacert # ldap_tls_cacert = /etc/openldap/certs/ca.crt # Only users who match this filter can login and authorise to this machine. Note # that users who do NOT match, will still have their uid/gid resolve, but they # can't login. # ldap_access_filter = (memberOf=<dn>) enumerate = false access_provider = ldap ldap_user_member_of = memberof ldap_user_gecos = cn ldap_user_uuid = nsUniqueId ldap_group_uuid = nsUniqueId # This is really important as it allows SSSD to respect nsAccountLock ldap_account_expire_policy = rhds ldap_access_order = filter, expire # Setup for ssh keys # Inside /etc/ssh/sshd_config add the lines: # AuthorizedKeysCommand /usr/bin/sss_ssh_authorizedkeys # AuthorizedKeysCommandUser nobody # You can test with the command: sss_ssh_authorizedkeys <username> ldap_user_ssh_public_key = nsSshPublicKey # This prevents an issue where the Directory is recursively walked on group # and user look ups. It makes the client faster and more responsive in almost # every scenario. 
ignore_group_members = False [sssd] services = nss, pam, ssh, sudo config_file_version = 2 domains = ldap [nss] homedir_substring = /home # # sssd.conf # Generated by 389 Directory Server - dsidm # # For more details see man sssd.conf and man sssd-ldap # Be sure to review the content of this file to ensure it is secure and correct # in your environment. [domain/ldap] # Uncomment this for more verbose logging. # debug_level=3 # Cache hashes of user authentication for offline auth. cache_credentials = True id_provider = ldap auth_provider = ldap access_provider = ldap chpass_provider = ldap ldap_schema = rfc2307bis ldap_search_base = dc=example,dc=com ldap_uri = ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:38901 # If you have DNS SRV records, you can use the following instead. This derives # from your ldap_search_base. # ldap_uri = _srv_ ldap_tls_reqcert = demand # To use cacert dir, place *.crt files in this path then run: # /usr/bin/openssl rehash /etc/openldap/certs # or (for older versions of openssl) # /usr/bin/c_rehash /etc/openldap/certs ldap_tls_cacertdir = /etc/openldap/certs # Path to the cacert # ldap_tls_cacert = /etc/openldap/certs/ca.crt # Only users who match this filter can login and authorise to this machine. Note # that users who do NOT match, will still have their uid/gid resolve, but they # can't login. 
ldap_access_filter = (memberOf=cn=new_group,ou=Groups,dc=example,dc=com) enumerate = false access_provider = ldap ldap_user_member_of = memberof ldap_user_gecos = cn ldap_user_uuid = nsUniqueId ldap_group_uuid = nsUniqueId # This is really important as it allows SSSD to respect nsAccountLock ldap_account_expire_policy = rhds ldap_access_order = filter, expire # Setup for ssh keys # Inside /etc/ssh/sshd_config add the lines: # AuthorizedKeysCommand /usr/bin/sss_ssh_authorizedkeys # AuthorizedKeysCommandUser nobody # You can test with the command: sss_ssh_authorizedkeys <username> ldap_user_ssh_public_key = nsSshPublicKey # This prevents an issue where the Directory is recursively walked on group # and user look ups. It makes the client faster and more responsive in almost # every scenario. ignore_group_members = False [sssd] services = nss, pam, ssh, sudo config_file_version = 2 domains = ldap [nss] homedir_substring = /home
-------------------------------Captured log call--------------------------------
INFO  tests.suites.clu.dsidm_config_test:dsidm_config_test.py:102 Create sssd.conf content DEBUG  tests.suites.clu.dsidm_config_test:client_config.py:114 # # sssd.conf # Generated by 389 Directory Server - dsidm # # For more details see man sssd.conf and man sssd-ldap # Be sure to review the content of this file to ensure it is secure and correct # in your environment. [domain/ldap] # Uncomment this for more verbose logging. # debug_level=3 # Cache hashes of user authentication for offline auth. cache_credentials = True id_provider = ldap auth_provider = ldap access_provider = ldap chpass_provider = ldap ldap_schema = rfc2307 ldap_search_base = dc=example,dc=com ldap_uri = ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:38901 # If you have DNS SRV records, you can use the following instead. This derives # from your ldap_search_base. # ldap_uri = _srv_ ldap_tls_reqcert = demand # To use cacert dir, place *.crt files in this path then run: # /usr/bin/openssl rehash /etc/openldap/certs # or (for older versions of openssl) # /usr/bin/c_rehash /etc/openldap/certs ldap_tls_cacertdir = /etc/openldap/certs # Path to the cacert # ldap_tls_cacert = /etc/openldap/certs/ca.crt # Only users who match this filter can login and authorise to this machine. Note # that users who do NOT match, will still have their uid/gid resolve, but they # can't login. 
# ldap_access_filter = (memberOf=<dn>) enumerate = false access_provider = ldap ldap_user_member_of = memberof ldap_user_gecos = cn ldap_user_uuid = nsUniqueId ldap_group_uuid = nsUniqueId # This is really important as it allows SSSD to respect nsAccountLock ldap_account_expire_policy = rhds ldap_access_order = filter, expire # Setup for ssh keys # Inside /etc/ssh/sshd_config add the lines: # AuthorizedKeysCommand /usr/bin/sss_ssh_authorizedkeys # AuthorizedKeysCommandUser nobody # You can test with the command: sss_ssh_authorizedkeys <username> ldap_user_ssh_public_key = nsSshPublicKey # This prevents an issue where the Directory is recursively walked on group # and user look ups. It makes the client faster and more responsive in almost # every scenario. ignore_group_members = False [sssd] services = nss, pam, ssh, sudo config_file_version = 2 domains = ldap [nss] homedir_substring = /home INFO  tests.suites.clu.dsidm_config_test:dsidm_config_test.py:105 Check if config creation was successful INFO  tests.suites.clu.dsidm_config_test:dsidm_config_test.py:52 Check if content is present in output INFO  tests.suites.clu.dsidm_config_test:dsidm_config_test.py:57 Check if value is present in output INFO  tests.suites.clu.dsidm_config_test:dsidm_config_test.py:60 Reset log file for next test INFO  tests.suites.clu.dsidm_config_test:dsidm_config_test.py:108 Now we test allowed_group argument INFO  tests.suites.clu.dsidm_config_test:dsidm_config_test.py:109 Enable MemberOf plugin INFO  tests.suites.clu.dsidm_config_test:dsidm_config_test.py:114 Create test group INFO  tests.suites.clu.dsidm_config_test:dsidm_config_test.py:119 Create sssd.conf content with allowed group DEBUG  tests.suites.clu.dsidm_config_test:client_config.py:114 # # sssd.conf # Generated by 389 Directory Server - dsidm # # For more details see man sssd.conf and man sssd-ldap # Be sure to review the content of this file to ensure it is secure and correct # in your environment. 
[domain/ldap] # Uncomment this for more verbose logging. # debug_level=3 # Cache hashes of user authentication for offline auth. cache_credentials = True id_provider = ldap auth_provider = ldap access_provider = ldap chpass_provider = ldap ldap_schema = rfc2307bis ldap_search_base = dc=example,dc=com ldap_uri = ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:38901 # If you have DNS SRV records, you can use the following instead. This derives # from your ldap_search_base. # ldap_uri = _srv_ ldap_tls_reqcert = demand # To use cacert dir, place *.crt files in this path then run: # /usr/bin/openssl rehash /etc/openldap/certs # or (for older versions of openssl) # /usr/bin/c_rehash /etc/openldap/certs ldap_tls_cacertdir = /etc/openldap/certs # Path to the cacert # ldap_tls_cacert = /etc/openldap/certs/ca.crt # Only users who match this filter can login and authorise to this machine. Note # that users who do NOT match, will still have their uid/gid resolve, but they # can't login. ldap_access_filter = (memberOf=cn=new_group,ou=Groups,dc=example,dc=com) enumerate = false access_provider = ldap ldap_user_member_of = memberof ldap_user_gecos = cn ldap_user_uuid = nsUniqueId ldap_group_uuid = nsUniqueId # This is really important as it allows SSSD to respect nsAccountLock ldap_account_expire_policy = rhds ldap_access_order = filter, expire # Setup for ssh keys # Inside /etc/ssh/sshd_config add the lines: # AuthorizedKeysCommand /usr/bin/sss_ssh_authorizedkeys # AuthorizedKeysCommandUser nobody # You can test with the command: sss_ssh_authorizedkeys <username> ldap_user_ssh_public_key = nsSshPublicKey # This prevents an issue where the Directory is recursively walked on group # and user look ups. It makes the client faster and more responsive in almost # every scenario. 
ignore_group_members = False [sssd] services = nss, pam, ssh, sudo config_file_version = 2 domains = ldap [nss] homedir_substring = /home INFO  tests.suites.clu.dsidm_config_test:dsidm_config_test.py:124 Check if config creation was successful INFO  tests.suites.clu.dsidm_config_test:dsidm_config_test.py:48 Check if content is present in output INFO  tests.suites.clu.dsidm_config_test:dsidm_config_test.py:60 Reset log file for next test
Passed suites/clu/dsidm_config_test.py::test_dsidm_config_ldap 0.00
------------------------------Captured stdout call------------------------------
# # OpenLDAP client configuration # Generated by 389 Directory Server - dsidm # # See ldap.conf(5) for details # This file should be world readable but not world writable. BASE dc=example,dc=com # Remember to check this: you can have multiple uris on this line. You may have # multiple servers or load balancers in your environment. URI ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:38901 # If you have DNS SRV records you can use: # URI ldaps:///dc%3Dexample%2Cdc%3Dcom DEREF never # To use cacert dir, place *.crt files in this path then run: # /usr/bin/openssl rehash /etc/openldap/certs # or (for older versions of openssl) # /usr/bin/c_rehash /etc/openldap/certs TLS_CACERTDIR /etc/openldap/certs # TLS_CACERT /etc/openldap/certs/ca.crt
-------------------------------Captured log call--------------------------------
INFO  tests.suites.clu.dsidm_config_test:dsidm_config_test.py:152 Create ldap.conf content DEBUG  tests.suites.clu.dsidm_config_test:client_config.py:155 # # OpenLDAP client configuration # Generated by 389 Directory Server - dsidm # # See ldap.conf(5) for details # This file should be world readable but not world writable. BASE dc=example,dc=com # Remember to check this: you can have multiple uris on this line. You may have # multiple servers or load balancers in your environment. URI ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:38901 # If you have DNS SRV records you can use: # URI ldaps:///dc%3Dexample%2Cdc%3Dcom DEREF never # To use cacert dir, place *.crt files in this path then run: # /usr/bin/openssl rehash /etc/openldap/certs # or (for older versions of openssl) # /usr/bin/c_rehash /etc/openldap/certs TLS_CACERTDIR /etc/openldap/certs # TLS_CACERT /etc/openldap/certs/ca.crt INFO  tests.suites.clu.dsidm_config_test:dsidm_config_test.py:155 Check if config creation was successful INFO  tests.suites.clu.dsidm_config_test:dsidm_config_test.py:52 Check if content is present in output INFO  tests.suites.clu.dsidm_config_test:dsidm_config_test.py:60 Reset log file for next test
Passed suites/clu/dsidm_config_test.py::test_dsidm_config_display 4.21
------------------------------Captured stdout call------------------------------
# This is a generic list of LDAP client configuration parameters you may require # for connecting a client to this server. Some of them may or may not apply # to your application, so consult your application documentation for further # assistance. # # This program makes a number of assumptions about your data and configuration # which may not be correct. Be sure to check these values for your situation. ; ldap uri ; This is the uri of the server you will connect to and authenticate to. It ; must be a valid subjectAltName in the presented TLS certificate. Note that this ; is not an exhaustive list of your LDAP servers, and other applications in your ; network like load balancers may affect this. This is just what we derive from ; your current connection. ldap_uri = ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:38901 ; ldap dns discovery uri ; In some environments, you may have DNS SRV records such as ; "_ldap._tcp.<domain name>". If these are present in your dns server, you can ; use the following uri. ldap_uri = ldaps:///dc%3Dexample%2Cdc%3Dcom ; ca_cert ; To correctly use TLS, you require the valid CA cert that issued your LDAP TLS ; certificates. Sometimes a copy of this may be in your server instance as ca_cert = /etc/dirsrv/slapd-<instance>/ca.crt ; However that's not guaranteed. You can show the certs from the LDAP server ; by sshing to the server and running: certutil -L -d /etc/dirsrv/slapd-<instance>/ ; If you can identify the CA certificate name, you can then view it with: certutil -L -n <ca cert name> -a -d /etc/dirsrv/slapd-<instance>/ ; This should be a pem file you can use in your application's CA. ; Some applications don't require a ca certificate parameter, and will use the ; ca certificate from /etc/openldap/ldap.conf. You should configure ldap.conf ; in these cases. See the 'client_config ldap.conf' command in dsidm. ; basedn ; The basedn is the root suffix where all searches will originate from for ; LDAP objects. 
basedn = dc=example,dc=com ; schema_type ; LDAP servers have different ways to structure their objects and group ; relationships. Legacy servers will use rfc2307, where as modern servers will ; use rfc2307bis (requires MemberOf plugin to be enabled). This is the schema ; setting of your directory based on your running configuration (if we can ; detect it). schema_type = rfc2307bis ; user/account basedn ; Some applications may optionally use a user/account basedn to limit searches ; in the directory. This can be for performance or security reasons. Generally ; you shouldn't need this, preferring to use groups and filters for access ; control. user_basedn = ou=people,dc=example,dc=com ; user filter ; This is an ldap filter that will return only user objects. Additionally some ; applications will template into the filter (similar to sql statements) or they ; will generate the filter based on attributes. We list a number of possible ; filters you might use, but you should customise this for your application. ; ; If you are using rfc2307bis, you can use this filter to provide authorisation ; support by adding filters such as: (memberOf=<groupdn>) user_filter = (&(objectclass=nsPerson)(objectclass=nsAccount)(objectclass=nsOrgPerson)(objectclass=posixAccount)) user_filter = (&(&(objectclass=nsPerson)(objectclass=nsAccount)(objectclass=nsOrgPerson)(objectclass=posixAccount))(|(uid=<PARAM>)(displayName=<PARAM>)(cn=<PARAM>))) ; group basedn ; Some applications may optionnaly use a group basedn to limit searches in the ; directory. This can be for performance or security reasons. Generally you ; shouldn't need this, preferring to use groups and filters for access control. group_basedn = ou=Groups,dc=example,dc=com ; group filter ; This is an ldap filter that will return only group objects. Additionally ; some applications will template into the filter (similar to sql statements) ; or they will generate the filter base on attributes. 
We list a number of ; possible filters you might use, but you should customise this for your ; application. group_filter = (&(objectclass=groupOfNames)) group_filter = (&(&(objectclass=groupOfNames))(|(cn=<PARAM>))) ; attribute mappings ; Due to the variety of schemas and attribute mappings in LDAP, there are ; different representations of attributes and values. This is a guess at ; the mappings that exist in your server, and what attributes you should ; configure and use. unique id = nsUniqueId user rdn = uid user identifier = uid group rdn = cn group member attribute = member # This is a generic list of LDAP client configuration parameters you may require # for connecting a client to this server. Some of them may or may not apply # to your application, so consult your application documentation for further # assistance. # # This program makes a number of assumptions about your data and configuration # which may not be correct. Be sure to check these values for your situation. ; ldap uri ; This is the uri of the server you will connect to and authenticate to. It ; must be a valid subjectAltName in the presented TLS certificate. Note that this ; is not an exhaustive list of your LDAP servers, and other applications in your ; network like load balancers may affect this. This is just what we derive from ; your current connection. ldap_uri = ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:38901 ; ldap dns discovery uri ; In some environments, you may have DNS SRV records such as ; "_ldap._tcp.<domain name>". If these are present in your dns server, you can ; use the following uri. ldap_uri = ldaps:///dc%3Dexample%2Cdc%3Dcom ; ca_cert ; To correctly use TLS, you require the valid CA cert that issued your LDAP TLS ; certificates. Sometimes a copy of this may be in your server instance as ca_cert = /etc/dirsrv/slapd-<instance>/ca.crt ; However that's not guaranteed. 
You can show the certs from the LDAP server ; by sshing to the server and running: certutil -L -d /etc/dirsrv/slapd-<instance>/ ; If you can identify the CA certificate name, you can then view it with: certutil -L -n <ca cert name> -a -d /etc/dirsrv/slapd-<instance>/ ; This should be a pem file you can use in your application's CA. ; Some applications don't require a ca certificate parameter, and will use the ; ca certificate from /etc/openldap/ldap.conf. You should configure ldap.conf ; in these cases. See the 'client_config ldap.conf' command in dsidm. ; basedn ; The basedn is the root suffix where all searches will originate from for ; LDAP objects. basedn = dc=example,dc=com ; schema_type ; LDAP servers have different ways to structure their objects and group ; relationships. Legacy servers will use rfc2307, where as modern servers will ; use rfc2307bis (requires MemberOf plugin to be enabled). This is the schema ; setting of your directory based on your running configuration (if we can ; detect it). schema_type = rfc2307bis ; user/account basedn ; Some applications may optionally use a user/account basedn to limit searches ; in the directory. This can be for performance or security reasons. Generally ; you shouldn't need this, preferring to use groups and filters for access ; control. user_basedn = ou=people,dc=example,dc=com ; user filter ; This is an ldap filter that will return only user objects. Additionally some ; applications will template into the filter (similar to sql statements) or they ; will generate the filter based on attributes. We list a number of possible ; filters you might use, but you should customise this for your application. 
; ; If you are using rfc2307bis, you can use this filter to provide authorisation ; support by adding filters such as: (memberOf=<groupdn>) user_filter = (&(objectclass=nsPerson)(objectclass=nsAccount)(objectclass=nsOrgPerson)(objectclass=posixAccount)) user_filter = (&(&(objectclass=nsPerson)(objectclass=nsAccount)(objectclass=nsOrgPerson)(objectclass=posixAccount))(|(uid=<PARAM>)(displayName=<PARAM>)(cn=<PARAM>))) ; group basedn ; Some applications may optionnaly use a group basedn to limit searches in the ; directory. This can be for performance or security reasons. Generally you ; shouldn't need this, preferring to use groups and filters for access control. group_basedn = ou=Groups,dc=example,dc=com ; group filter ; This is an ldap filter that will return only group objects. Additionally ; some applications will template into the filter (similar to sql statements) ; or they will generate the filter base on attributes. We list a number of ; possible filters you might use, but you should customise this for your ; application. group_filter = (&(objectclass=groupOfNames)) group_filter = (&(&(objectclass=groupOfNames))(|(cn=<PARAM>))) ; attribute mappings ; Due to the variety of schemas and attribute mappings in LDAP, there are ; different representations of attributes and values. This is a guess at ; the mappings that exist in your server, and what attributes you should ; configure and use. unique id = nsUniqueId user rdn = uid user identifier = uid group rdn = cn group member attribute = member
-------------------------------Captured log call--------------------------------
INFO  tests.suites.clu.dsidm_config_test:dsidm_config_test.py:196 Test dsidm display option DEBUG  tests.suites.clu.dsidm_config_test:client_config.py:290 # This is a generic list of LDAP client configuration parameters you may require # for connecting a client to this server. Some of them may or may not apply # to your application, so consult your application documentation for further # assistance. # # This program makes a number of assumptions about your data and configuration # which may not be correct. Be sure to check these values for your situation. ; ldap uri ; This is the uri of the server you will connect to and authenticate to. It ; must be a valid subjectAltName in the presented TLS certificate. Note that this ; is not an exhaustive list of your LDAP servers, and other applications in your ; network like load balancers may affect this. This is just what we derive from ; your current connection. ldap_uri = ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:38901 ; ldap dns discovery uri ; In some environments, you may have DNS SRV records such as ; "_ldap._tcp.<domain name>". If these are present in your dns server, you can ; use the following uri. ldap_uri = ldaps:///dc%3Dexample%2Cdc%3Dcom ; ca_cert ; To correctly use TLS, you require the valid CA cert that issued your LDAP TLS ; certificates. Sometimes a copy of this may be in your server instance as ca_cert = /etc/dirsrv/slapd-<instance>/ca.crt ; However that's not guaranteed. You can show the certs from the LDAP server ; by sshing to the server and running: certutil -L -d /etc/dirsrv/slapd-<instance>/ ; If you can identify the CA certificate name, you can then view it with: certutil -L -n <ca cert name> -a -d /etc/dirsrv/slapd-<instance>/ ; This should be a pem file you can use in your application's CA. ; Some applications don't require a ca certificate parameter, and will use the ; ca certificate from /etc/openldap/ldap.conf. You should configure ldap.conf ; in these cases. 
See the 'client_config ldap.conf' command in dsidm. ; basedn ; The basedn is the root suffix where all searches will originate from for ; LDAP objects. basedn = dc=example,dc=com ; schema_type ; LDAP servers have different ways to structure their objects and group ; relationships. Legacy servers will use rfc2307, where as modern servers will ; use rfc2307bis (requires MemberOf plugin to be enabled). This is the schema ; setting of your directory based on your running configuration (if we can ; detect it). schema_type = rfc2307bis ; user/account basedn ; Some applications may optionally use a user/account basedn to limit searches ; in the directory. This can be for performance or security reasons. Generally ; you shouldn't need this, preferring to use groups and filters for access ; control. user_basedn = ou=people,dc=example,dc=com ; user filter ; This is an ldap filter that will return only user objects. Additionally some ; applications will template into the filter (similar to sql statements) or they ; will generate the filter based on attributes. We list a number of possible ; filters you might use, but you should customise this for your application. ; ; If you are using rfc2307bis, you can use this filter to provide authorisation ; support by adding filters such as: (memberOf=<groupdn>) user_filter = (&(objectclass=nsPerson)(objectclass=nsAccount)(objectclass=nsOrgPerson)(objectclass=posixAccount)) user_filter = (&(&(objectclass=nsPerson)(objectclass=nsAccount)(objectclass=nsOrgPerson)(objectclass=posixAccount))(|(uid=<PARAM>)(displayName=<PARAM>)(cn=<PARAM>))) ; group basedn ; Some applications may optionnaly use a group basedn to limit searches in the ; directory. This can be for performance or security reasons. Generally you ; shouldn't need this, preferring to use groups and filters for access control. group_basedn = ou=Groups,dc=example,dc=com ; group filter ; This is an ldap filter that will return only group objects. 
Additionally ; some applications will template into the filter (similar to sql statements) ; or they will generate the filter base on attributes. We list a number of ; possible filters you might use, but you should customise this for your ; application. group_filter = (&(objectclass=groupOfNames)) group_filter = (&(&(objectclass=groupOfNames))(|(cn=<PARAM>))) ; attribute mappings ; Due to the variety of schemas and attribute mappings in LDAP, there are ; different representations of attributes and values. This is a guess at ; the mappings that exist in your server, and what attributes you should ; configure and use. unique id = nsUniqueId user rdn = uid user identifier = uid group rdn = cn group member attribute = member INFO  tests.suites.clu.dsidm_config_test:dsidm_config_test.py:199 Check if display option was successful INFO  tests.suites.clu.dsidm_config_test:dsidm_config_test.py:52 Check if content is present in output INFO  tests.suites.clu.dsidm_config_test:dsidm_config_test.py:57 Check if value is present in output INFO  tests.suites.clu.dsidm_config_test:dsidm_config_test.py:60 Reset log file for next test INFO  tests.suites.clu.dsidm_config_test:dsidm_config_test.py:202 Enable MemberOf plugin INFO  tests.suites.clu.dsidm_config_test:dsidm_config_test.py:207 Test dsidm display option with MemberOf plugin DEBUG  tests.suites.clu.dsidm_config_test:client_config.py:290 # This is a generic list of LDAP client configuration parameters you may require # for connecting a client to this server. Some of them may or may not apply # to your application, so consult your application documentation for further # assistance. # # This program makes a number of assumptions about your data and configuration # which may not be correct. Be sure to check these values for your situation. ; ldap uri ; This is the uri of the server you will connect to and authenticate to. It ; must be a valid subjectAltName in the presented TLS certificate. 
Note that this ; is not an exhaustive list of your LDAP servers, and other applications in your ; network like load balancers may affect this. This is just what we derive from ; your current connection. ldap_uri = ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:38901 ; ldap dns discovery uri ; In some environments, you may have DNS SRV records such as ; "_ldap._tcp.<domain name>". If these are present in your dns server, you can ; use the following uri. ldap_uri = ldaps:///dc%3Dexample%2Cdc%3Dcom ; ca_cert ; To correctly use TLS, you require the valid CA cert that issued your LDAP TLS ; certificates. Sometimes a copy of this may be in your server instance as ca_cert = /etc/dirsrv/slapd-<instance>/ca.crt ; However that's not guaranteed. You can show the certs from the LDAP server ; by sshing to the server and running: certutil -L -d /etc/dirsrv/slapd-<instance>/ ; If you can identify the CA certificate name, you can then view it with: certutil -L -n <ca cert name> -a -d /etc/dirsrv/slapd-<instance>/ ; This should be a pem file you can use in your application's CA. ; Some applications don't require a ca certificate parameter, and will use the ; ca certificate from /etc/openldap/ldap.conf. You should configure ldap.conf ; in these cases. See the 'client_config ldap.conf' command in dsidm. ; basedn ; The basedn is the root suffix where all searches will originate from for ; LDAP objects. basedn = dc=example,dc=com ; schema_type ; LDAP servers have different ways to structure their objects and group ; relationships. Legacy servers will use rfc2307, where as modern servers will ; use rfc2307bis (requires MemberOf plugin to be enabled). This is the schema ; setting of your directory based on your running configuration (if we can ; detect it). schema_type = rfc2307bis ; user/account basedn ; Some applications may optionally use a user/account basedn to limit searches ; in the directory. This can be for performance or security reasons. 
Generally ; you shouldn't need this, preferring to use groups and filters for access ; control. user_basedn = ou=people,dc=example,dc=com ; user filter ; This is an ldap filter that will return only user objects. Additionally some ; applications will template into the filter (similar to sql statements) or they ; will generate the filter based on attributes. We list a number of possible ; filters you might use, but you should customise this for your application. ; ; If you are using rfc2307bis, you can use this filter to provide authorisation ; support by adding filters such as: (memberOf=<groupdn>) user_filter = (&(objectclass=nsPerson)(objectclass=nsAccount)(objectclass=nsOrgPerson)(objectclass=posixAccount)) user_filter = (&(&(objectclass=nsPerson)(objectclass=nsAccount)(objectclass=nsOrgPerson)(objectclass=posixAccount))(|(uid=<PARAM>)(displayName=<PARAM>)(cn=<PARAM>))) ; group basedn ; Some applications may optionnaly use a group basedn to limit searches in the ; directory. This can be for performance or security reasons. Generally you ; shouldn't need this, preferring to use groups and filters for access control. group_basedn = ou=Groups,dc=example,dc=com ; group filter ; This is an ldap filter that will return only group objects. Additionally ; some applications will template into the filter (similar to sql statements) ; or they will generate the filter base on attributes. We list a number of ; possible filters you might use, but you should customise this for your ; application. group_filter = (&(objectclass=groupOfNames)) group_filter = (&(&(objectclass=groupOfNames))(|(cn=<PARAM>))) ; attribute mappings ; Due to the variety of schemas and attribute mappings in LDAP, there are ; different representations of attributes and values. This is a guess at ; the mappings that exist in your server, and what attributes you should ; configure and use. 
unique id = nsUniqueId user rdn = uid user identifier = uid group rdn = cn group member attribute = member INFO  tests.suites.clu.dsidm_config_test:dsidm_config_test.py:210 Check if display option was successful with MemberOf plugin enabled INFO  tests.suites.clu.dsidm_config_test:dsidm_config_test.py:52 Check if content is present in output INFO  tests.suites.clu.dsidm_config_test:dsidm_config_test.py:57 Check if value is present in output INFO  tests.suites.clu.dsidm_config_test:dsidm_config_test.py:60 Reset log file for next test
Passed suites/clu/fixup_test.py::test_posix_winsync_fixup 6.36
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
-------------------------------Captured log call--------------------------------
INFO  tests.suites.clu.fixup_test:fixup_test.py:73 Enable POSIXWinsyncPlugin INFO  tests.suites.clu.fixup_test:fixup_test.py:77 Stopping the server and importing posix accounts INFO  tests.suites.clu.fixup_test:fixup_test.py:87 Run Fixup task INFO  tests.suites.clu.fixup_test:posix_winsync.py:29 Attempting to add task entry... INFO  tests.suites.clu.fixup_test:posix_winsync.py:39 Successfully added task entry INFO  tests.suites.clu.fixup_test:fixup_test.py:90 Check log if fixup task was successful
Passed suites/clu/repl_monitor_test.py::test_dsconf_replication_monitor 0.53
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39001, 'ldap-secureport': 63701, 'server-id': 'master1', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39002, 'ldap-secureport': 63702, 'server-id': 'master2', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:148 Creating replication topology. INFO  lib389.topologies:topologies.py:162 Joining master master2 to master1 ... INFO  lib389.replica:replica.py:2101 SUCCESS: bootstrap to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 completed INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is was created INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is was created INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is NOT working (expect 471e9e9a-b3f4-481b-94fb-50b7ffd427e9 / got description=None) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is working INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is NOT working (expect 18e18b42-3505-47e8-9164-9d53dea9fbca / got description=471e9e9a-b3f4-481b-94fb-50b7ffd427e9) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to 
ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is working INFO  lib389.replica:replica.py:2170 SUCCESS: joined master from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 INFO  lib389.topologies:topologies.py:170 Ensuring master master1 to master2 ... INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 already exists INFO  lib389.topologies:topologies.py:170 Ensuring master master2 to master1 ... INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 already exists
-------------------------------Captured log call--------------------------------
INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:170 Run replication monitor with connections option DEBUG  tests.suites.clu.repl_monitor_test:dsrc.py:76 dsrc path: /root/.dsrc DEBUG  tests.suites.clu.repl_monitor_test:dsrc.py:77 dsrc container path: /data/config/container.inf DEBUG  tests.suites.clu.repl_monitor_test:dsrc.py:85 dsrc instances: [] DEBUG  tests.suites.clu.repl_monitor_test:dsrc.py:210 dsrc completed with {'connections': None, 'aliases': None} INFO  tests.suites.clu.repl_monitor_test:replication.py:438 Supplier: ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 INFO  tests.suites.clu.repl_monitor_test:replication.py:443 ---------------------------------------------------------------- INFO  tests.suites.clu.repl_monitor_test:replication.py:455 Replica Root: dc=example,dc=com INFO  tests.suites.clu.repl_monitor_test:replication.py:456 Replica ID: 1 INFO  tests.suites.clu.repl_monitor_test:replication.py:457 Replica Status: Available INFO  tests.suites.clu.repl_monitor_test:replication.py:458 Max CSN: 5f9217f7000000010000 INFO  tests.suites.clu.repl_monitor_test:replication.py:461 Status For Agreement: "002" (ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002) Replica Enabled: on Update In Progress: FALSE Last Update Start: 20201022233832Z Last Update End: 20201022233832Z Number Of Changes Sent: 1:2/0 Number Of Changes Skipped: None Last Update Status: Error (0) Replica acquired successfully: Incremental update succeeded Last Init Start: 19700101000000Z Last Init End: 19700101000000Z Last Init Status: unavailable Reap Active: 0 Replication Status: In Synchronization Replication Lag Time: 00:00:00 INFO  tests.suites.clu.repl_monitor_test:replication.py:438 Supplier: ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 INFO  tests.suites.clu.repl_monitor_test:replication.py:443 ---------------------------------------------------------------- INFO  tests.suites.clu.repl_monitor_test:replication.py:455 Replica Root: dc=example,dc=com 
INFO  tests.suites.clu.repl_monitor_test:replication.py:456 Replica ID: 2 INFO  tests.suites.clu.repl_monitor_test:replication.py:457 Replica Status: Available INFO  tests.suites.clu.repl_monitor_test:replication.py:458 Max CSN: 5f9217f8000000020000 INFO  tests.suites.clu.repl_monitor_test:replication.py:461 Status For Agreement: "001" (ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001) Replica Enabled: on Update In Progress: FALSE Last Update Start: 20201022233832Z Last Update End: 20201022233832Z Number Of Changes Sent: 2:1/0 Number Of Changes Skipped: None Last Update Status: Error (0) Replica acquired successfully: Incremental update succeeded Last Init Start: 19700101000000Z Last Init End: 19700101000000Z Last Init Status: unavailable Reap Active: 0 Replication Status: In Synchronization Replication Lag Time: 00:00:00 INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Replica Root: dc=example,dc=com" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Replica ID: 1" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Replica Status: Available" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Max CSN" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Status For Agreement: "002" (ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002)" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Replica Enabled: on" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Update In Progress: FALSE" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Last Update Start:" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Last Update End:" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Number Of Changes Sent:" is 
present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Number Of Changes Skipped: None" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Last Update Status: Error (0) Replica acquired successfully: Incremental update succeeded" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Last Init Start:" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Last Init End:" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Last Init Status:" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Reap Active: 0" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Replication Status: In Synchronization" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Replication Lag Time:" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Supplier: " is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Replica Root: dc=example,dc=com" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Replica ID: 2" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Status For Agreement: "001" (ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001)" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:52 Check for "Supplier: ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001" INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:60 Reset log file INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:174 Run replication monitor with aliases option DEBUG  tests.suites.clu.repl_monitor_test:dsrc.py:76 dsrc 
path: /root/.dsrc DEBUG  tests.suites.clu.repl_monitor_test:dsrc.py:77 dsrc container path: /data/config/container.inf DEBUG  tests.suites.clu.repl_monitor_test:dsrc.py:85 dsrc instances: [] DEBUG  tests.suites.clu.repl_monitor_test:dsrc.py:210 dsrc completed with {'connections': None, 'aliases': None} INFO  tests.suites.clu.repl_monitor_test:replication.py:438 Supplier: M1 (ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001) INFO  tests.suites.clu.repl_monitor_test:replication.py:443 --------------------------------------------------------------------- INFO  tests.suites.clu.repl_monitor_test:replication.py:455 Replica Root: dc=example,dc=com INFO  tests.suites.clu.repl_monitor_test:replication.py:456 Replica ID: 1 INFO  tests.suites.clu.repl_monitor_test:replication.py:457 Replica Status: Available INFO  tests.suites.clu.repl_monitor_test:replication.py:458 Max CSN: 5f9217f7000000010000 INFO  tests.suites.clu.repl_monitor_test:replication.py:461 Status For Agreement: "002" (ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002) Replica Enabled: on Update In Progress: FALSE Last Update Start: 20201022233832Z Last Update End: 20201022233832Z Number Of Changes Sent: 1:2/0 Number Of Changes Skipped: None Last Update Status: Error (0) Replica acquired successfully: Incremental update succeeded Last Init Start: 19700101000000Z Last Init End: 19700101000000Z Last Init Status: unavailable Reap Active: 0 Replication Status: In Synchronization Replication Lag Time: 00:00:00 INFO  tests.suites.clu.repl_monitor_test:replication.py:438 Supplier: M2 (ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002) INFO  tests.suites.clu.repl_monitor_test:replication.py:443 --------------------------------------------------------------------- INFO  tests.suites.clu.repl_monitor_test:replication.py:455 Replica Root: dc=example,dc=com INFO  tests.suites.clu.repl_monitor_test:replication.py:456 Replica ID: 2 INFO  tests.suites.clu.repl_monitor_test:replication.py:457 Replica Status: 
Available INFO  tests.suites.clu.repl_monitor_test:replication.py:458 Max CSN: 5f9217f8000000020000 INFO  tests.suites.clu.repl_monitor_test:replication.py:461 Status For Agreement: "001" (ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001) Replica Enabled: on Update In Progress: FALSE Last Update Start: 20201022233832Z Last Update End: 20201022233832Z Number Of Changes Sent: 2:1/0 Number Of Changes Skipped: None Last Update Status: Error (0) Replica acquired successfully: Incremental update succeeded Last Init Start: 19700101000000Z Last Init End: 19700101000000Z Last Init Status: unavailable Reap Active: 0 Replication Status: In Synchronization Replication Lag Time: 00:00:00 INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Replica Root: dc=example,dc=com" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Replica ID: 1" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Replica Status: Available" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Max CSN" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Status For Agreement: "002" (ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002)" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Replica Enabled: on" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Update In Progress: FALSE" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Last Update Start:" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Last Update End:" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Number Of Changes Sent:" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Number Of Changes Skipped: None" is present INFO  
tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Last Update Status: Error (0) Replica acquired successfully: Incremental update succeeded" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Last Init Start:" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Last Init End:" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Last Init Status:" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Reap Active: 0" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Replication Status: In Synchronization" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Replication Lag Time:" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Supplier: " is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Replica Root: dc=example,dc=com" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Replica ID: 2" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Status For Agreement: "001" (ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001)" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:52 Check for "['Supplier: M1 (ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001)', 'Supplier: M2 (ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002)']" INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:60 Reset log file INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:179 Run replication monitor with --json option DEBUG  tests.suites.clu.repl_monitor_test:dsrc.py:76 dsrc path: /root/.dsrc DEBUG  
tests.suites.clu.repl_monitor_test:dsrc.py:77 dsrc container path: /data/config/container.inf DEBUG  tests.suites.clu.repl_monitor_test:dsrc.py:85 dsrc instances: [] DEBUG  tests.suites.clu.repl_monitor_test:dsrc.py:210 dsrc completed with {'connections': None, 'aliases': None} INFO  tests.suites.clu.repl_monitor_test:replication.py:468 { "type": "list", "items": [ { "name": "ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001", "data": [ { "replica_id": "1", "replica_root": "dc=example,dc=com", "replica_status": "Available", "maxcsn": "5f9217f7000000010000", "agmts_status": [ { "agmt-name": [ "002" ], "replica": [ "ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002" ], "replica-enabled": [ "on" ], "update-in-progress": [ "FALSE" ], "last-update-start": [ "20201022233832Z" ], "last-update-end": [ "20201022233832Z" ], "number-changes-sent": [ "1:2/0 " ], "number-changes-skipped": [ "unavailable" ], "last-update-status": [ "Error (0) Replica acquired successfully: Incremental update succeeded" ], "last-init-start": [ "19700101000000Z" ], "last-init-end": [ "19700101000000Z" ], "last-init-status": [ "unavailable" ], "reap-active": [ "0" ], "replication-status": [ "In Synchronization" ], "replication-lag-time": [ "00:00:00" ] } ] } ] }, { "name": "ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002", "data": [ { "replica_id": "2", "replica_root": "dc=example,dc=com", "replica_status": "Available", "maxcsn": "5f9217f8000000020000", "agmts_status": [ { "agmt-name": [ "001" ], "replica": [ "ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001" ], "replica-enabled": [ "on" ], "update-in-progress": [ "FALSE" ], "last-update-start": [ "20201022233832Z" ], "last-update-end": [ "20201022233832Z" ], "number-changes-sent": [ "2:1/0 " ], "number-changes-skipped": [ "unavailable" ], "last-update-status": [ "Error (0) Replica acquired successfully: Incremental update succeeded" ], "last-init-start": [ "19700101000000Z" ], "last-init-end": [ "19700101000000Z" ], 
"last-init-status": [ "unavailable" ], "reap-active": [ "0" ], "replication-status": [ "In Synchronization" ], "replication-lag-time": [ "00:00:00" ] } ] } ] } ] } INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "type" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "list" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "items" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "name" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "data" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that ""replica_id": "1"" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that ""replica_root": "dc=example,dc=com"" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that ""replica_status": "Available"" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "maxcsn" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "agmts_status" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "agmt-name" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "002" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "replica" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "replica-enabled" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "update-in-progress" is present INFO  
tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "last-update-start" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "last-update-end" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "number-changes-sent" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "number-changes-skipped" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "last-update-status" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Error (0) Replica acquired successfully: Incremental update succeeded" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "last-init-start" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "last-init-end" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "last-init-status" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "reap-active" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "replication-status" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "In Synchronization" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "replication-lag-time" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that ""replica_id": "2"" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "001" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:60 Reset log file INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:192 Run replication monitor when .dsrc file is present with content DEBUG  
tests.suites.clu.repl_monitor_test:dsrc.py:76 dsrc path: /root/.dsrc DEBUG  tests.suites.clu.repl_monitor_test:dsrc.py:77 dsrc container path: /data/config/container.inf DEBUG  tests.suites.clu.repl_monitor_test:dsrc.py:85 dsrc instances: ['repl-monitor-connections', 'repl-monitor-aliases'] DEBUG  tests.suites.clu.repl_monitor_test:dsrc.py:210 dsrc completed with {'connections': ['ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001:cn=Directory Manager:password', 'ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002:cn=Directory Manager:password'], 'aliases': {'M1': 'ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001', 'M2': 'ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002'}} INFO  tests.suites.clu.repl_monitor_test:replication.py:438 Supplier: M1 (ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001) INFO  tests.suites.clu.repl_monitor_test:replication.py:443 --------------------------------------------------------------------- INFO  tests.suites.clu.repl_monitor_test:replication.py:455 Replica Root: dc=example,dc=com INFO  tests.suites.clu.repl_monitor_test:replication.py:456 Replica ID: 1 INFO  tests.suites.clu.repl_monitor_test:replication.py:457 Replica Status: Available INFO  tests.suites.clu.repl_monitor_test:replication.py:458 Max CSN: 5f9217f7000000010000 INFO  tests.suites.clu.repl_monitor_test:replication.py:461 Status For Agreement: "002" (ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002) Replica Enabled: on Update In Progress: FALSE Last Update Start: 20201022233832Z Last Update End: 20201022233832Z Number Of Changes Sent: 1:2/0 Number Of Changes Skipped: None Last Update Status: Error (0) Replica acquired successfully: Incremental update succeeded Last Init Start: 19700101000000Z Last Init End: 19700101000000Z Last Init Status: unavailable Reap Active: 0 Replication Status: In Synchronization Replication Lag Time: 00:00:00 INFO  tests.suites.clu.repl_monitor_test:replication.py:438 Supplier: M2 
(ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002) INFO  tests.suites.clu.repl_monitor_test:replication.py:443 --------------------------------------------------------------------- INFO  tests.suites.clu.repl_monitor_test:replication.py:455 Replica Root: dc=example,dc=com INFO  tests.suites.clu.repl_monitor_test:replication.py:456 Replica ID: 2 INFO  tests.suites.clu.repl_monitor_test:replication.py:457 Replica Status: Available INFO  tests.suites.clu.repl_monitor_test:replication.py:458 Max CSN: 5f9217f8000000020000 INFO  tests.suites.clu.repl_monitor_test:replication.py:461 Status For Agreement: "001" (ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001) Replica Enabled: on Update In Progress: FALSE Last Update Start: 20201022233832Z Last Update End: 20201022233832Z Number Of Changes Sent: 2:1/0 Number Of Changes Skipped: None Last Update Status: Error (0) Replica acquired successfully: Incremental update succeeded Last Init Start: 19700101000000Z Last Init End: 19700101000000Z Last Init Status: unavailable Reap Active: 0 Replication Status: In Synchronization Replication Lag Time: 00:00:00 INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Replica Root: dc=example,dc=com" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Replica ID: 1" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Replica Status: Available" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Max CSN" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Status For Agreement: "002" (ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002)" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Replica Enabled: on" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Update In Progress: FALSE" is present INFO  
tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Last Update Start:" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Last Update End:" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Number Of Changes Sent:" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Number Of Changes Skipped: None" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Last Update Status: Error (0) Replica acquired successfully: Incremental update succeeded" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Last Init Start:" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Last Init End:" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Last Init Status:" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Reap Active: 0" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Replication Status: In Synchronization" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Replication Lag Time:" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Supplier: " is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Replica Root: dc=example,dc=com" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Replica ID: 2" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:48 Check that "Status For Agreement: "001" (ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001)" is present INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:52 Check for 
"['Supplier: M1 (ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001)', 'Supplier: M2 (ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002)']" INFO  tests.suites.clu.repl_monitor_test:repl_monitor_test.py:60 Reset log file
Passed suites/config/autotuning_test.py::test_threads_basic 0.02
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
-------------------------------Captured log call--------------------------------
INFO  tests.suites.config.autotuning_test:autotuning_test.py:39 Set nsslapd-threadnumber: -1 to enable autotuning INFO  tests.suites.config.autotuning_test:autotuning_test.py:42 Assert nsslapd-threadnumber is equal to the documented expected value
Passed suites/config/autotuning_test.py::test_threads_warning 1.07
No log output captured.
Passed suites/config/autotuning_test.py::test_threads_invalid_value[-2] 0.01
-------------------------------Captured log call--------------------------------
INFO  tests.suites.config.autotuning_test:autotuning_test.py:87 Set nsslapd-threadnumber: -2. Operation should fail
Passed suites/config/autotuning_test.py::test_threads_invalid_value[0] 0.16
-------------------------------Captured log call--------------------------------
INFO  tests.suites.config.autotuning_test:autotuning_test.py:87 Set nsslapd-threadnumber: 0. Operation should fail
Passed suites/config/autotuning_test.py::test_threads_invalid_value[invalid] 0.01
-------------------------------Captured log call--------------------------------
INFO  tests.suites.config.autotuning_test:autotuning_test.py:87 Set nsslapd-threadnumber: invalid. Operation should fail
Passed suites/config/autotuning_test.py::test_threads_back_from_manual_value 0.06
-------------------------------Captured log call--------------------------------
INFO  tests.suites.config.autotuning_test:autotuning_test.py:109 Set nsslapd-threadnumber: -1 to enable autotuning and save the new value INFO  tests.suites.config.autotuning_test:autotuning_test.py:113 Set nsslapd-threadnumber to the autotuned value decreased by 2 INFO  tests.suites.config.autotuning_test:autotuning_test.py:118 Set nsslapd-threadnumber: -1 to enable autotuning INFO  tests.suites.config.autotuning_test:autotuning_test.py:121 Assert nsslapd-threadnumber is back to the autotuned value
Passed suites/config/autotuning_test.py::test_cache_autosize_non_zero[-] 3.19
-------------------------------Captured log call--------------------------------
INFO  tests.suites.config.autotuning_test:autotuning_test.py:169 Check nsslapd-dbcachesize and nsslapd-cachememsize before the test INFO  tests.suites.config.autotuning_test:autotuning_test.py:170 nsslapd-dbcachesize == b'408620441' INFO  tests.suites.config.autotuning_test:autotuning_test.py:171 nsslapd-cachememsize == b'1409286144' INFO  tests.suites.config.autotuning_test:autotuning_test.py:172 nsslapd-dncachememsize == b'201326592' INFO  tests.suites.config.autotuning_test:autotuning_test.py:173 nsslapd-cache-autosize == b'25' INFO  tests.suites.config.autotuning_test:autotuning_test.py:174 nsslapd-cache-autosize-split == b'25' INFO  tests.suites.config.autotuning_test:autotuning_test.py:180 Delete nsslapd-cache-autosize INFO  tests.suites.config.autotuning_test:autotuning_test.py:190 Delete nsslapd-cache-autosize-split INFO  tests.suites.config.autotuning_test:autotuning_test.py:196 Trying to set nsslapd-cachememsize to 33333333 INFO  tests.suites.config.autotuning_test:autotuning_test.py:199 Trying to set nsslapd-dbcachesize to 33333333 INFO  tests.suites.config.autotuning_test:autotuning_test.py:216 Check nsslapd-dbcachesize and nsslapd-cachememsize in the appropriate range. INFO  tests.suites.config.autotuning_test:autotuning_test.py:217 nsslapd-dbcachesize == b'408620441' INFO  tests.suites.config.autotuning_test:autotuning_test.py:218 nsslapd-cachememsize == b'1409286144' INFO  tests.suites.config.autotuning_test:autotuning_test.py:219 nsslapd-dncachememsize == b'201326592' INFO  tests.suites.config.autotuning_test:autotuning_test.py:220 nsslapd-cache-autosize == b'25' INFO  tests.suites.config.autotuning_test:autotuning_test.py:221 nsslapd-cache-autosize-split == b'25'
Passed suites/config/autotuning_test.py::test_cache_autosize_non_zero[-0] 4.60
-------------------------------Captured log call--------------------------------
INFO  tests.suites.config.autotuning_test:autotuning_test.py:169 Check nsslapd-dbcachesize and nsslapd-cachememsize before the test INFO  tests.suites.config.autotuning_test:autotuning_test.py:170 nsslapd-dbcachesize == b'408620441' INFO  tests.suites.config.autotuning_test:autotuning_test.py:171 nsslapd-cachememsize == b'1409286144' INFO  tests.suites.config.autotuning_test:autotuning_test.py:172 nsslapd-dncachememsize == b'201326592' INFO  tests.suites.config.autotuning_test:autotuning_test.py:173 nsslapd-cache-autosize == b'25' INFO  tests.suites.config.autotuning_test:autotuning_test.py:174 nsslapd-cache-autosize-split == b'25' INFO  tests.suites.config.autotuning_test:autotuning_test.py:180 Delete nsslapd-cache-autosize INFO  tests.suites.config.autotuning_test:autotuning_test.py:187 Set nsslapd-cache-autosize-split to 0 INFO  tests.suites.config.autotuning_test:autotuning_test.py:196 Trying to set nsslapd-cachememsize to 33333333 INFO  tests.suites.config.autotuning_test:autotuning_test.py:199 Trying to set nsslapd-dbcachesize to 33333333 INFO  tests.suites.config.autotuning_test:autotuning_test.py:216 Check nsslapd-dbcachesize and nsslapd-cachememsize in the appropriate range. INFO  tests.suites.config.autotuning_test:autotuning_test.py:217 nsslapd-dbcachesize == b'408620441' INFO  tests.suites.config.autotuning_test:autotuning_test.py:218 nsslapd-cachememsize == b'1409286144' INFO  tests.suites.config.autotuning_test:autotuning_test.py:219 nsslapd-dncachememsize == b'201326592' INFO  tests.suites.config.autotuning_test:autotuning_test.py:220 nsslapd-cache-autosize == b'25' INFO  tests.suites.config.autotuning_test:autotuning_test.py:221 nsslapd-cache-autosize-split == b'0'
Passed suites/config/autotuning_test.py::test_cache_autosize_non_zero[10-400] 4.85
-------------------------------Captured log call--------------------------------
INFO  tests.suites.config.autotuning_test:autotuning_test.py:169 Check nsslapd-dbcachesize and nsslapd-cachememsize before the test INFO  tests.suites.config.autotuning_test:autotuning_test.py:170 nsslapd-dbcachesize == b'408620441' INFO  tests.suites.config.autotuning_test:autotuning_test.py:171 nsslapd-cachememsize == b'1409286144' INFO  tests.suites.config.autotuning_test:autotuning_test.py:172 nsslapd-dncachememsize == b'201326592' INFO  tests.suites.config.autotuning_test:autotuning_test.py:173 nsslapd-cache-autosize == b'25' INFO  tests.suites.config.autotuning_test:autotuning_test.py:174 nsslapd-cache-autosize-split == b'0' INFO  tests.suites.config.autotuning_test:autotuning_test.py:177 Set nsslapd-cache-autosize to 10 INFO  tests.suites.config.autotuning_test:autotuning_test.py:187 Set nsslapd-cache-autosize-split to 40 INFO  tests.suites.config.autotuning_test:autotuning_test.py:196 Trying to set nsslapd-cachememsize to 33333333 INFO  tests.suites.config.autotuning_test:autotuning_test.py:199 Trying to set nsslapd-dbcachesize to 33333333 INFO  tests.suites.config.autotuning_test:autotuning_test.py:216 Check nsslapd-dbcachesize and nsslapd-cachememsize in the appropriate range. INFO  tests.suites.config.autotuning_test:autotuning_test.py:217 nsslapd-dbcachesize == b'261517082' INFO  tests.suites.config.autotuning_test:autotuning_test.py:218 nsslapd-cachememsize == b'469762048' INFO  tests.suites.config.autotuning_test:autotuning_test.py:219 nsslapd-dncachememsize == b'67108864' INFO  tests.suites.config.autotuning_test:autotuning_test.py:220 nsslapd-cache-autosize == b'10' INFO  tests.suites.config.autotuning_test:autotuning_test.py:221 nsslapd-cache-autosize-split == b'40'
Passed suites/config/autotuning_test.py::test_cache_autosize_non_zero[-40] 4.55
-------------------------------Captured log call--------------------------------
INFO  tests.suites.config.autotuning_test:autotuning_test.py:169 Check nsslapd-dbcachesize and nsslapd-cachememsize before the test INFO  tests.suites.config.autotuning_test:autotuning_test.py:170 nsslapd-dbcachesize == b'261517082' INFO  tests.suites.config.autotuning_test:autotuning_test.py:171 nsslapd-cachememsize == b'469762048' INFO  tests.suites.config.autotuning_test:autotuning_test.py:172 nsslapd-dncachememsize == b'67108864' INFO  tests.suites.config.autotuning_test:autotuning_test.py:173 nsslapd-cache-autosize == b'10' INFO  tests.suites.config.autotuning_test:autotuning_test.py:174 nsslapd-cache-autosize-split == b'40' INFO  tests.suites.config.autotuning_test:autotuning_test.py:180 Delete nsslapd-cache-autosize INFO  tests.suites.config.autotuning_test:autotuning_test.py:187 Set nsslapd-cache-autosize-split to 40 INFO  tests.suites.config.autotuning_test:autotuning_test.py:196 Trying to set nsslapd-cachememsize to 33333333 INFO  tests.suites.config.autotuning_test:autotuning_test.py:199 Trying to set nsslapd-dbcachesize to 33333333 INFO  tests.suites.config.autotuning_test:autotuning_test.py:216 Check nsslapd-dbcachesize and nsslapd-cachememsize in the appropriate range. INFO  tests.suites.config.autotuning_test:autotuning_test.py:217 nsslapd-dbcachesize == b'817240883' INFO  tests.suites.config.autotuning_test:autotuning_test.py:218 nsslapd-cachememsize == b'1140850688' INFO  tests.suites.config.autotuning_test:autotuning_test.py:219 nsslapd-dncachememsize == b'134217728' INFO  tests.suites.config.autotuning_test:autotuning_test.py:220 nsslapd-cache-autosize == b'25' INFO  tests.suites.config.autotuning_test:autotuning_test.py:221 nsslapd-cache-autosize-split == b'40'
Passed suites/config/autotuning_test.py::test_cache_autosize_non_zero[10-] 4.76
-------------------------------Captured log call--------------------------------
INFO  tests.suites.config.autotuning_test:autotuning_test.py:169 Check nsslapd-dbcachesize and nsslapd-cachememsize before the test INFO  tests.suites.config.autotuning_test:autotuning_test.py:170 nsslapd-dbcachesize == b'817240883' INFO  tests.suites.config.autotuning_test:autotuning_test.py:171 nsslapd-cachememsize == b'1140850688' INFO  tests.suites.config.autotuning_test:autotuning_test.py:172 nsslapd-dncachememsize == b'134217728' INFO  tests.suites.config.autotuning_test:autotuning_test.py:173 nsslapd-cache-autosize == b'25' INFO  tests.suites.config.autotuning_test:autotuning_test.py:174 nsslapd-cache-autosize-split == b'40' INFO  tests.suites.config.autotuning_test:autotuning_test.py:177 Set nsslapd-cache-autosize to 10 INFO  tests.suites.config.autotuning_test:autotuning_test.py:190 Delete nsslapd-cache-autosize-split INFO  tests.suites.config.autotuning_test:autotuning_test.py:196 Trying to set nsslapd-cachememsize to 33333333 INFO  tests.suites.config.autotuning_test:autotuning_test.py:199 Trying to set nsslapd-dbcachesize to 33333333 INFO  tests.suites.config.autotuning_test:autotuning_test.py:216 Check nsslapd-dbcachesize and nsslapd-cachememsize in the appropriate range. INFO  tests.suites.config.autotuning_test:autotuning_test.py:217 nsslapd-dbcachesize == b'163448176' INFO  tests.suites.config.autotuning_test:autotuning_test.py:218 nsslapd-cachememsize == b'603979776' INFO  tests.suites.config.autotuning_test:autotuning_test.py:219 nsslapd-dncachememsize == b'67108864' INFO  tests.suites.config.autotuning_test:autotuning_test.py:220 nsslapd-cache-autosize == b'10' INFO  tests.suites.config.autotuning_test:autotuning_test.py:221 nsslapd-cache-autosize-split == b'25'
Passed suites/config/autotuning_test.py::test_cache_autosize_non_zero[10-401] 4.55
-------------------------------Captured log call--------------------------------
INFO  tests.suites.config.autotuning_test:autotuning_test.py:169 Check nsslapd-dbcachesize and nsslapd-cachememsize before the test INFO  tests.suites.config.autotuning_test:autotuning_test.py:170 nsslapd-dbcachesize == b'163448176' INFO  tests.suites.config.autotuning_test:autotuning_test.py:171 nsslapd-cachememsize == b'603979776' INFO  tests.suites.config.autotuning_test:autotuning_test.py:172 nsslapd-dncachememsize == b'67108864' INFO  tests.suites.config.autotuning_test:autotuning_test.py:173 nsslapd-cache-autosize == b'10' INFO  tests.suites.config.autotuning_test:autotuning_test.py:174 nsslapd-cache-autosize-split == b'25' INFO  tests.suites.config.autotuning_test:autotuning_test.py:177 Set nsslapd-cache-autosize to 10 INFO  tests.suites.config.autotuning_test:autotuning_test.py:187 Set nsslapd-cache-autosize-split to 40 INFO  tests.suites.config.autotuning_test:autotuning_test.py:196 Trying to set nsslapd-cachememsize to 33333333 INFO  tests.suites.config.autotuning_test:autotuning_test.py:199 Trying to set nsslapd-dbcachesize to 33333333 INFO  tests.suites.config.autotuning_test:autotuning_test.py:216 Check nsslapd-dbcachesize and nsslapd-cachememsize in the appropriate range. INFO  tests.suites.config.autotuning_test:autotuning_test.py:217 nsslapd-dbcachesize == b'261517082' INFO  tests.suites.config.autotuning_test:autotuning_test.py:218 nsslapd-cachememsize == b'469762048' INFO  tests.suites.config.autotuning_test:autotuning_test.py:219 nsslapd-dncachememsize == b'67108864' INFO  tests.suites.config.autotuning_test:autotuning_test.py:220 nsslapd-cache-autosize == b'10' INFO  tests.suites.config.autotuning_test:autotuning_test.py:221 nsslapd-cache-autosize-split == b'40'
Passed suites/config/autotuning_test.py::test_cache_autosize_non_zero[10-0] 4.87
-------------------------------Captured log call--------------------------------
INFO  tests.suites.config.autotuning_test:autotuning_test.py:169 Check nsslapd-dbcachesize and nsslapd-cachememsize before the test INFO  tests.suites.config.autotuning_test:autotuning_test.py:170 nsslapd-dbcachesize == b'261517082' INFO  tests.suites.config.autotuning_test:autotuning_test.py:171 nsslapd-cachememsize == b'469762048' INFO  tests.suites.config.autotuning_test:autotuning_test.py:172 nsslapd-dncachememsize == b'67108864' INFO  tests.suites.config.autotuning_test:autotuning_test.py:173 nsslapd-cache-autosize == b'10' INFO  tests.suites.config.autotuning_test:autotuning_test.py:174 nsslapd-cache-autosize-split == b'40' INFO  tests.suites.config.autotuning_test:autotuning_test.py:177 Set nsslapd-cache-autosize to 10 INFO  tests.suites.config.autotuning_test:autotuning_test.py:187 Set nsslapd-cache-autosize-split to 0 INFO  tests.suites.config.autotuning_test:autotuning_test.py:196 Trying to set nsslapd-cachememsize to 33333333 INFO  tests.suites.config.autotuning_test:autotuning_test.py:199 Trying to set nsslapd-dbcachesize to 33333333 INFO  tests.suites.config.autotuning_test:autotuning_test.py:216 Check nsslapd-dbcachesize and nsslapd-cachememsize in the appropriate range. INFO  tests.suites.config.autotuning_test:autotuning_test.py:217 nsslapd-dbcachesize == b'163448176' INFO  tests.suites.config.autotuning_test:autotuning_test.py:218 nsslapd-cachememsize == b'603979776' INFO  tests.suites.config.autotuning_test:autotuning_test.py:219 nsslapd-dncachememsize == b'67108864' INFO  tests.suites.config.autotuning_test:autotuning_test.py:220 nsslapd-cache-autosize == b'10' INFO  tests.suites.config.autotuning_test:autotuning_test.py:221 nsslapd-cache-autosize-split == b'0'
Passed suites/config/autotuning_test.py::test_cache_autosize_basic_sane[0] 9.14
-------------------------------Captured log call--------------------------------
INFO  tests.suites.config.autotuning_test:autotuning_test.py:273 Check nsslapd-dbcachesize and nsslapd-cachememsize before the test INFO  tests.suites.config.autotuning_test:autotuning_test.py:274 nsslapd-dbcachesize == b'163448176' INFO  tests.suites.config.autotuning_test:autotuning_test.py:275 nsslapd-cachememsize == b'603979776' INFO  tests.suites.config.autotuning_test:autotuning_test.py:276 nsslapd-cache-autosize == b'0' INFO  tests.suites.config.autotuning_test:autotuning_test.py:277 nsslapd-cache-autosize-split == b'0' INFO  tests.suites.config.autotuning_test:autotuning_test.py:280 Set nsslapd-cache-autosize-split to 0 INFO  tests.suites.config.autotuning_test:autotuning_test.py:289 Set nsslapd-dbcachesize to 0 INFO  tests.suites.config.autotuning_test:autotuning_test.py:291 Set nsslapd-cachememsize to 0 INFO  tests.suites.config.autotuning_test:autotuning_test.py:307 Check nsslapd-dbcachesize and nsslapd-cachememsize in the appropriate range. INFO  tests.suites.config.autotuning_test:autotuning_test.py:308 nsslapd-dbcachesize == b'408620441' INFO  tests.suites.config.autotuning_test:autotuning_test.py:309 nsslapd-cachememsize == b'1409286144' INFO  tests.suites.config.autotuning_test:autotuning_test.py:310 nsslapd-dncachememsize == b'67108864' INFO  tests.suites.config.autotuning_test:autotuning_test.py:311 nsslapd-cache-autosize == b'0' INFO  tests.suites.config.autotuning_test:autotuning_test.py:312 nsslapd-cache-autosize-split == b'0' INFO  tests.suites.config.autotuning_test:autotuning_test.py:273 Check nsslapd-dbcachesize and nsslapd-cachememsize before the test INFO  tests.suites.config.autotuning_test:autotuning_test.py:274 nsslapd-dbcachesize == b'408620441' INFO  tests.suites.config.autotuning_test:autotuning_test.py:275 nsslapd-cachememsize == b'1409286144' INFO  tests.suites.config.autotuning_test:autotuning_test.py:276 nsslapd-cache-autosize == b'0' INFO  tests.suites.config.autotuning_test:autotuning_test.py:277 nsslapd-cache-autosize-split 
== b'0' INFO  tests.suites.config.autotuning_test:autotuning_test.py:280 Set nsslapd-cache-autosize-split to 0 INFO  tests.suites.config.autotuning_test:autotuning_test.py:289 Set nsslapd-dbcachesize to 33333333 INFO  tests.suites.config.autotuning_test:autotuning_test.py:291 Set nsslapd-cachememsize to 33333333 INFO  tests.suites.config.autotuning_test:autotuning_test.py:307 Check nsslapd-dbcachesize and nsslapd-cachememsize in the appropriate range. INFO  tests.suites.config.autotuning_test:autotuning_test.py:308 nsslapd-dbcachesize == b'33333333' INFO  tests.suites.config.autotuning_test:autotuning_test.py:309 nsslapd-cachememsize == b'33333333' INFO  tests.suites.config.autotuning_test:autotuning_test.py:310 nsslapd-dncachememsize == b'67108864' INFO  tests.suites.config.autotuning_test:autotuning_test.py:311 nsslapd-cache-autosize == b'0' INFO  tests.suites.config.autotuning_test:autotuning_test.py:312 nsslapd-cache-autosize-split == b'0'
Passed suites/config/autotuning_test.py::test_cache_autosize_basic_sane[] 9.68
-------------------------------Captured log call--------------------------------
INFO  tests.suites.config.autotuning_test:autotuning_test.py:273 Check nsslapd-dbcachesize and nsslapd-cachememsize before the test INFO  tests.suites.config.autotuning_test:autotuning_test.py:274 nsslapd-dbcachesize == b'33333333' INFO  tests.suites.config.autotuning_test:autotuning_test.py:275 nsslapd-cachememsize == b'33333333' INFO  tests.suites.config.autotuning_test:autotuning_test.py:276 nsslapd-cache-autosize == b'0' INFO  tests.suites.config.autotuning_test:autotuning_test.py:277 nsslapd-cache-autosize-split == b'0' INFO  tests.suites.config.autotuning_test:autotuning_test.py:283 Delete nsslapd-cache-autosize-split INFO  tests.suites.config.autotuning_test:autotuning_test.py:289 Set nsslapd-dbcachesize to 0 INFO  tests.suites.config.autotuning_test:autotuning_test.py:291 Set nsslapd-cachememsize to 0 INFO  tests.suites.config.autotuning_test:autotuning_test.py:307 Check nsslapd-dbcachesize and nsslapd-cachememsize in the appropriate range. INFO  tests.suites.config.autotuning_test:autotuning_test.py:308 nsslapd-dbcachesize == b'408620441' INFO  tests.suites.config.autotuning_test:autotuning_test.py:309 nsslapd-cachememsize == b'1409286144' INFO  tests.suites.config.autotuning_test:autotuning_test.py:310 nsslapd-dncachememsize == b'67108864' INFO  tests.suites.config.autotuning_test:autotuning_test.py:311 nsslapd-cache-autosize == b'0' INFO  tests.suites.config.autotuning_test:autotuning_test.py:312 nsslapd-cache-autosize-split == b'25' INFO  tests.suites.config.autotuning_test:autotuning_test.py:273 Check nsslapd-dbcachesize and nsslapd-cachememsize before the test INFO  tests.suites.config.autotuning_test:autotuning_test.py:274 nsslapd-dbcachesize == b'408620441' INFO  tests.suites.config.autotuning_test:autotuning_test.py:275 nsslapd-cachememsize == b'1409286144' INFO  tests.suites.config.autotuning_test:autotuning_test.py:276 nsslapd-cache-autosize == b'0' INFO  tests.suites.config.autotuning_test:autotuning_test.py:277 nsslapd-cache-autosize-split == 
b'25' INFO  tests.suites.config.autotuning_test:autotuning_test.py:283 Delete nsslapd-cache-autosize-split INFO  tests.suites.config.autotuning_test:autotuning_test.py:289 Set nsslapd-dbcachesize to 33333333 INFO  tests.suites.config.autotuning_test:autotuning_test.py:291 Set nsslapd-cachememsize to 33333333 INFO  tests.suites.config.autotuning_test:autotuning_test.py:307 Check nsslapd-dbcachesize and nsslapd-cachememsize in the appropriate range. INFO  tests.suites.config.autotuning_test:autotuning_test.py:308 nsslapd-dbcachesize == b'33333333' INFO  tests.suites.config.autotuning_test:autotuning_test.py:309 nsslapd-cachememsize == b'33333333' INFO  tests.suites.config.autotuning_test:autotuning_test.py:310 nsslapd-dncachememsize == b'67108864' INFO  tests.suites.config.autotuning_test:autotuning_test.py:311 nsslapd-cache-autosize == b'0' INFO  tests.suites.config.autotuning_test:autotuning_test.py:312 nsslapd-cache-autosize-split == b'25'
Passed suites/config/autotuning_test.py::test_cache_autosize_basic_sane[40] 9.19
-------------------------------Captured log call--------------------------------
INFO  tests.suites.config.autotuning_test:autotuning_test.py:273 Check nsslapd-dbcachesize and nsslapd-cachememsize before the test INFO  tests.suites.config.autotuning_test:autotuning_test.py:274 nsslapd-dbcachesize == b'33333333' INFO  tests.suites.config.autotuning_test:autotuning_test.py:275 nsslapd-cachememsize == b'33333333' INFO  tests.suites.config.autotuning_test:autotuning_test.py:276 nsslapd-cache-autosize == b'0' INFO  tests.suites.config.autotuning_test:autotuning_test.py:277 nsslapd-cache-autosize-split == b'25' INFO  tests.suites.config.autotuning_test:autotuning_test.py:280 Set nsslapd-cache-autosize-split to 40 INFO  tests.suites.config.autotuning_test:autotuning_test.py:289 Set nsslapd-dbcachesize to 0 INFO  tests.suites.config.autotuning_test:autotuning_test.py:291 Set nsslapd-cachememsize to 0 INFO  tests.suites.config.autotuning_test:autotuning_test.py:307 Check nsslapd-dbcachesize and nsslapd-cachememsize in the appropriate range. INFO  tests.suites.config.autotuning_test:autotuning_test.py:308 nsslapd-dbcachesize == b'817240883' INFO  tests.suites.config.autotuning_test:autotuning_test.py:309 nsslapd-cachememsize == b'1140850688' INFO  tests.suites.config.autotuning_test:autotuning_test.py:310 nsslapd-dncachememsize == b'67108864' INFO  tests.suites.config.autotuning_test:autotuning_test.py:311 nsslapd-cache-autosize == b'0' INFO  tests.suites.config.autotuning_test:autotuning_test.py:312 nsslapd-cache-autosize-split == b'40' INFO  tests.suites.config.autotuning_test:autotuning_test.py:273 Check nsslapd-dbcachesize and nsslapd-cachememsize before the test INFO  tests.suites.config.autotuning_test:autotuning_test.py:274 nsslapd-dbcachesize == b'817240883' INFO  tests.suites.config.autotuning_test:autotuning_test.py:275 nsslapd-cachememsize == b'1140850688' INFO  tests.suites.config.autotuning_test:autotuning_test.py:276 nsslapd-cache-autosize == b'0' INFO  tests.suites.config.autotuning_test:autotuning_test.py:277 nsslapd-cache-autosize-split 
== b'40' INFO  tests.suites.config.autotuning_test:autotuning_test.py:280 Set nsslapd-cache-autosize-split to 40 INFO  tests.suites.config.autotuning_test:autotuning_test.py:289 Set nsslapd-dbcachesize to 33333333 INFO  tests.suites.config.autotuning_test:autotuning_test.py:291 Set nsslapd-cachememsize to 33333333 INFO  tests.suites.config.autotuning_test:autotuning_test.py:307 Check nsslapd-dbcachesize and nsslapd-cachememsize in the appropriate range. INFO  tests.suites.config.autotuning_test:autotuning_test.py:308 nsslapd-dbcachesize == b'33333333' INFO  tests.suites.config.autotuning_test:autotuning_test.py:309 nsslapd-cachememsize == b'33333333' INFO  tests.suites.config.autotuning_test:autotuning_test.py:310 nsslapd-dncachememsize == b'67108864' INFO  tests.suites.config.autotuning_test:autotuning_test.py:311 nsslapd-cache-autosize == b'0' INFO  tests.suites.config.autotuning_test:autotuning_test.py:312 nsslapd-cache-autosize-split == b'40'
Passed suites/config/autotuning_test.py::test_cache_autosize_invalid_values[-2] 0.03
-------------------------------Captured log call--------------------------------
INFO  tests.suites.config.autotuning_test:autotuning_test.py:346 Set nsslapd-cache-autosize-split to -2 INFO  tests.suites.config.autotuning_test:autotuning_test.py:352 Set nsslapd-cache-autosize to -2
Passed suites/config/autotuning_test.py::test_cache_autosize_invalid_values[102] 0.04
-------------------------------Captured log call--------------------------------
INFO  tests.suites.config.autotuning_test:autotuning_test.py:346 Set nsslapd-cache-autosize-split to 102 INFO  tests.suites.config.autotuning_test:autotuning_test.py:352 Set nsslapd-cache-autosize to 102
Passed suites/config/autotuning_test.py::test_cache_autosize_invalid_values[invalid] 0.05
-------------------------------Captured log call--------------------------------
INFO  tests.suites.config.autotuning_test:autotuning_test.py:346 Set nsslapd-cache-autosize-split to invalid INFO  tests.suites.config.autotuning_test:autotuning_test.py:352 Set nsslapd-cache-autosize to invalid
Passed suites/config/config_test.py::test_maxbersize_repl 18.25
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39001, 'ldap-secureport': 63701, 'server-id': 'master1', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 39002, 'ldap-secureport': 63702, 'server-id': 'master2', 'suffix': 'dc=example,dc=com'} was created. INFO  lib389.topologies:topologies.py:148 Creating replication topology. INFO  lib389.topologies:topologies.py:162 Joining master master2 to master1 ... INFO  lib389.replica:replica.py:2101 SUCCESS: bootstrap to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 completed INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is was created INFO  lib389.replica:replica.py:2382 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is was created INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is NOT working (expect 59b7f38b-81d5-49f1-b31c-6366b52c892c / got description=None) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 is working INFO  lib389.replica:replica.py:2515 Retry: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is NOT working (expect 5e9c99c8-5e96-4fd3-a208-db29b931d37a / got description=59b7f38b-81d5-49f1-b31c-6366b52c892c) INFO  lib389.replica:replica.py:2513 SUCCESS: Replication from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to 
ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 is working INFO  lib389.replica:replica.py:2170 SUCCESS: joined master from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 INFO  lib389.topologies:topologies.py:170 Ensuring master master1 to master2 ... INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 already exists INFO  lib389.topologies:topologies.py:170 Ensuring master master2 to master1 ... INFO  lib389.replica:replica.py:2355 SUCCESS: Agreement from ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39002 to ldap://ci-vm-10-0-139-92.hosted.upshift.rdu2.redhat.com:39001 already exists
-------------------------------Captured log call--------------------------------
INFO  tests.suites.config.config_test:config_test.py:69 Set nsslapd-maxbersize: 20K to master2 INFO  tests.suites.config.config_test:config_test.py:74 Try to add attribute with a big value to master2 - expect to FAIL INFO  tests.suites.config.config_test:config_test.py:81 Try to add attribute with a big value to master1 - expect to PASS INFO  tests.suites.config.config_test:config_test.py:86 Check if a big value was successfully added to master1 INFO  tests.suites.config.config_test:config_test.py:90 Check if a big value was successfully replicated to master2
Passed suites/config/config_test.py::test_config_listen_backport_size 0.03
No log output captured.
Passed suites/config/config_test.py::test_config_deadlock_policy 0.07
No log output captured.
Passed suites/config/config_test.py::test_defaultnamingcontext 1.47
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
-------------------------------Captured log call--------------------------------
INFO  tests.suites.config.config_test:config_test.py:220 Check the attribute nsslapd-defaultnamingcontext is present in cn=config INFO  tests.suites.config.config_test:config_test.py:223 Delete nsslapd-defaultnamingcontext attribute INFO  tests.suites.config.config_test:config_test.py:230 modify nsslapd-defaultnamingcontext with new suffix INFO  tests.suites.config.config_test:config_test.py:233 Add new invalid value at runtime to nsslapd-defaultnamingcontext INFO  tests.suites.config.config_test:config_test.py:237 Modify nsslapd-defaultnamingcontext with blank value INFO  tests.suites.config.config_test:config_test.py:240 Add new suffix when nsslapd-defaultnamingcontext is empty INFO  tests.suites.config.config_test:config_test.py:244 Check the value of the nsslapd-defaultnamingcontext automatically have the new suffix INFO  tests.suites.config.config_test:config_test.py:247 Adding new suffix when nsslapd-defaultnamingcontext is not empty INFO  tests.suites.config.config_test:config_test.py:251 Check the value of the nsslapd-defaultnamingcontext has not changed INFO  tests.suites.config.config_test:config_test.py:254 Remove the newly added suffix and check the values of the attribute is not changed INFO  tests.suites.config.config_test:config_test.py:258 Remove all the suffix at the end
Passed suites/config/config_test.py::test_allow_add_delete_config_attributes 5.99
-------------------------------Captured log call--------------------------------
INFO  tests.suites.config.config_test:config_test.py:308 Add a new valid attribute at runtime to cn=config INFO  tests.suites.config.config_test:config_test.py:312 Delete nsslapd-listenhost to restore the default value INFO  tests.suites.config.config_test:config_test.py:317 Add new invalid attribute at runtime to cn=config INFO  tests.suites.config.config_test:config_test.py:321 Make sure the invalid attribute is not added
Passed suites/config/config_test.py::test_ignore_virtual_attrs 0.62
-------------------------------Captured log call--------------------------------
INFO  tests.suites.config.config_test:config_test.py:354 Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config INFO  tests.suites.config.config_test:config_test.py:357 Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF INFO  tests.suites.config.config_test:config_test.py:360 Set the valid values i.e. on/ON and off/OFF for nsslapd-ignore-virtual-attrs INFO  tests.suites.config.config_test:config_test.py:365 Set invalid value for attribute nsslapd-ignore-virtual-attrs INFO  tests.suites.config.config_test:config_test.py:376 Add cosPointer, cosTemplate and test entry to default suffix, where virtual attribute is postal code INFO  tests.suites.config.config_test:config_test.py:389 Test if virtual attribute i.e. postal code shown in test entry while nsslapd-ignore-virtual-attrs: off INFO  tests.suites.config.config_test:config_test.py:392 Set nsslapd-ignore-virtual-attrs=on INFO  tests.suites.config.config_test:config_test.py:395 Test if virtual attribute i.e. postal code not shown while nsslapd-ignore-virtual-attrs: on
Passed suites/config/config_test.py::test_ndn_cache_enabled 8.67
-------------------------------Captured log call--------------------------------
INFO  tests.suites.config.config_test:config_test.py:423 Check the attribute nsslapd-ndn-cache-enabled is present in cn=config INFO  tests.suites.config.config_test:config_test.py:426 Check the attribute nsslapd-ndn-cache-enabled has the default value set as ON INFO  tests.suites.config.config_test:config_test.py:429 Check the attribute nsslapd-ndn-cache-max-size is present in cn=config INFO  tests.suites.config.config_test:config_test.py:435 Ticket#49593 : NDN cache stats should be under the global stats - Implemented in 1.4 INFO  tests.suites.config.config_test:config_test.py:436 Fetch the monitor value according to the ds version INFO  tests.suites.config.config_test:config_test.py:442 Check the backend monitor output for Normalized DN cache statistics, while nsslapd-ndn-cache-enabled is off INFO  tests.suites.config.config_test:config_test.py:448 Check the backend monitor output for Normalized DN cache statistics, while nsslapd-ndn-cache-enabled is on INFO  tests.suites.config.config_test:config_test.py:454 Set invalid value for nsslapd-ndn-cache-enabled INFO  tests.suites.config.config_test:config_test.py:458 Set invalid value for nsslapd-ndn-cache-max-size
Passed suites/config/config_test.py::test_require_index 1.42
No log output captured.
Passed suites/config/config_test.py::test_require_internal_index 5.70
No log output captured.
Passed suites/config/regression_test.py::test_maxbersize_repl 7.20
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
-------------------------------Captured log call--------------------------------
INFO  tests.suites.config.regression_test:regression_test.py:100 Set nsslapd-errorlog-maxlogsize before nsslapd-errorlog-logmaxdiskspace INFO  tests.suites.config.regression_test:regression_test.py:104 Assert no init_dse_file errors in the error log INFO  tests.suites.config.regression_test:regression_test.py:108 Set nsslapd-errorlog-maxlogsize after nsslapd-errorlog-logmaxdiskspace INFO  tests.suites.config.regression_test:regression_test.py:112 Assert no init_dse_file errors in the error log
Passed suites/config/removed_config_49298_test.py::test_restore_config 3.50
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
-------------------------------Captured log call--------------------------------
INFO  tests.suites.config.removed_config_49298_test:removed_config_49298_test.py:43 /etc/dirsrv/slapd-standalone1
Passed suites/config/removed_config_49298_test.py::test_removed_config 2.37
-------------------------------Captured log call--------------------------------
INFO  tests.suites.config.removed_config_49298_test:removed_config_49298_test.py:72 /etc/dirsrv/slapd-standalone1
Passed suites/cos/cos_test.py::test_positive 0.31
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
Passed suites/cos/indirect_cos_test.py::test_indirect_cos 1.31
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created. INFO  tests.suites.cos.indirect_cos_test:indirect_cos_test.py:107 Add custom schema... INFO  tests.suites.cos.indirect_cos_test:indirect_cos_test.py:120 Add test user... INFO  tests.suites.cos.indirect_cos_test:indirect_cos_test.py:137 Setup indirect COS...
-------------------------------Captured log call--------------------------------
INFO  tests.suites.cos.indirect_cos_test:indirect_cos_test.py:157 Checking user... INFO  tests.suites.cos.indirect_cos_test:indirect_cos_test.py:56 Create password policy for subtree ou=people,dc=example,dc=com INFO  tests.suites.cos.indirect_cos_test:indirect_cos_test.py:164 Checking user...
Passed suites/disk_monitoring/disk_monitoring_test.py::test_verify_operation_when_disk_monitoring_is_off 4.89
-----------------------------Captured stdout setup------------------------------
Relabeled /var/log/dirsrv/slapd-standalone1 from unconfined_u:object_r:user_tmp_t:s0 to system_u:object_r:dirsrv_var_log_t:s0
-----------------------------Captured stderr setup------------------------------
chown: cannot access '/var/log/dirsrv/slapd-standalone1/*': No such file or directory
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
------------------------------Captured stderr call------------------------------
25+0 records in 25+0 records out 26214400 bytes (26 MB, 25 MiB) copied, 0.0120566 s, 2.2 GB/s dd: error writing '/var/log/dirsrv/slapd-standalone1/foo1': No space left on device 10+0 records in 9+0 records out 10465280 bytes (10 MB, 10 MiB) copied, 0.00510409 s, 2.1 GB/s
Passed suites/disk_monitoring/disk_monitoring_test.py::test_free_up_the_disk_space_and_change_ds_config 4.57
No log output captured.
Passed suites/disk_monitoring/disk_monitoring_test.py::test_verify_operation_with_nsslapd_disk_monitoring_logging_critical_off 35.19
------------------------------Captured stderr call------------------------------
10+0 records in 10+0 records out 10485760 bytes (10 MB, 10 MiB) copied, 0.0195001 s, 538 MB/s
Passed suites/disk_monitoring/disk_monitoring_test.py::test_operation_with_nsslapd_disk_monitoring_logging_critical_on_below_half_of_the_threshold 25.13
------------------------------Captured stderr call------------------------------
31+0 records in 31+0 records out 32505856 bytes (33 MB, 31 MiB) copied, 0.0324604 s, 1.0 GB/s
Passed suites/disk_monitoring/disk_monitoring_test.py::test_setting_nsslapd_disk_monitoring_logging_critical_to_off 3.88
No log output captured.
Passed suites/disk_monitoring/disk_monitoring_test.py::test_operation_with_nsslapd_disk_monitoring_logging_critical_off 72.29
------------------------------Captured stderr call------------------------------
10+0 records in 10+0 records out 10485760 bytes (10 MB, 10 MiB) copied, 0.0109024 s, 962 MB/s
Passed suites/disk_monitoring/disk_monitoring_test.py::test_operation_with_nsslapd_disk_monitoring_logging_critical_off_below_half_of_the_threshold 158.73
------------------------------Captured stderr call------------------------------
30+0 records in 30+0 records out 31457280 bytes (31 MB, 30 MiB) copied, 0.0213112 s, 1.5 GB/s
Passed suites/disk_monitoring/disk_monitoring_test.py::test_go_straight_below_half_of_the_threshold 107.67
------------------------------Captured stderr call------------------------------
31+0 records in 31+0 records out 32505856 bytes (33 MB, 31 MiB) copied, 0.0423303 s, 768 MB/s
Passed suites/disk_monitoring/disk_monitoring_test.py::test_readonly_on_threshold 28.30
------------------------------Captured stderr call------------------------------
10+0 records in 10+0 records out 10485760 bytes (10 MB, 10 MiB) copied, 0.011274 s, 930 MB/s
Passed suites/disk_monitoring/disk_monitoring_test.py::test_readonly_on_threshold_below_half_of_the_threshold 50.03
------------------------------Captured stderr call------------------------------
31+0 records in 31+0 records out 32505856 bytes (33 MB, 31 MiB) copied, 0.0139518 s, 2.3 GB/s
Passed suites/disk_monitoring/disk_monitoring_test.py::test_below_half_of_the_threshold_not_starting_after_shutdown 112.13
------------------------------Captured stderr call------------------------------
31+0 records in 31+0 records out 32505856 bytes (33 MB, 31 MiB) copied, 0.0400037 s, 813 MB/s
-------------------------------Captured log call--------------------------------
INFO  lib389:disk_monitoring_test.py:582 Instance start up has failed as expected
Passed suites/disk_monitoring/disk_monitoring_test.py::test_go_straight_below_4kb 18.23
------------------------------Captured stderr call------------------------------
25+0 records in 25+0 records out 26214400 bytes (26 MB, 25 MiB) copied, 0.0124581 s, 2.1 GB/s dd: error writing '/var/log/dirsrv/slapd-standalone1/foo1': No space left on device 10+0 records in 9+0 records out 10174464 bytes (10 MB, 9.7 MiB) copied, 0.00462473 s, 2.2 GB/s
Passed suites/disk_monitoring/disk_monitoring_test.py::test_threshold_to_overflow_value 0.03
No log output captured.
Passed suites/disk_monitoring/disk_monitoring_test.py::test_threshold_is_reached_to_half 14.49
------------------------------Captured stderr call------------------------------
10+0 records in 10+0 records out 10485760 bytes (10 MB, 10 MiB) copied, 0.004671 s, 2.2 GB/s
Passed suites/disk_monitoring/disk_monitoring_test.py::test_negagtive_parameterize[nsslapd-disk-monitoring-threshold--2] 0.01
No log output captured.
Passed suites/disk_monitoring/disk_monitoring_test.py::test_negagtive_parameterize[nsslapd-disk-monitoring-threshold-9223372036854775808] 0.00
No log output captured.
Passed suites/disk_monitoring/disk_monitoring_test.py::test_negagtive_parameterize[nsslapd-disk-monitoring-threshold-2047] 0.00
No log output captured.
Passed suites/disk_monitoring/disk_monitoring_test.py::test_negagtive_parameterize[nsslapd-disk-monitoring-threshold-0] 0.00
No log output captured.
Passed suites/disk_monitoring/disk_monitoring_test.py::test_negagtive_parameterize[nsslapd-disk-monitoring-threshold--1294967296] 0.00
No log output captured.
Passed suites/disk_monitoring/disk_monitoring_test.py::test_negagtive_parameterize[nsslapd-disk-monitoring-threshold-invalid] 0.00
No log output captured.
Passed suites/disk_monitoring/disk_monitoring_test.py::test_negagtive_parameterize[nsslapd-disk-monitoring-invalid] 0.00
No log output captured.
Passed suites/disk_monitoring/disk_monitoring_test.py::test_negagtive_parameterize[nsslapd-disk-monitoring-1] 0.00
No log output captured.
Passed suites/disk_monitoring/disk_monitoring_test.py::test_negagtive_parameterize[nsslapd-disk-monitoring-grace-period-00] 0.00
No log output captured.
Passed suites/disk_monitoring/disk_monitoring_test.py::test_negagtive_parameterize[nsslapd-disk-monitoring-grace-period-525 948] 0.00
No log output captured.
Passed suites/disk_monitoring/disk_monitoring_test.py::test_negagtive_parameterize[nsslapd-disk-monitoring-grace-period--10] 0.00
No log output captured.
Passed suites/disk_monitoring/disk_monitoring_test.py::test_negagtive_parameterize[nsslapd-disk-monitoring-logging-critical-oninvalid] 0.00
No log output captured.
Passed suites/disk_monitoring/disk_monitoring_test.py::test_negagtive_parameterize[nsslapd-disk-monitoring-grace-period--11] 0.00
No log output captured.
Passed suites/disk_monitoring/disk_monitoring_test.py::test_negagtive_parameterize[nsslapd-disk-monitoring-grace-period-01] 0.00
No log output captured.
Passed suites/disk_monitoring/disk_monitoring_test.py::test_valid_operations_are_permitted 3.76
No log output captured.
Passed suites/disk_monitoring/disk_space_test.py::test_basic 0.00
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
-------------------------------Captured log call--------------------------------
INFO  lib389:disk_space_test.py:37 Check that "partition", "size", "used", "available", "use%" words are present in the string INFO  lib389:disk_space_test.py:41 Check that the sizes are numbers
Passed suites/ds_logs/ds_logs_test.py::test_check_default 0.00
-------------------------------Captured log setup-------------------------------
INFO  lib389.topologies:topologies.py:115 Instance with parameters {'ldap-port': 38901, 'ldap-secureport': 63601, 'server-id': 'standalone1', 'suffix': 'dc=example,dc=com'} was created.
-------------------------------Captured log call--------------------------------
DEBUG  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:233 on
Passed suites/ds_logs/ds_logs_test.py::test_plugin_set_invalid 0.00
-------------------------------Captured log call--------------------------------
INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:252 test_plugin_set_invalid - Expect to fail with junk value
Passed suites/ds_logs/ds_logs_test.py::test_log_plugin_on 4.32
-------------------------------Captured log call--------------------------------
INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:279 Bug 1273549 - Check access logs for millisecond, when attribute is ON INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:280 perform any ldap operation, which will trigger the logs INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:36 Adding 10 users INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:284 Restart the server to flush the logs INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:287 parse the access logs
Passed suites/ds_logs/ds_logs_test.py::test_log_plugin_off 13.10
-------------------------------Captured log call--------------------------------
INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:319 Bug 1273549 - Check access logs for missing millisecond, when attribute is OFF INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:321 test_log_plugin_off - set the configuration attribute to OFF INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:324 Restart the server to flush the logs INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:327 test_log_plugin_off - delete the previous access logs INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:36 Adding 10 users INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:334 Restart the server to flush the logs INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:337 check access log that microseconds are not present
Passed suites/ds_logs/ds_logs_test.py::test_internal_log_server_level_0 4.27
-------------------------------Captured log setup-------------------------------
INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:200 Disable access log buffering
-------------------------------Captured log call--------------------------------
INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:365 Set nsslapd-plugin-logging to on INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:368 Configure access log level to 0 INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:372 Restart the server to flush the logs INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:376 Check if access log does not contain internal log of MOD operation INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:382 Check if the other internal operations are not present
Passed suites/ds_logs/ds_logs_test.py::test_internal_log_server_level_4 6.53
-------------------------------Captured log setup-------------------------------
INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:200 Disable access log buffering
-------------------------------Captured log call--------------------------------
INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:414 Set nsslapd-plugin-logging to on INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:417 Configure access log level to 4 INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:421 Restart the server to flush the logs INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:426 Check if access log contains internal MOD operation in correct format INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:432 Check if the other internal operations have the correct format
Passed suites/ds_logs/ds_logs_test.py::test_internal_log_level_260 5.74
-------------------------------Captured log setup-------------------------------
INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:102 Enable automember plugin INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:106 Enable Referential Integrity plugin INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:110 Set nsslapd-plugin-logging to on INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:113 Restart the server INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:120 Configure access log level to 260 INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:86 Renaming user INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:89 Delete the user INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:92 Delete automember entry, org. unit and group for the next test INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:200 Disable access log buffering
-------------------------------Captured log call--------------------------------
INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:472 Restart the server to flush the logs INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:476 Check the access logs for ADD operation of the user INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:492 Check the access logs for MOD operation of the user INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:506 Check the access logs for DEL operation of the user INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:518 Check if the other internal operations have the correct format
Passed suites/ds_logs/ds_logs_test.py::test_internal_log_level_131076 6.38
-------------------------------Captured log setup-------------------------------
INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:120 Configure access log level to 131076 INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:86 Renaming user INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:89 Delete the user INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:92 Delete automember entry, org. unit and group for the next test INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:200 Disable access log buffering
-------------------------------Captured log call--------------------------------
INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:557 Restart the server to flush the logs INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:561 Check the access logs for ADD operation of the user INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:577 Check the access logs for MOD operation of the user INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:591 Check the access logs for DEL operation of the user INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:603 Check if the other internal operations have the correct format
Passed suites/ds_logs/ds_logs_test.py::test_internal_log_level_516 6.46
-------------------------------Captured log setup-------------------------------
INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:120 Configure access log level to 516 INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:86 Renaming user INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:89 Delete the user INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:92 Delete automember entry, org. unit and group for the next test INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:200 Disable access log buffering
-------------------------------Captured log call--------------------------------
INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:642 Restart the server to flush the logs INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:646 Check the access logs for ADD operation of the user INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:665 Check the access logs for MOD operation of the user INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:682 Check the access logs for DEL operation of the user INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:697 Check if the other internal operations have the correct format
Passed suites/ds_logs/ds_logs_test.py::test_access_log_truncated_search_message 4.65
-------------------------------Captured log call--------------------------------
INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:724 Make a search INFO  tests.suites.ds_logs.ds_logs_test:ds_logs_test.py:72