From 14e413ae07832a2ae91617461d1e3cb858891639 Mon Sep 17 00:00:00 2001 From: Simon Pichugin Date: Feb 21 2018 08:56:24 +0000 Subject: Issue 49043 - Add replica conflict test suite Description: Add a test suite which checks replication conflict resolution for basic operations like add, delete, modrdn, modify, operations on groups with memberOf plugin enabled, managed entries operations and nested entries. idm/user.py - add create_test_user method which allows default user creation with given uid and gid. _mapped_object.py - add delete_tree() method for DSLdapObject class _constants.py - add access log level constants AccessLog(IntEnum) and ErrorLog(IntEnum) Fix topologies tearDown part. Replace map() with list comprehensions, because in Python 3, map() returns iterator and it doesn't execute the content on init. https://pagure.io/389-ds-base/issue/49043 Reviewed by: wibrown, lkrispen (Thanks!) --- diff --git a/dirsrvtests/tests/suites/replication/acceptance_test.py b/dirsrvtests/tests/suites/replication/acceptance_test.py index bdbfd20..36c455d 100644 --- a/dirsrvtests/tests/suites/replication/acceptance_test.py +++ b/dirsrvtests/tests/suites/replication/acceptance_test.py @@ -13,14 +13,8 @@ from lib389.utils import * from lib389.topologies import topology_m4 as topo_m4 from . 
import get_repl_entries from lib389.idm.user import UserAccount - from lib389.replica import ReplicationManager - -from lib389._constants import (BACKEND_NAME, DEFAULT_SUFFIX, LOG_REPLICA, REPLICA_RUV_FILTER, - ReplicaRole, REPLICATION_BIND_DN, REPLICATION_BIND_PW, - REPLICATION_BIND_METHOD, REPLICATION_TRANSPORT, defaultProperties, - RA_NAME, RA_BINDDN, RA_BINDPW, RA_METHOD, RA_TRANSPORT_PROT, - DN_DM, PASSWORD, LOG_DEFAULT, RA_ENABLED, RA_SCHEDULE) +from lib389._constants import * TEST_ENTRY_NAME = 'mmrepl_test' TEST_ENTRY_DN = 'uid={},{}'.format(TEST_ENTRY_NAME, DEFAULT_SUFFIX) @@ -418,7 +412,7 @@ def test_password_repl_error(topo_m4, test_entry): m2.deleteErrorLogs() log.info('Set replication loglevel') - m2.setLogLevel(LOG_REPLICA) + m2.config.loglevel((ErrorLog.REPLICA,)) log.info('Modifying entry {} - change userpassword on master 2'.format(TEST_ENTRY_DN)) test_user_m1 = UserAccount(topo_m4.ms["master1"], TEST_ENTRY_DN) diff --git a/dirsrvtests/tests/suites/replication/changelog_trimming_test.py b/dirsrvtests/tests/suites/replication/changelog_trimming_test.py index ffab7bf..e8cc3da 100644 --- a/dirsrvtests/tests/suites/replication/changelog_trimming_test.py +++ b/dirsrvtests/tests/suites/replication/changelog_trimming_test.py @@ -30,7 +30,7 @@ def setup_max_entries(topo, request): """ master = topo.ms["master1"] - master.config.loglevel((LOG_REPLICA,), 'error') + master.config.loglevel((ErrorLog.REPLICA,), 'error') cl = Changelog5(master) cl.set_max_entries('2') @@ -41,7 +41,7 @@ def setup_max_age(topo, request): """Configure logging and changelog max age """ master = topo.ms["master1"] - master.config.loglevel((LOG_REPLICA,), 'error') + master.config.loglevel((ErrorLog.REPLICA,), 'error') cl = Changelog5(master) cl.set_max_age('5') diff --git a/dirsrvtests/tests/suites/replication/conflict_resolve_test.py b/dirsrvtests/tests/suites/replication/conflict_resolve_test.py new file mode 100644 index 0000000..31b9aeb --- /dev/null +++ 
b/dirsrvtests/tests/suites/replication/conflict_resolve_test.py @@ -0,0 +1,872 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2018 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import time +import logging +import ldap +import pytest +from itertools import permutations +from lib389._constants import * +from lib389.idm.nscontainer import nsContainers +from lib389.idm.user import UserAccounts +from lib389.idm.group import Groups +from lib389.idm.organisationalunit import OrganisationalUnits +from lib389.replica import ReplicationManager +from lib389.agreement import Agreements +from lib389.plugins import MemberOfPlugin + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def _create_user(users, user_num, group_num=2000, sleep=False): + """Creates user entry""" + + user = users.create_test_user(user_num, group_num) + if sleep: + time.sleep(1) + return user + + +def _rename_user(users, user_num, new_num, sleep=False): + """Rename user entry""" + + assert user_num != new_num, "New user number should not be the same as the old one" + + user = users.get('test_user_{}'.format(user_num)) + user.rename('uid=test_user_{}'.format(new_num)) + if sleep: + time.sleep(1) + + +def _modify_user(users, user_num, sleep=False): + """Modify user entry""" + + user = users.get('test_user_{}'.format(user_num)) + user.replace("homeDirectory", "/home/test_user0{}".format(user_num)) + if sleep: + time.sleep(1) + time.sleep(1) + + +def _delete_user(users, user_num, sleep=False): + """Delete user entry""" + + user = users.get('test_user_{}'.format(user_num)) + user.delete() + if sleep: + time.sleep(1) + time.sleep(1) + + +def _create_group(groups, num, member, sleep=False): + """Creates group entry""" + + group_props 
= {'cn': 'test_group_{}'.format(num), + 'member': member} + group = groups.create(properties=group_props) + if sleep: + time.sleep(1) + return group + + +def _delete_group(groups, num, sleep=False): + """Delete group entry""" + + group = groups.get('test_group_{}'.format(num)) + group.delete() + if sleep: + time.sleep(1) + + +def _create_container(inst, dn, name, sleep=False): + """Creates container entry""" + + conts = nsContainers(inst, dn) + cont = conts.create(properties={'cn': name}) + if sleep: + time.sleep(1) + return cont + + +def _delete_container(cont, sleep=False): + """Deletes container entry""" + + cont.delete() + if sleep: + time.sleep(1) + + +def _test_base(topology): + """Add test container for entries, enable plugin logging, + audit log, error log for replica and access log for internal + """ + + M1 = topology.ms["master1"] + + conts = nsContainers(M1, SUFFIX) + test_base = conts.create(properties={'cn': 'test_container'}) + + for inst in topology: + inst.config.loglevel([ErrorLog.DEFAULT, ErrorLog.REPLICA], service='error') + inst.config.loglevel([AccessLog.DEFAULT, AccessLog.INTERNAL], service='access') + inst.config.set('nsslapd-plugin-logging', 'on') + inst.config.enable_log('audit') + inst.restart() + + return test_base + + +def _delete_test_base(inst, test_base_dn): + """Delete test container with entries and entry conflicts""" + + ents = inst.search_s(test_base_dn, ldap.SCOPE_SUBTREE, filterstr="(|(objectclass=*)(objectclass=ldapsubentry))") + + for ent in sorted(ents, key=lambda e: len(e.dn), reverse=True): + log.debug("Delete entry children {}".format(ent.dn)) + try: + inst.delete_ext_s(ent.dn) + except ldap.NO_SUCH_OBJECT: # For the case with objectclass: glue entries + pass + + +@pytest.fixture +def test_base(topology_m2, request): + tb = _test_base(topology_m2) + + def fin(): + if not DEBUGGING: + _delete_test_base(topology_m2.ms["master1"], tb.dn) + request.addfinalizer(fin) + + return tb + + +@pytest.fixture +def 
test_base_m3(topology_m3, request): + tb = _test_base(topology_m3) + + def fin(): + if not DEBUGGING: + _delete_test_base(topology_m3.ms["master1"], tb.dn) + request.addfinalizer(fin) + + return tb + + +class TestTwoMasters: + def test_add_modrdn(self, topology_m2, test_base): + """Check that conflict properly resolved for create - modrdn operations + + :id: 77f09b18-03d1-45da-940b-1ad2c2908ebb + :setup: Two master replication, test container for entries, enable plugin logging, + audit log, error log for replica and access log for internal + :steps: + 1. Add five users to m1 and wait for replication to happen + 2. Pause replication + 3. Create an entry on m1 and m2 + 4. Create an entry on m1 and rename on m2 + 5. Rename an entry on m1 and create on m2 + 6. Rename an entry on m1 and rename on m2 + 7. Rename an entry on m1 and rename on m2. Use different entries + but rename them to the same entry + 8. Resume replication + 9. Check that the entries on both masters are the same and replication is working + :expectedresults: + 1. It should pass + 2. It should pass + 3. It should pass + 4. It should pass + 5. It should pass + 6. It should pass + 7. It should pass + 8. 
It should pass + """ + + M1 = topology_m2.ms["master1"] + M2 = topology_m2.ms["master2"] + test_users_m1 = UserAccounts(M1, test_base.dn, rdn=None) + test_users_m2 = UserAccounts(M2, test_base.dn, rdn=None) + repl = ReplicationManager(SUFFIX) + + for user_num in range(1000, 1005): + _create_user(test_users_m1, user_num) + + repl.test_replication(M1, M2) + topology_m2.pause_all_replicas() + + log.info("Test create - modrdn") + user_num += 1 + _create_user(test_users_m1, user_num, sleep=True) + _create_user(test_users_m2, user_num, sleep=True) + + user_num += 1 + _create_user(test_users_m1, user_num, sleep=True) + _rename_user(test_users_m2, 1000, user_num, sleep=True) + + user_num += 1 + _rename_user(test_users_m1, 1001, user_num, sleep=True) + _create_user(test_users_m2, user_num, sleep=True) + + user_num += 1 + _rename_user(test_users_m1, 1002, user_num, sleep=True) + _rename_user(test_users_m2, 1002, user_num, sleep=True) + + user_num += 1 + _rename_user(test_users_m1, 1003, user_num, sleep=True) + _rename_user(test_users_m2, 1004, user_num) + + topology_m2.resume_all_replicas() + + repl.test_replication_topology(topology_m2) + + user_dns_m1 = [user.dn for user in test_users_m1.list()] + user_dns_m2 = [user.dn for user in test_users_m2.list()] + assert set(user_dns_m1) == set(user_dns_m2) + + def test_complex_add_modify_modrdn_delete(self, topology_m2, test_base): + """Check that conflict properly resolved for complex operations + which involve add, modify, modrdn and delete + + :id: 77f09b18-03d1-45da-940b-1ad2c2908eb1 + :setup: Two master replication, test container for entries, enable plugin logging, + audit log, error log for replica and access log for internal + :steps: + 1. Add ten users to m1 and wait for replication to happen + 2. Pause replication + 3. Test add-del on m1 and add on m2 + 4. Test add-mod on m1 and add on m2 + 5. Test add-modrdn on m1 and add on m2 + 6. Test multiple add, modrdn + 7. Test Add-del on both masters + 8. Test modrdn-modrdn + 9. 
Test modrdn-del + 10. Resume replication + 11. Check that the entries on both masters are the same and replication is working + :expectedresults: + 1. It should pass + 2. It should pass + 3. It should pass + 4. It should pass + 5. It should pass + 6. It should pass + 7. It should pass + 8. It should pass + 9. It should pass + 10. It should pass + 11. It should pass + """ + + M1 = topology_m2.ms["master1"] + M2 = topology_m2.ms["master2"] + + test_users_m1 = UserAccounts(M1, test_base.dn, rdn=None) + test_users_m2 = UserAccounts(M2, test_base.dn, rdn=None) + repl = ReplicationManager(SUFFIX) + + for user_num in range(1100, 1110): + _create_user(test_users_m1, user_num) + + repl.test_replication(M1, M2) + topology_m2.pause_all_replicas() + + log.info("Test add-del on M1 and add on M2") + user_num += 1 + _create_user(test_users_m1, user_num) + _delete_user(test_users_m1, user_num, sleep=True) + _create_user(test_users_m2, user_num, sleep=True) + + user_num += 1 + _create_user(test_users_m1, user_num, sleep=True) + _create_user(test_users_m2, user_num, sleep=True) + _delete_user(test_users_m1, user_num, sleep=True) + + user_num += 1 + _create_user(test_users_m2, user_num, sleep=True) + _create_user(test_users_m1, user_num) + _delete_user(test_users_m1, user_num) + + log.info("Test add-mod on M1 and add on M2") + user_num += 1 + _create_user(test_users_m1, user_num) + _modify_user(test_users_m1, user_num, sleep=True) + _create_user(test_users_m2, user_num, sleep=True) + + user_num += 1 + _create_user(test_users_m1, user_num, sleep=True) + _create_user(test_users_m2, user_num, sleep=True) + _modify_user(test_users_m1, user_num, sleep=True) + + user_num += 1 + _create_user(test_users_m2, user_num, sleep=True) + _create_user(test_users_m1, user_num) + _modify_user(test_users_m1, user_num) + + log.info("Test add-modrdn on M1 and add on M2") + user_num += 1 + _create_user(test_users_m1, user_num) + _rename_user(test_users_m1, user_num, user_num+20, sleep=True) + 
_create_user(test_users_m2, user_num, sleep=True) + + user_num += 1 + _create_user(test_users_m1, user_num, sleep=True) + _create_user(test_users_m2, user_num, sleep=True) + _rename_user(test_users_m1, user_num, user_num+20, sleep=True) + + user_num += 1 + _create_user(test_users_m2, user_num, sleep=True) + _create_user(test_users_m1, user_num) + _rename_user(test_users_m1, user_num, user_num+20) + + log.info("Test multiple add, modrdn") + user_num += 1 + _create_user(test_users_m1, user_num, sleep=True) + _create_user(test_users_m2, user_num, sleep=True) + _rename_user(test_users_m1, user_num, user_num+20) + _create_user(test_users_m1, user_num, sleep=True) + _modify_user(test_users_m2, user_num, sleep=True) + + log.info("Add - del on both masters") + user_num += 1 + _create_user(test_users_m1, user_num) + _delete_user(test_users_m1, user_num, sleep=True) + _create_user(test_users_m2, user_num) + _delete_user(test_users_m2, user_num, sleep=True) + + log.info("Test modrdn - modrdn") + user_num += 1 + _rename_user(test_users_m1, 1109, 1129, sleep=True) + _rename_user(test_users_m2, 1109, 1129, sleep=True) + + log.info("Test modrdn - del") + user_num += 1 + _rename_user(test_users_m1, 1100, 1120, sleep=True) + _delete_user(test_users_m2, 1100) + + user_num += 1 + _delete_user(test_users_m2, 1101, sleep=True) + _rename_user(test_users_m1, 1101, 1121) + + topology_m2.resume_all_replicas() + + repl.test_replication_topology(topology_m2) + time.sleep(30) + + user_dns_m1 = [user.dn for user in test_users_m1.list()] + user_dns_m2 = [user.dn for user in test_users_m2.list()] + assert set(user_dns_m1) == set(user_dns_m2) + + def test_memberof_groups(self, topology_m2, test_base): + """Check that conflict properly resolved for operations + with memberOf and groups + + :id: 77f09b18-03d1-45da-940b-1ad2c2908eb3 + :setup: Two master replication, test container for entries, enable plugin logging, + audit log, error log for replica and access log for internal + :steps: + 1. 
Enable memberOf plugin + 2. Add 30 users to m1 and wait for replication to happen + 3. Pause replication + 4. Create a group on m1 and m2 + 5. Create a group on m1 and m2, delete from m1 + 6. Create a group on m1, delete from m1, and create on m2, + 7. Create a group on m2 and m1, delete from m1 + 8. Create two different groups on m2 + 9. Resume replication + 10. Check that the entries on both masters are the same and replication is working + :expectedresults: + 1. It should pass + 2. It should pass + 3. It should pass + 4. It should pass + 5. It should pass + 6. It should pass + 7. It should pass + 8. It should pass + 9. It should pass + 10. It should pass + """ + + M1 = topology_m2.ms["master1"] + M2 = topology_m2.ms["master2"] + test_users_m1 = UserAccounts(M1, test_base.dn, rdn=None) + test_groups_m1 = Groups(M1, test_base.dn, rdn=None) + test_groups_m2 = Groups(M2, test_base.dn, rdn=None) + + repl = ReplicationManager(SUFFIX) + + for inst in topology_m2.ms.values(): + memberof = MemberOfPlugin(inst) + memberof.enable() + agmt = Agreements(inst).list()[0] + agmt.replace_many(('nsDS5ReplicatedAttributeListTotal', + '(objectclass=*) $ EXCLUDE '), + ('nsDS5ReplicatedAttributeList', + '(objectclass=*) $ EXCLUDE memberOf')) + inst.restart() + user_dns = [] + for user_num in range(10): + user_trio = [] + for num in range(0, 30, 10): + user = _create_user(test_users_m1, 1200 + user_num + num) + user_trio.append(user.dn) + user_dns.append(user_trio) + + repl.test_replication(M1, M2) + topology_m2.pause_all_replicas() + + log.info("Check a simple conflict") + group_num = 0 + _create_group(test_groups_m1, group_num, user_dns[group_num], sleep=True) + _create_group(test_groups_m2, group_num, user_dns[group_num], sleep=True) + + log.info("Check a add - del") + group_num += 1 + _create_group(test_groups_m1, group_num, user_dns[group_num], sleep=True) + _create_group(test_groups_m2, group_num, user_dns[group_num], sleep=True) + _delete_group(test_groups_m1, group_num) + + 
group_num += 1 + _create_group(test_groups_m1, group_num, user_dns[group_num]) + _delete_group(test_groups_m1, group_num, sleep=True) + _create_group(test_groups_m2, group_num, user_dns[group_num]) + + group_num += 1 + _create_group(test_groups_m2, group_num, user_dns[group_num], sleep=True) + _create_group(test_groups_m1, group_num, user_dns[group_num]) + _delete_group(test_groups_m1, group_num, sleep=True) + + group_num += 1 + _create_group(test_groups_m2, group_num, user_dns[group_num]) + group_num += 1 + _create_group(test_groups_m2, group_num, user_dns[group_num]) + + topology_m2.resume_all_replicas() + + repl.test_replication_topology(topology_m2) + + group_dns_m1 = [group.dn for group in test_groups_m1.list()] + group_dns_m2 = [group.dn for group in test_groups_m2.list()] + assert set(group_dns_m1) == set(group_dns_m2) + + def test_managed_entries(self, topology_m2): + """Check that conflict properly resolved for operations + with managed entries + + :id: 77f09b18-03d1-45da-940b-1ad2c2908eb4 + :setup: Two master replication, test container for entries, enable plugin logging, + audit log, error log for replica and access log for internal + :steps: + 1. Create ou=managed_users and ou=managed_groups under test container + 2. Configure managed entries plugin and add a template to test container + 3. Add a user to m1 and wait for replication to happen + 4. Pause replication + 5. Create a user on m1 and m2 with a same group ID on both master + 6. Create a user on m1 and m2 with a different group ID on both master + 7. Resume replication + 8. Check that the entries on both masters are the same and replication is working + :expectedresults: + 1. It should pass + 2. It should pass + 3. It should pass + 4. It should pass + 5. It should pass + 6. It should pass + 7. It should pass + 8. 
It should pass + """ + + M1 = topology_m2.ms["master1"] + M2 = topology_m2.ms["master2"] + repl = ReplicationManager(SUFFIX) + + ous = OrganisationalUnits(M1, DEFAULT_SUFFIX) + ou_people = ous.create(properties={'ou': 'managed_people'}) + ou_groups = ous.create(properties={'ou': 'managed_groups'}) + + test_users_m1 = UserAccounts(M1, DEFAULT_SUFFIX, rdn='ou={}'.format(ou_people.rdn)) + test_users_m2 = UserAccounts(M2, DEFAULT_SUFFIX, rdn='ou={}'.format(ou_people.rdn)) + + # TODO: Refactor ManagedPlugin class functionality (also add configs and templates) + conts = nsContainers(M1, SUFFIX) + template = conts.create(properties={ + 'objectclass': 'top mepTemplateEntry extensibleObject'.split(), + 'cn': 'MEP Template', + 'mepRDNAttr': 'cn', + 'mepStaticAttr': ['objectclass: posixGroup', 'objectclass: extensibleObject'], + 'mepMappedAttr': ['cn: $uid', 'uid: $cn', 'gidNumber: $uidNumber'] + }) + repl.test_replication(M1, M2) + + for inst in topology_m2.ms.values(): + conts = nsContainers(inst, "cn={},{}".format(PLUGIN_MANAGED_ENTRY, DN_PLUGIN)) + conts.create(properties={'objectclass': 'top extensibleObject'.split(), + 'cn': 'config', + 'originScope': ou_people.dn, + 'originFilter': 'objectclass=posixAccount', + 'managedBase': ou_groups.dn, + 'managedTemplate': template.dn}) + inst.restart() + + _create_user(test_users_m1, 1, 1) + + topology_m2.pause_all_replicas() + + _create_user(test_users_m1, 2, 2, sleep=True) + _create_user(test_users_m2, 2, 2, sleep=True) + + _create_user(test_users_m1, 3, 3, sleep=True) + _create_user(test_users_m2, 3, 33) + + topology_m2.resume_all_replicas() + + repl.test_replication_topology(topology_m2) + + user_dns_m1 = [user.dn for user in test_users_m1.list()] + user_dns_m2 = [user.dn for user in test_users_m2.list()] + assert set(user_dns_m1) == set(user_dns_m2) + + def test_nested_entries_with_children(self, topology_m2, test_base): + """Check that conflict properly resolved for operations + with nested entries with children + + :id: 
77f09b18-03d1-45da-940b-1ad2c2908eb5 + :setup: Two master replication, test container for entries, enable plugin logging, + audit log, error log for replica and access log for internal + :steps: + 1. Add 15 containers to m1 and wait for replication to happen + 2. Pause replication + 3. Create parent-child on master2 and master1 + 4. Create parent-child on master1 and master2 + 5. Create parent-child on master1 and master2 different child rdn + 6. Create parent-child on master1 and delete parent on master2 + 7. Create parent on master1, delete it and parent-child on master2, delete them + 8. Create parent on master1, delete it and parent-two children on master2 + 9. Create parent-two children on master1 and parent-child on master2, delete them + 10. Create three subsets inside existing container entry, applying only part of changes on m2 + 11. Create more combinations of the subset with parent-child on m1 and parent on m2 + 12. Delete container on m1, modify user1 on m1, create parent on m2 and modify user2 on m2 + 13. Resume replication + 14. Check that the entries on both masters are the same and replication is working + :expectedresults: + 1. It should pass + 2. It should pass + 3. It should pass + 4. It should pass + 5. It should pass + 6. It should pass + 7. It should pass + 8. It should pass + 9. It should pass + 10. It should pass + 11. It should pass + 12. It should pass + 13. It should pass + 14. 
It should pass + """ + + M1 = topology_m2.ms["master1"] + M2 = topology_m2.ms["master2"] + repl = ReplicationManager(SUFFIX) + test_users_m1 = UserAccounts(M1, test_base.dn, rdn=None) + test_users_m2 = UserAccounts(M2, test_base.dn, rdn=None) + _create_user(test_users_m1, 4000) + _create_user(test_users_m1, 4001) + + cont_list = [] + for num in range(15): + cont = _create_container(M1, test_base.dn, 'sub{}'.format(num)) + cont_list.append(cont) + + repl.test_replication(M1, M2) + + topology_m2.pause_all_replicas() + + log.info("Create parent-child on master2 and master1") + _create_container(M2, test_base.dn, 'p0', sleep=True) + cont_p = _create_container(M1, test_base.dn, 'p0', sleep=True) + _create_container(M1, cont_p.dn, 'c0', sleep=True) + _create_container(M2, cont_p.dn, 'c0', sleep=True) + + log.info("Create parent-child on master1 and master2") + cont_p = _create_container(M1, test_base.dn, 'p1', sleep=True) + _create_container(M2, test_base.dn, 'p1', sleep=True) + _create_container(M1, cont_p.dn, 'c1', sleep=True) + _create_container(M2, cont_p.dn, 'c1', sleep=True) + + log.info("Create parent-child on master1 and master2 different child rdn") + cont_p = _create_container(M1, test_base.dn, 'p2', sleep=True) + _create_container(M2, test_base.dn, 'p2', sleep=True) + _create_container(M1, cont_p.dn, 'c2', sleep=True) + _create_container(M2, cont_p.dn, 'c3', sleep=True) + + log.info("Create parent-child on master1 and delete parent on master2") + cont_num = 0 + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True) + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True) + _create_container(M1, cont_p_m1.dn, 'c0', sleep=True) + _delete_container(cont_p_m2) + + cont_num += 1 + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True) + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') + _create_container(M1, cont_p_m1.dn, 'c0', sleep=True) + _delete_container(cont_p_m2, sleep=True) + + 
log.info("Create parent on master1, delete it and parent-child on master2, delete them") + cont_num += 1 + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') + _delete_container(cont_p_m1, sleep=True) + + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0') + cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0') + _delete_container(cont_c_m2) + _delete_container(cont_p_m2) + + cont_num += 1 + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0') + cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0') + _delete_container(cont_c_m2) + _delete_container(cont_p_m2, sleep=True) + + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') + _delete_container(cont_p_m1) + + log.info("Create parent on master1, delete it and parent-two children on master2") + cont_num += 1 + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') + _delete_container(cont_p_m1, sleep=True) + + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0') + _create_container(M2, cont_p_m2.dn, 'c0') + _create_container(M2, cont_p_m2.dn, 'c1') + + cont_num += 1 + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0') + _create_container(M2, cont_p_m2.dn, 'c0') + _create_container(M2, cont_p_m2.dn, 'c1', sleep=True) + + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') + _delete_container(cont_p_m1, sleep=True) + + log.info("Create parent-two children on master1 and parent-child on master2, delete them") + cont_num += 1 + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0') + cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0') + _delete_container(cont_c_m2) + _delete_container(cont_p_m2, sleep=True) + + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') + _create_container(M1, cont_p_m1.dn, 'c0') + _create_container(M1, cont_p_m1.dn, 'c1') + + cont_num += 1 + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') + _create_container(M1, cont_p_m1.dn, 'c0') + _create_container(M1, 
cont_p_m1.dn, 'c1', sleep=True) + + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0') + cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0') + _delete_container(cont_c_m2) + _delete_container(cont_p_m2, sleep=True) + + log.info("Create three subsets inside existing container entry, applying only part of changes on m2") + cont_num += 1 + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') + _create_container(M1, cont_p_m1.dn, 'c0') + _create_container(M1, cont_p_m1.dn, 'c1', sleep=True) + _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True) + + cont_num += 1 + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') + _create_container(M1, cont_p_m1.dn, 'c0') + _create_container(M1, cont_p_m1.dn, 'c1', sleep=True) + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0') + _create_container(M2, cont_p_m2.dn, 'c0', sleep=True) + + cont_num += 1 + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') + _create_container(M1, cont_p_m1.dn, 'c0') + _create_container(M1, cont_p_m1.dn, 'c1', sleep=True) + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0') + cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0') + _delete_container(cont_c_m2, sleep=True) + + log.info("Create more combinations of the subset with parent-child on m1 and parent on m2") + cont_num += 1 + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True) + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True) + _delete_container(cont_p_m1, sleep=True) + cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0') + _delete_container(cont_c_m2) + _delete_container(cont_p_m2, sleep=True) + + cont_num += 1 + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True) + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True) + _delete_container(cont_p_m1, sleep=True) + _create_container(M2, cont_p_m2.dn, 'c0', sleep=True) + + cont_num += 1 + cont_p_m1 = 
_create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True) + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True) + cont_c_m1 = _create_container(M1, cont_p_m1.dn, 'c0', sleep=True) + _create_container(M2, cont_p_m2.dn, 'c0', sleep=True) + _delete_container(cont_c_m1, sleep=True) + _create_container(M2, cont_p_m2.dn, 'c1', sleep=True) + _delete_container(cont_p_m1, sleep=True) + + log.info("Delete container on m1, modify user1 on m1, create parent on m2 and modify user2 on m2") + cont_num += 1 + _delete_container(cont_list[cont_num]) + _modify_user(test_users_m1, 4000, sleep=True) + _create_container(M2, cont_list[cont_num].dn, 'p0') + _modify_user(test_users_m2, 4001) + + topology_m2.resume_all_replicas() + + repl.test_replication_topology(topology_m2, timeout=60) + + conts_dns = {} + for num in range(1, 3): + inst = topology_m2.ms["master{}".format(num)] + conts_dns[inst.serverid] = [] + conts = nsContainers(inst, test_base.dn) + for cont in conts.list(): + conts_p = nsContainers(inst, cont.dn) + for cont_p in conts_p.list(): + conts_c = nsContainers(inst, cont_p.dn) + conts_dns[inst.serverid].extend([cont_c.dn for cont_c in conts_c.list()]) + conts_dns[inst.serverid].extend([cont_p.dn for cont_p in conts_p.list()]) + conts_dns[inst.serverid].extend([cont.dn for cont in conts.list()]) + + assert set(conts_dns[M1.serverid]) == set(conts_dns[M2.serverid]) + + user_dns_m1 = [user.dn for user in test_users_m1.list()] + user_dns_m2 = [user.dn for user in test_users_m2.list()] + assert set(user_dns_m1) == set(user_dns_m2) + + +class TestThreeMasters: + def test_nested_entries(self, topology_m3, test_base_m3): + """Check that conflict properly resolved for operations + with nested entries with children + + :id: 77f09b18-03d1-45da-940b-1ad2c2908eb6 + :setup: Three master replication, test container for entries, enable plugin logging, + audit log, error log for replica and access log for internal + :steps: + 1. 
Add 15 containers to m1 and wait for replication to happen + 2. Pause replication + 3. Create two child entries under each of two entries + 4. Create three child entries under each of three entries + 5. Create two parents on m1 and m2, then on m1 - create a child and delete one parent, + on m2 - delete one parent and create a child + 6. Test a few more parent-child combinations with three instances + 7. Resume replication + 8. Check that the entries on both masters are the same and replication is working + :expectedresults: + 1. It should pass + 2. It should pass + 3. It should pass + 4. It should pass + 5. It should pass + 6. It should pass + 7. It should pass + 8. It should pass + """ + + M1 = topology_m3.ms["master1"] + M2 = topology_m3.ms["master2"] + M3 = topology_m3.ms["master3"] + repl = ReplicationManager(SUFFIX) + + cont_list = [] + for num in range(11): + cont = _create_container(M1, test_base_m3.dn, 'sub{}'.format(num)) + cont_list.append(cont) + + repl.test_replication(M1, M2) + repl.test_replication(M1, M3) + + topology_m3.pause_all_replicas() + + log.info("Create two child entries under each of two entries") + cont_num = -1 + for num in range(2): + cont_num += 1 + _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True) + _create_container(M2, cont_list[cont_num].dn, 'p1', sleep=True) + + log.info("Create three child entries under each of three entries") + for num in range(3): + cont_num += 1 + _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True) + _create_container(M2, cont_list[cont_num].dn, 'p1', sleep=True) + _create_container(M3, cont_list[cont_num].dn, 'p2', sleep=True) + + log.info("Create two parents on m1 and m2, then on m1 - create a child and delete one parent," + "on m2 - delete one parent and create a child") + for inst1, inst2 in ((M1, M2), (M2, M1)): + cont_num += 1 + cont_p_m1_1 = _create_container(inst1, cont_list[cont_num].dn, 'p0') + cont_p_m1_2 = _create_container(inst1, cont_list[cont_num].dn, 'p1', sleep=True) + 
cont_p_m2_1 = _create_container(inst2, cont_list[cont_num].dn, 'p0') + cont_p_m2_2 = _create_container(inst2, cont_list[cont_num].dn, 'p1', sleep=True) + _create_container(inst1, cont_p_m1_1.dn, 'c0', sleep=True) + _delete_container(cont_p_m2_1, sleep=True) + _delete_container(cont_p_m1_2, sleep=True) + _create_container(inst2, cont_p_m2_2.dn, 'c0', sleep=True) + + log.info("Test a few more parent-child combinations on three instances") + for inst1, inst2, inst3 in ((M1, M2, M3), (M2, M1, M3), (M3, M1, M2)): + cont_num += 1 + cont_p_m1 = _create_container(inst1, cont_list[cont_num].dn, 'p0') + _delete_container(cont_p_m1, sleep=True) + + cont_p_m2 = _create_container(inst2, cont_list[cont_num].dn, 'p0') + cont_c_m2 = _create_container(inst2, cont_p_m2.dn, 'c0') + _delete_container(cont_c_m2) + _delete_container(cont_p_m2, sleep=True) + + cont_p_m3 = _create_container(inst3, cont_list[cont_num].dn, 'p0') + _create_container(inst3, cont_p_m3.dn, 'c0') + _create_container(inst3, cont_p_m3.dn, 'c1', sleep=True) + + topology_m3.resume_all_replicas() + + repl.test_replication_topology(topology_m3) + + conts_dns = {} + for num in range(1, 4): + inst = topology_m3.ms["master{}".format(num)] + conts_dns[inst.serverid] = [] + conts = nsContainers(inst, test_base_m3.dn) + for cont in conts.list(): + conts_p = nsContainers(inst, cont.dn) + for cont_p in conts_p.list(): + conts_c = nsContainers(inst, cont_p.dn) + conts_dns[inst.serverid].extend([cont_c.dn for cont_c in conts_c.list()]) + conts_dns[inst.serverid].extend([cont_p.dn for cont_p in conts_p.list()]) + conts_dns[inst.serverid].extend([cont.dn for cont in conts.list()]) + + for conts1, conts2 in permutations(conts_dns.values(), 2): + assert set(conts1) == set(conts2) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + + diff --git a/dirsrvtests/tests/suites/replication/conftest.py 
b/dirsrvtests/tests/suites/replication/conftest.py new file mode 100644 index 0000000..4749211 --- /dev/null +++ b/dirsrvtests/tests/suites/replication/conftest.py @@ -0,0 +1,53 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2018 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import logging +import pytest +from lib389.topologies import create_topology +from lib389._constants import ReplicaRole + +DEBUGGING = os.getenv('DEBUGGING', default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +# Redefine some fixtures so we can use them with class scope +@pytest.fixture(scope="class") +def topology_m2(request): + """Create Replication Deployment with two masters""" + + topology = create_topology({ReplicaRole.MASTER: 2}) + + def fin(): + if DEBUGGING: + [inst.stop() for inst in topology] + else: + [inst.delete() for inst in topology] + request.addfinalizer(fin) + + return topology + + +@pytest.fixture(scope="class") +def topology_m3(request): + """Create Replication Deployment with three masters""" + + topology = create_topology({ReplicaRole.MASTER: 3}) + + def fin(): + if DEBUGGING: + [inst.stop() for inst in topology] + else: + [inst.delete() for inst in topology] + request.addfinalizer(fin) + + return topology diff --git a/dirsrvtests/tests/suites/replication/regression_test.py b/dirsrvtests/tests/suites/replication/regression_test.py index d67bea4..67a5e4b 100644 --- a/dirsrvtests/tests/suites/replication/regression_test.py +++ b/dirsrvtests/tests/suites/replication/regression_test.py @@ -186,7 +186,7 @@ def test_password_repl_error(topo_m2, test_entry): m2.deleteErrorLogs() log.info('Set replication loglevel') - m2.setLogLevel(LOG_REPLICA) + m2.config.loglevel((ErrorLog.REPLICA,)) log.info('Modifying entry {} - change 
userpassword on master 1'.format(test_entry.dn)) @@ -208,7 +208,7 @@ def test_password_repl_error(topo_m2, test_entry): assert not m2.ds_error_log.match('.*can.t add a change for {}.*'.format(test_entry.dn)) finally: log.info('Set the default loglevel') - m2.setLogLevel(LOG_DEFAULT) + m2.config.loglevel((ErrorLog.DEFAULT,)) def test_invalid_agmt(topo_m2): diff --git a/dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py b/dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py index c5a9939..90c8c15 100644 --- a/dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py +++ b/dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py @@ -13,7 +13,7 @@ from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_m2 -from lib389._constants import SUFFIX, DEFAULT_SUFFIX, LOG_REPLICA +from lib389._constants import SUFFIX, DEFAULT_SUFFIX, ErrorLog from lib389.agreement import Agreements from lib389.idm.organisationalunit import OrganisationalUnits @@ -164,8 +164,8 @@ def test_behavior_with_value(topology_m2, waitfor_async_attr, entries): master2 = topology_m2.ms["master2"] log.info("Set Replication Debugging loglevel for the errorlog") - master1.setLogLevel(LOG_REPLICA) - master2.setLogLevel(LOG_REPLICA) + master1.config.loglevel((ErrorLog.REPLICA,)) + master2.config.loglevel((ErrorLog.REPLICA,)) sync_dict = Counter() min_ap = waitfor_async_attr[1][0] diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py index f15e183..cea9b42 100644 --- a/src/lib389/lib389/__init__.py +++ b/src/lib389/lib389/__init__.py @@ -3339,10 +3339,9 @@ class DirSrv(SimpleLDAPObject, object): raise ValueError(status) return status - # This could be made to delete by filter .... 
- def delete_branch_s(self, basedn, scope): - ents = self.search_s(basedn, scope) + def delete_branch_s(self, basedn, scope, filterstr="(objectclass=*)", serverctrls=None, clientctrls=None): + ents = self.search_s(basedn, scope, filterstr) for ent in sorted(ents, key=lambda e: len(e.dn), reverse=True): self.log.debug("Delete entry children %s" % (ent.dn)) - self.delete_s(ent.dn) + self.delete_ext_s(ent.dn, serverctrls=serverctrls, clientctrls=clientctrls) diff --git a/src/lib389/lib389/_constants.py b/src/lib389/lib389/_constants.py index 1d458dd..71cf359 100644 --- a/src/lib389/lib389/_constants.py +++ b/src/lib389/lib389/_constants.py @@ -7,7 +7,7 @@ # --- END COPYRIGHT BLOCK --- import os -from enum import Enum +from enum import Enum, IntEnum from lib389.properties import * ( @@ -215,6 +215,7 @@ DISORDERLY_SHUTDOWN = ('Detected Disorderly Shutdown last time Directory ' # _Server/10/html/Administration_Guide/Configuring_Logs.html # The default log level is 16384 # +# These are legacy constants.
Please use the IntEnum versions (ErrorLog and AccessLog) instead. (LOG_TRACE, LOG_TRACE_PACKETS, LOG_TRACE_HEAVY, @@ -233,6 +234,32 @@ DISORDERLY_SHUTDOWN = ('Detected Disorderly Shutdown last time Directory ' LOG_ACL_SUMMARY) = [1 << x for x in (list(range(8)) + list(range(11, 19)))] +class ErrorLog(IntEnum): + (TRACE, + TRACE_PACKETS, + TRACE_HEAVY, + CONNECT, + PACKET, + SEARCH_FILTER, + CONFIG_PARSER, + ACL, + ENTRY_PARSER, + HOUSEKEEPING, + REPLICA, + DEFAULT, + CACHE, + PLUGIN, + MICROSECONDS, + ACL_SUMMARY) = [1 << x for x in (list(range(8)) + list(range(11, 19)))] + + +class AccessLog(IntEnum): + NONE = 0 + INTERNAL = 4 + DEFAULT = 256 # Default log level + ENTRY = 512 + MICROSECONDS = 131072 + # # Constants for individual tests # diff --git a/src/lib389/lib389/config.py b/src/lib389/lib389/config.py index 35ecf17..5dacd86 100644 --- a/src/lib389/lib389/config.py +++ b/src/lib389/lib389/config.py @@ -89,15 +89,19 @@ class Config(DSLdapObject): """ self._alter_log_enabled(service, 'off') - def loglevel(self, vals=(LOG_DEFAULT,), service='error', update=False): + def loglevel(self, vals=(ErrorLog.DEFAULT,), service='error', update=False): """Set the access or error log level. - @param vals - a list of log level codes (eg. lib389.LOG_*) + + :param vals: a list of log level codes (eg. lib389.ErrorLog.*) defaults to ErrorLog.DEFAULT - @param service - 'access' or 'error'. There is no 'audit' log level. + :type vals: list + :param service: 'access' or 'error'. There is no 'audit' log level. use enable_log or disable_log. - @param update - False for replace (default), True for update + :type service: str + :param update: False for replace (default), True for update + :type update: bool - ex. loglevel([lib389.LOG_DEFAULT, lib389.LOG_ENTRY_PARSER]) + ex.
loglevel([lib389.ErrorLog.DEFAULT, lib389.ErrorLog.ENTRY_PARSER]) """ if service not in ('access', 'error'): self._log.error('Attempted to set level on invalid log service "%s"' % service) diff --git a/src/lib389/lib389/idm/user.py b/src/lib389/lib389/idm/user.py index 38977a5..b6fe4ef 100644 --- a/src/lib389/lib389/idm/user.py +++ b/src/lib389/lib389/idm/user.py @@ -183,3 +183,25 @@ class UserAccounts(DSLdapObjects): else: self._basedn = '{},{}'.format(rdn, basedn) + def create_test_user(self, uid=1000, gid=2000): + """Create a test user with uid=test_user_UID rdn + + :param uid: User id + :type uid: int + :param gid: Group id + :type gid: int + + :returns: DSLdapObject of the created entry + """ + + rdn_value = "test_user_{}".format(uid) + rdn = "uid={}".format(rdn_value) + properties = { + 'uid': rdn_value, + 'cn': rdn_value, + 'sn': rdn_value, + 'uidNumber': str(uid), + 'gidNumber': str(gid), + 'homeDirectory': '/home/{}'.format(rdn_value) + } + return super(UserAccounts, self).create(rdn, properties) diff --git a/src/lib389/lib389/topologies.py b/src/lib389/lib389/topologies.py index 08dcada..cde8fe3 100644 --- a/src/lib389/lib389/topologies.py +++ b/src/lib389/lib389/topologies.py @@ -290,9 +290,9 @@ def topology_i2(request): def fin(): if DEBUGGING: - map(lambda inst: inst.stop(), topology.all_insts.values()) + [inst.stop() for inst in topology] else: - map(lambda inst: inst.delete(), topology.all_insts.values()) + [inst.delete() for inst in topology] request.addfinalizer(fin) return topology @@ -306,9 +306,9 @@ def topology_i3(request): def fin(): if DEBUGGING: - map(lambda inst: inst.stop(), topology.all_insts.values()) + [inst.stop() for inst in topology] else: - map(lambda inst: inst.delete(), topology.all_insts.values()) + [inst.delete() for inst in topology] request.addfinalizer(fin) return topology @@ -321,9 +321,9 @@ def topology_m1(request): def fin(): if DEBUGGING: - map(lambda inst: inst.stop(), topology.all_insts.values()) +
[inst.stop() for inst in topology] else: - map(lambda inst: inst.delete(), topology.all_insts.values()) + [inst.delete() for inst in topology] request.addfinalizer(fin) return topology @@ -337,9 +337,9 @@ def topology_m1c1(request): def fin(): if DEBUGGING: - map(lambda inst: inst.stop(), topology.all_insts.values()) + [inst.stop() for inst in topology] else: - map(lambda inst: inst.delete(), topology.all_insts.values()) + [inst.delete() for inst in topology] request.addfinalizer(fin) return topology @@ -353,9 +353,9 @@ def topology_m2(request): def fin(): if DEBUGGING: - map(lambda inst: inst.stop(), topology.all_insts.values()) + [inst.stop() for inst in topology] else: - map(lambda inst: inst.delete(), topology.all_insts.values()) + [inst.delete() for inst in topology] request.addfinalizer(fin) return topology @@ -369,9 +369,9 @@ def topology_m3(request): def fin(): if DEBUGGING: - map(lambda inst: inst.stop(), topology.all_insts.values()) + [inst.stop() for inst in topology] else: - map(lambda inst: inst.delete(), topology.all_insts.values()) + [inst.delete() for inst in topology] request.addfinalizer(fin) return topology @@ -385,9 +385,9 @@ def topology_m4(request): def fin(): if DEBUGGING: - map(lambda inst: inst.stop(), topology.all_insts.values()) + [inst.stop() for inst in topology] else: - map(lambda inst: inst.delete(), topology.all_insts.values()) + [inst.delete() for inst in topology] request.addfinalizer(fin) return topology @@ -402,9 +402,9 @@ def topology_m2c2(request): def fin(): if DEBUGGING: - map(lambda inst: inst.stop(), topology.all_insts.values()) + [inst.stop() for inst in topology] else: - map(lambda inst: inst.delete(), topology.all_insts.values()) + [inst.delete() for inst in topology] request.addfinalizer(fin) return topology @@ -480,9 +480,9 @@ def topology_m1h1c1(request): def fin(): if DEBUGGING: - map(lambda inst: inst.stop(), instances) + [inst.stop() for inst in instances] else: - map(lambda inst: inst.delete(), instances) + 
[inst.delete() for inst in instances] request.addfinalizer(fin) return TopologyMain(masters={"master1": master}, hubs={"hub1": hub}, consumers={"consumer1": consumer})