From 5067650cf6bdebe85fb83bd7375c158480d17c95 Mon Sep 17 00:00:00 2001 From: Sankar Ramalingam Date: Thu, 6 Jul 2017 18:55:19 +0530 Subject: [PATCH] Ticket #49031 - Improve memberof with a cache of group parents Description: Online import and add of bulk entries with nested groups to compare the performance of memberOf plugin. https://pagure.io/389-ds-base/issue/49031 Reviewed by: ? --- dirsrvtests/tests/perf/create_data.py | 289 +++++++++++++++++++++++ dirsrvtests/tests/perf/memberof_test.py | 391 ++++++++++++++++++++++++++++++++ 2 files changed, 680 insertions(+) create mode 100755 dirsrvtests/tests/perf/create_data.py create mode 100644 dirsrvtests/tests/perf/memberof_test.py diff --git a/dirsrvtests/tests/perf/create_data.py b/dirsrvtests/tests/perf/create_data.py new file mode 100755 index 0000000..0d7e385 --- /dev/null +++ b/dirsrvtests/tests/perf/create_data.py @@ -0,0 +1,289 @@ +#!/usr/bin/python2 +from __future__ import ( + print_function, + division +) + +import sys +import math + + +class RHDSData(object): + def __init__( + self, + stream=sys.stdout, + users=10000, + groups=100, + grps_puser=20, + nest_level=10, + ngrps_puser=10, + domain="redhat.com", + basedn="dc=example,dc=com", + ): + self.users = users + self.groups = groups + self.basedn = basedn + self.domain = domain + self.stream = stream + + self.grps_puser = grps_puser + self.nest_level = nest_level + self.ngrps_puser = ngrps_puser + + self.user_defaults = { + 'objectClass': [ + 'person', + 'top', + 'inetorgperson', + 'organizationalperson', + 'inetuser', + 'posixaccount'], + 'uidNumber': ['-1'], + 'gidNumber': ['-1'], + } + + self.group_defaults = { + 'objectClass': [ + 'top', + 'inetuser', + 'posixgroup', + 'groupofnames'], + 'gidNumber': [-1], + } + + def put_entry(self, entry): + """ + Abstract method, implementation depends on if we want just print LDIF, + or update LDAP directly + """ + raise NotImplementedError() + + def gen_user(self, uid): + user = dict(self.user_defaults) + user['dn'] = 'uid={uid},ou=people,{suffix}'.format( + uid=uid, + suffix=self.basedn, + ) + user['uid'] = [uid] + user['displayName'] = ['{} {}'.format(uid, uid)] + user['sn'] = [uid] + user['homeDirectory'] = ['/other-home/{}'.format(uid)] + user['mail'] = ['{uid}@{domain}'.format( + uid=uid, domain=self.domain)] + user['givenName'] = [uid] + user['cn'] = ['{} {}'.format(uid, uid)] + + return user + + def username_generator(self, start, stop, step=1): + for i in range(start, stop, step): + yield 'user%s' % i + + def gen_group(self, name, members=(), group_members=()): + group = dict(self.group_defaults) + group['dn'] = 'cn={name},ou=groups,{suffix}'.format( + name=name, + suffix=self.basedn, + ) + group['cn'] = [name] + group['member'] = ['uid={uid},ou=people,{suffix}'.format( + uid=uid, + suffix=self.basedn, + ) for uid in members] + group['member'].extend( + ['cn={name},ou=groups,{suffix}'.format( + name=name, + suffix=self.basedn, + ) for name in group_members]) + return group + + def groupname_generator(self, start, stop, step=1): + for i in range(start, stop, step): + yield 'group%s' % i + + def gen_users_and_groups(self): + self.__gen_entries_with_groups( + self.users, + self.groups, + self.grps_puser, + self.ngrps_puser, + self.nest_level, + self.username_generator, self.gen_user, + self.groupname_generator, self.gen_group + ) + + def __gen_entries_with_groups( + self, + num_of_entries, + num_of_groups, + groups_per_entry, + nested_groups_per_entry, + max_nesting_level, + gen_entry_name_f, gen_entry_f, + gen_group_name_f, 
gen_group_f + ): + assert num_of_groups % groups_per_entry == 0 + assert num_of_groups >= groups_per_entry + assert groups_per_entry > nested_groups_per_entry + assert max_nesting_level > 0 + assert nested_groups_per_entry > 0 + assert ( + groups_per_entry - nested_groups_per_entry > + int(math.ceil(nested_groups_per_entry / float(max_nesting_level))) + ), ( + "At least {} groups is required to generate proper amount of " + "nested groups".format( + nested_groups_per_entry + + int(math.ceil( + nested_groups_per_entry / float(max_nesting_level)) + ) + ) + ) + + for uid in gen_entry_name_f(0, num_of_entries): + self.put_entry(gen_entry_f(uid)) + + # create N groups per entry, of them are nested + # User/Host (max nesting level = 2) + # | + # +--- G1 --- G2 (nested) --- G3 (nested, max level) + # | + # +--- G5 --- G6 (nested) + # | + # ...... + # | + # +--- GN + + # how many members should be added to groups (set of groups_per_entry + # have the same members) + entries_per_group = num_of_entries // (num_of_groups // groups_per_entry) + + # generate groups and put users there + for i in range(num_of_groups // groups_per_entry): + + uids = list(gen_entry_name_f( + i * entries_per_group, + (i + 1) * entries_per_group + )) + + # per user + last_grp_name = None + nest_lvl = 0 + nested_groups_added = 0 + + for group_name in gen_group_name_f( + i * groups_per_entry, + (i + 1) * groups_per_entry, + ): + # create nested groups first + if nested_groups_added < nested_groups_per_entry: + if nest_lvl == 0: + # the top group + self.put_entry( + gen_group_f( + group_name, + members=uids + ) + ) + nest_lvl += 1 + nested_groups_added += 1 + elif nest_lvl == max_nesting_level: + # the last level group this group is not nested + self.put_entry( + gen_group_f( + group_name, + group_members=[last_grp_name], + ) + ) + nest_lvl = 0 + else: + # mid level group + self.put_entry( + gen_group_f( + group_name, + group_members=[last_grp_name] + ) + ) + nested_groups_added += 1 + nest_lvl += 1 + + last_grp_name = group_name + else: + # rest of groups have direct membership + if nest_lvl != 0: + # assign the last nested group if exists + self.put_entry( + gen_group_f( + group_name, + members=uids, + group_members=[last_grp_name], + ) + ) + nest_lvl = 0 + else: + self.put_entry( + gen_group_f( + group_name, + members=uids + ) + ) + + def __generate_entries_with_users_groups( + self, + num_of_entries_direct_members, + num_of_entries_indirect_members, + entries_per_user, + entries_per_group, + gen_entry_name_f, gen_entry_f, + ): + assert num_of_entries_direct_members % entries_per_user == 0 + assert num_of_entries_indirect_members % entries_per_group == 0 + + num_of_entries = num_of_entries_direct_members + num_of_entries_indirect_members + + # direct members + users_per_entry = self.users // (num_of_entries_direct_members // entries_per_user) + + start_user = 0 + stop_user = users_per_entry + for name in gen_entry_name_f(0, num_of_entries_direct_members): + self.put_entry( + gen_entry_f( + name, + user_members=self.username_generator(start_user, stop_user), + ) + ) + start_user = stop_user % self.users + stop_user = start_user + users_per_entry + stop_user = stop_user if stop_user < self.users else self.users + + groups_per_entry = self.groups // (num_of_entries_indirect_members // entries_per_group) + + # indirect members + start_group = 0 + stop_group = groups_per_entry + for name in gen_entry_name_f(num_of_entries_direct_members, num_of_entries): + self.put_entry( + gen_entry_f( + name, + 
usergroup_members=self.groupname_generator(start_group, stop_group), + ) + ) + start_group = stop_group % self.groups + stop_group = start_group + groups_per_entry + stop_group = stop_group if stop_group < self.groups else self.groups + + def do_magic(self): + self.gen_users_and_groups() + + +class RHDSDataLDIF(RHDSData): + def put_entry(self, entry): + print(file=self.stream) + print("dn:", entry['dn'], file=self.stream) + for k, values in entry.items(): + if k == 'dn': + continue + for v in values: + print("{}: {}".format(k, v), file=self.stream) + print(file=self.stream) diff --git a/dirsrvtests/tests/perf/memberof_test.py b/dirsrvtests/tests/perf/memberof_test.py new file mode 100644 index 0000000..7cd3a7f --- /dev/null +++ b/dirsrvtests/tests/perf/memberof_test.py @@ -0,0 +1,391 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from create_data import * +from lib389.tasks import * +from lib389.utils import * +from lib389._constants import * +from lib389.dseldif import DSEldif +from subprocess import check_output +from lib389.topologies import topology_st as topo + +MEMOF_PLUGIN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') +MAN_ENTRY_PLUGIN = ('cn=' + PLUGIN_MANAGED_ENTRY + ',cn=plugins,cn=config') +AUTO_MEM_PLUGIN = ('cn=' + PLUGIN_AUTOMEMBER + ',cn=plugins,cn=config') +DOMAIN = 'redhat.com' +LDAP_MOD = '/usr/bin/ldapmodify' +FILTER = 'objectClass=*' +USER_FILTER = '(|(uid=user*)(cn=group*))' +MEMBEROF_ATTR = 'memberOf' +DN_ATTR = 'dn:' + + +@pytest.fixture(scope="module") +def memberof_setup(topo, request): + """Configure required plugins and restart the server""" + + log.info('Configuring memberOf, managedEntry and autoMembers plugins and restarting the server') + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + try: + topo.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + except ldap.LDAPError as e: + log.error('Failed to enable {} plugin'.format(PLUGIN_MEMBER_OF)) + raise e + try: + topo.standalone.plugins.enable(name=PLUGIN_MANAGED_ENTRY) + topo.standalone.plugins.enable(name=PLUGIN_AUTOMEMBER) + except ldap.LDAPError as e: + log.error('Failed to enable {}, {} plugins'.format(PLUGIN_MANAGED_ENTRY, PLUGIN_AUTOMEMBER)) + + log.info('Change config values for db-locks and dbcachesize to import large ldif files') + if ds_is_older('1.3.6'): + topo.standalone.stop(timeout=10) + dse_ldif = DSEldif(topo.standalone) + try: + dse_ldif.replace(DN_CONFIG_LDBM, 'nsslapd-db-locks', '100000') + dse_ldif.replace(DN_CONFIG_LDBM, 'nsslapd-dbcachesize', '10000000') + except: + log.error('Failed to replace cn=config values of db-locks and dbcachesize') + raise + topo.standalone.start(timeout=10) + else: + try: + topo.standalone.modify_s(DN_CONFIG_LDBM, [(ldap.MOD_REPLACE, 'nsslapd-db-locks', '100000')]) + topo.standalone.modify_s(DN_CONFIG_LDBM, [(ldap.MOD_REPLACE, 'nsslapd-cache-autosize', '0')]) + topo.standalone.modify_s(DN_CONFIG_LDBM, [(ldap.MOD_REPLACE, 'nsslapd-dbcachesize', '10000000')]) + except ldap.LDAPError as e: + log.error( + 'Failed to replace values of nsslapd-db-locks and nsslapd-dbcachesize {}'.format(e.message['desc'])) + raise e + topo.standalone.restart(timeout=10) + + def fin(): + log.info('Disabling plugins {}, {}, {}'.format(PLUGIN_MEMBER_OF, PLUGIN_MANAGED_ENTRY, PLUGIN_AUTOMEMBER)) + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + try: + 
topo.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+            topo.standalone.plugins.disable(name=PLUGIN_MANAGED_ENTRY)
+            topo.standalone.plugins.disable(name=PLUGIN_AUTOMEMBER)
+        except ldap.LDAPError as e:
+            log.error('Failed to disable plugins, {}'.format(e.message['desc']))
+            assert False
+        topo.standalone.restart(timeout=10)
+
+    request.addfinalizer(fin)
+
+
+def _create_base_ldif(topo, import_base=False):
+    """Create a base LDIF file used to clean all entries from the suffix"""
+
+    log.info('Add base entry for online import')
+    ldif_dir = topo.standalone.get_ldif_dir()
+    ldif_file = os.path.join(ldif_dir, 'perf.ldif')
+    base_ldif = """dn: dc=example,dc=com
+objectclass: top
+objectclass: domain
+dc: example
+
+dn: ou=people,dc=example,dc=com
+objectclass: top
+objectclass: organizationalUnit
+ou: people
+
+dn: ou=groups,dc=example,dc=com
+objectclass: top
+objectclass: organizationalUnit
+ou: groups
+"""
+    with open(ldif_file, "w") as fd:
+        fd.write(base_ldif)
+    if import_base:
+        log.info('Adding base entry to suffix to remove users/groups and leave only the OUs')
+        try:
+            topo.standalone.tasks.importLDIF(suffix=SUFFIX, input_file=ldif_file, args={TASK_WAIT: True})
+        except ValueError as e:
+            log.error('Online import failed: {}'.format(e))
+            assert False
+    else:
+        log.info('Return LDIF file')
+        return ldif_file
+
+
+def _run_fixup_memberof(topo):
+    """Run the fixup memberOf task and measure the time taken"""
+
+    log.info('Running fixup memberOf task and measuring the time taken')
+    start = time.time()
+    try:
+        topo.standalone.tasks.fixupMemberOf(suffix=SUFFIX, args={TASK_WAIT: True})
+    except ValueError as e:
+        log.error('Running fixup memberOf task failed: {}'.format(e))
+        assert False
+    end = time.time()
+    cmd_time = int(end - start)
+    return cmd_time
+
+
+def _nested_import_add_ldif(topo, nof_users, nof_groups, grps_user, ngrps_user, nof_depth, is_import=False):
+    """Create an LDIF file for the given number of users, groups and nesting depth, then load it via online import or ldapmodify"""
+
+    log.info('Create base entry before adding users and groups')
+    data_ldif = _create_base_ldif(topo, False)
+
+    log.info('Create LDIF file with users and nested groups to add it to server')
+    with open(data_ldif, 'a') as file1:
+        data = RHDSDataLDIF(stream=file1, users=nof_users, groups=nof_groups, grps_puser=grps_user,
+                            nest_level=nof_depth, ngrps_puser=ngrps_user, basedn=SUFFIX)
+        data.do_magic()
+
+    if is_import:
+        log.info('Run importLDIF task to add entries to server')
+        exp_entries = nof_users + nof_groups
+        start = time.time()
+        try:
+            topo.standalone.tasks.importLDIF(suffix=SUFFIX, input_file=data_ldif, args={TASK_WAIT: True})
+        except ValueError as e:
+            log.error('Online import failed: {}'.format(e))
+            assert False
+        end = time.time()
+        time_import = int(end - start)
+
+        log.info('Check if the number of entries created matches the expected entries')
+        users_groups = topo.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, USER_FILTER, [DN_ATTR])
+        act_entries = str(users_groups).count(DN_ATTR)
+        log.info('Expected entries: {}, Actual entries: {}'.format(exp_entries, act_entries))
+        assert act_entries == exp_entries
+        return time_import
+    else:
+        log.info('Run ldapmodify to add entries to server')
+        start = time.time()
+        ldap_add = '{} -cx -p {} -h {} -D "{}" -w {} -af {} > /dev/null'.format(LDAP_MOD, PORT_STANDALONE,
+                                                                                HOST_STANDALONE, DN_DM, PASSWORD,
+                                                                                data_ldif)
+        try:
+            # check_output blocks until ldapmodify finishes, so the measured
+            # time covers the complete add operation
+            check_output(ldap_add, shell=True)
+        except Exception as e:
+            log.error('Adding entries using ldapmodify failed: {}'.format(e))
+            assert False
+        end = time.time()
+        cmd_time = int(end - start)
+        log.info('Time taken to complete LDAPADD: {} secs'.format(cmd_time))
+        return cmd_time
+
+
+def _sync_memberof_attrs(topo, exp_memberof):
+    """Wait until the expected number of memberOf attributes is present and return the time the sync took"""
+
+    log.info('_sync_memberof_attrs: Check if expected memberOf attributes are synced/created')
+    loop = 0
+    start = time.time()
+    entries = topo.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, FILTER, [MEMBEROF_ATTR])
+    act_memberof = str(entries).count(MEMBEROF_ATTR)
+    end = time.time()
+    cmd_time = int(end - start)
+    log.info('Loop-{}, expected memberOf attrs: {}, synced: {}, time for search-{} secs'.format(loop, exp_memberof,
+                                                                                                act_memberof, cmd_time))
+    while act_memberof != exp_memberof:
+        loop = loop + 1
+        time.sleep(30)
+        start = time.time()
+        entries = topo.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, FILTER, [MEMBEROF_ATTR])
+        act_memberof = str(entries).count(MEMBEROF_ATTR)
+        end = time.time()
+        cmd_time = cmd_time + int(end - start)
+        log.info('Loop-{}, expected memberOf attrs: {}, synced: {}, time for search-{} secs'.format(loop, exp_memberof,
+                                                                                                    act_memberof,
+                                                                                                    cmd_time))
+        # Worst case scenario, exit the test after 10hrs of wait (1200 loops * 30 secs)
+        if loop > 1200:
+            log.error('Either syncing memberOf attrs takes too long or there is an issue with the test itself')
+            assert False
+    sync_time = 1 + loop * 30
+    log.info('Expected memberOf attrs: {}, Actual memberOf attrs: {}'.format(exp_memberof, act_memberof))
+    assert act_memberof == exp_memberof
+    return sync_time
+
+
+@pytest.mark.parametrize("nof_users, nof_groups, grps_user, ngrps_user, nof_depth",
+                         [(20000, 100, 20, 10, 5), (50000, 500, 100, 20, 10), (100000, 1000, 100, 20, 10)])
+def test_nestgrps_import(topo, memberof_setup, nof_users, nof_groups, grps_user, ngrps_user, nof_depth):
+    """Import a large number of users and nested groups with N depth and measure the time taken
+
+    :ID: 169a09f2-2c2d-4e42-8b90-a0bd1034f278
+    :feature: MemberOf Plugin
+    :setup: Standalone instance, memberOf plugin enabled
+    :steps: 1. Create LDIF file for given nof_users and nof_groups
+            2. Import entries to server
+            3. Check if entries are created
+            4. Run fixupMemberOf task to create memberOf attributes
+            5. Check if memberOf attributes are synced for all users and groups
+            6. Compare the actual number of memberOf attributes to the expected
+            7. Measure the time taken to sync memberOf attributes
+    :expectedresults: MemberOf attributes should be synced
+    """
+
+    exp_memberof = (nof_users * grps_user) + (
+            (nof_groups // grps_user) * (ngrps_user // nof_depth) * (nof_depth * (nof_depth + 1)) // 2)
+    log.info('Create nested ldif file with users-{}, groups-{}, nested-{}'.format(nof_users, nof_groups, nof_depth))
+    log.info('Import LDIF file and measure the time taken')
+    import_time = _nested_import_add_ldif(topo, nof_users, nof_groups, grps_user, ngrps_user, nof_depth, True)
+
+    log.info('Run fixup memberOf task and measure the time taken to complete the task')
+    fixup_time = _run_fixup_memberof(topo)
+
+    log.info('Check the total number of memberOf entries created for users and groups')
+    sync_memberof = _sync_memberof_attrs(topo, exp_memberof)
+
+    total_time = import_time + fixup_time + sync_memberof
+    log.info('Time for import-{}secs, fixup task-{}secs, total time for memberOf sync: {}secs'.format(import_time,
+                                                                                                      fixup_time,
+                                                                                                      total_time))
+
+
+@pytest.mark.parametrize("nof_users, nof_groups, grps_user, ngrps_user, nof_depth",
+                         [(20000, 100, 20, 10, 5), (50000, 500, 100, 20, 10), (100000, 1000, 100, 20, 10)])
+def test_nestgrps_add(topo, memberof_setup, nof_users, nof_groups, grps_user, ngrps_user, nof_depth):
+    """Add a large number of users and nested groups with N depth via ldapmodify and measure the time taken
+
+    :ID: 6eda75c6-5ae0-4b17-b610-d217d7ec7542
+    :feature: MemberOf Plugin
+    :setup: Standalone instance, memberOf plugin enabled
+    :steps: 1. Create LDIF file for given nof_users and nof_groups
+            2. Add entries using LDAPADD
+            3. Check if entries are created
+            4. Check if memberOf attributes are synced for all users and groups
+            5. Compare the actual number of memberOf attributes to the expected
+            6. Measure the time taken to sync memberOf attributes
+    :expectedresults: MemberOf attributes should be created and synced
+    """
+
+    exp_memberof = (nof_users * grps_user) + (
+            (nof_groups // grps_user) * (ngrps_user // nof_depth) * (nof_depth * (nof_depth + 1)) // 2)
+    log.info('Creating base_ldif file and importing it to wipe out all users and groups')
+    _create_base_ldif(topo, True)
+    log.info('Create nested ldif file with users-{}, groups-{}, nested-{}'.format(nof_users, nof_groups, nof_depth))
+    log.info('Run LDAPADD to add entries to server')
+    add_time = _nested_import_add_ldif(topo, nof_users, nof_groups, grps_user, ngrps_user, nof_depth, False)
+
+    log.info('Check the total number of memberOf entries created for users and groups')
+    sync_memberof = _sync_memberof_attrs(topo, exp_memberof)
+    total_time = add_time + sync_memberof
+    log.info('Time for ldapadd-{}secs, total time for memberOf sync: {}secs'.format(add_time, total_time))
+
+
+@pytest.mark.parametrize("nof_users, nof_groups, grps_user, ngrps_user, nof_depth",
+                         [(20000, 100, 20, 10, 5), (50000, 500, 100, 20, 10), (100000, 1000, 100, 20, 10)])
+def test_mod_nestgrp(topo, memberof_setup, nof_users, nof_groups, grps_user, ngrps_user, nof_depth):
+    """Import bulk entries, modify nested groups at N depth and measure the time taken
+
+    :ID: 4bf8e753-6ded-4177-8225-aaf6aef4d131
+    :feature: MemberOf Plugin
+    :setup: Standalone instance, memberOf plugin enabled
+    :steps: 1. Import bulk entries with nested groups and create memberOf attributes
+            2. Modify nested groups by adding new members at each nested level
+            3. Check new memberOf attributes created for users and groups
+            4. Compare the actual memberOf attributes with the expected
+            5. Measure the time taken to sync memberOf attributes
+    :expectedresults: MemberOf attributes should be modified and synced
+    """
+
+    exp_memberof = (nof_users * grps_user) + (
+            (nof_groups // grps_user) * (ngrps_user // nof_depth) * (nof_depth * (nof_depth + 1)) // 2)
+    log.info('Create nested ldif file, import it and measure the time taken')
+    import_time = _nested_import_add_ldif(topo, nof_users, nof_groups, grps_user, ngrps_user, nof_depth, True)
+    log.info('Run fixup memberOf task and measure the time to complete the task')
+    fixup_time = _run_fixup_memberof(topo)
+    sync_memberof = _sync_memberof_attrs(topo, exp_memberof)
+    total_time = import_time + fixup_time + sync_memberof
+    log.info('Time for import-{}secs, fixup task-{}secs, total time for memberOf sync: {}secs'.format(import_time,
+                                                                                                      fixup_time,
+                                                                                                      total_time))
+
+    log.info('Add {} users to the existing nested groups at all depth levels'.format(nof_groups))
+    log.info('Add one user to each group at the different nest levels')
+    start = time.time()
+    for usr in range(nof_groups):
+        usrrdn = 'newcliusr{}'.format(usr)
+        userdn = 'uid={},ou=people,{}'.format(usrrdn, SUFFIX)
+        groupdn = 'cn=group{},ou=groups,{}'.format(usr, SUFFIX)
+        try:
+            topo.standalone.add_s(Entry((userdn, {
+                'objectclass': 'top person inetUser inetOrgperson'.split(),
+                'cn': usrrdn,
+                'sn': usrrdn,
+                'userpassword': 'Secret123'})))
+        except ldap.LDAPError as e:
+            log.error('Failed to add {} user: error {}'.format(userdn, e.message['desc']))
+            raise
+        try:
+            topo.standalone.modify_s(groupdn, [(ldap.MOD_ADD, 'member', userdn)])
+        except ldap.LDAPError as e:
+            log.error('Error-{}: Failed to add user to group'.format(e.message['desc']))
+            assert False
+    end = time.time()
+    cmd_time = int(end - start)
+
+    exp_memberof = (nof_users * grps_user) + nof_groups + (
+            (nof_groups // grps_user) * (ngrps_user // nof_depth) * (nof_depth * (nof_depth + 1)))
+    log.info('Check the total number of memberOf entries created for users and groups')
+    sync_memberof = _sync_memberof_attrs(topo, exp_memberof)
+    total_time = cmd_time + sync_memberof
+    log.info('Time taken to add new members to existing nested groups + memberOf sync: {} secs'.format(total_time))
+
+
+@pytest.mark.parametrize("nof_users, nof_groups, grps_user, ngrps_user, nof_depth",
+                         [(20000, 100, 20, 10, 5), (50000, 500, 100, 20, 10), (100000, 1000, 100, 20, 10)])
+def test_del_nestgrp(topo, memberof_setup, nof_users, nof_groups, grps_user, ngrps_user, nof_depth):
+    """Import bulk entries, delete nested groups at N depth and measure the time taken
+
+    :ID: d3d82ac5-d968-4cd6-a268-d380fc9fd51b
+    :feature: MemberOf Plugin
+    :setup: Standalone instance, memberOf plugin enabled
+    :steps: 1. Import bulk users and groups with nested level N
+            2. Run fixup memberOf task to create memberOf attributes
+            3. Delete nested groups at nested level N
+            4. Check memberOf attributes deleted for users and groups
+            5. Compare the actual memberOf attributes with the expected
+            6. Measure the time taken to sync memberOf attributes
+    :expectedresults: MemberOf attributes should be deleted and synced
+    """
+
+    exp_memberof = (nof_users * grps_user) + (
+            (nof_groups // grps_user) * (ngrps_user // nof_depth) * (nof_depth * (nof_depth + 1)) // 2)
+    log.info('Create nested ldif file, import it and measure the time taken')
+    import_time = _nested_import_add_ldif(topo, nof_users, nof_groups, grps_user, ngrps_user, nof_depth, True)
+    log.info('Run fixup memberOf task and measure the time to complete the task')
+    fixup_time = _run_fixup_memberof(topo)
+    sync_memberof = _sync_memberof_attrs(topo, exp_memberof)
+    total_time = import_time + fixup_time + sync_memberof
+    log.info('Time taken to complete add users + memberOf sync: {} secs'.format(total_time))
+
+    log.info('Delete groups from nested groups at depth level-{}'.format(nof_depth))
+    start = time.time()
+    for nos in range(nof_depth, nof_groups, grps_user):
+        groupdn = 'cn=group{},ou=groups,{}'.format(nos, SUFFIX)
+        try:
+            topo.standalone.delete_s(groupdn)
+        except ldap.LDAPError as e:
+            log.error('Error-{}: Failed to delete group'.format(e.message['desc']))
+            assert False
+    end = time.time()
+    cmd_time = int(end - start)
+
+    exp_memberof = exp_memberof - (nof_users + (nof_depth * (nof_groups // grps_user)))
+    log.info('Check memberOf attributes after deleting groups at depth-{}'.format(nof_depth))
+    sync_memberof = _sync_memberof_attrs(topo, exp_memberof)
+    total_time = cmd_time + sync_memberof
+    log.info('Time taken to delete and sync memberOf attributes: {}secs'.format(total_time))
+
+
+if __name__ == '__main__':
+    # Run isolated
+    # -s for DEBUG mode
+    CURRENT_FILE = os.path.realpath(__file__)
+    pytest.main("-s {}".format(CURRENT_FILE))
-- 
2.7.4
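Reviewer note (not part of the patch): create_data.py has no command-line entry point, so a quick way to inspect the generated data outside pytest is to drive RHDSDataLDIF directly. The sketch below is illustrative only: the output path and the sizes are assumptions, and the sizes must satisfy the generator's internal assertions (groups divisible by grps_puser, grps_puser greater than ngrps_puser, and so on). The perf tests themselves prepend the dc/ou base entries via _create_base_ldif() before appending this data, so a manually generated file needs the same base entries before it can be imported.

    #!/usr/bin/python2
    # Hypothetical standalone usage of create_data.py (sketch, not in the patch):
    # write a small nested-group LDIF for manual inspection.
    from create_data import RHDSDataLDIF

    # 2000 users, 20 groups, 10 groups per user of which 5 are nested, depth 5
    with open('/tmp/perf-sample.ldif', 'w') as out:   # illustrative path
        data = RHDSDataLDIF(stream=out, users=2000, groups=20, grps_puser=10,
                            nest_level=5, ngrps_puser=5, basedn='dc=example,dc=com')
        data.do_magic()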