author     John Dennis <jdennis@redhat.com>        2012-04-04 15:19:29 -0400
committer  Rob Crittenden <rcritten@redhat.com>    2012-04-04 22:47:08 -0400
commit     af0ac845ff2ebb5a7c045acdc17dc656522039aa (patch)
tree       e9ea9b839d5c02bdeee469efbaaf1a7bf40b6328 /ipaserver
parent     cdebb29fecdd959808b4f0ea0245e371eba06eae (diff)
improve handling of ds instances during uninstall
Ticket #2502

* Remove the "running" flag from backup_state in cainstance.py and
  dsinstance.py because it does not provide the correct information. In
  cainstance the running flag was never referenced, because restarting dirsrv
  instances occurs later, in dsinstance. In dsinstance the running flag, when
  set, incorrectly identified the PKI ds instance configured earlier by
  cainstance. The intent was to determine whether any ds instances other than
  those owned by IPA would need to be restarted upon uninstall; the PKI ds
  instance clearly does not qualify. At the conclusion of dsinstance.uninstall
  we generated a traceback when we tried to start the remaining ds instances
  indicated by the running flag, but there were none to restart (the flag had
  been set as a consequence of the PKI ds instance).

* We only want to restart ds instances if there are other ds instances
  besides those owned by IPA. We shouldn't be stopping all ds instances
  either, but that will be covered by another ticket. The fix for restarting
  other ds instances at the end of uninstall is to check whether any ds
  instances remain after we've removed ours and, if so, restart them. It is
  irrelevant whether those ds instances were present when we installed; it
  only matters whether they exist after we restore things during uninstall.
  If they are present we have to start them back up, because we shut them
  down during uninstall.

* Add a new function get_ds_instances() which returns a list of existing ds
  instances (see the sketch below).

* Fix error messages that incorrectly said we "failed to restart" a ds
  instance when it should be "failed to create".
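For illustration only, a minimal sketch of how the new helper and the
uninstall-time restart loop fit together. get_ds_instances() here mirrors the
version added to dsinstance.py in the diff below; restart_remaining_instances()
and its restart parameter are hypothetical stand-ins for the
ipaservices.knownservices.dirsrv.restart() call used in the actual patch.

    import os

    def get_ds_instances(dirsrv_instance_dir='/etc/dirsrv'):
        """Return a sorted list of 389ds instance names found on disk.

        Directories named 'slapd-<instance>' count; entries ending in
        '.removed' are skipped, matching 389ds behavior.
        """
        prefix = 'slapd-'
        instances = []
        for basename in os.listdir(dirsrv_instance_dir):
            pathname = os.path.join(dirsrv_instance_dir, basename)
            if not os.path.isdir(pathname):
                continue
            if basename.startswith(prefix) and not basename.endswith('.removed'):
                instance = basename[len(prefix):]
                if instance:
                    instances.append(instance)
        instances.sort()
        return instances

    def restart_remaining_instances(restart):
        """Hypothetical wrapper: restart whatever ds instances survive uninstall.

        'restart' stands in for ipaservices.knownservices.dirsrv.restart and
        is injected here only to keep the sketch self-contained.
        """
        for ds_instance in get_ds_instances():
            try:
                restart(ds_instance)
            except Exception as e:
                print('Unable to restart ds instance %s: %s' % (ds_instance, e))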
Diffstat (limited to 'ipaserver')
-rw-r--r--  ipaserver/install/cainstance.py    7
-rw-r--r--  ipaserver/install/dsinstance.py   44
2 files changed, 37 insertions, 14 deletions
diff --git a/ipaserver/install/cainstance.py b/ipaserver/install/cainstance.py
index f953100be..64175e482 100644
--- a/ipaserver/install/cainstance.py
+++ b/ipaserver/install/cainstance.py
@@ -292,7 +292,6 @@ class CADSInstance(service.Service):
root_logger.critical("failed to add user %s" % e)
def __create_instance(self):
- self.backup_state("running", dsinstance.is_ds_running())
self.backup_state("serverid", self.serverid)
inf_txt = ipautil.template_str(INF_TEMPLATE, self.sub_dict)
@@ -310,7 +309,7 @@ class CADSInstance(service.Service):
ipautil.run(args)
root_logger.debug("completed creating ds instance")
except ipautil.CalledProcessError, e:
- root_logger.critical("failed to restart ds instance %s" % e)
+ root_logger.critical("failed to create ds instance %s" % e)
inf_fd.close()
def load_pkcs12(self):
@@ -383,13 +382,9 @@ class CADSInstance(service.Service):
if self.is_configured():
self.print_msg("Unconfiguring CA directory server")
- running = self.restore_state("running")
enabled = self.restore_state("enabled")
serverid = self.restore_state("serverid")
- if not running is None:
- ipaservices.knownservices.dirsrv.stop(self.serverid)
-
if not enabled is None and not enabled:
ipaservices.knownservices.dirsrv.disable()
diff --git a/ipaserver/install/dsinstance.py b/ipaserver/install/dsinstance.py
index d82454d04..63672067a 100644
--- a/ipaserver/install/dsinstance.py
+++ b/ipaserver/install/dsinstance.py
@@ -90,6 +90,34 @@ def erase_ds_instance_data(serverid):
# except:
# pass
+def get_ds_instances():
+ '''
+ Return a sorted list of all 389ds instances.
+
+ If the instance name ends with '.removed' it is ignored. This
+ matches 389ds behavior.
+ '''
+
+ dirsrv_instance_dir='/etc/dirsrv'
+ instance_prefix = 'slapd-'
+
+ instances = []
+
+ for basename in os.listdir(dirsrv_instance_dir):
+ pathname = os.path.join(dirsrv_instance_dir, basename)
+ # Must be a directory
+ if os.path.isdir(pathname):
+ # Must start with prefix and not end with .removed
+ if basename.startswith(instance_prefix) and not basename.endswith('.removed'):
+ # Strip off prefix
+ instance = basename[len(instance_prefix):]
+ # Must be non-empty
+ if instance:
+ instances.append(instance)
+
+ instances.sort()
+ return instances
+
def check_ports():
ds_unsecure = installutils.port_available(389)
ds_secure = installutils.port_available(636)
@@ -307,7 +335,6 @@ class DsInstance(service.Service):
def __create_instance(self):
pent = pwd.getpwnam(DS_USER)
- self.backup_state("running", is_ds_running())
self.backup_state("serverid", self.serverid)
self.fstore.backup_file("/etc/sysconfig/dirsrv")
@@ -339,7 +366,7 @@ class DsInstance(service.Service):
ipautil.run(args)
root_logger.debug("completed creating ds instance")
except ipautil.CalledProcessError, e:
- root_logger.critical("failed to restart ds instance %s" % e)
+ root_logger.critical("failed to create ds instance %s" % e)
# check for open port 389 from now on
self.open_ports.append(389)
@@ -598,12 +625,8 @@ class DsInstance(service.Service):
if self.is_configured():
self.print_msg("Unconfiguring directory server")
- running = self.restore_state("running")
enabled = self.restore_state("enabled")
- if not running is None:
- self.stop()
-
try:
self.fstore.restore_file("/etc/security/limits.conf")
self.fstore.restore_file("/etc/sysconfig/dirsrv")
@@ -634,8 +657,13 @@ class DsInstance(service.Service):
self.restore_state('nsslapd-security')
self.restore_state('nsslapd-ldapiautobind')
- if running:
- self.start()
+ # If any dirsrv instances remain after we've removed ours then
+ # (re)start them.
+ for ds_instance in get_ds_instances():
+ try:
+ ipaservices.knownservices.dirsrv.restart(ds_instance)
+ except Exception, e:
+ root_logger.error('Unable to restart ds instance %s: %s', ds_instance, e)
# we could probably move this function into the service.Service
# class - it's very generic - all we need is a way to get an