| field | value | date |
|---|---|---|
| author | Jan Cholasta <jcholast@redhat.com> | 2016-11-23 15:42:08 +0100 |
| committer | Martin Basti <mbasti@redhat.com> | 2016-11-29 14:50:51 +0100 |
| commit | 70c3cd7f482bee7d5ad12062daa7ad6181a29094 (patch) | |
| tree | b61727b8e04364b3c26a859d4a3e691fb32a23eb /ipaclient/install | |
| parent | d43b57d2ce8552ed4977dcc33667b4226fe3333b (diff) | |
| download | freeipa-70c3cd7f482bee7d5ad12062daa7ad6181a29094.tar.gz, freeipa-70c3cd7f482bee7d5ad12062daa7ad6181a29094.tar.xz, freeipa-70c3cd7f482bee7d5ad12062daa7ad6181a29094.zip | |
ipaclient: move install modules to the install subpackage
The ipa_certupdate, ipachangeconf, ipadiscovery and ntpconf modules depend
on ipaplatform.
Move them to ipaclient.install as they are used only from the client
installer.
https://fedorahosted.org/freeipa/ticket/6474
Reviewed-By: Stanislav Laznicka <slaznick@redhat.com>
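For code that imported these modules from the top level of ipaclient, the change reduces to an import-path update. A minimal before/after sketch, using the absolute-path equivalent of the relative imports shown in the client.py hunk below:

```python
# Before this commit: the install helpers lived at the top of ipaclient.
from ipaclient import ipadiscovery, ntpconf
from ipaclient.ipachangeconf import IPAChangeConf

# After this commit: they live in the ipaclient.install subpackage
# (client.py itself uses the relative form "from . import ...").
from ipaclient.install import ipadiscovery, ntpconf
from ipaclient.install.ipachangeconf import IPAChangeConf
```

The ipa_certupdate module, which backs the ipa-certupdate tool, moves into the same subpackage as ipaclient/install/ipa_certupdate.py.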
Diffstat (limited to 'ipaclient/install')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | ipaclient/install/client.py | 8 |
| -rw-r--r-- | ipaclient/install/ipa_certupdate.py | 190 |
| -rw-r--r-- | ipaclient/install/ipachangeconf.py | 570 |
| -rw-r--r-- | ipaclient/install/ipadiscovery.py | 552 |
| -rw-r--r-- | ipaclient/install/ntpconf.py | 237 |
5 files changed, 1551 insertions, 6 deletions
diff --git a/ipaclient/install/client.py b/ipaclient/install/client.py index b98f35f1e..7bd84045d 100644 --- a/ipaclient/install/client.py +++ b/ipaclient/install/client.py @@ -31,11 +31,6 @@ from six.moves.configparser import RawConfigParser from six.moves.urllib.parse import urlparse, urlunparse # pylint: enable=import-error -from ipaclient import ( - ipadiscovery, - ntpconf, -) -from ipaclient.ipachangeconf import IPAChangeConf from ipalib import api, errors, x509 from ipalib.install import certmonger, certstore, service, sysrestore from ipalib.install import hostname as hostname_ @@ -68,7 +63,8 @@ from ipapython.ipautil import ( ) from ipapython.ssh import SSHPublicKey -from . import automount +from . import automount, ipadiscovery, ntpconf +from .ipachangeconf import IPAChangeConf NoneType = type(None) diff --git a/ipaclient/install/ipa_certupdate.py b/ipaclient/install/ipa_certupdate.py new file mode 100644 index 000000000..2c55db462 --- /dev/null +++ b/ipaclient/install/ipa_certupdate.py @@ -0,0 +1,190 @@ +# Authors: Jan Cholasta <jcholast@redhat.com> +# +# Copyright (C) 2014 Red Hat +# see file 'COPYING' for use and warranty information +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+# + +import os +import tempfile +import shutil + +# pylint: disable=import-error +from six.moves.urllib.parse import urlsplit +# pylint: enable=import-error + +from ipalib.install import certmonger, certstore, sysrestore +from ipalib.install.kinit import kinit_keytab +from ipapython import admintool, certdb, ipaldap, ipautil +from ipaplatform import services +from ipaplatform.paths import paths +from ipaplatform.tasks import tasks +from ipalib import api, errors, x509 +from ipalib.constants import IPA_CA_NICKNAME, RENEWAL_CA_NAME + + +class CertUpdate(admintool.AdminTool): + command_name = 'ipa-certupdate' + + usage = "%prog [options]" + + description = ("Update local IPA certificate databases with certificates " + "from the server.") + + def validate_options(self): + super(CertUpdate, self).validate_options(needs_root=True) + + def run(self): + fstore = sysrestore.FileStore(paths.IPA_CLIENT_SYSRESTORE) + if (not fstore.has_files() and + not os.path.exists(paths.IPA_DEFAULT_CONF)): + raise admintool.ScriptError( + "IPA client is not configured on this system.") + + api.bootstrap(context='cli_installer') + api.finalize() + + server = urlsplit(api.env.jsonrpc_uri).hostname + ldap_uri = ipaldap.get_ldap_uri(server) + ldap = ipaldap.LDAPClient(ldap_uri) + + tmpdir = tempfile.mkdtemp(prefix="tmp-") + ccache_name = os.path.join(tmpdir, 'ccache') + try: + principal = str('host/%s@%s' % (api.env.host, api.env.realm)) + kinit_keytab(principal, paths.KRB5_KEYTAB, ccache_name) + os.environ['KRB5CCNAME'] = ccache_name + + api.Backend.rpcclient.connect() + try: + result = api.Backend.rpcclient.forward( + 'ca_is_enabled', + version=u'2.107', + ) + ca_enabled = result['result'] + except (errors.CommandError, errors.NetworkError): + result = api.Backend.rpcclient.forward( + 'env', + server=True, + version=u'2.0', + ) + ca_enabled = result['result']['enable_ra'] + + ldap.gssapi_bind() + + certs = certstore.get_ca_certs(ldap, api.env.basedn, + api.env.realm, ca_enabled) + + if ca_enabled: + lwcas = api.Command.ca_find()['result'] + else: + lwcas = [] + + api.Backend.rpcclient.disconnect() + finally: + shutil.rmtree(tmpdir) + + server_fstore = sysrestore.FileStore(paths.SYSRESTORE) + if server_fstore.has_files(): + self.update_server(certs) + try: + # pylint: disable=import-error + from ipaserver.install import cainstance + # pylint: enable=import-error + cainstance.add_lightweight_ca_tracking_requests( + self.log, lwcas) + except Exception: + self.log.exception( + "Failed to add lightweight CA tracking requests") + + self.update_client(certs) + + def update_client(self, certs): + self.update_file(paths.IPA_CA_CRT, certs) + + ipa_db = certdb.NSSDatabase(api.env.nss_dir) + + # Remove old IPA certs from /etc/ipa/nssdb + for nickname in ('IPA CA', 'External CA cert'): + while ipa_db.has_nickname(nickname): + try: + ipa_db.delete_cert(nickname) + except ipautil.CalledProcessError as e: + self.log.error("Failed to remove %s from %s: %s", + nickname, ipa_db.secdir, e) + break + + self.update_db(ipa_db.secdir, certs) + + tasks.remove_ca_certs_from_systemwide_ca_store() + tasks.insert_ca_certs_into_systemwide_ca_store(certs) + + def update_server(self, certs): + instance = '-'.join(api.env.realm.split('.')) + self.update_db( + paths.ETC_DIRSRV_SLAPD_INSTANCE_TEMPLATE % instance, certs) + if services.knownservices.dirsrv.is_running(): + services.knownservices.dirsrv.restart(instance) + + self.update_db(paths.HTTPD_ALIAS_DIR, certs) + if services.knownservices.httpd.is_running(): + 
services.knownservices.httpd.restart() + + criteria = { + 'cert-database': paths.PKI_TOMCAT_ALIAS_DIR, + 'cert-nickname': IPA_CA_NICKNAME, + 'ca-name': RENEWAL_CA_NAME + } + request_id = certmonger.get_request_id(criteria) + if request_id is not None: + timeout = api.env.startup_timeout + 60 + + self.log.debug("resubmitting certmonger request '%s'", request_id) + certmonger.resubmit_request( + request_id, profile='ipaRetrievalOrReuse') + try: + state = certmonger.wait_for_request(request_id, timeout) + except RuntimeError: + raise admintool.ScriptError( + "Resubmitting certmonger request '%s' timed out, " + "please check the request manually" % request_id) + ca_error = certmonger.get_request_value(request_id, 'ca-error') + if state != 'MONITORING' or ca_error: + raise admintool.ScriptError( + "Error resubmitting certmonger request '%s', " + "please check the request manually" % request_id) + + self.log.debug("modifying certmonger request '%s'", request_id) + certmonger.modify(request_id, profile='ipaCACertRenewal') + + self.update_file(paths.CA_CRT, certs) + + def update_file(self, filename, certs, mode=0o444): + certs = (c[0] for c in certs if c[2] is not False) + try: + x509.write_certificate_list(certs, filename) + except Exception as e: + self.log.error("failed to update %s: %s", filename, e) + + def update_db(self, path, certs): + db = certdb.NSSDatabase(path) + for cert, nickname, trusted, eku in certs: + trust_flags = certstore.key_policy_to_trust_flags( + trusted, True, eku) + try: + db.add_cert(cert, nickname, trust_flags) + except ipautil.CalledProcessError as e: + self.log.error( + "failed to update %s in %s: %s", nickname, path, e) diff --git a/ipaclient/install/ipachangeconf.py b/ipaclient/install/ipachangeconf.py new file mode 100644 index 000000000..610cd5011 --- /dev/null +++ b/ipaclient/install/ipachangeconf.py @@ -0,0 +1,570 @@ +# +# ipachangeconf - configuration file manipulation classes and functions +# partially based on authconfig code +# Copyright (c) 1999-2007 Red Hat, Inc. +# Author: Simo Sorce <ssorce@redhat.com> +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# + +import fcntl +import os +import shutil + +import six + +if six.PY3: + unicode = str + +def openLocked(filename, perms): + fd = -1 + try: + fd = os.open(filename, os.O_RDWR | os.O_CREAT, perms) + + fcntl.lockf(fd, fcntl.LOCK_EX) + except OSError as e: + if fd != -1: + try: + os.close(fd) + except OSError: + pass + raise IOError(e.errno, e.strerror) + return os.fdopen(fd, "r+") + + + #TODO: add subsection as a concept + # (ex. 
REALM.NAME = { foo = x bar = y } ) + #TODO: put section delimiters as separating element of the list + # so that we can process multiple sections in one go + #TODO: add a comment all but provided options as a section option +class IPAChangeConf(object): + def __init__(self, name): + self.progname = name + self.indent = ("", "", "") + self.assign = (" = ", "=") + self.dassign = self.assign[0] + self.comment = ("#",) + self.dcomment = self.comment[0] + self.eol = ("\n",) + self.deol = self.eol[0] + self.sectnamdel = ("[", "]") + self.subsectdel = ("{", "}") + self.case_insensitive_sections = True + + def setProgName(self, name): + self.progname = name + + def setIndent(self, indent): + if type(indent) is tuple: + self.indent = indent + elif type(indent) is str: + self.indent = (indent, ) + else: + raise ValueError('Indent must be a list of strings') + + def setOptionAssignment(self, assign): + if type(assign) is tuple: + self.assign = assign + else: + self.assign = (assign, ) + self.dassign = self.assign[0] + + def setCommentPrefix(self, comment): + if type(comment) is tuple: + self.comment = comment + else: + self.comment = (comment, ) + self.dcomment = self.comment[0] + + def setEndLine(self, eol): + if type(eol) is tuple: + self.eol = eol + else: + self.eol = (eol, ) + self.deol = self.eol[0] + + def setSectionNameDelimiters(self, delims): + self.sectnamdel = delims + + def setSubSectionDelimiters(self, delims): + self.subsectdel = delims + + def matchComment(self, line): + for v in self.comment: + if line.lstrip().startswith(v): + return line.lstrip()[len(v):] + return False + + def matchEmpty(self, line): + if line.strip() == "": + return True + return False + + def matchSection(self, line): + cl = "".join(line.strip().split()) + cl = cl.lower() if self.case_insensitive_sections else cl + + if len(self.sectnamdel) != 2: + return False + if not cl.startswith(self.sectnamdel[0]): + return False + if not cl.endswith(self.sectnamdel[1]): + return False + return cl[len(self.sectnamdel[0]):-len(self.sectnamdel[1])] + + def matchSubSection(self, line): + if self.matchComment(line): + return False + + parts = line.split(self.dassign, 1) + if len(parts) < 2: + return False + + if parts[1].strip() == self.subsectdel[0]: + return parts[0].strip() + + return False + + def matchSubSectionEnd(self, line): + if self.matchComment(line): + return False + + if line.strip() == self.subsectdel[1]: + return True + + return False + + def getSectionLine(self, section): + if len(self.sectnamdel) != 2: + return section + return self._dump_line(self.sectnamdel[0], + section, + self.sectnamdel[1], + self.deol) + + def _dump_line(self, *args): + return u"".join(unicode(x) for x in args) + + def dump(self, options, level=0): + output = [] + if level >= len(self.indent): + level = len(self.indent) - 1 + + for o in options: + if o['type'] == "section": + output.append(self._dump_line(self.sectnamdel[0], + o['name'], + self.sectnamdel[1])) + output.append(self.dump(o['value'], (level + 1))) + continue + if o['type'] == "subsection": + output.append(self._dump_line(self.indent[level], + o['name'], + self.dassign, + self.subsectdel[0])) + output.append(self.dump(o['value'], (level + 1))) + output.append(self._dump_line(self.indent[level], + self.subsectdel[1])) + continue + if o['type'] == "option": + delim = o.get('delim', self.dassign) + if delim not in self.assign: + raise ValueError('Unknown delim "%s" must be one of "%s"' % (delim, " ".join([d for d in self.assign]))) + 
output.append(self._dump_line(self.indent[level], + o['name'], + delim, + o['value'])) + continue + if o['type'] == "comment": + output.append(self._dump_line(self.dcomment, o['value'])) + continue + if o['type'] == "empty": + output.append('') + continue + raise SyntaxError('Unknown type: [%s]' % o['type']) + + # append an empty string to the output so that we add eol to the end + # of the file contents in a single join() + output.append('') + return self.deol.join(output) + + def parseLine(self, line): + + if self.matchEmpty(line): + return {'name': 'empty', 'type': 'empty'} + + value = self.matchComment(line) + if value: + return {'name': 'comment', + 'type': 'comment', + 'value': value.rstrip()} # pylint: disable=E1103 + + o = dict() + parts = line.split(self.dassign, 1) + if len(parts) < 2: + # The default assign didn't match, try the non-default + for d in self.assign[1:]: + parts = line.split(d, 1) + if len(parts) >= 2: + o['delim'] = d + break + + if 'delim' not in o: + raise SyntaxError('Syntax Error: Unknown line format') + + o.update({'name':parts[0].strip(), 'type':'option', 'value':parts[1].rstrip()}) + return o + + def findOpts(self, opts, type, name, exclude_sections=False): + + num = 0 + for o in opts: + if o['type'] == type and o['name'] == name: + return (num, o) + if exclude_sections and (o['type'] == "section" or + o['type'] == "subsection"): + return (num, None) + num += 1 + return (num, None) + + def commentOpts(self, inopts, level=0): + + opts = [] + + if level >= len(self.indent): + level = len(self.indent) - 1 + + for o in inopts: + if o['type'] == 'section': + no = self.commentOpts(o['value'], (level + 1)) + val = self._dump_line(self.dcomment, + self.sectnamdel[0], + o['name'], + self.sectnamdel[1]) + opts.append({'name': 'comment', + 'type': 'comment', + 'value': val}) + for n in no: + opts.append(n) + continue + if o['type'] == 'subsection': + no = self.commentOpts(o['value'], (level + 1)) + val = self._dump_line(self.indent[level], + o['name'], + self.dassign, + self.subsectdel[0]) + opts.append({'name': 'comment', + 'type': 'comment', + 'value': val}) + opts.extend(no) + val = self._dump_line(self.indent[level], self.subsectdel[1]) + opts.append({'name': 'comment', + 'type': 'comment', + 'value': val}) + continue + if o['type'] == 'option': + delim = o.get('delim', self.dassign) + if delim not in self.assign: + val = self._dump_line(self.indent[level], + o['name'], + delim, + o['value']) + opts.append({'name':'comment', 'type':'comment', 'value':val}) + continue + if o['type'] == 'comment': + opts.append(o) + continue + if o['type'] == 'empty': + opts.append({'name': 'comment', + 'type': 'comment', + 'value': ''}) + continue + raise SyntaxError('Unknown type: [%s]' % o['type']) + + return opts + + def mergeOld(self, oldopts, newopts): + + opts = [] + + for o in oldopts: + if o['type'] == "section" or o['type'] == "subsection": + _num, no = self.findOpts(newopts, o['type'], o['name']) + if not no: + opts.append(o) + continue + if no['action'] == "set": + mo = self.mergeOld(o['value'], no['value']) + opts.append({'name': o['name'], + 'type': o['type'], + 'value': mo}) + continue + if no['action'] == "comment": + co = self.commentOpts(o['value']) + for c in co: + opts.append(c) + continue + if no['action'] == "remove": + continue + raise SyntaxError('Unknown action: [%s]' % no['action']) + + if o['type'] == "comment" or o['type'] == "empty": + opts.append(o) + continue + + if o['type'] == "option": + _num, no = self.findOpts(newopts, 'option', o['name'], True) + 
if not no: + opts.append(o) + continue + if no['action'] == 'comment' or no['action'] == 'remove': + if (no['value'] is not None and + o['value'] is not no['value']): + opts.append(o) + continue + if no['action'] == 'comment': + value = self._dump_line(self.dcomment, + o['name'], + self.dassign, + o['value']) + opts.append({'name': 'comment', + 'type': 'comment', + 'value': value}) + continue + if no['action'] == 'set': + opts.append(no) + continue + if no['action'] == 'addifnotset': + opts.append({'name': 'comment', 'type': 'comment', + 'value': self._dump_line(no['name'], + self.dassign, + no['value'], + u' # modified by IPA' + )}) + opts.append(o) + continue + raise SyntaxError('Unknown action: [%s]' % no['action']) + + raise SyntaxError('Unknown type: [%s]' % o['type']) + + return opts + + def mergeNew(self, opts, newopts): + + cline = 0 + + for no in newopts: + + if no['type'] == "section" or no['type'] == "subsection": + (num, o) = self.findOpts(opts, no['type'], no['name']) + if not o: + if no['action'] == 'set': + opts.append(no) + continue + if no['action'] == "set": + self.mergeNew(o['value'], no['value']) + continue + cline = num + 1 + continue + + if no['type'] == "option": + (num, o) = self.findOpts(opts, no['type'], no['name'], True) + if not o: + if no['action'] == 'set' or no['action'] == 'addifnotset': + opts.append(no) + continue + cline = num + 1 + continue + + if no['type'] == "comment" or no['type'] == "empty": + opts.insert(cline, no) + cline += 1 + continue + + raise SyntaxError('Unknown type: [%s]' % no['type']) + + def merge(self, oldopts, newopts): + """ + Uses a two pass strategy: + First we create a new opts tree from oldopts removing/commenting + the options as indicated by the contents of newopts + Second we fill in the new opts tree with options as indicated + in the newopts tree (this is becaus eentire (sub)sections may + in the newopts tree (this is becaus entire (sub)sections may + exist in the newopts that do not exist in oldopts) + """ + opts = self.mergeOld(oldopts, newopts) + self.mergeNew(opts, newopts) + return opts + + #TODO: Make parse() recursive? + def parse(self, f): + + opts = [] + sectopts = [] + section = None + subsectopts = [] + subsection = None + curopts = opts + fatheropts = opts + + # Read in the old file. + for line in f: + + # It's a section start. + value = self.matchSection(line) + if value: + if section is not None: + opts.append({'name': section, + 'type': 'section', + 'value': sectopts}) + sectopts = [] + curopts = sectopts + fatheropts = sectopts + section = value + continue + + # It's a subsection start. + value = self.matchSubSection(line) + if value: + if subsection is not None: + raise SyntaxError('nested subsections are not ' + 'supported yet') + subsectopts = [] + curopts = subsectopts + subsection = value + continue + + value = self.matchSubSectionEnd(line) + if value: + if subsection is None: + raise SyntaxError('Unmatched end subsection terminator ' + 'found') + fatheropts.append({'name': subsection, + 'type': 'subsection', + 'value': subsectopts}) + subsection = None + curopts = fatheropts + continue + + # Copy anything else as is. 
+ try: + curopts.append(self.parseLine(line)) + except SyntaxError as e: + raise SyntaxError('{error} in file {fname}: [{line}]'.format( + error=e, fname=f.name, line=line.rstrip())) + + #Add last section if any + if len(sectopts) is not 0: + opts.append({'name': section, + 'type': 'section', + 'value': sectopts}) + + return opts + + def changeConf(self, file, newopts): + """ + Write settings to configuration file + :param file: path to the file + :param options: set of dictionaries in the form: + {'name': 'foo', 'value': 'bar', 'action': 'set/comment'} + :param section: section name like 'global' + """ + output = "" + f = None + try: + # Do not catch an unexisting file error + # we want to fail in that case + shutil.copy2(file, (file + ".ipabkp")) + + f = openLocked(file, 0o644) + + oldopts = self.parse(f) + + options = self.merge(oldopts, newopts) + + output = self.dump(options) + + # Write it out and close it. + f.seek(0) + f.truncate(0) + f.write(output) + finally: + try: + if f: + f.close() + except IOError: + pass + return True + + def newConf(self, file, options, file_perms=0o644): + """" + Write settings to a new file, backup the old + :param file: path to the file + :param options: a set of dictionaries in the form: + {'name': 'foo', 'value': 'bar', 'action': 'set/comment'} + :param file_perms: number defining the new file's permissions + """ + output = "" + f = None + try: + try: + shutil.copy2(file, (file + ".ipabkp")) + except IOError as err: + if err.errno == 2: + # The orign file did not exist + pass + + f = openLocked(file, file_perms) + + # Trunkate + f.seek(0) + f.truncate(0) + + output = self.dump(options) + + f.write(output) + finally: + try: + if f: + f.close() + except IOError: + pass + return True + + @staticmethod + def setOption(name, value): + return {'name': name, + 'type': 'option', + 'action': 'set', + 'value': value} + + @staticmethod + def rmOption(name): + return {'name': name, + 'type': 'option', + 'action': 'remove', + 'value': None} + + @staticmethod + def setSection(name, options): + return {'name': name, + 'type': 'section', + 'action': 'set', + 'value': options} + + @staticmethod + def emptyLine(): + return {'name': 'empty', + 'type': 'empty'} diff --git a/ipaclient/install/ipadiscovery.py b/ipaclient/install/ipadiscovery.py new file mode 100644 index 000000000..46e05c971 --- /dev/null +++ b/ipaclient/install/ipadiscovery.py @@ -0,0 +1,552 @@ +# Authors: Simo Sorce <ssorce@redhat.com> +# +# Copyright (C) 2007 Red Hat +# see file 'COPYING' for use and warranty information +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+# + +import socket + +import six + +from ipapython.ipa_log_manager import root_logger +from dns import resolver, rdatatype +from dns.exception import DNSException +from ipalib import errors +from ipapython import ipaldap +from ipaplatform.paths import paths +from ipapython.ipautil import valid_ip, realm_to_suffix +from ipapython.dn import DN + +NOT_FQDN = -1 +NO_LDAP_SERVER = -2 +REALM_NOT_FOUND = -3 +NOT_IPA_SERVER = -4 +NO_ACCESS_TO_LDAP = -5 +NO_TLS_LDAP = -6 +BAD_HOST_CONFIG = -10 +UNKNOWN_ERROR = -15 + +IPA_BASEDN_INFO = 'ipa v2.0' + +error_names = { + 0: 'Success', + NOT_FQDN: 'NOT_FQDN', + NO_LDAP_SERVER: 'NO_LDAP_SERVER', + REALM_NOT_FOUND: 'REALM_NOT_FOUND', + NOT_IPA_SERVER: 'NOT_IPA_SERVER', + NO_ACCESS_TO_LDAP: 'NO_ACCESS_TO_LDAP', + NO_TLS_LDAP: 'NO_TLS_LDAP', + BAD_HOST_CONFIG: 'BAD_HOST_CONFIG', + UNKNOWN_ERROR: 'UNKNOWN_ERROR', +} + +def get_ipa_basedn(conn): + """ + Get base DN of IPA suffix in given LDAP server. + + None is returned if the suffix is not found + + :param conn: Bound LDAPClient that will be used for searching + """ + entry = conn.get_entry( + DN(), attrs_list=['defaultnamingcontext', 'namingcontexts']) + + contexts = [c.decode('utf-8') for c in entry.raw['namingcontexts']] + if 'defaultnamingcontext' in entry: + # If there is a defaultNamingContext examine that one first + [default] = entry.raw['defaultnamingcontext'] + default = default.decode('utf-8') + if default in contexts: + contexts.remove(default) + contexts.insert(0, default) + for context in contexts: + root_logger.debug("Check if naming context '%s' is for IPA" % context) + try: + [entry] = conn.get_entries( + DN(context), conn.SCOPE_BASE, "(info=IPA*)") + except errors.NotFound: + root_logger.debug("LDAP server did not return info attribute to " + "check for IPA version") + continue + [info] = entry.raw['info'] + info = info.decode('utf-8').lower() + if info != IPA_BASEDN_INFO: + root_logger.debug("Detected IPA server version (%s) did not match the client (%s)" \ + % (info, IPA_BASEDN_INFO)) + continue + root_logger.debug("Naming context '%s' is a valid IPA context" % context) + return DN(context) + + return None + + +class IPADiscovery(object): + + def __init__(self): + self.realm = None + self.domain = None + self.server = None + self.servers = [] + self.basedn = None + + self.realm_source = None + self.domain_source = None + self.server_source = None + self.basedn_source = None + + def __get_resolver_domains(self): + """ + Read /etc/resolv.conf and return all the domains found in domain and + search. + + Returns a list of (domain, info) pairs. The info contains a reason why + the domain is returned. + """ + domains = [] + domain = None + try: + fp = open(paths.RESOLV_CONF, 'r') + lines = fp.readlines() + fp.close() + + for line in lines: + if line.lower().startswith('domain'): + domain = (line.split()[-1], + 'local domain from /etc/resolv.conf') + elif line.lower().startswith('search'): + domains += [(d, 'search domain from /etc/resolv.conf') for + d in line.split()[1:]] + except Exception: + pass + if domain: + domains = [domain] + domains + return domains + + def getServerName(self): + return self.server + + def getDomainName(self): + return self.domain + + def getRealmName(self): + return self.realm + + def getKDCName(self): + return self.kdc + + def getBaseDN(self): + return self.basedn + + def check_domain(self, domain, tried, reason): + """ + Given a domain search it for SRV records, breaking it down to search + all subdomains too. 
+ + Returns a tuple (servers, domain) or (None,None) if a SRV record + isn't found. servers is a list of servers found. domain is a string. + + :param tried: A set of domains that were tried already + :param reason: Reason this domain is searched (included in the log) + """ + servers = None + root_logger.debug('Start searching for LDAP SRV record in "%s" (%s) ' + + 'and its sub-domains', domain, reason) + while not servers: + if domain in tried: + root_logger.debug("Already searched %s; skipping", domain) + break + tried.add(domain) + + servers = self.ipadns_search_srv(domain, '_ldap._tcp', 389, + break_on_first=False) + if servers: + return (servers, domain) + else: + p = domain.find(".") + if p == -1: #no ldap server found and last component of the domain already tested + return (None, None) + domain = domain[p+1:] + return (None, None) + + def search(self, domain="", servers="", realm=None, hostname=None, ca_cert_path=None): + """ + Use DNS discovery to identify valid IPA servers. + + servers may contain an optional list of servers which will be used + instead of discovering available LDAP SRV records. + + Returns a constant representing the overall search result. + """ + root_logger.debug("[IPA Discovery]") + root_logger.debug( + 'Starting IPA discovery with domain=%s, servers=%s, hostname=%s', + domain, servers, hostname) + + self.server = None + autodiscovered = False + + if not servers: + + if not domain: #domain not provided do full DNS discovery + + # get the local host name + if not hostname: + hostname = socket.getfqdn() + root_logger.debug('Hostname: %s', hostname) + if not hostname: + return BAD_HOST_CONFIG + + if valid_ip(hostname): + return NOT_FQDN + + # first, check for an LDAP server for the local domain + p = hostname.find(".") + if p == -1: #no domain name + return NOT_FQDN + domain = hostname[p+1:] + + # Get the list of domains from /etc/resolv.conf, we'll search + # them all. We search the domain of our hostname first though. + # This is to avoid the situation where domain isn't set in + # /etc/resolv.conf and the search list has the hostname domain + # not first. We could end up with the wrong SRV record. 
+ domains = self.__get_resolver_domains() + domains = [(domain, 'domain of the hostname')] + domains + tried = set() + for domain, reason in domains: + servers, domain = self.check_domain(domain, tried, reason) + if servers: + autodiscovered = True + self.domain = domain + self.server_source = self.domain_source = ( + 'Discovered LDAP SRV records from %s (%s)' % + (domain, reason)) + break + if not self.domain: #no ldap server found + root_logger.debug('No LDAP server found') + return NO_LDAP_SERVER + else: + root_logger.debug("Search for LDAP SRV record in %s", domain) + servers = self.ipadns_search_srv(domain, '_ldap._tcp', 389, + break_on_first=False) + if servers: + autodiscovered = True + self.domain = domain + self.server_source = self.domain_source = ( + 'Discovered LDAP SRV records from %s' % domain) + else: + self.server = None + root_logger.debug('No LDAP server found') + return NO_LDAP_SERVER + + else: + + root_logger.debug("Server and domain forced") + self.domain = domain + self.domain_source = self.server_source = 'Forced' + + #search for kerberos + root_logger.debug("[Kerberos realm search]") + if realm: + root_logger.debug("Kerberos realm forced") + self.realm = realm + self.realm_source = 'Forced' + else: + realm = self.ipadnssearchkrbrealm() + self.realm = realm + self.realm_source = ( + 'Discovered Kerberos DNS records from %s' % self.domain) + + if not servers and not realm: + return REALM_NOT_FOUND + + if autodiscovered: + self.kdc = self.ipadnssearchkrbkdc() + self.kdc_source = ( + 'Discovered Kerberos DNS records from %s' % self.domain) + else: + self.kdc = ', '.join(servers) + self.kdc_source = "Kerberos DNS record discovery bypassed" + + # We may have received multiple servers corresponding to the domain + # Iterate through all of those to check if it is IPA LDAP server + ldapret = [NOT_IPA_SERVER] + ldapaccess = True + root_logger.debug("[LDAP server check]") + valid_servers = [] + for server in servers: + root_logger.debug('Verifying that %s (realm %s) is an IPA server', + server, self.realm) + # check ldap now + ldapret = self.ipacheckldap(server, self.realm, ca_cert_path=ca_cert_path) + + if ldapret[0] == 0: + self.server = ldapret[1] + self.realm = ldapret[2] + self.server_source = self.realm_source = ( + 'Discovered from LDAP DNS records in %s' % self.server) + valid_servers.append(server) + # verified, we actually talked to the remote server and it + # is definetely an IPA server + if autodiscovered: + # No need to keep verifying servers if we discovered them + # via DNS + break + elif ldapret[0] == NO_ACCESS_TO_LDAP or ldapret[0] == NO_TLS_LDAP: + ldapaccess = False + valid_servers.append(server) + # we may set verified_servers below, we don't have it yet + if autodiscovered: + # No need to keep verifying servers if we discovered them + # via DNS + break + elif ldapret[0] == NOT_IPA_SERVER: + root_logger.warning( + 'Skip %s: not an IPA server', server) + elif ldapret[0] == NO_LDAP_SERVER: + root_logger.warning( + 'Skip %s: LDAP server is not responding, unable to verify if ' + 'this is an IPA server', server) + else: + root_logger.warning( + 'Skip %s: cannot verify if this is an IPA server', server) + + # If one of LDAP servers checked rejects access (maybe anonymous + # bind is disabled), assume realm and basedn generated off domain. 
+ # Note that in case ldapret[0] == 0 and ldapaccess == False (one of + # servers didn't provide access but another one succeeded), self.realm + # will be set already to a proper value above, self.basdn will be + # initialized during the LDAP check itself and we'll skip these two checks. + if not ldapaccess and self.realm is None: + # Assume realm is the same as domain.upper() + self.realm = self.domain.upper() + self.realm_source = 'Assumed same as domain' + root_logger.debug( + "Assuming realm is the same as domain: %s", self.realm) + + if not ldapaccess and self.basedn is None: + # Generate suffix from realm + self.basedn = realm_to_suffix(self.realm) + self.basedn_source = 'Generated from Kerberos realm' + root_logger.debug("Generated basedn from realm: %s" % self.basedn) + + root_logger.debug( + "Discovery result: %s; server=%s, domain=%s, kdc=%s, basedn=%s", + error_names.get(ldapret[0], ldapret[0]), + self.server, self.domain, self.kdc, self.basedn) + + root_logger.debug("Validated servers: %s" % ','.join(valid_servers)) + self.servers = valid_servers + + # If we have any servers left then override the last return value + # to indicate success. + if valid_servers: + self.server = servers[0] + ldapret[0] = 0 + + return ldapret[0] + + def ipacheckldap(self, thost, trealm, ca_cert_path=None): + """ + Given a host and kerberos realm verify that it is an IPA LDAP + server hosting the realm. + + Returns a list [errno, host, realm] or an empty list on error. + Errno is an error number: + 0 means all ok + 1 means we could not check the info in LDAP (may happend when + anonymous binds are disabled) + 2 means the server is certainly not an IPA server + """ + + lrealms = [] + + #now verify the server is really an IPA server + try: + ldap_uri = ipaldap.get_ldap_uri(thost) + start_tls = False + if ca_cert_path: + start_tls = True + root_logger.debug("Init LDAP connection to: %s", ldap_uri) + lh = ipaldap.LDAPClient( + ldap_uri, cacert=ca_cert_path, start_tls=start_tls, + no_schema=True, decode_attrs=False) + try: + lh.simple_bind(DN(), '') + + # get IPA base DN + root_logger.debug("Search LDAP server for IPA base DN") + basedn = get_ipa_basedn(lh) + except errors.ACIError: + root_logger.debug("LDAP Error: Anonymous access not allowed") + return [NO_ACCESS_TO_LDAP] + except errors.DatabaseError as err: + root_logger.error("Error checking LDAP: %s" % err.strerror) + # We should only get UNWILLING_TO_PERFORM if the remote LDAP + # server has minssf > 0 and we have attempted a non-TLS conn. + if ca_cert_path is None: + root_logger.debug( + "Cannot connect to LDAP server. 
Check that minssf is " + "not enabled") + return [NO_TLS_LDAP] + else: + return [UNKNOWN_ERROR] + + if basedn is None: + root_logger.debug("The server is not an IPA server") + return [NOT_IPA_SERVER] + + self.basedn = basedn + self.basedn_source = 'From IPA server %s' % lh.ldap_uri + + #search and return known realms + root_logger.debug( + "Search for (objectClass=krbRealmContainer) in %s (sub)", + self.basedn) + try: + lret = lh.get_entries( + DN(('cn', 'kerberos'), self.basedn), + lh.SCOPE_SUBTREE, "(objectClass=krbRealmContainer)") + except errors.NotFound: + #something very wrong + return [REALM_NOT_FOUND] + + for lres in lret: + root_logger.debug("Found: %s", lres.dn) + [cn] = lres.raw['cn'] + if six.PY3: + cn = cn.decode('utf-8') + lrealms.append(cn) + + if trealm: + for r in lrealms: + if trealm == r: + return [0, thost, trealm] + # must match or something is very wrong + root_logger.debug("Realm %s does not match any realm in LDAP " + "database", trealm) + return [REALM_NOT_FOUND] + else: + if len(lrealms) != 1: + #which one? we can't attach to a multi-realm server without DNS working + root_logger.debug("Multiple realms found, cannot decide " + "which realm is the right without " + "working DNS") + return [REALM_NOT_FOUND] + else: + return [0, thost, lrealms[0]] + + #we shouldn't get here + assert False, "Unknown error in ipadiscovery" + + except errors.DatabaseTimeout: + root_logger.debug("LDAP Error: timeout") + return [NO_LDAP_SERVER] + except errors.NetworkError as err: + root_logger.debug("LDAP Error: %s" % err.strerror) + return [NO_LDAP_SERVER] + except errors.ACIError: + root_logger.debug("LDAP Error: Anonymous access not allowed") + return [NO_ACCESS_TO_LDAP] + except errors.DatabaseError as err: + root_logger.debug("Error checking LDAP: %s" % err.strerror) + return [UNKNOWN_ERROR] + except Exception as err: + root_logger.debug("Error checking LDAP: %s" % err) + + return [UNKNOWN_ERROR] + + + def ipadns_search_srv(self, domain, srv_record_name, default_port, + break_on_first=True): + """ + Search for SRV records in given domain. When no record is found, + en empty list is returned + + :param domain: Search domain name + :param srv_record_name: SRV record name, e.g. "_ldap._tcp" + :param default_port: When default_port is not None, it is being + checked with the port in SRV record and if they don't + match, the port from SRV record is appended to + found hostname in this format: "hostname:port" + :param break_on_first: break on the first find and return just one + entry + """ + servers = [] + + qname = '%s.%s' % (srv_record_name, domain) + + root_logger.debug("Search DNS for SRV record of %s", qname) + + try: + answers = resolver.query(qname, rdatatype.SRV) + except DNSException as e: + root_logger.debug("DNS record not found: %s", e.__class__.__name__) + answers = [] + + for answer in answers: + root_logger.debug("DNS record found: %s", answer) + server = str(answer.target).rstrip(".") + if not server: + root_logger.debug("Cannot parse the hostname from SRV record: %s", answer) + continue + if default_port is not None and answer.port != default_port: + server = "%s:%s" % (server, str(answer.port)) + servers.append(server) + if break_on_first: + break + + return servers + + def ipadnssearchkrbrealm(self, domain=None): + realm = None + if not domain: + domain = self.domain + # now, check for a Kerberos realm the local host or domain is in + qname = "_kerberos." 
+ domain + + root_logger.debug("Search DNS for TXT record of %s", qname) + + try: + answers = resolver.query(qname, rdatatype.TXT) + except DNSException as e: + root_logger.debug("DNS record not found: %s", e.__class__.__name__) + answers = [] + + for answer in answers: + root_logger.debug("DNS record found: %s", answer) + if answer.strings: + realm = answer.strings[0] + if realm: + break + return realm + + def ipadnssearchkrbkdc(self, domain=None): + kdc = None + + if not domain: + domain = self.domain + + kdc = self.ipadns_search_srv(domain, '_kerberos._udp', 88, + break_on_first=False) + + if kdc: + kdc = ','.join(kdc) + else: + root_logger.debug("SRV record for KDC not found! Domain: %s" % domain) + kdc = None + + return kdc diff --git a/ipaclient/install/ntpconf.py b/ipaclient/install/ntpconf.py new file mode 100644 index 000000000..a8d04f92a --- /dev/null +++ b/ipaclient/install/ntpconf.py @@ -0,0 +1,237 @@ +# Authors: Karl MacMillan <kmacmillan@redhat.com> +# +# Copyright (C) 2007 Red Hat +# see file 'COPYING' for use and warranty information +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# +import os +import shutil + +from ipalib import api +from ipapython import ipautil +from ipapython.ipa_log_manager import root_logger +from ipaplatform.tasks import tasks +from ipaplatform import services +from ipaplatform.paths import paths + +ntp_conf = """# Permit time synchronization with our time source, but do not +# permit the source to query or modify the service on this system. +restrict default kod nomodify notrap nopeer noquery +restrict -6 default kod nomodify notrap nopeer noquery + +# Permit all access over the loopback interface. This could +# be tightened as well, but to do so would effect some of +# the administrative functions. +restrict 127.0.0.1 +restrict -6 ::1 + +# Hosts on local network are less restricted. +#restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap + +# Use public servers from the pool.ntp.org project. +# Please consider joining the pool (http://www.pool.ntp.org/join.html). +$SERVERS_BLOCK + +#broadcast 192.168.1.255 key 42 # broadcast server +#broadcastclient # broadcast client +#broadcast 224.0.1.1 key 42 # multicast server +#multicastclient 224.0.1.1 # multicast client +#manycastserver 239.255.254.254 # manycast server +#manycastclient 239.255.254.254 key 42 # manycast client + +# Undisciplined Local Clock. This is a fake driver intended for backup +# and when no outside source of synchronized time is available. +server 127.127.1.0 # local clock +#fudge 127.127.1.0 stratum 10 + +# Drift file. Put this in a directory which the daemon can write to. +# No symbolic links allowed, either, since the daemon updates the file +# by creating a temporary in the same directory and then rename()'ing +# it to the file. +driftfile /var/lib/ntp/drift + +# Key file containing the keys and key identifiers used when operating +# with symmetric key cryptography. 
+keys /etc/ntp/keys + +# Specify the key identifiers which are trusted. +#trustedkey 4 8 42 + +# Specify the key identifier to use with the ntpdc utility. +#requestkey 8 + +# Specify the key identifier to use with the ntpq utility. +#controlkey 8 +""" + +ntp_sysconfig = """OPTIONS="-x -p /var/run/ntpd.pid" + +# Set to 'yes' to sync hw clock after successful ntpdate +SYNC_HWCLOCK=yes + +# Additional options for ntpdate +NTPDATE_OPTIONS="" +""" +ntp_step_tickers = """# Use IPA-provided NTP server for initial time +$TICKER_SERVERS_BLOCK +""" +def __backup_config(path, fstore = None): + if fstore: + fstore.backup_file(path) + else: + shutil.copy(path, "%s.ipasave" % (path)) + +def __write_config(path, content): + fd = open(path, "w") + fd.write(content) + fd.close() + +def config_ntp(ntp_servers, fstore = None, sysstore = None): + path_step_tickers = paths.NTP_STEP_TICKERS + path_ntp_conf = paths.NTP_CONF + path_ntp_sysconfig = paths.SYSCONFIG_NTPD + sub_dict = {} + sub_dict["SERVERS_BLOCK"] = "\n".join("server %s" % s for s in ntp_servers) + sub_dict["TICKER_SERVERS_BLOCK"] = "\n".join(ntp_servers) + + nc = ipautil.template_str(ntp_conf, sub_dict) + config_step_tickers = False + + + if os.path.exists(path_step_tickers): + config_step_tickers = True + ns = ipautil.template_str(ntp_step_tickers, sub_dict) + __backup_config(path_step_tickers, fstore) + __write_config(path_step_tickers, ns) + tasks.restore_context(path_step_tickers) + + if sysstore: + module = 'ntp' + sysstore.backup_state(module, "enabled", services.knownservices.ntpd.is_enabled()) + if config_step_tickers: + sysstore.backup_state(module, "step-tickers", True) + + __backup_config(path_ntp_conf, fstore) + __write_config(path_ntp_conf, nc) + tasks.restore_context(path_ntp_conf) + + __backup_config(path_ntp_sysconfig, fstore) + __write_config(path_ntp_sysconfig, ntp_sysconfig) + tasks.restore_context(path_ntp_sysconfig) + + # Set the ntpd to start on boot + services.knownservices.ntpd.enable() + + # Restart ntpd + services.knownservices.ntpd.restart() + + +def synconce_ntp(server_fqdn, debug=False): + """ + Syncs time with specified server using ntpd. + Primarily designed to be used before Kerberos setup + to get time following the KDC time + + Returns True if sync was successful + """ + ntpd = paths.NTPD + if not os.path.exists(ntpd): + return False + + # The ntpd command will never exit if it is unable to reach the + # server, so timeout after 15 seconds. + timeout = 15 + + tmp_ntp_conf = ipautil.write_tmp_file('server %s' % server_fqdn) + args = [paths.BIN_TIMEOUT, str(timeout), ntpd, '-qgc', tmp_ntp_conf.name] + if debug: + args.append('-d') + try: + root_logger.info('Attempting to sync time using ntpd. ' + 'Will timeout after %d seconds' % timeout) + ipautil.run(args) + return True + except ipautil.CalledProcessError as e: + if e.returncode == 124: + root_logger.debug('Process did not complete before timeout') + return False + + +class NTPConfigurationError(Exception): + pass + +class NTPConflictingService(NTPConfigurationError): + def __init__(self, message='', conflicting_service=None): + super(NTPConflictingService, self).__init__(self, message) + self.conflicting_service = conflicting_service + +def check_timedate_services(): + """ + System may contain conflicting services used for time&date synchronization. + As IPA server/client supports only ntpd, make sure that other services are + not enabled to prevent conflicts. 
For example when both chronyd and ntpd + are enabled, systemd would always start only chronyd to manage system + time&date which would make IPA configuration of ntpd ineffective. + + Reference links: + https://fedorahosted.org/freeipa/ticket/2974 + http://fedoraproject.org/wiki/Features/ChronyDefaultNTP + """ + for service in services.timedate_services: + if service == 'ntpd': + continue + # Make sure that the service is not enabled + instance = services.service(service, api) + if instance.is_enabled() or instance.is_running(): + raise NTPConflictingService(conflicting_service=instance.service_name) + +def force_ntpd(statestore): + """ + Force ntpd configuration and disable and stop any other conflicting + time&date service + """ + for service in services.timedate_services: + if service == 'ntpd': + continue + instance = services.service(service, api) + enabled = instance.is_enabled() + running = instance.is_running() + + if enabled or running: + statestore.backup_state(instance.service_name, 'enabled', enabled) + statestore.backup_state(instance.service_name, 'running', running) + + if running: + instance.stop() + + if enabled: + instance.disable() + +def restore_forced_ntpd(statestore): + """ + Restore from --force-ntpd installation and enable/start service that were + disabled/stopped during installation + """ + for service in services.timedate_services: + if service == 'ntpd': + continue + if statestore.has_state(service): + instance = services.service(service, api) + enabled = statestore.restore_state(instance.service_name, 'enabled') + running = statestore.restore_state(instance.service_name, 'running') + if enabled: + instance.enable() + if running: + instance.start() |
