-rw-r--r--   install/po/Makefile.in       11
-rwxr-xr-x   install/po/pygettext.py     818
-rw-r--r--   ipalib/cli.py                 2
-rw-r--r--   ipalib/plugable.py            5
-rw-r--r--   ipalib/plugins/internal.py   10
5 files changed, 834 insertions, 12 deletions
diff --git a/install/po/Makefile.in b/install/po/Makefile.in
index f46c32024..bcc81d00e 100644
--- a/install/po/Makefile.in
+++ b/install/po/Makefile.in
@@ -10,6 +10,7 @@ AWK = @AWK@
SED = @SED@
MKDIR_P = @MKDIR_P@
XGETTEXT = @XGETTEXT@
+PYGETTEXT = install/po/pygettext.py
MSGFMT = @MSGFMT@
MSGINIT = @MSGINIT@
MSGMERGE = @MSGMERGE@
@@ -26,13 +27,16 @@ XGETTEXT_OPTIONS = \
--copyright-holder="$(COPYRIGHT_HOLDER)" \
--package-name="$(PACKAGE_NAME)" \
--msgid-bugs-address="$(PACKAGE_BUGREPORT)"
+PYGETTEXT_OPTIONS = \
+--docstrings
languages = $(shell $(SED) 's/\#.*//' LINGUAS) # The sed command removes comments
po_files = $(patsubst %, %.po, $(languages))
mo_files = $(patsubst %.po, %.mo, $(po_files))
po_count=$(words $(po_files))
-PY_FILES = $(shell cd ../..; git ls-files | grep -v -e "^tests/" -e "^doc/" -e "^install/po/" -e "^ipapython/test/" -e "setup.py" -e "setup-client.py" | grep "\.py$$" | tr '\n' ' '; cd install/po)
+PY_FILES = $(shell cd ../..; git ls-files | grep -v -e "^tests/" -e "^doc/" -e "^install/po/" -e "^ipapython/test/" -e "setup.py" -e "setup-client.py" -e "^ipalib/plugins/" | grep "\.py$$" | tr '\n' ' '; cd install/po)
+PY_PLUGIN_FILES = $(shell cd ../..; git ls-files | grep -e "^ipalib/plugins/" | grep "\.py$$" | tr '\n' ' '; cd install/po)
C_FILES = $(shell cd ../..; git ls-files | grep "\.c$$" | tr '\n' ' '; cd install/po)
H_FILES = $(shell cd ../..; git ls-files | grep "\.h$$" | tr '\n' ' '; cd install/po)
@@ -90,8 +94,13 @@ update-po: update-pot
update-pot:
@rm -f $(DOMAIN).pot.update
@pushd ../.. ; \
+ $(PYGETTEXT) $(PYGETTEXT_OPTIONS) \
+ --output install/po/$(DOMAIN).pot.update \
+ $(PY_PLUGIN_FILES) \
+ && \
$(XGETTEXT) $(XGETTEXT_OPTIONS) \
--output install/po/$(DOMAIN).pot.update \
+ --join-existing \
--language="python" \
$(PYTHON_POTFILES) \
&& \
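
The updated update-pot rule now makes two passes over the tree: pygettext.py first collects the plugin docstrings (which carry no _() markers) into the .pot file, and xgettext then appends, via --join-existing, the marked strings from the remaining Python sources. A minimal Python sketch of the same pipeline; the file lists and pot path are illustrative placeholders for $(PY_PLUGIN_FILES), $(PYTHON_POTFILES) and $(DOMAIN), not values taken from the Makefile:

import subprocess

# Placeholder inputs; the real lists come from the git ls-files pipelines above.
plugin_files = ['ipalib/plugins/user.py']
other_py_files = ['ipalib/cli.py', 'ipalib/plugable.py']
pot = 'install/po/messages.pot.update'

# Pass 1: plugin modules go through pygettext.py so their docstrings are extracted.
subprocess.check_call(
    ['install/po/pygettext.py', '--docstrings', '--output', pot] + plugin_files)

# Pass 2: everything else goes through xgettext, appending to the same .pot file.
subprocess.check_call(
    ['xgettext', '--join-existing', '--language=python',
     '--output', pot] + other_py_files)
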
diff --git a/install/po/pygettext.py b/install/po/pygettext.py
new file mode 100755
index 000000000..1946cf310
--- /dev/null
+++ b/install/po/pygettext.py
@@ -0,0 +1,818 @@
+#! /usr/bin/python
+# -*- coding: iso-8859-1 -*-
+# Originally written by Barry Warsaw <barry@zope.com>
+#
+# Minimally patched to make it even more xgettext compatible
+# by Peter Funk <pf@artcom-gmbh.de>
+#
+# 2002-11-22 Jürgen Hermann <jh@web.de>
+# Added checks that _() only contains string literals, and
+# command line args are resolved to module lists, i.e. you
+# can now pass a filename, a module or package name, or a
+# directory (including globbing chars, important for Win32).
+# Made docstring fit in 80 chars wide displays using pydoc.
+#
+# 2010-06-12 Jan-Hendrik Göllner <jan-hendrik.goellner@gmx.de>
+# Made it plural sensitive, added ngettext as default keyword.
+# Any keyworded function that is being supplied > 2 arguments
+# is treated like ngettext.
+# Also added support for constructs like "_('foo' + 10*'bar')"
+# by evaluating the whole expression.
+# Code like _(foo(arg1, arg2) + "bar") does not work by design
+# as that expression must be evaluated at runtime and this script
+# only extracts static strings known before runtime.
+# However it is possible to do things like
+# "ngettext('World', 'Worlds', numWorlds)"
+# as only the first two arguments are evaluated.
+# Advanced version number from 1.5 to 1.6
+#
+
+# for selftesting
+try:
+ import fintl
+ _ = fintl.gettext
+except ImportError:
+ _ = lambda s: s
+
+__doc__ = _("""pygettext -- Python equivalent of xgettext(1)
+
+Many systems (Solaris, Linux, Gnu) provide extensive tools that ease the
+internationalization of C programs. Most of these tools are independent of
+the programming language and can be used from within Python programs.
+Martin von Loewis' work[1] helps considerably in this regard.
+
+There's one problem though; xgettext is the program that scans source code
+looking for message strings, but it groks only C (or C++). Python
+introduces a few wrinkles, such as dual quoting characters, triple quoted
+strings, and raw strings. xgettext understands none of this.
+
+Enter pygettext, which uses Python's standard tokenize module to scan
+Python source code, generating .pot files identical to what GNU xgettext[2]
+generates for C and C++ code. From there, the standard GNU tools can be
+used.
+
+A word about marking Python strings as candidates for translation. GNU
+xgettext recognizes the following keywords: gettext, dgettext, dcgettext,
+and gettext_noop. But those can be a lot of text to include all over your
+code. C and C++ have a trick: they use the C preprocessor. Most
+internationalized C source includes a #define for gettext() to _() so that
+what has to be written in the source is much less. Thus these are both
+translatable strings:
+
+ gettext("Translatable String")
+ _("Translatable String")
+
+Python of course has no preprocessor so this doesn't work so well. Thus,
+pygettext searches only for _() by default, but see the -k/--keyword flag
+below for how to augment this.
+
+ [1] http://www.python.org/workshops/1997-10/proceedings/loewis.html
+ [2] http://www.gnu.org/software/gettext/gettext.html
+
+NOTE: pygettext attempts to be option and feature compatible with GNU
+xgettext wherever possible. However, some options are still missing or are
+not fully implemented. Also, xgettext's use of command line switches with
+option arguments is broken, and in these cases, pygettext just defines
+additional switches.
+
+Usage: pygettext [options] inputfile ...
+
+Options:
+
+ -a
+ --extract-all
+ Extract all strings.
+
+ -d name
+ --default-domain=name
+ Rename the default output file from messages.pot to name.pot.
+
+ -E
+ --escape
+ Replace non-ASCII characters with octal escape sequences.
+
+ -D
+ --docstrings
+ Extract module, class, method, and function docstrings. These do
+ not need to be wrapped in _() markers, and in fact cannot be for
+ Python to consider them docstrings. (See also the -X option).
+
+ -h
+ --help
+ Print this help message and exit.
+
+ -k word
+ --keyword=word
+ Keywords to look for in addition to the default set, which are:
+ %(DEFAULTKEYWORDS)s
+
+ You can have multiple -k flags on the command line.
+
+ -K
+ --no-default-keywords
+ Disable the default set of keywords (see above). Any keywords
+ explicitly added with the -k/--keyword option are still recognized.
+
+ --no-location
+ Do not write filename/lineno location comments.
+
+ -n
+ --add-location
+ Write filename/lineno location comments indicating where each
+ extracted string is found in the source. These lines appear before
+ each msgid. The style of comments is controlled by the -S/--style
+ option. This is the default.
+
+ -o filename
+ --output=filename
+ Rename the default output file from messages.pot to filename. If
+ filename is `-' then the output is sent to standard out.
+
+ -p dir
+ --output-dir=dir
+ Output files will be placed in directory dir.
+
+ -S stylename
+ --style stylename
+ Specify which style to use for location comments. Two styles are
+ supported:
+
+ Solaris # File: filename, line: line-number
+ GNU #: filename:line
+
+ The style name is case insensitive. GNU style is the default.
+
+ -v
+ --verbose
+ Print the names of the files being processed.
+
+ -V
+ --version
+ Print the version of pygettext and exit.
+
+ -w columns
+ --width=columns
+ Set width of output to columns.
+
+ -x filename
+ --exclude-file=filename
+ Specify a file that contains a list of strings that are not to be
+ extracted from the input files. Each string to be excluded must
+ appear on a line by itself in the file.
+
+ -X filename
+ --no-docstrings=filename
+ Specify a file that contains a list of files (one per line) that
+ should not have their docstrings extracted. This is only useful in
+ conjunction with the -D option above.
+
+If `inputfile' is -, standard input is read.
+""")
+
+import os
+import imp
+import sys
+import glob
+import time
+import getopt
+import token
+import tokenize
+
+__version__ = '1.6'
+
+default_keywords = ['_', 'ngettext']
+DEFAULTKEYWORDS = ', '.join(default_keywords)
+
+EMPTYSTRING = ''
+
+
+
+# The normal pot-file header. msgmerge and Emacs's po-mode work better if it's
+# there.
+pot_header = _('''\
+# SOME DESCRIPTIVE TITLE.
+# Copyright (C) YEAR ORGANIZATION
+# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: PACKAGE VERSION\\n"
+"POT-Creation-Date: %(time)s\\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\\n"
+"Last-Translator: FULL NAME <EMAIL@ADDRESS>\\n"
+"Language-Team: LANGUAGE <LL@li.org>\\n"
+"MIME-Version: 1.0\\n"
+"Content-Type: text/plain; charset=CHARSET\\n"
+"Content-Transfer-Encoding: ENCODING\\n"
+"Plural-Forms: nplurals=INTEGER; plural=EXPRESSION;\\n"
+"Generated-By: pygettext.py %(version)s\\n"
+
+''')
+
+
+def usage(code, msg=''):
+ print >> sys.stderr, __doc__ % globals()
+ if msg:
+ print >> sys.stderr, msg
+ sys.exit(code)
+
+
+
+escapes = []
+
+def make_escapes(pass_iso8859):
+ global escapes
+ if pass_iso8859:
+ # Allow iso-8859 characters to pass through so that e.g. 'msgid
+ # "Höhe"' would not result in 'msgid "H\366he"'. Otherwise we
+ # escape any character outside the 32..126 range.
+ mod = 128
+ else:
+ mod = 256
+ for i in range(256):
+ if 32 <= (i % mod) <= 126:
+ escapes.append(chr(i))
+ else:
+ escapes.append("\\%03o" % i)
+ escapes[ord('\\')] = '\\\\'
+ escapes[ord('\t')] = '\\t'
+ escapes[ord('\r')] = '\\r'
+ escapes[ord('\n')] = '\\n'
+ escapes[ord('\"')] = '\\"'
+
+
+def escape(s):
+ global escapes
+ s = list(s)
+ for i in range(len(s)):
+ s[i] = escapes[ord(s[i])]
+ return EMPTYSTRING.join(s)
+
+
+def safe_eval(s):
+ # unwrap quotes, safely
+ return eval(s, {'__builtins__':{}}, {})
+
+
+def normalize(s):
+ # This converts the various Python string types into a format that is
+ # appropriate for .po files, namely much closer to C style.
+ lines = s.split('\n')
+ if len(lines) == 1:
+ s = '"' + escape(s) + '"'
+ else:
+ if not lines[-1]:
+ del lines[-1]
+ lines[-1] = lines[-1] + '\n'
+ for i in range(len(lines)):
+ lines[i] = escape(lines[i])
+ lineterm = '\\n"\n"'
+ s = '""\n"' + lineterm.join(lines) + '"'
+ return s
+
+
+def containsAny(str, set):
+ """Check whether 'str' contains ANY of the chars in 'set'"""
+ return 1 in [c in str for c in set]
+
+
+def _visit_pyfiles(list, dirname, names):
+ """Helper for getFilesForName()."""
+ # get extension for python source files
+ if not globals().has_key('_py_ext'):
+ global _py_ext
+ _py_ext = [triple[0] for triple in imp.get_suffixes()
+ if triple[2] == imp.PY_SOURCE][0]
+
+ # don't recurse into CVS directories
+ if 'CVS' in names:
+ names.remove('CVS')
+
+ # add all *.py files to list
+ list.extend(
+ [os.path.join(dirname, file) for file in names
+ if os.path.splitext(file)[1] == _py_ext]
+ )
+
+
+def _get_modpkg_path(dotted_name, pathlist=None):
+ """Get the filesystem path for a module or a package.
+
+ Return the file system path to a file for a module, and to a directory for
+ a package. Return None if the name is not found, or is a builtin or
+ extension module.
+ """
+ # split off top-most name
+ parts = dotted_name.split('.', 1)
+
+ if len(parts) > 1:
+ # we have a dotted path, import top-level package
+ try:
+ file, pathname, description = imp.find_module(parts[0], pathlist)
+ if file: file.close()
+ except ImportError:
+ return None
+
+ # check if it's indeed a package
+ if description[2] == imp.PKG_DIRECTORY:
+ # recursively handle the remaining name parts
+ pathname = _get_modpkg_path(parts[1], [pathname])
+ else:
+ pathname = None
+ else:
+ # plain name
+ try:
+ file, pathname, description = imp.find_module(
+ dotted_name, pathlist)
+ if file:
+ file.close()
+ if description[2] not in [imp.PY_SOURCE, imp.PKG_DIRECTORY]:
+ pathname = None
+ except ImportError:
+ pathname = None
+
+ return pathname
+
+
+def getFilesForName(name):
+ """Get a list of module files for a filename, a module or package name,
+ or a directory.
+ """
+ if not os.path.exists(name):
+ # check for glob chars
+ if containsAny(name, "*?[]"):
+ files = glob.glob(name)
+ list = []
+ for file in files:
+ list.extend(getFilesForName(file))
+ return list
+
+ # try to find module or package
+ name = _get_modpkg_path(name)
+ if not name:
+ return []
+
+ if os.path.isdir(name):
+ # find all python files in directory
+ list = []
+ os.path.walk(name, _visit_pyfiles, list)
+ return list
+ elif os.path.exists(name):
+ # a single file
+ return [name]
+
+ return []
+
+
+class TokenEater:
+ def __init__(self, options):
+ self.__options = options
+ self.__messages = {}
+ self.__state = self.__waiting
+ self.__args = []
+ self.__lineno = -1
+ self.__freshmodule = 1
+ self.__curfile = None
+
+ def __call__(self, ttype, tstring, stup, etup, line):
+ # dispatch
+## import token
+## print >> sys.stderr, 'ttype:', token.tok_name[ttype], \
+## 'tstring:', tstring
+ self.__state(ttype, tstring, stup[0])
+
+
+ def __waiting(self, ttype, tstring, lineno):
+ opts = self.__options
+ # Do docstring extractions, if enabled
+ if opts.docstrings and not opts.nodocstrings.get(self.__curfile):
+ # module docstring?
+ if self.__freshmodule:
+ if ttype == tokenize.STRING:
+ try:
+ s = safe_eval(tstring)
+ except Exception as e:
+ print >> sys.stderr, _(
+ '*** %(file)s:%(lineno)s: could not evaluate argument "%(arg)s"'
+ ) % {
+ 'arg': tstring,
+ 'file': self.__curfile,
+ 'lineno': self.__lineno
+ }
+ print >> sys.stderr, str(e)
+ else:
+ self.__addentry([s], lineno, isdocstring=1)
+ self.__freshmodule = 0
+ elif ttype not in (tokenize.COMMENT, tokenize.NL):
+ self.__freshmodule = 0
+ return
+ # class docstring?
+ if ttype == tokenize.NAME and tstring in ('class', 'def'):
+ self.__state = self.__suiteseen
+ return
+ if ttype == tokenize.NAME and tstring in opts.keywords:
+ self.__state = self.__keywordseen
+
+ def __suiteseen(self, ttype, tstring, lineno):
+ # ignore anything until we see the colon
+ if ttype == tokenize.OP and tstring == ':':
+ self.__state = self.__suitedocstring
+
+ def __suitedocstring(self, ttype, tstring, lineno):
+ # ignore any intervening noise
+ if ttype == tokenize.STRING:
+ try:
+ s = safe_eval(tstring)
+ except Exception as e:
+ print >> sys.stderr, _(
+ '*** %(file)s:%(lineno)s: could not evaluate argument "%(arg)s"'
+ ) % {
+ 'arg': tstring,
+ 'file': self.__curfile,
+ 'lineno': self.__lineno
+ }
+ print >> sys.stderr, str(e)
+ else:
+ self.__addentry([s], lineno, isdocstring=1)
+ self.__state = self.__waiting
+ elif ttype not in (tokenize.NEWLINE, tokenize.INDENT,
+ tokenize.COMMENT):
+ # there was no class docstring
+ self.__state = self.__waiting
+
+ def __keywordseen(self, ttype, tstring, lineno):
+ if ttype == tokenize.OP and tstring == '(':
+ self.__args = ['']
+ self.__lineno = lineno
+ self.__depth = 0
+ self.__state = self.__scanstring1
+ else:
+ self.__state = self.__waiting
+
+ def __scanstring1(self, ttype, tstring, lineno):
+ # handle first argument, which is supposed to be a string.
+ if ttype == tokenize.OP and tstring == ')':
+ # End of list of arguments for the current function call.
+ # If the argument list is empty (as in keyword()), ignore this call;
+ # otherwise evaluate the fragments we collected as the first
+ # argument and record its line number and update the list of
+ # messages seen. Reset state for the next batch.
+ if self.__args[-1]:
+ try:
+ s = safe_eval(self.__args[-1])
+ except Exception as e:
+ print >> sys.stderr, _(
+ '*** %(file)s:%(lineno)s: could not evaluate argument "%(arg)s"'
+ ) % {
+ 'arg': self.__args[-1],
+ 'file': self.__curfile,
+ 'lineno': self.__lineno
+ }
+ print >> sys.stderr, str(e)
+ self.__state = self.__waiting
+ return
+ if type(s) == str or type(s) == unicode:
+ self.__args[-1] = s
+ self.__addentry(self.__args)
+ else:
+ print >> sys.stderr, _(
+ '*** %(file)s:%(lineno)s: argument is no str or unicode object "%(arg)s"'
+ ) % {
+ 'arg': s,
+ 'file': self.__curfile,
+ 'lineno': self.__lineno
+ }
+ self.__state = self.__waiting
+ elif ttype == tokenize.OP and tstring == ',':
+ # Start of the next argument.
+ try:
+ s = safe_eval(self.__args[-1])
+ except Exception as e:
+ print >> sys.stderr, _(
+ '*** %(file)s:%(lineno)s: could not evaluate argument "%(arg)s"'
+ ) % {
+ 'arg': self.__args[-1],
+ 'file': self.__curfile,
+ 'lineno': self.__lineno
+ }
+ print >> sys.stderr, str(e)
+ self.__state = self.__waiting
+ return
+ if type(s) == str or type(s) == unicode:
+ self.__args[-1] = s
+ self.__args.append('') # next argument.
+ self.__state = self.__scanstring2
+ else:
+ print >> sys.stderr, _(
+ '*** %(file)s:%(lineno)s: argument 1 is no str or unicode object "%(arg)s"'
+ ) % {
+ 'arg': s,
+ 'file': self.__curfile,
+ 'lineno': self.__lineno
+ }
+ self.__state = self.__waiting
+ else:
+ # add string to current argument for later evaluation.
+ # no state change in this case.
+ self.__args[-1] += tstring
+
+ def __scanstring2(self, ttype, tstring, lineno):
+ # handle second argument, which is supposed to be a string.
+ if ttype == tokenize.OP and tstring == ')':
+ # End of list of arguments for the current function call.
+ # This is an error: we expect either one or three arguments,
+ # never two.
+ print >> sys.stderr, _(
+ '*** %(file)s:%(lineno)s: unexpected number of arguments (2)"'
+ ) % {
+ 'file': self.__curfile,
+ 'lineno': self.__lineno
+ }
+ self.__state = self.__waiting
+ elif ttype == tokenize.OP and tstring == ',':
+ # Start of the next argument. We do not need to parse it; we only
+ # make sure it is there and then assume this is a plural call.
+ try:
+ s = safe_eval(self.__args[-1])
+ except Exception as e:
+ print >> sys.stderr, _(
+ '*** %(file)s:%(lineno)s: could not evaluate argument "%(arg)s"'
+ ) % {
+ 'arg': self.__args[-1],
+ 'file': self.__curfile,
+ 'lineno': self.__lineno
+ }
+ print >> sys.stderr, str(e)
+ self.__state = self.__waiting
+ return
+ s = safe_eval(self.__args[-1])
+ if type(s) == str or type(s) == unicode:
+ self.__args[-1] = s
+ self.__addentry(self.__args)
+ self.__state = self.__waiting
+ else:
+ print >> sys.stderr, _(
+ '*** %(file)s:%(lineno)s: argument 2 is no str or unicode object "%(arg)s"'
+ ) % {
+ 'arg': s,
+ 'file': self.__curfile,
+ 'lineno': self.__lineno
+ }
+ self.__state = self.__waiting
+ else:
+ # add string to current argument for later evaluation.
+ # no state change in this case.
+ self.__args[-1] += tstring
+
+ def __addentry(self, args, lineno=None, isdocstring=0):
+ isplural = 0
+ if len(args) > 1:
+ isplural = 1
+ if lineno is None:
+ lineno = self.__lineno
+ exclude = 0
+ if args[0] in self.__options.toexclude:
+ exclude = 1
+ if isplural:
+ if args[1] not in self.__options.toexclude:
+ # in case of plural, both strings must be in the toexclude list
+ # to exclude this entry.
+ exclude = 0
+ if not exclude:
+ entry = (self.__curfile, lineno)
+ # entries look like this:
+ # {('arg1','arg2') : {(filename,lineno) : <isdocstring>},
+ # ('arg1',) : {(filename,lineno) : <isdocstring>}}
+ # a key with len > 1 indicates plurals
+ self.__messages.setdefault(tuple(args[0:2]), {})[entry] = isdocstring
+
+ def set_filename(self, filename):
+ self.__curfile = filename
+ self.__freshmodule = 1
+
+ def write(self, fp):
+ options = self.__options
+ timestamp = time.strftime('%Y-%m-%d %H:%M+%Z')
+ # The time stamp in the header doesn't have the same format as that
+ # generated by xgettext...
+ print >> fp, pot_header % {'time': timestamp, 'version': __version__}
+ # Sort the entries. First sort each particular entry's keys, then
+ # sort all the entries by their first item.
+ reverse = {}
+ for k, v in self.__messages.items():
+ keys = v.keys()
+ keys.sort()
+ reverse.setdefault(tuple(keys), []).append((k, v))
+ rkeys = reverse.keys()
+ rkeys.sort()
+ for rkey in rkeys:
+ rentries = reverse[rkey]
+ rentries.sort()
+ for k, v in rentries:
+ # If the entry was gleaned out of a docstring, then add a
+ # comment stating so. This is to aid translators who may wish
+ # to skip translating some unimportant docstrings.
+ isdocstring = sum(v.values())
+ # k is the message string, v is a dictionary-set of (filename,
+ # lineno) tuples. We want to sort the entries in v first by
+ # file name and then by line number.
+ v = v.keys()
+ v.sort()
+ if not options.writelocations:
+ pass
+ # location comments are different b/w Solaris and GNU:
+ elif options.locationstyle == options.SOLARIS:
+ for filename, lineno in v:
+ d = {'filename': filename, 'lineno': lineno}
+ print >>fp, _(
+ '# File: %(filename)s, line: %(lineno)d') % d
+ elif options.locationstyle == options.GNU:
+ # fit as many locations on one line, as long as the
+ # resulting line length doesn't exceed 'options.width'
+ locline = '#:'
+ for filename, lineno in v:
+ d = {'filename': filename, 'lineno': lineno}
+ s = _(' %(filename)s:%(lineno)d') % d
+ if len(locline) + len(s) <= options.width:
+ locline = locline + s
+ else:
+ print >> fp, locline
+ locline = "#:" + s
+ if len(locline) > 2:
+ print >> fp, locline
+ if isdocstring:
+ print >> fp, '#, docstring'
+ print >> fp, 'msgid', normalize(k[0])
+ if len(k) > 1:
+ print >> fp, 'msgid_plural', normalize(k[1])
+ print >> fp, 'msgstr[0] ""'
+ print >> fp, 'msgstr[1] ""\n'
+ else:
+ print >> fp, 'msgstr ""\n'
+
+
+def main():
+ global default_keywords
+ try:
+ opts, args = getopt.getopt(
+ sys.argv[1:],
+ 'ad:DEhk:Kno:p:S:Vvw:x:X:',
+ ['extract-all', 'default-domain=', 'escape', 'help',
+ 'keyword=', 'no-default-keywords',
+ 'add-location', 'no-location', 'output=', 'output-dir=',
+ 'style=', 'verbose', 'version', 'width=', 'exclude-file=',
+ 'docstrings', 'no-docstrings',
+ ])
+ except getopt.error, msg:
+ usage(1, msg)
+
+ # for holding option values
+ class Options:
+ # constants
+ GNU = 1
+ SOLARIS = 2
+ # defaults
+ extractall = 0 # FIXME: currently this option has no effect at all.
+ escape = 0
+ keywords = []
+ outpath = ''
+ outfile = 'messages.pot'
+ writelocations = 1
+ locationstyle = GNU
+ verbose = 0
+ width = 78
+ excludefilename = ''
+ docstrings = 0
+ nodocstrings = {}
+
+ options = Options()
+ locations = {'gnu' : options.GNU,
+ 'solaris' : options.SOLARIS,
+ }
+
+ # parse options
+ for opt, arg in opts:
+ if opt in ('-h', '--help'):
+ usage(0)
+ elif opt in ('-a', '--extract-all'):
+ options.extractall = 1
+ elif opt in ('-d', '--default-domain'):
+ options.outfile = arg + '.pot'
+ elif opt in ('-E', '--escape'):
+ options.escape = 1
+ elif opt in ('-D', '--docstrings'):
+ options.docstrings = 1
+ elif opt in ('-k', '--keyword'):
+ options.keywords.append(arg)
+ elif opt in ('-K', '--no-default-keywords'):
+ default_keywords = []
+ elif opt in ('-n', '--add-location'):
+ options.writelocations = 1
+ elif opt in ('--no-location',):
+ options.writelocations = 0
+ elif opt in ('-S', '--style'):
+ options.locationstyle = locations.get(arg.lower())
+ if options.locationstyle is None:
+ usage(1, _('Invalid value for --style: %s') % arg)
+ elif opt in ('-o', '--output'):
+ options.outfile = arg
+ elif opt in ('-p', '--output-dir'):
+ options.outpath = arg
+ elif opt in ('-v', '--verbose'):
+ options.verbose = 1
+ elif opt in ('-V', '--version'):
+ print _('pygettext.py (xgettext for Python) %s') % __version__
+ sys.exit(0)
+ elif opt in ('-w', '--width'):
+ try:
+ options.width = int(arg)
+ except ValueError:
+ usage(1, _('--width argument must be an integer: %s') % arg)
+ elif opt in ('-x', '--exclude-file'):
+ options.excludefilename = arg
+ elif opt in ('-X', '--no-docstrings'):
+ fp = open(arg)
+ try:
+ while 1:
+ line = fp.readline()
+ if not line:
+ break
+ options.nodocstrings[line[:-1]] = 1
+ finally:
+ fp.close()
+
+ # calculate escapes
+ make_escapes(options.escape)
+
+ # calculate all keywords
+ options.keywords.extend(default_keywords)
+
+ # initialize list of strings to exclude
+ if options.excludefilename:
+ try:
+ fp = open(options.excludefilename)
+ options.toexclude = fp.readlines()
+ fp.close()
+ except IOError:
+ print >> sys.stderr, _(
+ "Can't read --exclude-file: %s") % options.excludefilename
+ sys.exit(1)
+ else:
+ options.toexclude = []
+
+ # resolve args to module lists
+ expanded = []
+ for arg in args:
+ if arg == '-':
+ expanded.append(arg)
+ else:
+ expanded.extend(getFilesForName(arg))
+ args = expanded
+
+ # slurp through all the files
+ eater = TokenEater(options)
+ for filename in args:
+ if filename == '-':
+ if options.verbose:
+ print _('Reading standard input')
+ fp = sys.stdin
+ closep = 0
+ else:
+ if options.verbose:
+ print _('Working on %s') % filename
+ fp = open(filename)
+ closep = 1
+ try:
+ eater.set_filename(filename)
+ try:
+ tokenize.tokenize(fp.readline, eater)
+ except tokenize.TokenError, e:
+ print >> sys.stderr, '%s: %s, line %d, column %d' % (
+ e[0], filename, e[1][0], e[1][1])
+ finally:
+ if closep:
+ fp.close()
+
+ # write the output
+ if options.outfile == '-':
+ fp = sys.stdout
+ closep = 0
+ else:
+ if options.outpath:
+ options.outfile = os.path.join(options.outpath, options.outfile)
+ fp = open(options.outfile, 'w')
+ closep = 1
+ try:
+ eater.write(fp)
+ finally:
+ if closep:
+ fp.close()
+
+
+if __name__ == '__main__':
+ main()
+ # some more test strings
+ _(u'a unicode string')
+ # this one creates a warning
+ _('*** Seen unexpected token "%(token)s"') % {'token': 'test'}
+ _('more' 'than' 'one' 'string')
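
TokenEater above is a small state machine driven by the standard tokenize module. A much-reduced sketch of the same idea, limited to the module docstring and literal _() arguments (no classes, plurals, keyword lists or error reporting), with a made-up SOURCE string:

import tokenize
try:
    from StringIO import StringIO      # Python 2, the vintage of this script
except ImportError:
    from io import StringIO            # Python 3

SOURCE = '"""A module docstring."""\ngreeting = _("hello world")\n'

def scan(source):
    # Report (kind, text) pairs for the module docstring and _() literals.
    found = []
    fresh_module = True
    after_keyword = False
    for ttype, tstring, _start, _end, _line in tokenize.generate_tokens(
            StringIO(source).readline):
        if fresh_module and ttype == tokenize.STRING:
            found.append(('docstring', eval(tstring, {'__builtins__': {}}, {})))
            fresh_module = False
        elif ttype == tokenize.NAME and tstring == '_':
            after_keyword = True
        elif after_keyword and ttype == tokenize.STRING:
            found.append(('msgid', eval(tstring, {'__builtins__': {}}, {})))
            after_keyword = False
    return found

print(scan(SOURCE))
# expected: [('docstring', 'A module docstring.'), ('msgid', 'hello world')]
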
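As a rough picture of what the new extraction pass should produce for the plugin tree, here is a made-up module (names and strings are illustrative, not taken from IPA) with comments describing the entries a run such as ./pygettext.py --docstrings -o sample.pot sample_plugin.py would be expected to emit:

# sample_plugin.py -- illustrative only, not part of the patch
"""Manage example entries."""
# ^ module docstring: extracted because of --docstrings and flagged "#, docstring"

_ = lambda s: s                                # local stand-ins so this file runs on
ngettext = lambda s, p, n: s if n == 1 else p  # its own; pygettext only scans tokens
                                               # and never imports the module

class example_add(object):
    """Add a new example entry."""
    # ^ class docstring: also extracted when --docstrings is given
    msg_summary = _('Added example entry')     # ordinary msgid
    msg_count = ngettext('%(count)d entry',
                         '%(count)d entries', 0)
    # ngettext is a default keyword; with more than two arguments the first two
    # become msgid / msgid_plural and the .pot entry gets empty msgstr[0] and
    # msgstr[1] lines.

The docstring entries need no _() markers in the source, which appears to be the point of routing ipalib/plugins/ through pygettext.py rather than xgettext in the Makefile change above.
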
diff --git a/ipalib/cli.py b/ipalib/cli.py
index fa1dba8de..60afb7d02 100644
--- a/ipalib/cli.py
+++ b/ipalib/cli.py
@@ -706,7 +706,7 @@ class help(frontend.Local):
self.print_commands(name)
elif name in self.Command:
cmd = self.Command[name]
- print 'Purpose: %s' % cmd.doc
+ print 'Purpose: %s' % unicode(cmd.doc).strip()
self.Backend.cli.build_parser(cmd).print_help()
elif mod_name in sys.modules:
self.print_commands(name)
diff --git a/ipalib/plugable.py b/ipalib/plugable.py
index da02d87f4..92e6392ae 100644
--- a/ipalib/plugable.py
+++ b/ipalib/plugable.py
@@ -182,9 +182,8 @@ class Plugin(ReadOnly):
self.bases = tuple(
'%s.%s' % (b.__module__, b.__name__) for b in cls.__bases__
)
- doc = inspect.getdoc(cls)
- self.doc = _(doc)
- if doc is None:
+ self.doc = _(cls.__doc__)
+ if self.doc is None:
self.summary = '<%s>' % self.fullname
else:
self.summary = unicode(self.doc).split('\n\n', 1)[0]
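
The switch from inspect.getdoc(cls) to the raw cls.__doc__ keeps the runtime lookup key identical to what pygettext.py extracts: the extractor evaluates the docstring literal exactly as written, while getdoc() dedents and strips it, so the cleaned-up text would no longer match the msgid recorded in the catalog. A small sketch of the difference, with a made-up class:

import inspect

class host_show(object):             # illustrative plugin class
    """
    Display information about a host.
    """

raw = host_show.__doc__              # what pygettext.py records as the msgid
clean = inspect.getdoc(host_show)    # 'Display information about a host.'
print(raw == clean)                  # False -- only the raw form matches the catalog key
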
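The internal.py hunks below fold messages that were assembled with '+' across several source lines into single string literals, so the argument of _() is one constant and the msgid the extractor records is trivially the same text the UI uses at runtime. When a long message needs wrapping, implicit adjacent-literal concatenation keeps that property without the operator; a hedged sketch with a stand-in marker function:

_ = lambda s: s    # stand-in for the real translation marker

# Adjacent literals are joined by the compiler, so _() still receives a single
# constant and extractors see one msgid.
wrapped = _("To confirm your intention to revoke this certificate, "
            "select a reason from the pull-down list, and click the "
            '"Revoke" button.')

# Runtime '+' concatenation builds the same text, but the argument is now an
# expression; whether it is extracted in one piece depends on the tool
# scanning the source.
joined = _("To confirm your intention to revoke this certificate, " +
           "select a reason from the pull-down list, and click the " +
           '"Revoke" button.')

print(wrapped == joined)   # True at runtime; the difference only matters to extraction
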
diff --git a/ipalib/plugins/internal.py b/ipalib/plugins/internal.py
index f25e429e9..ae8bf3b94 100644
--- a/ipalib/plugins/internal.py
+++ b/ipalib/plugins/internal.py
@@ -114,14 +114,11 @@ class i18n_messages(Command):
"privilege_withdrawn":_("Privilege Withdrawn"),
"aa_compromise":_("AA Compromise"),
"revoke_confirmation":_(
- "To confirm your intention to revoke this certificate, "+
- "select a reason from the pull-down list, and click "+
- "the \"Revoke\" button."),
+ "To confirm your intention to revoke this certificate, select a reason from the pull-down list, and click the \"Revoke\" button."),
"note":_("Note"),
"reason":_("Reason for Revocation"),
"restore_confirmation":_(
- "To confirm your intention to restore this certificate, "+
- "click the \"Restore\" button."),
+ "To confirm your intention to restore this certificate, click the \"Restore\" button."),
"issued_to":_("Issued To"),
"common_name":_("Common Name"),
"organization":_("Organization"),
@@ -351,8 +348,7 @@ class i18n_messages(Command):
"unselect_all":_("Unselect All"),
"delete_confirm":_("Are you sure you want to delete selected entries?"),
"truncated":_(
- "Query returned results than configured size limit will show. " +
- "First ${counter} results shown."),
+ "Query returned more results than the configured size limit will show. First ${counter} results shown."),
},
"details":{
"general":_("General"),