-rw-r--r--  AUTHORS                       16
-rw-r--r--  MANIFEST.in                    1
-rw-r--r--  client/test_func.py            2
-rw-r--r--  config/settings                6
-rwxr-xr-x  server/codes.py                2
-rwxr-xr-x  server/config_data.py          6
-rwxr-xr-x  server/logger.py              17
-rwxr-xr-x  server/module_loader.py       14
-rwxr-xr-x  server/server.py               9
-rwxr-xr-x  server/utils.py                9
-rwxr-xr-x  server/yaml/__init__.py       17
-rwxr-xr-x  server/yaml/dump.py          296
-rwxr-xr-x  server/yaml/implicit.py       46
-rwxr-xr-x  server/yaml/inline.py         38
-rwxr-xr-x  server/yaml/klass.py          48
-rwxr-xr-x  server/yaml/load.py          327
-rwxr-xr-x  server/yaml/ordered_dict.py   31
-rwxr-xr-x  server/yaml/redump.py         16
-rwxr-xr-x  server/yaml/stream.py        193
-rwxr-xr-x  server/yaml/timestamp.py     145
-rwxr-xr-x  server/yaml/ypath.py         462
-rw-r--r--  settings                       8
22 files changed, 59 insertions, 1650 deletions
diff --git a/AUTHORS b/AUTHORS
new file mode 100644
index 0000000..afa7071
--- /dev/null
+++ b/AUTHORS
@@ -0,0 +1,16 @@
+
+func is written by (alphabetically) ...
+
+ Michael DeHaan <mdehaan@redhat.com>
+ Adrian Likins <alikins@redhat.com>
+ Scott Seago <sseago@redhat.com>
+ Seth Vidal <skvidal@redhat.com>
+ ...
+ (committers: please add yourself)
+
+Additional patches and contributions by ...
+
+ ...
+ [ send in patches to get your name here ]
+
+
diff --git a/MANIFEST.in b/MANIFEST.in
index d5ed9be..0c3c56d 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -4,3 +4,4 @@ recursive-include docs *
recursive-include init-scripts *
recursive-include po *.po
recursive-include po *.pot
+include AUTHORS
diff --git a/client/test_func.py b/client/test_func.py
index 35ce100..21c2009 100644
--- a/client/test_func.py
+++ b/client/test_func.py
@@ -1,6 +1,8 @@
#!/usr/bin/python
+# FIXME: should import the client lib, not XMLRPC lib, when we are done
+
import xmlrpclib
s = xmlrpclib.ServerProxy("http://127.0.0.1:51234")
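The test client above talks straight to the XML-RPC server on port 51234 (the FIXME notes it should go through the client library once that exists). A minimal sketch of exercising that proxy by hand, assuming a server is running locally and has registered the standard XML-RPC introspection functions (an assumption, not shown in this diff):

    #!/usr/bin/python
    # Hypothetical poke at the func XML-RPC server, mirroring client/test_func.py.
    # Assumes a server is listening on 127.0.0.1:51234 and exposes
    # system.listMethods via the stock introspection functions.
    import xmlrpclib

    s = xmlrpclib.ServerProxy("http://127.0.0.1:51234")
    print s.system.listMethods()
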
diff --git a/config/settings b/config/settings
deleted file mode 100644
index e15fe9f..0000000
--- a/config/settings
+++ /dev/null
@@ -1,6 +0,0 @@
-# configuration for master servers
-
-[general]
-is_master = 0
-is_minion = 1
-master_server = funcmaster
diff --git a/server/codes.py b/server/codes.py
index 637a0b2..69ea95b 100755
--- a/server/codes.py
+++ b/server/codes.py
@@ -1,6 +1,6 @@
#!/usr/bin/python
"""
-Virt-factory backend code.
+func
Copyright 2007, Red Hat, Inc
See AUTHORS
diff --git a/server/config_data.py b/server/config_data.py
index bda7635..46d98ec 100755
--- a/server/config_data.py
+++ b/server/config_data.py
@@ -1,11 +1,9 @@
#!/usr/bin/python
-# Virt-factory backend code.
+# func
#
# Copyright 2006, Red Hat, Inc
-# Michael DeHaan <mdehaan@redhat.com>
-# Scott Seago <sseago@redhat.com>
-# Adrian Likins <alikins@redhat.com>
+# see AUTHORS
#
# This software may be freely redistributed under the terms of the GNU
# general public license.
diff --git a/server/logger.py b/server/logger.py
index 0b9d791..fa56a3a 100755
--- a/server/logger.py
+++ b/server/logger.py
@@ -1,10 +1,9 @@
#!/usr/bin/python
-## Virt-factory backend code.
+## func
##
-## Copyright 2006, Red Hat, Inc
-## Michael DeHaan <mdehaan@redhat.com
-## Adrian Likins <alikins@redhat.com
+## Copyright 2007, Red Hat, Inc
+## See AUTHORS
##
## This software may be freely redistributed under the terms of the GNU
## general public license.
@@ -29,13 +28,15 @@ class Singleton(object):
# logging is weird, we don't want to setup multiple handlers
# so make sure we do that mess only once
+
class Logger(Singleton):
__no_handlers = True
- def __init__(self, logfilepath ="/var/log/virt-factory/svclog"):
+
+ def __init__(self, logfilepath ="/var/log/func/func.log"):
self.config = config_data.Config().get()
- if self.config.has_key("loglevel"):
- self.loglevel = logging._levelNames[self.config["loglevel"]]
+ if self.config.has_key("log_level"):
+ self.loglevel = logging._levelNames[self.config["log_level"]]
else:
self.loglevel = logging.INFO
self.__setup_logging()
@@ -45,7 +46,7 @@ class Logger(Singleton):
def __setup_logging(self):
self.logger = logging.getLogger("svc")
- def __setup_handlers(self, logfilepath="/var/log/virt-factory/svclog"):
+ def __setup_handlers(self, logfilepath="/var/log/func/func.log"):
handler = logging.FileHandler(logfilepath, "a")
self.logger.setLevel(self.loglevel)
formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
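The hunk above renames the config key from "loglevel" to "log_level" and moves the log file to /var/log/func/func.log; the configured name is resolved to a numeric level through logging's internal name table. A small sketch of that lookup with the same INFO fallback (Python 2 only, since logging._levelNames is private and gone in Python 3); the config dict here is a stand-in for whatever config_data.Config().get() returns:

    # Sketch of the new log_level lookup, defaulting to INFO like the else branch.
    import logging

    config = {"log_level": "DEBUG"}   # stand-in for config_data.Config().get()
    if config.has_key("log_level"):
        loglevel = logging._levelNames[config["log_level"]]
    else:
        loglevel = logging.INFO
    print loglevel                    # DEBUG == 10
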
diff --git a/server/module_loader.py b/server/module_loader.py
index 10631fe..eaf0725 100755
--- a/server/module_loader.py
+++ b/server/module_loader.py
@@ -1,5 +1,19 @@
#!/usr/bin/python
+## func
+##
+## Copyright 2007, Red Hat, Inc
+## See AUTHORS
+##
+## This software may be freely redistributed under the terms of the GNU
+## general public license.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, write to the Free Software
+## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+##
+##
+
import distutils.sysconfig
import os
diff --git a/server/server.py b/server/server.py
index d297b06..5e78cae 100755
--- a/server/server.py
+++ b/server/server.py
@@ -1,11 +1,10 @@
#!/usr/bin/python
+
"""
-Virt-factory backend code.
+func
-Copyright 2006, Red Hat, Inc
-Michael DeHaan <mdehaan@redhat.com>
-Scott Seago <sseago@redhat.com>
-Adrian Likins <alikins@redhat.com>
+Copyright 2007, Red Hat, Inc
+see AUTHORS
This software may be freely redistributed under the terms of the GNU
general public license.
diff --git a/server/utils.py b/server/utils.py
index 552db54..724c847 100755
--- a/server/utils.py
+++ b/server/utils.py
@@ -1,11 +1,8 @@
#!/usr/bin/python
-"""
-Virt-factory backend code.
-Copyright 2006, Red Hat, Inc
-Michael DeHaan <mdehaan@redhat.com>
-Scott Seago <sseago@redhat.com>
-Adrian Likins <alikins@redhat.com>
+"""
+Copyright 2007, Red Hat, Inc
+see AUTHORS
This software may be freely redistributed under the terms of the GNU
general public license.
diff --git a/server/yaml/__init__.py b/server/yaml/__init__.py
deleted file mode 100755
index 419d1f3..0000000
--- a/server/yaml/__init__.py
+++ /dev/null
@@ -1,17 +0,0 @@
-__version__ = "0.32"
-from load import loadFile, load, Parser, l
-from dump import dump, dumpToFile, Dumper, d
-from stream import YamlLoaderException, StringStream, FileStream
-from timestamp import timestamp
-import sys
-if sys.hexversion >= 0x02020000:
- from redump import loadOrdered
-
-try:
- from ypath import ypath
-except NameError:
- def ypath(expr,target='',cntx=''):
- raise NotImplementedError("ypath requires Python 2.2")
-
-if sys.hexversion < 0x02010000:
- raise 'YAML is not tested for pre-2.1 versions of Python'
diff --git a/server/yaml/dump.py b/server/yaml/dump.py
deleted file mode 100755
index c55dbfe..0000000
--- a/server/yaml/dump.py
+++ /dev/null
@@ -1,296 +0,0 @@
-import types
-import string
-from types import StringType, UnicodeType, IntType, FloatType
-from types import DictType, ListType, TupleType, InstanceType
-from klass import hasMethod, isDictionary
-import re
-
-"""
- The methods from this module that are exported to the top
- level yaml package should remain stable. If you call
- directly into other methods of this module, be aware that
- they may change or go away in future implementations.
- Contact the authors if there are methods in this file
- that you wish to remain stable.
-"""
-
-def dump(*data):
- return Dumper().dump(*data)
-
-def d(data): return dump(data)
-
-def dumpToFile(file, *data):
- return Dumper().dumpToFile(file, *data)
-
-class Dumper:
- def __init__(self):
- self.currIndent = "\n"
- self.indent = " "
- self.keysrt = None
- self.alphaSort = 1 # legacy -- on by default
-
- def setIndent(self, indent):
- self.indent = indent
- return self
-
- def setSort(self, sort_hint):
- self.keysrt = sortMethod(sort_hint)
- return self
-
- def dump(self, *data):
- self.result = []
- self.output = self.outputToString
- self.dumpDocuments(data)
- return string.join(self.result,"")
-
- def outputToString(self, data):
- self.result.append(data)
-
- def dumpToFile(self, file, *data):
- self.file = file
- self.output = self.outputToFile
- self.dumpDocuments(data)
-
- def outputToFile(self, data):
- self.file.write(data)
-
- def dumpDocuments(self, data):
- for obj in data:
- self.anchors = YamlAnchors(obj)
- self.output("---")
- self.dumpData(obj)
- self.output("\n")
-
- def indentDump(self, data):
- oldIndent = self.currIndent
- self.currIndent += self.indent
- self.dumpData(data)
- self.currIndent = oldIndent
-
- def dumpData(self, data):
- anchor = self.anchors.shouldAnchor(data)
- if anchor:
- self.output(" &%d" % anchor )
- else:
- anchor = self.anchors.isAlias(data)
- if anchor:
- self.output(" *%d" % anchor )
- return
- if (data is None):
- self.output(' ~')
- elif hasMethod(data, 'to_yaml'):
- self.dumpTransformedObject(data)
- elif hasMethod(data, 'to_yaml_implicit'):
- self.output(" " + data.to_yaml_implicit())
- elif type(data) is InstanceType:
- self.dumpRawObject(data)
- elif isDictionary(data):
- self.dumpDict(data)
- elif type(data) in [ListType, TupleType]:
- self.dumpList(data)
- else:
- self.dumpScalar(data)
-
- def dumpTransformedObject(self, data):
- obj_yaml = data.to_yaml()
- if type(obj_yaml) is not TupleType:
- self.raiseToYamlSyntaxError()
- (data, typestring) = obj_yaml
- if typestring:
- self.output(" " + typestring)
- self.dumpData(data)
-
- def dumpRawObject(self, data):
- self.output(' !!%s.%s' % (data.__module__, data.__class__.__name__))
- self.dumpData(data.__dict__)
-
- def dumpDict(self, data):
- keys = data.keys()
- if len(keys) == 0:
- self.output(" {}")
- return
- if self.keysrt:
- keys = sort_keys(keys,self.keysrt)
- else:
- if self.alphaSort:
- keys.sort()
- for key in keys:
- self.output(self.currIndent)
- self.dumpKey(key)
- self.output(":")
- self.indentDump(data[key])
-
- def dumpKey(self, key):
- if type(key) is TupleType:
- self.output("?")
- self.indentDump(key)
- self.output("\n")
- else:
- self.output(quote(key))
-
- def dumpList(self, data):
- if len(data) == 0:
- self.output(" []")
- return
- for item in data:
- self.output(self.currIndent)
- self.output("-")
- self.indentDump(item)
-
- def dumpScalar(self, data):
- if isUnicode(data):
- self.output(' "%s"' % repr(data)[2:-1])
- elif isMulti(data):
- self.dumpMultiLineScalar(data.splitlines())
- else:
- self.output(" ")
- self.output(quote(data))
-
- def dumpMultiLineScalar(self, lines):
- self.output(" |")
- if lines[-1] == "":
- self.output("+")
- for line in lines:
- self.output(self.currIndent)
- self.output(line)
-
- def raiseToYamlSyntaxError(self):
- raise """
-to_yaml should return tuple w/object to dump
-and optional YAML type. Example:
-({'foo': 'bar'}, '!!foobar')
-"""
-
-#### ANCHOR-RELATED METHODS
-
-def accumulate(obj,occur):
- typ = type(obj)
- if obj is None or \
- typ is IntType or \
- typ is FloatType or \
- ((typ is StringType or typ is UnicodeType) \
- and len(obj) < 32): return
- obid = id(obj)
- if 0 == occur.get(obid,0):
- occur[obid] = 1
- if typ is ListType:
- for x in obj:
- accumulate(x,occur)
- if typ is DictType:
- for (x,y) in obj.items():
- accumulate(x,occur)
- accumulate(y,occur)
- else:
- occur[obid] = occur[obid] + 1
-
-class YamlAnchors:
- def __init__(self,data):
- occur = {}
- accumulate(data,occur)
- anchorVisits = {}
- for (obid, occur) in occur.items():
- if occur > 1:
- anchorVisits[obid] = 0
- self._anchorVisits = anchorVisits
- self._currentAliasIndex = 0
- def shouldAnchor(self,obj):
- ret = self._anchorVisits.get(id(obj),None)
- if 0 == ret:
- self._currentAliasIndex = self._currentAliasIndex + 1
- ret = self._currentAliasIndex
- self._anchorVisits[id(obj)] = ret
- return ret
- return 0
- def isAlias(self,obj):
- return self._anchorVisits.get(id(obj),0)
-
-### SORTING METHODS
-
-def sort_keys(keys,fn):
- tmp = []
- for key in keys:
- val = fn(key)
- if val is None: val = '~'
- tmp.append((val,key))
- tmp.sort()
- return [ y for (x,y) in tmp ]
-
-def sortMethod(sort_hint):
- typ = type(sort_hint)
- if DictType == typ:
- return sort_hint.get
- elif ListType == typ or TupleType == typ:
- indexes = {}; idx = 0
- for item in sort_hint:
- indexes[item] = idx
- idx += 1
- return indexes.get
- else:
- return sort_hint
-
-### STRING QUOTING AND SCALAR HANDLING
-def isStr(data):
- # XXX 2.1 madness
- if type(data) == type(''):
- return 1
- if type(data) == type(u''):
- return 1
- return 0
-
-def doubleUpQuotes(data):
- return data.replace("'", "''")
-
-def quote(data):
- if not isStr(data):
- return str(data)
- single = "'"
- double = '"'
- quote = ''
- if len(data) == 0:
- return "''"
- if hasSpecialChar(data) or data[0] == single:
- data = `data`[1:-1]
- data = string.replace(data, r"\x08", r"\b")
- quote = double
- elif needsSingleQuote(data):
- quote = single
- data = doubleUpQuotes(data)
- return "%s%s%s" % (quote, data, quote)
-
-def needsSingleQuote(data):
- if re.match(r"^-?\d", data):
- return 1
- if re.match(r"\*\S", data):
- return 1
- if data[0] in ['&', ' ']:
- return 1
- if data[0] == '"':
- return 1
- if data[-1] == ' ':
- return 1
- return (re.search(r'[:]', data) or re.search(r'(\d\.){2}', data))
-
-def hasSpecialChar(data):
- # need test to drive out '#' from this
- return re.search(r'[\t\b\r\f#]', data)
-
-def isMulti(data):
- if not isStr(data):
- return 0
- if hasSpecialChar(data):
- return 0
- return re.search("\n", data)
-
-def isUnicode(data):
- return type(data) == unicode
-
-def sloppyIsUnicode(data):
- # XXX - hack to make tests pass for 2.1
- return repr(data)[:2] == "u'" and repr(data) != data
-
-import sys
-if sys.hexversion < 0x20200000:
- isUnicode = sloppyIsUnicode
-
-
-
diff --git a/server/yaml/implicit.py b/server/yaml/implicit.py
deleted file mode 100755
index 6172564..0000000
--- a/server/yaml/implicit.py
+++ /dev/null
@@ -1,46 +0,0 @@
-import re
-import string
-from timestamp import timestamp, matchTime
-
-DATETIME_REGEX = re.compile("^[0-9]{4}-[0-9]{2}-[0-9]{2}$")
-FLOAT_REGEX = re.compile("^[-+]?[0-9][0-9,]*\.[0-9]*$")
-SCIENTIFIC_REGEX = re.compile("^[-+]?[0-9]+(\.[0-9]*)?[eE][-+][0-9]+$")
-OCTAL_REGEX = re.compile("^[-+]?([0][0-7,]*)$")
-HEX_REGEX = re.compile("^[-+]?0x[0-9a-fA-F,]+$")
-INT_REGEX = re.compile("^[-+]?(0|[1-9][0-9,]*)$")
-
-def convertImplicit(val):
- if val == '~':
- return None
- if val == '+':
- return 1
- if val == '-':
- return 0
- if val[0] == "'" and val[-1] == "'":
- val = val[1:-1]
- return string.replace(val, "''", "\'")
- if val[0] == '"' and val[-1] == '"':
- if re.search(r"\u", val):
- val = "u" + val
- unescapedStr = eval (val)
- return unescapedStr
- if matchTime.match(val):
- return timestamp(val)
- if INT_REGEX.match(val):
- return int(cleanseNumber(val))
- if OCTAL_REGEX.match(val):
- return int(val, 8)
- if HEX_REGEX.match(val):
- return int(val, 16)
- if FLOAT_REGEX.match(val):
- return float(cleanseNumber(val))
- if SCIENTIFIC_REGEX.match(val):
- return float(cleanseNumber(val))
- return val
-
-def cleanseNumber(str):
- if str[0] == '+':
- str = str[1:]
- str = string.replace(str,',','')
- return str
-
diff --git a/server/yaml/inline.py b/server/yaml/inline.py
deleted file mode 100755
index 8e647de..0000000
--- a/server/yaml/inline.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import re
-import string
-
-class InlineTokenizer:
- def __init__(self, data):
- self.data = data
-
- def punctuation(self):
- puncts = [ '[', ']', '{', '}' ]
- for punct in puncts:
- if self.data[0] == punct:
- self.data = self.data[1:]
- return punct
-
- def up_to_comma(self):
- match = re.match('(.*?)\s*, (.*)', self.data)
- if match:
- self.data = match.groups()[1]
- return match.groups()[0]
-
- def up_to_end_brace(self):
- match = re.match('(.*?)(\s*[\]}].*)', self.data)
- if match:
- self.data = match.groups()[1]
- return match.groups()[0]
-
- def next(self):
- self.data = string.strip(self.data)
- productions = [
- self.punctuation,
- self.up_to_comma,
- self.up_to_end_brace
- ]
- for production in productions:
- token = production()
- if token:
- return token
-
diff --git a/server/yaml/klass.py b/server/yaml/klass.py
deleted file mode 100755
index edcf5a8..0000000
--- a/server/yaml/klass.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import new
-import re
-
-class DefaultResolver:
- def resolveType(self, data, typestring):
- match = re.match('!!(.*?)\.(.*)', typestring)
- if not match:
- raise "Invalid private type specifier"
- (modname, classname) = match.groups()
- return makeClass(modname, classname, data)
-
-def makeClass(module, classname, dict):
- exec('import %s' % (module))
- klass = eval('%s.%s' % (module, classname))
- obj = new.instance(klass)
- if hasMethod(obj, 'from_yaml'):
- return obj.from_yaml(dict)
- obj.__dict__ = dict
- return obj
-
-def hasMethod(object, method_name):
- try:
- klass = object.__class__
- except:
- return 0
- if not hasattr(klass, method_name):
- return 0
- method = getattr(klass, method_name)
- if not callable(method):
- return 0
- return 1
-
-def isDictionary(data):
- return isinstance(data, dict)
-
-try:
- isDictionary({})
-except:
- def isDictionary(data): return type(data) == type({}) # XXX python 2.1
-
-if __name__ == '__main__':
- print isDictionary({'foo': 'bar'})
- try:
- print isDictionary(dict())
- from ordered_dict import OrderedDict
- print isDictionary(OrderedDict())
- except:
- pass
diff --git a/server/yaml/load.py b/server/yaml/load.py
deleted file mode 100755
index 259178d..0000000
--- a/server/yaml/load.py
+++ /dev/null
@@ -1,327 +0,0 @@
-import re, string
-from implicit import convertImplicit
-from inline import InlineTokenizer
-from klass import DefaultResolver
-from stream import YamlLoaderException, FileStream, StringStream, NestedDocs
-
-try:
- iter(list()) # is iter supported by this version of Python?
-except:
- # XXX - Python 2.1 does not support iterators
- class StopIteration: pass
- class iter:
- def __init__(self,parser):
- self._docs = []
- try:
- while 1:
- self._docs.append(parser.next())
- except StopIteration: pass
- self._idx = 0
- def __len__(self): return len(self._docs)
- def __getitem__(self,idx): return self._docs[idx]
- def next(self):
- if self._idx < len(self._docs):
- ret = self._docs[self._idx]
- self._idx = self._idx + 1
- return ret
- raise StopIteration
-
-def loadFile(filename, typeResolver=None):
- return loadStream(FileStream(filename),typeResolver)
-
-def load(str, typeResolver=None):
- return loadStream(StringStream(str), typeResolver)
-
-def l(str): return load(str).next()
-
-def loadStream(stream, typeResolver):
- return iter(Parser(stream, typeResolver))
-
-def tryProductions(productions, value):
- for production in productions:
- results = production(value)
- if results:
- (ok, result) = results
- if ok:
- return (1, result)
-
-def dumpDictionary(): return {}
-
-class Parser:
- def __init__(self, stream, typeResolver=None):
- try:
- self.dictionary = dict
- except:
- self.dictionary = dumpDictionary
- self.nestedDocs = NestedDocs(stream)
- self.aliases = {}
- if typeResolver:
- self.typeResolver = typeResolver
- else:
- self.typeResolver = DefaultResolver()
-
- def error(self, msg):
- self.nestedDocs.error(msg, self.line)
-
- def nestPop(self):
- line = self.nestedDocs.pop()
- if line is not None:
- self.line = line
- return 1
-
- def value(self, indicator):
- return getToken(indicator+"\s*(.*)", self.line)
-
- def getNextDocument(self): raise "getNextDocument() deprecated--use next()"
-
- def next(self):
- line = self.nestedDocs.popDocSep()
- indicator = getIndicator(line)
- if indicator:
- return self.parse_value(indicator)
- if line:
- self.nestedDocs.nestToNextLine()
- return self.parseLines()
- raise StopIteration
-
- def __iter__(self): return self
-
- def parseLines(self):
- peekLine = self.nestedDocs.peek()
- if peekLine:
- if re.match("\s*-", peekLine):
- return self.parse_collection([], self.parse_seq_line)
- else:
- return self.parse_collection(self.dictionary(), self.parse_map_line)
- raise StopIteration
-
- def parse_collection(self, items, lineParser):
- while self.nestPop():
- if self.line:
- lineParser(items)
- return items
-
- def parse_seq_line(self, items):
- value = self.value("-")
- if value is not None:
- items.append(self.parse_seq_value(value))
- else:
- self.error("missing '-' for seq")
-
- def parse_map_line(self, items):
- if (self.line == '?'):
- self.parse_map_line_nested(items)
- else:
- self.parse_map_line_simple(items, self.line)
-
- def parse_map_line_nested(self, items):
- self.nestedDocs.nestToNextLine()
- key = self.parseLines()
- if self.nestPop():
- value = self.value(':')
- if value is not None:
- items[tuple(key)] = self.parse_value(value)
- return
- self.error("key has no value for nested map")
-
- def parse_map_line_simple(self, items, line):
- map_item = self.key_value(line)
- if map_item:
- (key, value) = map_item
- key = convertImplicit(key)
- if items.has_key(key):
- self.error("Duplicate key "+key)
- items[key] = self.parse_value(value)
- else:
- self.error("bad key for map")
-
- def is_map(self, value):
- # XXX - need real tokenizer
- if len(value) == 0:
- return 0
- if value[0] == "'":
- return 0
- if re.search(':(\s|$)', value):
- return 1
-
- def parse_seq_value(self, value):
- if self.is_map(value):
- return self.parse_compressed_map(value)
- else:
- return self.parse_value(value)
-
- def parse_compressed_map(self, value):
- items = self.dictionary()
- line = self.line
- token = getToken("(\s*-\s*)", line)
- self.nestedDocs.nestBySpecificAmount(len(token))
- self.parse_map_line_simple(items, value)
- return self.parse_collection(items, self.parse_map_line)
-
- def parse_value(self, value):
- (alias, value) = self.testForRepeatOfAlias(value)
- if alias:
- return value
- (alias, value) = self.testForAlias(value)
- value = self.parse_unaliased_value(value)
- if alias:
- self.aliases[alias] = value
- return value
-
- def parse_unaliased_value(self, value):
- match = re.match(r"(!\S*)(.*)", value)
- if match:
- (url, value) = match.groups()
- value = self.parse_untyped_value(value)
- if url[:2] == '!!':
- return self.typeResolver.resolveType(value, url)
- else:
- # XXX - allows syntax, but ignores it
- return value
- return self.parse_untyped_value(value)
-
- def parseInlineArray(self, value):
- if re.match("\s*\[", value):
- return self.parseInline([], value, ']',
- self.parseInlineArrayItem)
-
- def parseInlineHash(self, value):
- if re.match("\s*{", value):
- return self.parseInline(self.dictionary(), value, '}',
- self.parseInlineHashItem)
-
- def parseInlineArrayItem(self, result, token):
- return result.append(convertImplicit(token))
-
- def parseInlineHashItem(self, result, token):
- (key, value) = self.key_value(token)
- result[key] = value
-
- def parseInline(self, result, value, end_marker, itemMethod):
- tokenizer = InlineTokenizer(value)
- tokenizer.next()
- while 1:
- token = tokenizer.next()
- if token == end_marker:
- break
- itemMethod(result, token)
- return (1, result)
-
- def parseSpecial(self, value):
- productions = [
- self.parseMultiLineScalar,
- self.parseInlineHash,
- self.parseInlineArray,
- ]
- return tryProductions(productions, value)
-
- def parse_untyped_value(self, value):
- parse = self.parseSpecial(value)
- if parse:
- (ok, data) = parse
- return data
- token = getToken("(\S.*)", value)
- if token:
- lines = [token] + \
- pruneTrailingEmpties(self.nestedDocs.popNestedLines())
- return convertImplicit(joinLines(lines))
- else:
- self.nestedDocs.nestToNextLine()
- return self.parseLines()
-
- def parseNative(self, value):
- return (1, convertImplicit(value))
-
- def parseMultiLineScalar(self, value):
- if value == '>':
- return (1, self.parseFolded())
- elif value == '|':
- return (1, joinLiteral(self.parseBlock()))
- elif value == '|+':
- return (1, joinLiteral(self.unprunedBlock()))
-
- def parseFolded(self):
- data = self.parseBlock()
- i = 0
- resultString = ''
- while i < len(data)-1:
- resultString = resultString + data[i]
- resultString = resultString + foldChar(data[i], data[i+1])
- i = i + 1
- return resultString + data[-1] + "\n"
-
- def unprunedBlock(self):
- self.nestedDocs.nestToNextLine()
- data = []
- while self.nestPop():
- data.append(self.line)
- return data
-
- def parseBlock(self):
- return pruneTrailingEmpties(self.unprunedBlock())
-
- def testForAlias(self, value):
- match = re.match("&(\S*)\s*(.*)", value)
- if match:
- return match.groups()
- return (None, value)
-
- def testForRepeatOfAlias(self, value):
- match = re.match("\*(\S+)", value)
- if match:
- alias = match.groups()[0]
- if self.aliases.has_key(alias):
- return (alias, self.aliases[alias])
- else:
- self.error("Unknown alias")
- return (None, value)
-
- def key_value(self, str):
- if str[-1] == ' ':
- self.error("Trailing spaces not allowed without quotes.")
- # XXX This allows mis-balanced " vs. ' stuff
- match = re.match("[\"'](.+)[\"']\s*:\s*(.*)", str)
- if match:
- (key, value) = match.groups()
- return (key, value)
- match = re.match("(.+?)\s*:\s*(.*)", str)
- if match:
- (key, value) = match.groups()
- if len(value) and value[0] == '#':
- value = ''
- return (key, value)
-
-def getToken(regex, value):
- match = re.search(regex, value)
- if match:
- return match.groups()[0]
-
-def pruneTrailingEmpties(data):
- while len(data) > 0 and data[-1] == '':
- data = data[:-1]
- return data
-
-def foldChar(line1, line2):
- if re.match("^\S", line1) and re.match("^\S", line2):
- return " "
- return "\n"
-
-def getIndicator(line):
- if line:
- header = r"(#YAML:\d+\.\d+\s*){0,1}"
- match = re.match("--- "+header+"(\S*.*)", line)
- if match:
- return match.groups()[-1]
-
-def joinLines(lines):
- result = ''
- for line in lines[:-1]:
- if line[-1] == '\\':
- result = result + line[:-1]
- else:
- result = result + line + " "
- return result + lines[-1]
-
-def joinLiteral(data):
- return string.join(data,"\n") + "\n"
-
diff --git a/server/yaml/ordered_dict.py b/server/yaml/ordered_dict.py
deleted file mode 100755
index b3788b7..0000000
--- a/server/yaml/ordered_dict.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# This is extremely crude implementation of an OrderedDict.
-# If you know of a better implementation, please send it to
-# the author Steve Howell. You can find my email via
-# the YAML mailing list or wiki.
-
-class OrderedDict(dict):
- def __init__(self):
- self._keys = []
-
- def __setitem__(self, key, val):
- self._keys.append(key)
- dict.__setitem__(self, key, val)
-
- def keys(self):
- return self._keys
-
- def items(self):
- return [(key, self[key]) for key in self._keys]
-
-if __name__ == '__main__':
- data = OrderedDict()
- data['z'] = 26
- data['m'] = 13
- data['a'] = 1
- for key in data.keys():
- print "The value for %s is %s" % (key, data[key])
- print data
-
-
-
-
diff --git a/server/yaml/redump.py b/server/yaml/redump.py
deleted file mode 100755
index 56ea958..0000000
--- a/server/yaml/redump.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from ordered_dict import OrderedDict
-from load import Parser
-from dump import Dumper
-from stream import StringStream
-
-def loadOrdered(stream):
- parser = Parser(StringStream(stream))
- parser.dictionary = OrderedDict
- return iter(parser)
-
-def redump(stream):
- docs = list(loadOrdered(stream))
- dumper = Dumper()
- dumper.alphaSort = 0
- return dumper.dump(*docs)
-
diff --git a/server/yaml/stream.py b/server/yaml/stream.py
deleted file mode 100755
index cc78c4b..0000000
--- a/server/yaml/stream.py
+++ /dev/null
@@ -1,193 +0,0 @@
-import re
-import string
-
-def indentLevel(line):
- n = 0
- while n < len(line) and line[n] == ' ':
- n = n + 1
- return n
-
-class LineNumberStream:
- def __init__(self, filename=None):
- self.curLine = 0
- self.filename = filename
-
- def get(self):
- line = self.getLine()
- self.curLine += 1 # used by subclass
- if line:
- line = noLineFeed(line)
- return line
-
- def lastLineRead(self):
- return self.curLine
-
-class FileStream(LineNumberStream):
- def __init__(self, filename):
- self.fp = open(filename)
- LineNumberStream.__init__(self, filename)
-
- def getLine(self):
- line = self.fp.readline()
- if line == '': line = None
- return line
-
-class StringStream(LineNumberStream):
- def __init__(self, text):
- self.lines = split(text)
- self.numLines = len(self.lines)
- LineNumberStream.__init__(self)
-
- def getLine(self):
- if self.curLine < self.numLines:
- return self.lines[self.curLine]
-
-def split(text):
- lines = string.split(text, '\n')
- if lines[-1] == '':
- lines.pop()
- return lines
-
-def eatNewLines(stream):
- while 1:
- line = stream.get()
- if line is None or len(string.strip(line)):
- return line
-
-COMMENT_LINE_REGEX = re.compile(R"\s*#")
-def isComment(line):
- return line is not None and COMMENT_LINE_REGEX.match(line)
-
-class CommentEater:
- def __init__(self, stream):
- self.stream = stream
- self.peeked = 1
- self.line = eatNewLines(stream)
- self.eatComments()
-
- def eatComments(self):
- while isComment(self.line):
- self.line = self.stream.get()
-
- def peek(self):
- if self.peeked:
- return self.line
- self.peeked = 1
- self.line = self.stream.get()
- self.eatComments()
- return self.line
-
- def lastLineRead(self):
- return self.stream.lastLineRead()
-
- def pop(self):
- data = self.peek()
- self.peeked = 0
- return data
-
-class NestedText:
- def __init__(self, stream):
- self.commentEater = CommentEater(stream)
- self.reset()
-
- def lastLineRead(self):
- return self.commentEater.lastLineRead()
-
- def reset(self):
- self.indentLevel = 0
- self.oldIndents = [0]
-
- def peek(self):
- nextLine = self.commentEater.peek()
- if nextLine is not None:
- if indentLevel(nextLine) >= self.indentLevel:
- return nextLine[self.indentLevel:]
- elif nextLine == '':
- return ''
-
- def pop(self):
- line = self.peek()
- if line is None:
- self.indentLevel = self.oldIndents.pop()
- return
- self.commentEater.pop()
- return line
-
- def popNestedLines(self):
- nextLine = self.peek()
- if nextLine is None or nextLine == '' or nextLine[0] != ' ':
- return []
- self.nestToNextLine()
- lines = []
- while 1:
- line = self.pop()
- if line is None:
- break
- lines.append(line)
- return lines
-
- def nestToNextLine(self):
- line = self.commentEater.peek()
- indentation = indentLevel(line)
- if len(self.oldIndents) > 1 and indentation <= self.indentLevel:
- self.error("Inadequate indentation", line)
- self.setNewIndent(indentation)
-
- def nestBySpecificAmount(self, adjust):
- self.setNewIndent(self.indentLevel + adjust)
-
- def setNewIndent(self, indentLevel):
- self.oldIndents.append(self.indentLevel)
- self.indentLevel = indentLevel
-
-class YamlLoaderException(Exception):
- def __init__(self, *args):
- (self.msg, self.lineNum, self.line, self.filename) = args
-
- def __str__(self):
- msg = """\
-%(msg)s:
-near line %(lineNum)d:
-%(line)s
-""" % self.__dict__
- if self.filename:
- msg += "file: " + self.filename
- return msg
-
-class NestedDocs(NestedText):
- def __init__(self, stream):
- self.filename = stream.filename
- NestedText.__init__(self,stream)
- line = NestedText.peek(self)
- self.sep = '---'
- if self.startsWithSep(line):
- self.eatenDocSep = NestedText.pop(self)
- else:
- self.eatenDocSep = self.sep
-
- def startsWithSep(self,line):
- if line and self.sep == line[:3]: return 1
- return 0
-
- def popDocSep(self):
- line = self.eatenDocSep
- self.eatenDocSep = None
- self.reset()
- return line
-
- def pop(self):
- if self.eatenDocSep is not None:
- raise "error"
- line = self.commentEater.peek()
- if line and self.startsWithSep(line):
- self.eatenDocSep = NestedText.pop(self)
- return None
- return NestedText.pop(self)
-
- def error(self, msg, line):
- raise YamlLoaderException(msg, self.lastLineRead(), line, self.filename)
-
-def noLineFeed(s):
- while s[-1:] in ('\n', '\r'):
- s = s[:-1]
- return s
diff --git a/server/yaml/timestamp.py b/server/yaml/timestamp.py
deleted file mode 100755
index abcb2e6..0000000
--- a/server/yaml/timestamp.py
+++ /dev/null
@@ -1,145 +0,0 @@
-import time, re, string
-from types import ListType, TupleType
-
-PRIVATE_NOTICE = """
- This module is considered to be private implementation
- details and is subject to change. Please only use the
- objects and methods exported to the top level yaml package.
-"""
-
-#
-# Time specific operations
-#
-
-_splitTime = re.compile('\-|\s|T|t|:|\.|Z')
-matchTime = re.compile(\
- '\d+-\d+-\d+([\s|T|t]\d+:\d+:\d+.\d+(Z|(\s?[\-|\+]\d+:\d+)))?')
-
-def _parseTime(val):
- if not matchTime.match(val): raise ValueError(val)
- tpl = _splitTime.split(val)
- if not(tpl): raise ValueError(val)
- siz = len(tpl)
- sec = 0
- if 3 == siz:
- tpl += [0,0,0,0,0,-1]
- elif 7 == siz:
- tpl.append(0)
- tpl.append(-1)
- elif 8 == siz:
- if len(tpl.pop()) > 0: raise ValueError(val)
- tpl.append(0)
- tpl.append(-1)
- elif 9 == siz or 10 == siz:
- mn = int(tpl.pop())
- hr = int(tpl.pop())
- sec = (hr*60+mn)*60
- if val.find("+") > -1: sec = -sec
- if 10 == siz: tpl.pop()
- tpl.append(0)
- tpl.append(-1)
- else:
- raise ValueError(val)
- idx = 0
- while idx < 9:
- tpl[idx] = int(tpl[idx])
- idx += 1
- if tpl[1] < 1 or tpl[1] > 12: raise ValueError(val)
- if tpl[2] < 1 or tpl[2] > 31: raise ValueError(val)
- if tpl[3] > 24: raise ValueError(val)
- if tpl[4] > 61: raise ValueError(val)
- if tpl[5] > 61: raise ValueError(val)
- if tpl[0] > 2038:
- #TODO: Truncation warning
- tpl = (2038,1,18,0,0,0,0,0,-1)
- tpl = tuple(tpl)
- ret = time.mktime(tpl)
- ret = time.localtime(ret+sec)
- ret = ret[:8] + (0,)
- return ret
-
-
-class _timestamp:
- def __init__(self,val=None):
- if not val:
- self.__tval = time.gmtime()
- else:
- typ = type(val)
- if ListType == typ:
- self.__tval = tuple(val)
- elif TupleType == typ:
- self.__tval = val
- else:
- self.__tval = _parseTime(val)
- if 9 != len(self.__tval): raise ValueError
- def __getitem__(self,idx): return self.__tval[idx]
- def __len__(self): return 9
- def strftime(self,format): return time.strftime(format,self.__tval)
- def mktime(self): return time.mktime(self.__tval)
- def asctime(self): return time.asctime(self.__tval)
- def isotime(self):
- return "%04d-%02d-%02dT%02d:%02d:%02d.00Z" % self.__tval[:6]
- def __repr__(self): return "yaml.timestamp('%s')" % self.isotime()
- def __str__(self): return self.isotime()
- def to_yaml_implicit(self): return self.isotime()
- def __hash__(self): return hash(self.__tval[:6])
- def __cmp__(self,other):
- try:
- return cmp(self.__tval[:6],other.__tval[:6])
- except AttributeError:
- return -1
-
-try: # inherit from mx.DateTime functionality if available
- from mx import DateTime
- class timestamp(_timestamp):
- def __init__(self,val=None):
- _timestamp.__init__(self,val)
- self.__mxdt = DateTime.mktime(self.__tval)
- def __getattr__(self, name):
- return getattr(self.__mxdt, name)
-except:
- class timestamp(_timestamp): pass
-
-
-
-def unquote(expr):
- """
- summary: >
- Simply returns the unquoted string, and the
- length of the quoted string token at the
- beginning of the expression.
- """
- tok = expr[0]
- if "'" == tok:
- idx = 1
- odd = 0
- ret = ""
- while idx < len(expr):
- chr = expr[idx]
- if "'" == chr:
- if odd: ret += chr
- odd = not odd
- else:
- if odd:
- tok = expr[:idx]
- break
- ret += chr
- idx += 1
- if "'" == tok: tok = expr
- return (ret,len(tok))
- if '"' == tok:
- idx = 1
- esc = 0
- while idx < len(expr):
- chr = expr[idx]
- if '"' == chr and not esc:
- tok = expr[:idx] + '"'
- break
- if '\\' == chr and not esc: esc = 1
- else: esc = 0
- idx += 1
- if '"' == tok:
- raise SyntaxError("unmatched quote: " + expr)
- ret = eval(tok) #TODO: find better way to unquote
- return (ret,len(tok))
- return (expr,len(expr))
diff --git a/server/yaml/ypath.py b/server/yaml/ypath.py
deleted file mode 100755
index 51d9d2f..0000000
--- a/server/yaml/ypath.py
+++ /dev/null
@@ -1,462 +0,0 @@
-from types import ListType, StringType, IntType, DictType, InstanceType
-import re
-from urllib import quote
-from timestamp import unquote
-
-noTarget = object()
-
-def escape(node):
- """
- summary: >
- This function escapes a given key so that it
- may appear within a ypath. URI style escaping
- is used so that ypath expressions can be a
- valid URI expression.
- """
- typ = type(node)
- if typ is IntType: return str(node)
- if typ is StringType:
- return quote(node,'')
- raise ValueError("TODO: Support more than just string and integer keys.")
-
-class context:
- """
- summary: >
- A ypath visit context through a YAML rooted graph.
- This is implemented as a 3-tuple including the parent
- node, the current key/index and the value. This is
- an immutable object so it can be cached.
- properties:
- key: mapping key or index within the parent collection
- value: current value within the parent's range
- parent: the parent context
- root: the very top of the yaml graph
- path: a tuple of the domain keys
- notes: >
- The context class doesn't yet handle going down the
- domain side of the tree...
- """
- def __init__(self,parent,key,value):
- """
- args:
- parent: parent context (or None if this is the root)
- key: mapping key or index for this context
- value: value of current location...
- """
- self.parent = parent
- self.key = key
- self.value = value
- if parent:
- assert parent.__class__ is self.__class__
- self.path = parent.path + (escape(key),)
- self.root = parent.root
- else:
- assert not key
- self.path = tuple()
- self.root = self
- def __setattr__(self,attname,attval):
- if attname in ('parent','key','value'):
- if self.__dict__.get(attname):
- raise ValueError("context is read-only")
- self.__dict__[attname] = attval
- def __hash__(self): return hash(self.path)
- def __cmp__(self,other):
- try:
- return cmp(self.path,other.path)
- except AttributeError:
- return -1
- def __str__(self):
- if self.path:
- return "/".join(('',)+self.path)
- else:
- return '/'
-
-def to_context(target):
- if type(target) is InstanceType:
- if target.__class__ is context:
- return target
- return context(None,None,target)
-
-def context_test():
- lst = ['value']
- map = {'key':lst}
- x = context(None,None,map)
- y = context(x,'key',lst)
- z = context(y,0,'value')
- assert ('key',) == y.path
- assert 'key' == y.key
- assert lst == y.value
- assert x == y.parent
- assert x == y.root
- assert 0 == z.key
- assert 'value' == z.value
- assert y == z.parent
- assert x == z.root
- assert hash(x)
- assert hash(y)
- assert hash(z)
- assert '/' == str(x)
- assert '/key' == str(y)
- assert '/key/0' == str(z)
-
-class null_seg:
- """
- summary: >
- This is the simplest path segment, it
- doesn't return any results and doesn't
- depend upon its context. It also happens to
- be the base class which all segments derive.
- """
- def __iter__(self):
- return self
- def next_null(self):
- raise StopIteration
- def bind(self,cntx):
- """
- summary: >
- The bind function is called whenever
- the parent context has changed.
- """
- assert(cntx.__class__ is context)
- self.cntx = cntx
- def apply(self,target):
- self.bind(to_context(target))
- return iter(self)
- def exists(self,cntx):
- try:
- self.bind(cntx)
- self.next()
- return 1
- except StopIteration:
- return 0
- next = next_null
-
-class self_seg(null_seg):
- """
- summary: >
- This path segment returns the context
- node exactly once.
- """
- def __str__(self): return '.'
- def next_self(self):
- self.next = self.next_null
- return self.cntx
- def bind(self,cntx):
- null_seg.bind(self,cntx)
- self.next = self.next_self
-
-class root_seg(self_seg):
- def __str__(self): return '/'
- def bind(self,cntx):
- self_seg.bind(self,cntx.root)
-
-class parent_seg(self_seg):
- def __str__(self): return '..'
- def bind(self,cntx):
- if cntx.parent: cntx = cntx.parent
- self_seg.bind(self,cntx)
-
-class wild_seg(null_seg):
- """
- summary: >
- The wild segment simply loops through
- all of the sub-contexts for a given object.
- If there aren't any children, this isn't an
- error it just doesn't return anything.
- """
- def __str__(self): return '*'
- def next_wild(self):
- key = self.keys.next()
- return context(self.cntx,key,self.values[key])
- def bind(self,cntx):
- null_seg.bind(self,cntx)
- typ = type(cntx.value)
- if typ is ListType:
- self.keys = iter(xrange(0,len(cntx.value)))
- self.values = cntx.value
- self.next = self.next_wild
- return
- if typ is DictType:
- self.keys = iter(cntx.value)
- self.values = cntx.value
- self.next = self.next_wild
- return
- self.next = self.next_null
-
-class trav_seg(null_seg):
- """
- summary: >
- This is a recursive traversal of the range, preorder.
- It is a recursive combination of self and wild.
- """
- def __str__(self): return '/'
- def next(self):
- while 1:
- (cntx,seg) = self.stk[-1]
- if not seg:
- seg = wild_seg()
- seg.bind(cntx)
- self.stk[-1] = (cntx,seg)
- return cntx
- try:
- cntx = seg.next()
- self.stk.append((cntx,None))
- except StopIteration:
- self.stk.pop()
- if not(self.stk):
- self.next = self.next_null
- raise StopIteration
-
- def bind(self,cntx):
- null_seg.bind(self,cntx)
- self.stk = [(cntx,None)]
-
-class match_seg(self_seg):
- """
- summary: >
- Matches a particular key within the
- current context. Kinda boring.
- """
- def __str__(self): return str(self.key)
- def __init__(self,key):
- #TODO: Do better implicit typing
- try:
- key = int(key)
- except: pass
- self.key = key
- def bind(self,cntx):
- try:
- mtch = cntx.value[self.key]
- cntx = context(cntx,self.key,mtch)
- self_seg.bind(self,cntx)
- except:
- null_seg.bind(self,cntx)
-
-class conn_seg(null_seg):
- """
- summary: >
- When two segments are connected via a slash,
- this is a composite. For each context of the
- parent, it binds the child, and returns each
- context of the child.
- """
- def __str__(self):
- if self.parent.__class__ == root_seg:
- return "/%s" % self.child
- return "%s/%s" % (self.parent, self.child)
- def __init__(self,parent,child):
- self.parent = parent
- self.child = child
- def next(self):
- while 1:
- try:
- return self.child.next()
- except StopIteration:
- cntx = self.parent.next()
- self.child.bind(cntx)
-
- def bind(self,cntx):
- null_seg.bind(self,cntx)
- self.parent.bind(cntx)
- try:
- cntx = self.parent.next()
- except StopIteration:
- return
- self.child.bind(cntx)
-
-
-class pred_seg(null_seg):
- def __str__(self): return "%s[%s]" % (self.parent, self.filter)
- def __init__(self,parent,filter):
- self.parent = parent
- self.filter = filter
- def next(self):
- while 1:
- ret = self.parent.next()
- if self.filter.exists(ret):
- return ret
- def bind(self,cntx):
- null_seg.bind(self,cntx)
- self.parent.bind(cntx)
-
-class or_seg(null_seg):
- def __str__(self): return "%s|%s" % (self.lhs,self.rhs)
- def __init__(self,lhs,rhs):
- self.rhs = rhs
- self.lhs = lhs
- self.unq = {}
- def next(self):
- seg = self.lhs
- try:
- nxt = seg.next()
- self.unq[nxt] = nxt
- return nxt
- except StopIteration: pass
- seg = self.rhs
- while 1:
- nxt = seg.next()
- if self.unq.get(nxt,None):
- continue
- return nxt
- def bind(self,cntx):
- null_seg.bind(self,cntx)
- self.lhs.bind(cntx)
- self.rhs.bind(cntx)
-
-class scalar:
- def __init__(self,val):
- self.val = val
- def __str__(self):
- return str(self.val)
- def value(self):
- return self.val
-
-class equal_pred:
- def exists_true(self,cntx): return 1
- def exists_false(self,cntx): return 0
- def exists_scalar(self,cntx):
- self.rhs.bind(cntx)
- try:
- while 1:
- cntx = self.rhs.next()
- if str(cntx.value) == self.lhs: #TODO: Remove type hack
- return 1
- except StopIteration: pass
- return 0
- def exists_segment(self,cntx):
- raise NotImplementedError()
- def __init__(self,lhs,rhs):
- if lhs.__class__ == scalar:
- if rhs.__class__ == scalar:
- if rhs.value() == lhs.value():
- self.exists = self.exists_true
- else:
- self.exists = self.exists_false
- else:
- self.exists = self.exists_scalar
- else:
- if rhs.__class__ == scalar:
- (lhs,rhs) = (rhs,lhs)
- self.exists = self.exists_scalar
- else:
- self.exists = self.exists_segment
- self.lhs = str(lhs.value()) #TODO: Remove type hack
- self.rhs = rhs
-
-matchSegment = re.compile(r"""^(\w+|/|\.|\*|\"|\')""")
-
-def parse_segment(expr):
- """
- Segments occur between the slashes...
- """
- mtch = matchSegment.search(expr)
- if not(mtch): return (None,expr)
- tok = mtch.group(); siz = len(tok)
- if '/' == tok: return (trav_seg(),expr)
- elif '.' == tok:
- if len(expr) > 1 and '.' == expr[1]:
- seg = parent_seg()
- siz = 2
- else:
- seg = self_seg()
- elif '*' == tok: seg = wild_seg()
- elif '"' == tok or "'" == tok:
- (cur,siz) = unquote(expr)
- seg = match_seg(cur)
- else:
- seg = match_seg(tok)
- return (seg,expr[siz:])
-
-matchTerm = re.compile(r"""^(\w+|/|\.|\(|\"|\')""")
-
-def parse_term(expr):
- mtch = matchTerm.search(expr)
- if not(mtch): return (None,expr)
- tok = mtch.group(); siz = len(tok)
- if '/' == tok or '.' == tok:
- return parse(expr)
- if '(' == tok:
- (term,expr) = parse_predicate(expr)
- assert ')' == expr[0]
- return (term,expr[1:])
- elif '"' == tok or "'" == tok:
- (val,siz) = unquote(expr)
- else:
- val = tok; siz = len(tok)
- return (scalar(val),expr[siz:])
-
-def parse_predicate(expr):
- (term,expr) = parse_term(expr)
- if not term: raise SyntaxError("term expected: '%s'" % expr)
- tok = expr[0]
- if '=' == tok:
- (rhs,expr) = parse_term(expr[1:])
- return (equal_pred(term,rhs),expr)
- if '(' == tok:
- raise "No functions allowed... yet!"
- if ']' == tok or ')' == tok:
- if term.__class__ is scalar:
- term = match_seg(str(term))
- return (term,expr)
- raise SyntaxError("ypath: expecting operator '%s'" % expr)
-
-def parse_start(expr):
- """
- Initial checking on the expression, and
- determine if it is relative or absolute.
- """
- if type(expr) != StringType or len(expr) < 1:
- raise TypeError("string required: " + repr(expr))
- if '/' == expr[0]:
- ypth = root_seg()
- else:
- ypth = self_seg()
- expr = '/' + expr
- return (ypth,expr)
-
-def parse(expr):
- """
- This the parser entry point, the top level node
- is always a root or self segment. The self isn't
- strictly necessary, but it keeps things simple.
- """
- (ypth,expr) = parse_start(expr)
- while expr:
- tok = expr[0]
- if '/' == tok:
- (child, expr) = parse_segment(expr[1:])
- if child: ypth = conn_seg(ypth,child)
- continue
- if '[' == tok:
- (filter, expr) = parse_predicate(expr[1:])
- assert ']' == expr[0]
- expr = expr[1:]
- ypth = pred_seg(ypth,filter)
- continue
- if '|' == tok:
- (rhs, expr) = parse(expr[1:])
- ypth = or_seg(ypth,rhs)
- continue
- if '(' == tok:
- (child,expr) = parse(expr[1:])
- assert ')' == expr[0]
- expr = expr[1:]
- ypth = conn_seg(ypth,child)
- continue
- break
- return (ypth,expr)
-
-class convert_to_value(null_seg):
- def __init__(self,itr):
- self.itr = itr
- def next(self):
- return self.itr.next().value
- def bind(self,cntx):
- self.itr.bind(cntx)
-
-def ypath(expr,target=noTarget,cntx=0):
- (ret,expr) = parse(expr)
- if expr: raise SyntaxError("ypath parse error `%s`" % expr)
- if not cntx: ret = convert_to_value(ret)
- if target is noTarget: return ret
- return ret.apply(target)
diff --git a/settings b/settings
index 9fe73d9..8344dee 100644
--- a/settings
+++ b/settings
@@ -1 +1,7 @@
-this file is empty at the moment, but at some point it will be the config file
+# configuration for master servers
+
+[general]
+is_master = 0
+is_minion = 1
+master_server = funcmaster
+log_level = DEBUG
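
The top-level settings file now carries the [general] section that used to live in config/settings, plus the new log_level key consumed by server/logger.py. A hypothetical sketch of reading it with the stock ConfigParser module (func's real parsing lives in server/config_data.py, which is not shown in this diff):

    # Hypothetical reader for the settings file shown above.
    import ConfigParser

    cp = ConfigParser.ConfigParser()
    cp.read("settings")
    print cp.getboolean("general", "is_minion")   # True
    print cp.get("general", "master_server")      # funcmaster
    print cp.get("general", "log_level")          # DEBUG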